aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/networking/bonding.txt96
-rw-r--r--Documentation/powerpc/booting-without-of.txt21
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/hci_bcsp.c44
-rw-r--r--drivers/net/3c515.c4
-rw-r--r--drivers/net/3c523.c37
-rw-r--r--drivers/net/3c527.c45
-rw-r--r--drivers/net/8139cp.c43
-rw-r--r--drivers/net/8139too.c53
-rw-r--r--drivers/net/8390.h1
-rw-r--r--drivers/net/Kconfig33
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/a2065.c4
-rw-r--r--drivers/net/acenic.c21
-rw-r--r--drivers/net/acenic.h1
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atlx/atl1.c3
-rw-r--r--drivers/net/bnx2.c242
-rw-r--r--drivers/net/bnx2.h4
-rw-r--r--drivers/net/bnx2_fw.h80
-rw-r--r--drivers/net/bonding/bond_main.c706
-rw-r--r--drivers/net/bonding/bond_sysfs.c81
-rw-r--r--drivers/net/bonding/bonding.h13
-rw-r--r--drivers/net/cxgb3/adapter.h18
-rw-r--r--drivers/net/cxgb3/common.h1
-rw-r--r--drivers/net/cxgb3/cxgb3_ioctl.h1
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c19
-rw-r--r--drivers/net/cxgb3/sge.c391
-rw-r--r--drivers/net/cxgb3/t3_cpl.h11
-rw-r--r--drivers/net/declance.c4
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/e1000e/netdev.c5
-rw-r--r--drivers/net/forcedeth.c61
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c31
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hamachi.c12
-rw-r--r--drivers/net/hamradio/6pack.c24
-rw-r--r--drivers/net/hplance.c4
-rw-r--r--drivers/net/igb/igb_main.c7
-rw-r--r--drivers/net/irda/donauboe.c6
-rw-r--r--drivers/net/irda/smsc-ircc2.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c7
-rw-r--r--drivers/net/ixp2000/ixpdev.c4
-rw-r--r--drivers/net/lib8390.c100
-rw-r--r--drivers/net/mac8390.c8
-rw-r--r--drivers/net/macb.c6
-rw-r--r--drivers/net/macsonic.c19
-rw-r--r--drivers/net/myri10ge/myri10ge.c1076
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/niu.h2
-rw-r--r--drivers/net/ns83820.c9
-rw-r--r--drivers/net/pcmcia/3c574_cs.c47
-rw-r--r--drivers/net/pcmcia/3c589_cs.c49
-rw-r--r--drivers/net/pcmcia/axnet_cs.c70
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/Kconfig9
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/broadcom.c201
-rw-r--r--drivers/net/phy/mdio-ofgpio.c205
-rw-r--r--drivers/net/ppp_generic.c22
-rw-r--r--drivers/net/ps3_gelic_net.c10
-rw-r--r--drivers/net/ps3_gelic_net.h2
-rw-r--r--drivers/net/ps3_gelic_wireless.c206
-rw-r--r--drivers/net/ps3_gelic_wireless.h7
-rw-r--r--drivers/net/qla3xxx.c4
-rw-r--r--drivers/net/s2io.c6
-rw-r--r--drivers/net/s2io.h2
-rw-r--r--drivers/net/sb1250-mac.c2
-rw-r--r--drivers/net/sfc/Kconfig2
-rw-r--r--drivers/net/sfc/Makefile2
-rw-r--r--drivers/net/sfc/boards.c2
-rw-r--r--drivers/net/sfc/boards.h3
-rw-r--r--drivers/net/sfc/efx.c2
-rw-r--r--drivers/net/sfc/falcon.c74
-rw-r--r--drivers/net/sfc/i2c-direct.c381
-rw-r--r--drivers/net/sfc/i2c-direct.h91
-rw-r--r--drivers/net/sfc/net_driver.h11
-rw-r--r--drivers/net/sfc/sfe4001.c126
-rw-r--r--drivers/net/sh_eth.c1174
-rw-r--r--drivers/net/sh_eth.h464
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/sky2.c175
-rw-r--r--drivers/net/sky2.h23
-rw-r--r--drivers/net/smc911x.c422
-rw-r--r--drivers/net/smc911x.h494
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/sunlance.c4
-rw-r--r--drivers/net/tg3.c1268
-rw-r--r--drivers/net/tg3.h40
-rw-r--r--drivers/net/tlan.c490
-rw-r--r--drivers/net/tlan.h26
-rw-r--r--drivers/net/tokenring/3c359.c20
-rw-r--r--drivers/net/tokenring/3c359.h2
-rw-r--r--drivers/net/tsi108_eth.c10
-rw-r--r--drivers/net/ucc_geth_ethtool.c4
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/hso.c2836
-rw-r--r--drivers/net/via-velocity.c25
-rw-r--r--drivers/net/wireless/adm8211.c49
-rw-r--r--drivers/net/wireless/adm8211.h1
-rw-r--r--drivers/net/wireless/airo.c57
-rw-r--r--drivers/net/wireless/arlan-main.c40
-rw-r--r--drivers/net/wireless/arlan.h1
-rw-r--r--drivers/net/wireless/ath5k/base.c105
-rw-r--r--drivers/net/wireless/ath5k/base.h4
-rw-r--r--drivers/net/wireless/atmel.c46
-rw-r--r--drivers/net/wireless/b43/b43.h47
-rw-r--r--drivers/net/wireless/b43/debugfs.c77
-rw-r--r--drivers/net/wireless/b43/debugfs.h1
-rw-r--r--drivers/net/wireless/b43/dma.c54
-rw-r--r--drivers/net/wireless/b43/dma.h3
-rw-r--r--drivers/net/wireless/b43/lo.c731
-rw-r--r--drivers/net/wireless/b43/lo.h115
-rw-r--r--drivers/net/wireless/b43/main.c283
-rw-r--r--drivers/net/wireless/b43/main.h3
-rw-r--r--drivers/net/wireless/b43/nphy.c2
-rw-r--r--drivers/net/wireless/b43/phy.c291
-rw-r--r--drivers/net/wireless/b43/phy.h16
-rw-r--r--drivers/net/wireless/b43/pio.c44
-rw-r--r--drivers/net/wireless/b43/pio.h8
-rw-r--r--drivers/net/wireless/b43/xmit.c70
-rw-r--r--drivers/net/wireless/b43/xmit.h4
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h17
-rw-r--r--drivers/net/wireless/b43legacy/dma.c43
-rw-r--r--drivers/net/wireless/b43legacy/dma.h7
-rw-r--r--drivers/net/wireless/b43legacy/main.c31
-rw-r--r--drivers/net/wireless/b43legacy/phy.c14
-rw-r--r--drivers/net/wireless/b43legacy/pio.c27
-rw-r--r--drivers/net/wireless/b43legacy/pio.h7
-rw-r--r--drivers/net/wireless/b43legacy/radio.c12
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c51
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig40
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h622
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-rs.c1172
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-rs.h95
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c3099
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h133
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1417
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c806
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h109
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-commands.h)354
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1039
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h125
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965.h)429
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c146
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h206
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h391
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c423
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h333
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c470
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c648
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1393
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c163
-rw-r--r--drivers/net/wireless/iwlwifi/iwl4965-base.c3687
-rw-r--r--drivers/net/wireless/libertas/Makefile8
-rw-r--r--drivers/net/wireless/libertas/assoc.c8
-rw-r--r--drivers/net/wireless/libertas/cmd.c192
-rw-r--r--drivers/net/wireless/libertas/cmd.h8
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c25
-rw-r--r--drivers/net/wireless/libertas/decl.h8
-rw-r--r--drivers/net/wireless/libertas/defs.h14
-rw-r--r--drivers/net/wireless/libertas/dev.h8
-rw-r--r--drivers/net/wireless/libertas/host.h17
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h4
-rw-r--r--drivers/net/wireless/libertas/if_cs.c227
-rw-r--r--drivers/net/wireless/libertas/if_usb.c22
-rw-r--r--drivers/net/wireless/libertas/main.c254
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c453
-rw-r--r--drivers/net/wireless/libertas/rx.c4
-rw-r--r--drivers/net/wireless/libertas/types.h30
-rw-r--r--drivers/net/wireless/libertas/wext.c32
-rw-r--r--drivers/net/wireless/p54/p54.h2
-rw-r--r--drivers/net/wireless/p54/p54common.c137
-rw-r--r--drivers/net/wireless/p54/p54common.h1
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig55
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c172
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c111
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h70
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c234
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c111
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c105
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h28
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c173
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c232
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h47
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c163
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c95
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h5
-rw-r--r--drivers/net/wireless/rtl8180_dev.c71
-rw-r--r--drivers/net/wireless/rtl8187.h6
-rw-r--r--drivers/net/wireless/rtl8187_dev.c55
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c184
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c29
-rw-r--r--drivers/ssb/pci.c20
-rw-r--r--include/linux/brcmphy.h6
-rw-r--r--include/linux/ieee80211.h52
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/if_ppp.h2
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/ip6_tunnel.h4
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h10
-rw-r--r--include/linux/netfilter_bridge/ebt_ip6.h40
-rw-r--r--include/linux/netfilter_bridge/ebt_log.h3
-rw-r--r--include/linux/netfilter_ipv4.h1
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/ppp-comp.h2
-rw-r--r--include/linux/ppp_defs.h2
-rw-r--r--include/linux/smc911x.h12
-rw-r--r--include/linux/sunrpc/auth_gss.h2
-rw-r--r--include/linux/sunrpc/gss_api.h2
-rw-r--r--include/linux/sunrpc/svcauth_gss.h3
-rw-r--r--include/linux/tcp.h50
-rw-r--r--include/linux/tipc_config.h10
-rw-r--r--include/linux/wanrouter.h2
-rw-r--r--include/linux/wireless.h2
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/ieee80211.h11
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inetpeer.h2
-rw-r--r--include/net/ip6_tunnel.h5
-rw-r--r--include/net/ipconfig.h2
-rw-r--r--include/net/ipip.h1
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/mac80211.h466
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h19
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sctp/user.h34
-rw-r--r--include/net/snmp.h2
-rw-r--r--include/net/tcp.h21
-rw-r--r--include/net/tipc/tipc_port.h3
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/wireless.h6
-rw-r--r--ipc/mqueue.c2
-rw-r--r--net/bluetooth/bnep/bnep.h4
-rw-r--r--net/bluetooth/bnep/core.c4
-rw-r--r--net/bluetooth/bnep/netdev.c4
-rw-r--r--net/bluetooth/bnep/sock.c4
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/bridge/br_device.c13
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/br_forward.c4
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_input.c12
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_notify.c2
-rw-r--r--net/bridge/br_private.h3
-rw-r--r--net/bridge/br_private_stp.h2
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/bridge/netfilter/Kconfig9
-rw-r--r--net/bridge/netfilter/Makefile1
-rw-r--r--net/bridge/netfilter/ebt_ip6.c144
-rw-r--r--net/bridge/netfilter/ebt_log.c64
-rw-r--r--net/core/net-sysfs.c9
-rw-r--r--net/core/rtnetlink.c20
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/sysctl_net_core.c39
-rw-r--r--net/ieee80211/ieee80211_rx.c2
-rw-r--r--net/ieee80211/ieee80211_tx.c86
-rw-r--r--net/ieee80211/ieee80211_wx.c89
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_hash.c2
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inetpeer.c2
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_fragment.c36
-rw-r--r--net/ipv4/ip_gre.c32
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_options.c2
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipip.c24
-rw-r--r--net/ipv4/ipmr.c31
-rw-r--r--net/ipv4/ipvs/ip_vs_app.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_dh.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_est.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_ftp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_nq.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_ah.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_esp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_tcp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_udp.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_rr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sched.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sed.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sh.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_wlc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_wrr.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c2
-rw-r--r--net/ipv4/netfilter/Kconfig12
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ip_queue.c3
-rw-r--r--net/ipv4/netfilter/iptable_security.c180
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c5
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/protocol.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c2
-rw-r--r--net/ipv4/tcp.c72
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_input.c42
-rw-r--r--net/ipv4/tcp_ipv4.c166
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv6/addrconf.c24
-rw-r--r--net/ipv6/addrlabel.c106
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_input.c2
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_tunnel.c28
-rw-r--r--net/ipv6/ip6mr.c24
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/netfilter/Kconfig12
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/ip6_queue.c3
-rw-r--r--net/ipv6/netfilter/ip6table_security.c172
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c5
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/protocol.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c63
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c26
-rw-r--r--net/ipv6/sysctl_net_ipv6.c29
-rw-r--r--net/ipv6/tcp_ipv6.c138
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/irda/irnet/irnet_ppp.c54
-rw-r--r--net/irda/irnet/irnet_ppp.h7
-rw-r--r--net/iucv/af_iucv.c1
-rw-r--r--net/iucv/iucv.c13
-rw-r--r--net/key/af_key.c622
-rw-r--r--net/mac80211/Kconfig14
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/aes_ccm.c2
-rw-r--r--net/mac80211/aes_ccm.h2
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/debugfs.c43
-rw-r--r--net/mac80211/debugfs_key.c8
-rw-r--r--net/mac80211/debugfs_netdev.c11
-rw-r--r--net/mac80211/debugfs_sta.c56
-rw-r--r--net/mac80211/ieee80211_i.h92
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/mac80211/key.c11
-rw-r--r--net/mac80211/key.h17
-rw-r--r--net/mac80211/main.c329
-rw-r--r--net/mac80211/mesh.c38
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c53
-rw-r--r--net/mac80211/mesh_plink.c88
-rw-r--r--net/mac80211/michael.c106
-rw-r--r--net/mac80211/michael.h8
-rw-r--r--net/mac80211/mlme.c225
-rw-r--r--net/mac80211/rate.c12
-rw-r--r--net/mac80211/rate.h33
-rw-r--r--net/mac80211/rc80211_pid.h4
-rw-r--r--net/mac80211/rc80211_pid_algo.c22
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c8
-rw-r--r--net/mac80211/rx.c291
-rw-r--r--net/mac80211/sta_info.c22
-rw-r--r--net/mac80211/sta_info.h80
-rw-r--r--net/mac80211/tkip.c229
-rw-r--r--net/mac80211/tkip.h8
-rw-r--r--net/mac80211/tx.c863
-rw-r--r--net/mac80211/util.c59
-rw-r--r--net/mac80211/wep.c19
-rw-r--r--net/mac80211/wep.h2
-rw-r--r--net/mac80211/wext.c28
-rw-r--r--net/mac80211/wme.c147
-rw-r--r--net/mac80211/wme.h2
-rw-r--r--net/mac80211/wpa.c135
-rw-r--r--net/netfilter/nf_conntrack_core.c19
-rw-r--r--net/netfilter/nf_conntrack_extend.c10
-rw-r--r--net/netfilter/nf_conntrack_netlink.c30
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c80
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c9
-rw-r--r--net/netfilter/nfnetlink_queue.c1
-rw-r--r--net/netfilter/xt_CONNSECMARK.c10
-rw-r--r--net/netfilter/xt_SECMARK.c10
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/sctp/proc.c141
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/sm_sideeffect.c17
-rw-r--r--net/sctp/socket.c306
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sysctl_net.c31
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/cluster.c2
-rw-r--r--net/tipc/config.c11
-rw-r--r--net/tipc/core.c13
-rw-r--r--net/tipc/core.h126
-rw-r--r--net/tipc/dbg.c231
-rw-r--r--net/tipc/dbg.h12
-rw-r--r--net/tipc/discover.c14
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/link.c80
-rw-r--r--net/tipc/msg.c13
-rw-r--r--net/tipc/msg.h42
-rw-r--r--net/tipc/name_distr.c6
-rw-r--r--net/tipc/name_table.c14
-rw-r--r--net/tipc/net.c10
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/netlink.c16
-rw-r--r--net/tipc/node.c26
-rw-r--r--net/tipc/port.c79
-rw-r--r--net/tipc/ref.c12
-rw-r--r--net/tipc/socket.c5
-rw-r--r--net/tipc/subscr.c249
-rw-r--r--net/tipc/subscr.h34
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/wanrouter/wanmain.c6
-rw-r--r--net/wanrouter/wanproc.c2
-rw-r--r--net/wireless/core.c33
-rw-r--r--net/wireless/radiotap.c16
489 files changed, 27379 insertions, 16992 deletions
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index a0cda062bc33..8e6b8d3c7410 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -289,35 +289,73 @@ downdelay
289fail_over_mac 289fail_over_mac
290 290
291 Specifies whether active-backup mode should set all slaves to 291 Specifies whether active-backup mode should set all slaves to
292 the same MAC address (the traditional behavior), or, when 292 the same MAC address at enslavement (the traditional
293 enabled, change the bond's MAC address when changing the 293 behavior), or, when enabled, perform special handling of the
294 active interface (i.e., fail over the MAC address itself). 294 bond's MAC address in accordance with the selected policy.
295 295
296 Fail over MAC is useful for devices that cannot ever alter 296 Possible values are:
297 their MAC address, or for devices that refuse incoming 297
298 broadcasts with their own source MAC (which interferes with 298 none or 0
299 the ARP monitor). 299
300 300 This setting disables fail_over_mac, and causes
301 The down side of fail over MAC is that every device on the 301 bonding to set all slaves of an active-backup bond to
302 network must be updated via gratuitous ARP, vs. just updating 302 the same MAC address at enslavement time. This is the
303 a switch or set of switches (which often takes place for any 303 default.
304 traffic, not just ARP traffic, if the switch snoops incoming 304
305 traffic to update its tables) for the traditional method. If 305 active or 1
306 the gratuitous ARP is lost, communication may be disrupted. 306
307 307 The "active" fail_over_mac policy indicates that the
308 When fail over MAC is used in conjuction with the mii monitor, 308 MAC address of the bond should always be the MAC
309 devices which assert link up prior to being able to actually 309 address of the currently active slave. The MAC
310 transmit and receive are particularly susecptible to loss of 310 address of the slaves is not changed; instead, the MAC
311 the gratuitous ARP, and an appropriate updelay setting may be 311 address of the bond changes during a failover.
312 required. 312
313 313 This policy is useful for devices that cannot ever
314 A value of 0 disables fail over MAC, and is the default. A 314 alter their MAC address, or for devices that refuse
315 value of 1 enables fail over MAC. This option is enabled 315 incoming broadcasts with their own source MAC (which
316 automatically if the first slave added cannot change its MAC 316 interferes with the ARP monitor).
317 address. This option may be modified via sysfs only when no 317
318 slaves are present in the bond. 318 The down side of this policy is that every device on
319 319 the network must be updated via gratuitous ARP,
320 This option was added in bonding version 3.2.0. 320 vs. just updating a switch or set of switches (which
321 often takes place for any traffic, not just ARP
322 traffic, if the switch snoops incoming traffic to
323 update its tables) for the traditional method. If the
324 gratuitous ARP is lost, communication may be
325 disrupted.
326
327 When this policy is used in conjuction with the mii
328 monitor, devices which assert link up prior to being
329 able to actually transmit and receive are particularly
330 susecptible to loss of the gratuitous ARP, and an
331 appropriate updelay setting may be required.
332
333 follow or 2
334
335 The "follow" fail_over_mac policy causes the MAC
336 address of the bond to be selected normally (normally
337 the MAC address of the first slave added to the bond).
338 However, the second and subsequent slaves are not set
339 to this MAC address while they are in a backup role; a
340 slave is programmed with the bond's MAC address at
341 failover time (and the formerly active slave receives
342 the newly active slave's MAC address).
343
344 This policy is useful for multiport devices that
345 either become confused or incur a performance penalty
346 when multiple ports are programmed with the same MAC
347 address.
348
349
350 The default policy is none, unless the first slave cannot
351 change its MAC address, in which case the active policy is
352 selected by default.
353
354 This option may be modified via sysfs only when no slaves are
355 present in the bond.
356
357 This option was added in bonding version 3.2.0. The "follow"
358 policy was added in bonding version 3.3.0.
321 359
322lacp_rate 360lacp_rate
323 361
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 1d2a772506cf..46a9dba11f2f 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -58,6 +58,7 @@ Table of Contents
58 o) Xilinx IP cores 58 o) Xilinx IP cores
59 p) Freescale Synchronous Serial Interface 59 p) Freescale Synchronous Serial Interface
60 q) USB EHCI controllers 60 q) USB EHCI controllers
61 r) MDIO on GPIOs
61 62
62 VII - Marvell Discovery mv64[345]6x System Controller chips 63 VII - Marvell Discovery mv64[345]6x System Controller chips
63 1) The /system-controller node 64 1) The /system-controller node
@@ -2870,6 +2871,26 @@ platforms are moved over to use the flattened-device-tree model.
2870 reg = <0xe8000000 32>; 2871 reg = <0xe8000000 32>;
2871 }; 2872 };
2872 2873
2874 r) MDIO on GPIOs
2875
2876 Currently defined compatibles:
2877 - virtual,gpio-mdio
2878
2879 MDC and MDIO lines connected to GPIO controllers are listed in the
2880 gpios property as described in section VIII.1 in the following order:
2881
2882 MDC, MDIO.
2883
2884 Example:
2885
2886 mdio {
2887 compatible = "virtual,mdio-gpio";
2888 #address-cells = <1>;
2889 #size-cells = <0>;
2890 gpios = <&qe_pio_a 11
2891 &qe_pio_c 6>;
2892 };
2893
2873VII - Marvell Discovery mv64[345]6x System Controller chips 2894VII - Marvell Discovery mv64[345]6x System Controller chips
2874=========================================================== 2895===========================================================
2875 2896
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 075598e1c502..71a58a4e1795 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -71,6 +71,7 @@ config BT_HCIUART_H4
71config BT_HCIUART_BCSP 71config BT_HCIUART_BCSP
72 bool "BCSP protocol support" 72 bool "BCSP protocol support"
73 depends on BT_HCIUART 73 depends on BT_HCIUART
74 select CONFIG_BITREVERSE
74 help 75 help
75 BCSP (BlueCore Serial Protocol) is serial protocol for communication 76 BCSP (BlueCore Serial Protocol) is serial protocol for communication
76 between Bluetooth device and host. This protocol is required for non 77 between Bluetooth device and host. This protocol is required for non
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 696f7528f022..4d37bb312ee3 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -39,6 +39,8 @@
39#include <linux/signal.h> 39#include <linux/signal.h>
40#include <linux/ioctl.h> 40#include <linux/ioctl.h>
41#include <linux/skbuff.h> 41#include <linux/skbuff.h>
42#include <linux/bitrev.h>
43#include <asm/unaligned.h>
42 44
43#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
@@ -124,27 +126,6 @@ static void bcsp_crc_update(u16 *crc, u8 d)
124 *crc = reg; 126 *crc = reg;
125} 127}
126 128
127/*
128 Get reverse of generated crc
129
130 Implementation note
131 The crc generator (bcsp_crc_init() and bcsp_crc_update())
132 creates a reversed crc, so it needs to be swapped back before
133 being passed on.
134*/
135static u16 bcsp_crc_reverse(u16 crc)
136{
137 u16 b, rev;
138
139 for (b = 0, rev = 0; b < 16; b++) {
140 rev = rev << 1;
141 rev |= (crc & 1);
142 crc = crc >> 1;
143 }
144
145 return (rev);
146}
147
148/* ---- BCSP core ---- */ 129/* ---- BCSP core ---- */
149 130
150static void bcsp_slip_msgdelim(struct sk_buff *skb) 131static void bcsp_slip_msgdelim(struct sk_buff *skb)
@@ -235,10 +216,10 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
235 } 216 }
236 217
237 if (hciextn && chan == 5) { 218 if (hciextn && chan == 5) {
238 struct hci_command_hdr *hdr = (struct hci_command_hdr *) data; 219 __le16 opcode = ((struct hci_command_hdr *)data)->opcode;
239 220
240 /* Vendor specific commands */ 221 /* Vendor specific commands */
241 if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == 0x3f) { 222 if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) {
242 u8 desc = *(data + HCI_COMMAND_HDR_SIZE); 223 u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
243 if ((desc & 0xf0) == 0xc0) { 224 if ((desc & 0xf0) == 0xc0) {
244 data += HCI_COMMAND_HDR_SIZE + 1; 225 data += HCI_COMMAND_HDR_SIZE + 1;
@@ -296,7 +277,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
296 277
297 /* Put CRC */ 278 /* Put CRC */
298 if (bcsp->use_crc) { 279 if (bcsp->use_crc) {
299 bcsp_txmsg_crc = bcsp_crc_reverse(bcsp_txmsg_crc); 280 bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc);
300 bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff)); 281 bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
301 bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff)); 282 bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff));
302 } 283 }
@@ -566,6 +547,11 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
566 bcsp->rx_skb = NULL; 547 bcsp->rx_skb = NULL;
567} 548}
568 549
550static u16 bscp_get_crc(struct bcsp_struct *bcsp)
551{
552 return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]);
553}
554
569/* Recv data */ 555/* Recv data */
570static int bcsp_recv(struct hci_uart *hu, void *data, int count) 556static int bcsp_recv(struct hci_uart *hu, void *data, int count)
571{ 557{
@@ -624,14 +610,10 @@ static int bcsp_recv(struct hci_uart *hu, void *data, int count)
624 continue; 610 continue;
625 611
626 case BCSP_W4_CRC: 612 case BCSP_W4_CRC:
627 if (bcsp_crc_reverse(bcsp->message_crc) != 613 if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
628 (bcsp->rx_skb->data[bcsp->rx_skb->len - 2] << 8) +
629 bcsp->rx_skb->data[bcsp->rx_skb->len - 1]) {
630
631 BT_ERR ("Checksum failed: computed %04x received %04x", 614 BT_ERR ("Checksum failed: computed %04x received %04x",
632 bcsp_crc_reverse(bcsp->message_crc), 615 bitrev16(bcsp->message_crc),
633 (bcsp->rx_skb-> data[bcsp->rx_skb->len - 2] << 8) + 616 bscp_get_crc(bcsp));
634 bcsp->rx_skb->data[bcsp->rx_skb->len - 1]);
635 617
636 kfree_skb(bcsp->rx_skb); 618 kfree_skb(bcsp->rx_skb);
637 bcsp->rx_state = BCSP_W4_PKT_DELIMITER; 619 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 105a8c7ca7e9..e4e3241628d6 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -572,12 +572,16 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
572 int irq; 572 int irq;
573 DECLARE_MAC_BUF(mac); 573 DECLARE_MAC_BUF(mac);
574 574
575#ifdef __ISAPNP__
575 if (idev) { 576 if (idev) {
576 irq = pnp_irq(idev, 0); 577 irq = pnp_irq(idev, 0);
577 vp->dev = &idev->dev; 578 vp->dev = &idev->dev;
578 } else { 579 } else {
579 irq = inw(ioaddr + 0x2002) & 15; 580 irq = inw(ioaddr + 0x2002) & 15;
580 } 581 }
582#else
583 irq = inw(ioaddr + 0x2002) & 15;
584#endif
581 585
582 dev->base_addr = ioaddr; 586 dev->base_addr = ioaddr;
583 dev->irq = irq; 587 dev->irq = irq;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 239fc42fb8df..dc6e474229b1 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -202,7 +202,6 @@ static void elmc_xmt_int(struct net_device *dev);
202static void elmc_rnr_int(struct net_device *dev); 202static void elmc_rnr_int(struct net_device *dev);
203 203
204struct priv { 204struct priv {
205 struct net_device_stats stats;
206 unsigned long base; 205 unsigned long base;
207 char *memtop; 206 char *memtop;
208 unsigned long mapped_start; /* Start of ioremap */ 207 unsigned long mapped_start; /* Start of ioremap */
@@ -989,18 +988,18 @@ static void elmc_rcv_int(struct net_device *dev)
989 skb->protocol = eth_type_trans(skb, dev); 988 skb->protocol = eth_type_trans(skb, dev);
990 netif_rx(skb); 989 netif_rx(skb);
991 dev->last_rx = jiffies; 990 dev->last_rx = jiffies;
992 p->stats.rx_packets++; 991 dev->stats.rx_packets++;
993 p->stats.rx_bytes += totlen; 992 dev->stats.rx_bytes += totlen;
994 } else { 993 } else {
995 p->stats.rx_dropped++; 994 dev->stats.rx_dropped++;
996 } 995 }
997 } else { 996 } else {
998 printk(KERN_WARNING "%s: received oversized frame.\n", dev->name); 997 printk(KERN_WARNING "%s: received oversized frame.\n", dev->name);
999 p->stats.rx_dropped++; 998 dev->stats.rx_dropped++;
1000 } 999 }
1001 } else { /* frame !(ok), only with 'save-bad-frames' */ 1000 } else { /* frame !(ok), only with 'save-bad-frames' */
1002 printk(KERN_WARNING "%s: oops! rfd-error-status: %04x\n", dev->name, status); 1001 printk(KERN_WARNING "%s: oops! rfd-error-status: %04x\n", dev->name, status);
1003 p->stats.rx_errors++; 1002 dev->stats.rx_errors++;
1004 } 1003 }
1005 p->rfd_top->status = 0; 1004 p->rfd_top->status = 0;
1006 p->rfd_top->last = RFD_SUSP; 1005 p->rfd_top->last = RFD_SUSP;
@@ -1018,7 +1017,7 @@ static void elmc_rnr_int(struct net_device *dev)
1018{ 1017{
1019 struct priv *p = (struct priv *) dev->priv; 1018 struct priv *p = (struct priv *) dev->priv;
1020 1019
1021 p->stats.rx_errors++; 1020 dev->stats.rx_errors++;
1022 1021
1023 WAIT_4_SCB_CMD(); /* wait for the last cmd */ 1022 WAIT_4_SCB_CMD(); /* wait for the last cmd */
1024 p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ 1023 p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
@@ -1046,24 +1045,24 @@ static void elmc_xmt_int(struct net_device *dev)
1046 printk(KERN_WARNING "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); 1045 printk(KERN_WARNING "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
1047 } 1046 }
1048 if (status & STAT_OK) { 1047 if (status & STAT_OK) {
1049 p->stats.tx_packets++; 1048 dev->stats.tx_packets++;
1050 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 1049 dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
1051 } else { 1050 } else {
1052 p->stats.tx_errors++; 1051 dev->stats.tx_errors++;
1053 if (status & TCMD_LATECOLL) { 1052 if (status & TCMD_LATECOLL) {
1054 printk(KERN_WARNING "%s: late collision detected.\n", dev->name); 1053 printk(KERN_WARNING "%s: late collision detected.\n", dev->name);
1055 p->stats.collisions++; 1054 dev->stats.collisions++;
1056 } else if (status & TCMD_NOCARRIER) { 1055 } else if (status & TCMD_NOCARRIER) {
1057 p->stats.tx_carrier_errors++; 1056 dev->stats.tx_carrier_errors++;
1058 printk(KERN_WARNING "%s: no carrier detected.\n", dev->name); 1057 printk(KERN_WARNING "%s: no carrier detected.\n", dev->name);
1059 } else if (status & TCMD_LOSTCTS) { 1058 } else if (status & TCMD_LOSTCTS) {
1060 printk(KERN_WARNING "%s: loss of CTS detected.\n", dev->name); 1059 printk(KERN_WARNING "%s: loss of CTS detected.\n", dev->name);
1061 } else if (status & TCMD_UNDERRUN) { 1060 } else if (status & TCMD_UNDERRUN) {
1062 p->stats.tx_fifo_errors++; 1061 dev->stats.tx_fifo_errors++;
1063 printk(KERN_WARNING "%s: DMA underrun detected.\n", dev->name); 1062 printk(KERN_WARNING "%s: DMA underrun detected.\n", dev->name);
1064 } else if (status & TCMD_MAXCOLL) { 1063 } else if (status & TCMD_MAXCOLL) {
1065 printk(KERN_WARNING "%s: Max. collisions exceeded.\n", dev->name); 1064 printk(KERN_WARNING "%s: Max. collisions exceeded.\n", dev->name);
1066 p->stats.collisions += 16; 1065 dev->stats.collisions += 16;
1067 } 1066 }
1068 } 1067 }
1069 1068
@@ -1215,12 +1214,12 @@ static struct net_device_stats *elmc_get_stats(struct net_device *dev)
1215 ovrn = p->scb->ovrn_errs; 1214 ovrn = p->scb->ovrn_errs;
1216 p->scb->ovrn_errs -= ovrn; 1215 p->scb->ovrn_errs -= ovrn;
1217 1216
1218 p->stats.rx_crc_errors += crc; 1217 dev->stats.rx_crc_errors += crc;
1219 p->stats.rx_fifo_errors += ovrn; 1218 dev->stats.rx_fifo_errors += ovrn;
1220 p->stats.rx_frame_errors += aln; 1219 dev->stats.rx_frame_errors += aln;
1221 p->stats.rx_dropped += rsc; 1220 dev->stats.rx_dropped += rsc;
1222 1221
1223 return &p->stats; 1222 return &dev->stats;
1224} 1223}
1225 1224
1226/******************************************************** 1225/********************************************************
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index fae295b6809c..6aca0c640f13 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -158,7 +158,6 @@ struct mc32_local
158 int slot; 158 int slot;
159 159
160 u32 base; 160 u32 base;
161 struct net_device_stats net_stats;
162 volatile struct mc32_mailbox *rx_box; 161 volatile struct mc32_mailbox *rx_box;
163 volatile struct mc32_mailbox *tx_box; 162 volatile struct mc32_mailbox *tx_box;
164 volatile struct mc32_mailbox *exec_box; 163 volatile struct mc32_mailbox *exec_box;
@@ -1093,24 +1092,24 @@ static void mc32_update_stats(struct net_device *dev)
1093 1092
1094 u32 rx_errors=0; 1093 u32 rx_errors=0;
1095 1094
1096 rx_errors+=lp->net_stats.rx_crc_errors +=st->rx_crc_errors; 1095 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1097 st->rx_crc_errors=0; 1096 st->rx_crc_errors=0;
1098 rx_errors+=lp->net_stats.rx_fifo_errors +=st->rx_overrun_errors; 1097 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
1099 st->rx_overrun_errors=0; 1098 st->rx_overrun_errors=0;
1100 rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors; 1099 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1101 st->rx_alignment_errors=0; 1100 st->rx_alignment_errors=0;
1102 rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors; 1101 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1103 st->rx_tooshort_errors=0; 1102 st->rx_tooshort_errors=0;
1104 rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors; 1103 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
1105 st->rx_outofresource_errors=0; 1104 st->rx_outofresource_errors=0;
1106 lp->net_stats.rx_errors=rx_errors; 1105 dev->stats.rx_errors=rx_errors;
1107 1106
1108 /* Number of packets which saw one collision */ 1107 /* Number of packets which saw one collision */
1109 lp->net_stats.collisions+=st->dataC[10]; 1108 dev->stats.collisions+=st->dataC[10];
1110 st->dataC[10]=0; 1109 st->dataC[10]=0;
1111 1110
1112 /* Number of packets which saw 2--15 collisions */ 1111 /* Number of packets which saw 2--15 collisions */
1113 lp->net_stats.collisions+=st->dataC[11]; 1112 dev->stats.collisions+=st->dataC[11];
1114 st->dataC[11]=0; 1113 st->dataC[11]=0;
1115} 1114}
1116 1115
@@ -1178,7 +1177,7 @@ static void mc32_rx_ring(struct net_device *dev)
1178 skb=dev_alloc_skb(length+2); 1177 skb=dev_alloc_skb(length+2);
1179 1178
1180 if(skb==NULL) { 1179 if(skb==NULL) {
1181 lp->net_stats.rx_dropped++; 1180 dev->stats.rx_dropped++;
1182 goto dropped; 1181 goto dropped;
1183 } 1182 }
1184 1183
@@ -1189,8 +1188,8 @@ static void mc32_rx_ring(struct net_device *dev)
1189 1188
1190 skb->protocol=eth_type_trans(skb,dev); 1189 skb->protocol=eth_type_trans(skb,dev);
1191 dev->last_rx = jiffies; 1190 dev->last_rx = jiffies;
1192 lp->net_stats.rx_packets++; 1191 dev->stats.rx_packets++;
1193 lp->net_stats.rx_bytes += length; 1192 dev->stats.rx_bytes += length;
1194 netif_rx(skb); 1193 netif_rx(skb);
1195 } 1194 }
1196 1195
@@ -1253,34 +1252,34 @@ static void mc32_tx_ring(struct net_device *dev)
1253 /* Not COMPLETED */ 1252 /* Not COMPLETED */
1254 break; 1253 break;
1255 } 1254 }
1256 lp->net_stats.tx_packets++; 1255 dev->stats.tx_packets++;
1257 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */ 1256 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1258 { 1257 {
1259 lp->net_stats.tx_errors++; 1258 dev->stats.tx_errors++;
1260 1259
1261 switch(np->status&0x0F) 1260 switch(np->status&0x0F)
1262 { 1261 {
1263 case 1: 1262 case 1:
1264 lp->net_stats.tx_aborted_errors++; 1263 dev->stats.tx_aborted_errors++;
1265 break; /* Max collisions */ 1264 break; /* Max collisions */
1266 case 2: 1265 case 2:
1267 lp->net_stats.tx_fifo_errors++; 1266 dev->stats.tx_fifo_errors++;
1268 break; 1267 break;
1269 case 3: 1268 case 3:
1270 lp->net_stats.tx_carrier_errors++; 1269 dev->stats.tx_carrier_errors++;
1271 break; 1270 break;
1272 case 4: 1271 case 4:
1273 lp->net_stats.tx_window_errors++; 1272 dev->stats.tx_window_errors++;
1274 break; /* CTS Lost */ 1273 break; /* CTS Lost */
1275 case 5: 1274 case 5:
1276 lp->net_stats.tx_aborted_errors++; 1275 dev->stats.tx_aborted_errors++;
1277 break; /* Transmit timeout */ 1276 break; /* Transmit timeout */
1278 } 1277 }
1279 } 1278 }
1280 /* Packets are sent in order - this is 1279 /* Packets are sent in order - this is
1281 basically a FIFO queue of buffers matching 1280 basically a FIFO queue of buffers matching
1282 the card ring */ 1281 the card ring */
1283 lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len; 1282 dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
1284 dev_kfree_skb_irq(lp->tx_ring[t].skb); 1283 dev_kfree_skb_irq(lp->tx_ring[t].skb);
1285 lp->tx_ring[t].skb=NULL; 1284 lp->tx_ring[t].skb=NULL;
1286 atomic_inc(&lp->tx_count); 1285 atomic_inc(&lp->tx_count);
@@ -1367,7 +1366,7 @@ static irqreturn_t mc32_interrupt(int irq, void *dev_id)
1367 case 6: 1366 case 6:
1368 /* Out of RX buffers stat */ 1367 /* Out of RX buffers stat */
1369 /* Must restart rx */ 1368 /* Must restart rx */
1370 lp->net_stats.rx_dropped++; 1369 dev->stats.rx_dropped++;
1371 mc32_rx_ring(dev); 1370 mc32_rx_ring(dev);
1372 mc32_start_transceiver(dev); 1371 mc32_start_transceiver(dev);
1373 break; 1372 break;
@@ -1489,10 +1488,8 @@ static int mc32_close(struct net_device *dev)
1489 1488
1490static struct net_device_stats *mc32_get_stats(struct net_device *dev) 1489static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1491{ 1490{
1492 struct mc32_local *lp = netdev_priv(dev);
1493
1494 mc32_update_stats(dev); 1491 mc32_update_stats(dev);
1495 return &lp->net_stats; 1492 return &dev->stats;
1496} 1493}
1497 1494
1498 1495
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a453eda834d5..934db350e339 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -340,7 +340,6 @@ struct cp_private {
340 u32 rx_config; 340 u32 rx_config;
341 u16 cpcmd; 341 u16 cpcmd;
342 342
343 struct net_device_stats net_stats;
344 struct cp_extra_stats cp_stats; 343 struct cp_extra_stats cp_stats;
345 344
346 unsigned rx_head ____cacheline_aligned; 345 unsigned rx_head ____cacheline_aligned;
@@ -457,8 +456,8 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
457{ 456{
458 skb->protocol = eth_type_trans (skb, cp->dev); 457 skb->protocol = eth_type_trans (skb, cp->dev);
459 458
460 cp->net_stats.rx_packets++; 459 cp->dev->stats.rx_packets++;
461 cp->net_stats.rx_bytes += skb->len; 460 cp->dev->stats.rx_bytes += skb->len;
462 cp->dev->last_rx = jiffies; 461 cp->dev->last_rx = jiffies;
463 462
464#if CP_VLAN_TAG_USED 463#if CP_VLAN_TAG_USED
@@ -477,17 +476,17 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
477 printk (KERN_DEBUG 476 printk (KERN_DEBUG
478 "%s: rx err, slot %d status 0x%x len %d\n", 477 "%s: rx err, slot %d status 0x%x len %d\n",
479 cp->dev->name, rx_tail, status, len); 478 cp->dev->name, rx_tail, status, len);
480 cp->net_stats.rx_errors++; 479 cp->dev->stats.rx_errors++;
481 if (status & RxErrFrame) 480 if (status & RxErrFrame)
482 cp->net_stats.rx_frame_errors++; 481 cp->dev->stats.rx_frame_errors++;
483 if (status & RxErrCRC) 482 if (status & RxErrCRC)
484 cp->net_stats.rx_crc_errors++; 483 cp->dev->stats.rx_crc_errors++;
485 if ((status & RxErrRunt) || (status & RxErrLong)) 484 if ((status & RxErrRunt) || (status & RxErrLong))
486 cp->net_stats.rx_length_errors++; 485 cp->dev->stats.rx_length_errors++;
487 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) 486 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
488 cp->net_stats.rx_length_errors++; 487 cp->dev->stats.rx_length_errors++;
489 if (status & RxErrFIFO) 488 if (status & RxErrFIFO)
490 cp->net_stats.rx_fifo_errors++; 489 cp->dev->stats.rx_fifo_errors++;
491} 490}
492 491
493static inline unsigned int cp_rx_csum_ok (u32 status) 492static inline unsigned int cp_rx_csum_ok (u32 status)
@@ -539,7 +538,7 @@ rx_status_loop:
539 * that RX fragments are never encountered 538 * that RX fragments are never encountered
540 */ 539 */
541 cp_rx_err_acct(cp, rx_tail, status, len); 540 cp_rx_err_acct(cp, rx_tail, status, len);
542 cp->net_stats.rx_dropped++; 541 dev->stats.rx_dropped++;
543 cp->cp_stats.rx_frags++; 542 cp->cp_stats.rx_frags++;
544 goto rx_next; 543 goto rx_next;
545 } 544 }
@@ -556,7 +555,7 @@ rx_status_loop:
556 buflen = cp->rx_buf_sz + RX_OFFSET; 555 buflen = cp->rx_buf_sz + RX_OFFSET;
557 new_skb = dev_alloc_skb (buflen); 556 new_skb = dev_alloc_skb (buflen);
558 if (!new_skb) { 557 if (!new_skb) {
559 cp->net_stats.rx_dropped++; 558 dev->stats.rx_dropped++;
560 goto rx_next; 559 goto rx_next;
561 } 560 }
562 561
@@ -710,20 +709,20 @@ static void cp_tx (struct cp_private *cp)
710 if (netif_msg_tx_err(cp)) 709 if (netif_msg_tx_err(cp))
711 printk(KERN_DEBUG "%s: tx err, status 0x%x\n", 710 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
712 cp->dev->name, status); 711 cp->dev->name, status);
713 cp->net_stats.tx_errors++; 712 cp->dev->stats.tx_errors++;
714 if (status & TxOWC) 713 if (status & TxOWC)
715 cp->net_stats.tx_window_errors++; 714 cp->dev->stats.tx_window_errors++;
716 if (status & TxMaxCol) 715 if (status & TxMaxCol)
717 cp->net_stats.tx_aborted_errors++; 716 cp->dev->stats.tx_aborted_errors++;
718 if (status & TxLinkFail) 717 if (status & TxLinkFail)
719 cp->net_stats.tx_carrier_errors++; 718 cp->dev->stats.tx_carrier_errors++;
720 if (status & TxFIFOUnder) 719 if (status & TxFIFOUnder)
721 cp->net_stats.tx_fifo_errors++; 720 cp->dev->stats.tx_fifo_errors++;
722 } else { 721 } else {
723 cp->net_stats.collisions += 722 cp->dev->stats.collisions +=
724 ((status >> TxColCntShift) & TxColCntMask); 723 ((status >> TxColCntShift) & TxColCntMask);
725 cp->net_stats.tx_packets++; 724 cp->dev->stats.tx_packets++;
726 cp->net_stats.tx_bytes += skb->len; 725 cp->dev->stats.tx_bytes += skb->len;
727 if (netif_msg_tx_done(cp)) 726 if (netif_msg_tx_done(cp))
728 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail); 727 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
729 } 728 }
@@ -956,7 +955,7 @@ static void cp_set_rx_mode (struct net_device *dev)
956static void __cp_get_stats(struct cp_private *cp) 955static void __cp_get_stats(struct cp_private *cp)
957{ 956{
958 /* only lower 24 bits valid; write any value to clear */ 957 /* only lower 24 bits valid; write any value to clear */
959 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); 958 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
960 cpw32 (RxMissed, 0); 959 cpw32 (RxMissed, 0);
961} 960}
962 961
@@ -971,7 +970,7 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
971 __cp_get_stats(cp); 970 __cp_get_stats(cp);
972 spin_unlock_irqrestore(&cp->lock, flags); 971 spin_unlock_irqrestore(&cp->lock, flags);
973 972
974 return &cp->net_stats; 973 return &dev->stats;
975} 974}
976 975
977static void cp_stop_hw (struct cp_private *cp) 976static void cp_stop_hw (struct cp_private *cp)
@@ -1142,7 +1141,7 @@ static void cp_clean_rings (struct cp_private *cp)
1142 PCI_DMA_TODEVICE); 1141 PCI_DMA_TODEVICE);
1143 if (le32_to_cpu(desc->opts1) & LastFrag) 1142 if (le32_to_cpu(desc->opts1) & LastFrag)
1144 dev_kfree_skb(skb); 1143 dev_kfree_skb(skb);
1145 cp->net_stats.tx_dropped++; 1144 cp->dev->stats.tx_dropped++;
1146 } 1145 }
1147 } 1146 }
1148 1147
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 53bd903d2321..b23a00c5b84f 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -574,7 +574,6 @@ struct rtl8139_private {
574 u32 msg_enable; 574 u32 msg_enable;
575 struct napi_struct napi; 575 struct napi_struct napi;
576 struct net_device *dev; 576 struct net_device *dev;
577 struct net_device_stats stats;
578 577
579 unsigned char *rx_ring; 578 unsigned char *rx_ring;
580 unsigned int cur_rx; /* RX buf index of next pkt */ 579 unsigned int cur_rx; /* RX buf index of next pkt */
@@ -1711,7 +1710,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
1711 dev_kfree_skb(skb); 1710 dev_kfree_skb(skb);
1712 } else { 1711 } else {
1713 dev_kfree_skb(skb); 1712 dev_kfree_skb(skb);
1714 tp->stats.tx_dropped++; 1713 dev->stats.tx_dropped++;
1715 return 0; 1714 return 0;
1716 } 1715 }
1717 1716
@@ -1762,27 +1761,27 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
1762 if (netif_msg_tx_err(tp)) 1761 if (netif_msg_tx_err(tp))
1763 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", 1762 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1764 dev->name, txstatus); 1763 dev->name, txstatus);
1765 tp->stats.tx_errors++; 1764 dev->stats.tx_errors++;
1766 if (txstatus & TxAborted) { 1765 if (txstatus & TxAborted) {
1767 tp->stats.tx_aborted_errors++; 1766 dev->stats.tx_aborted_errors++;
1768 RTL_W32 (TxConfig, TxClearAbt); 1767 RTL_W32 (TxConfig, TxClearAbt);
1769 RTL_W16 (IntrStatus, TxErr); 1768 RTL_W16 (IntrStatus, TxErr);
1770 wmb(); 1769 wmb();
1771 } 1770 }
1772 if (txstatus & TxCarrierLost) 1771 if (txstatus & TxCarrierLost)
1773 tp->stats.tx_carrier_errors++; 1772 dev->stats.tx_carrier_errors++;
1774 if (txstatus & TxOutOfWindow) 1773 if (txstatus & TxOutOfWindow)
1775 tp->stats.tx_window_errors++; 1774 dev->stats.tx_window_errors++;
1776 } else { 1775 } else {
1777 if (txstatus & TxUnderrun) { 1776 if (txstatus & TxUnderrun) {
1778 /* Add 64 to the Tx FIFO threshold. */ 1777 /* Add 64 to the Tx FIFO threshold. */
1779 if (tp->tx_flag < 0x00300000) 1778 if (tp->tx_flag < 0x00300000)
1780 tp->tx_flag += 0x00020000; 1779 tp->tx_flag += 0x00020000;
1781 tp->stats.tx_fifo_errors++; 1780 dev->stats.tx_fifo_errors++;
1782 } 1781 }
1783 tp->stats.collisions += (txstatus >> 24) & 15; 1782 dev->stats.collisions += (txstatus >> 24) & 15;
1784 tp->stats.tx_bytes += txstatus & 0x7ff; 1783 dev->stats.tx_bytes += txstatus & 0x7ff;
1785 tp->stats.tx_packets++; 1784 dev->stats.tx_packets++;
1786 } 1785 }
1787 1786
1788 dirty_tx++; 1787 dirty_tx++;
@@ -1818,7 +1817,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1818 if (netif_msg_rx_err (tp)) 1817 if (netif_msg_rx_err (tp))
1819 printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n", 1818 printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
1820 dev->name, rx_status); 1819 dev->name, rx_status);
1821 tp->stats.rx_errors++; 1820 dev->stats.rx_errors++;
1822 if (!(rx_status & RxStatusOK)) { 1821 if (!(rx_status & RxStatusOK)) {
1823 if (rx_status & RxTooLong) { 1822 if (rx_status & RxTooLong) {
1824 DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n", 1823 DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
@@ -1826,11 +1825,11 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
1826 /* A.C.: The chip hangs here. */ 1825 /* A.C.: The chip hangs here. */
1827 } 1826 }
1828 if (rx_status & (RxBadSymbol | RxBadAlign)) 1827 if (rx_status & (RxBadSymbol | RxBadAlign))
1829 tp->stats.rx_frame_errors++; 1828 dev->stats.rx_frame_errors++;
1830 if (rx_status & (RxRunt | RxTooLong)) 1829 if (rx_status & (RxRunt | RxTooLong))
1831 tp->stats.rx_length_errors++; 1830 dev->stats.rx_length_errors++;
1832 if (rx_status & RxCRCErr) 1831 if (rx_status & RxCRCErr)
1833 tp->stats.rx_crc_errors++; 1832 dev->stats.rx_crc_errors++;
1834 } else { 1833 } else {
1835 tp->xstats.rx_lost_in_ring++; 1834 tp->xstats.rx_lost_in_ring++;
1836 } 1835 }
@@ -1913,9 +1912,9 @@ static void rtl8139_isr_ack(struct rtl8139_private *tp)
1913 /* Clear out errors and receive interrupts */ 1912 /* Clear out errors and receive interrupts */
1914 if (likely(status != 0)) { 1913 if (likely(status != 0)) {
1915 if (unlikely(status & (RxFIFOOver | RxOverflow))) { 1914 if (unlikely(status & (RxFIFOOver | RxOverflow))) {
1916 tp->stats.rx_errors++; 1915 tp->dev->stats.rx_errors++;
1917 if (status & RxFIFOOver) 1916 if (status & RxFIFOOver)
1918 tp->stats.rx_fifo_errors++; 1917 tp->dev->stats.rx_fifo_errors++;
1919 } 1918 }
1920 RTL_W16_F (IntrStatus, RxAckBits); 1919 RTL_W16_F (IntrStatus, RxAckBits);
1921 } 1920 }
@@ -2016,8 +2015,8 @@ no_early_rx:
2016 skb->protocol = eth_type_trans (skb, dev); 2015 skb->protocol = eth_type_trans (skb, dev);
2017 2016
2018 dev->last_rx = jiffies; 2017 dev->last_rx = jiffies;
2019 tp->stats.rx_bytes += pkt_size; 2018 dev->stats.rx_bytes += pkt_size;
2020 tp->stats.rx_packets++; 2019 dev->stats.rx_packets++;
2021 2020
2022 netif_receive_skb (skb); 2021 netif_receive_skb (skb);
2023 } else { 2022 } else {
@@ -2025,7 +2024,7 @@ no_early_rx:
2025 printk (KERN_WARNING 2024 printk (KERN_WARNING
2026 "%s: Memory squeeze, dropping packet.\n", 2025 "%s: Memory squeeze, dropping packet.\n",
2027 dev->name); 2026 dev->name);
2028 tp->stats.rx_dropped++; 2027 dev->stats.rx_dropped++;
2029 } 2028 }
2030 received++; 2029 received++;
2031 2030
@@ -2072,7 +2071,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
2072 assert (ioaddr != NULL); 2071 assert (ioaddr != NULL);
2073 2072
2074 /* Update the error count. */ 2073 /* Update the error count. */
2075 tp->stats.rx_missed_errors += RTL_R32 (RxMissed); 2074 dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
2076 RTL_W32 (RxMissed, 0); 2075 RTL_W32 (RxMissed, 0);
2077 2076
2078 if ((status & RxUnderrun) && link_changed && 2077 if ((status & RxUnderrun) && link_changed &&
@@ -2082,12 +2081,12 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
2082 } 2081 }
2083 2082
2084 if (status & (RxUnderrun | RxErr)) 2083 if (status & (RxUnderrun | RxErr))
2085 tp->stats.rx_errors++; 2084 dev->stats.rx_errors++;
2086 2085
2087 if (status & PCSTimeout) 2086 if (status & PCSTimeout)
2088 tp->stats.rx_length_errors++; 2087 dev->stats.rx_length_errors++;
2089 if (status & RxUnderrun) 2088 if (status & RxUnderrun)
2090 tp->stats.rx_fifo_errors++; 2089 dev->stats.rx_fifo_errors++;
2091 if (status & PCIErr) { 2090 if (status & PCIErr) {
2092 u16 pci_cmd_status; 2091 u16 pci_cmd_status;
2093 pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); 2092 pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
@@ -2227,7 +2226,7 @@ static int rtl8139_close (struct net_device *dev)
2227 RTL_W16 (IntrMask, 0); 2226 RTL_W16 (IntrMask, 0);
2228 2227
2229 /* Update the error counts. */ 2228 /* Update the error counts. */
2230 tp->stats.rx_missed_errors += RTL_R32 (RxMissed); 2229 dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
2231 RTL_W32 (RxMissed, 0); 2230 RTL_W32 (RxMissed, 0);
2232 2231
2233 spin_unlock_irqrestore (&tp->lock, flags); 2232 spin_unlock_irqrestore (&tp->lock, flags);
@@ -2472,12 +2471,12 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
2472 2471
2473 if (netif_running(dev)) { 2472 if (netif_running(dev)) {
2474 spin_lock_irqsave (&tp->lock, flags); 2473 spin_lock_irqsave (&tp->lock, flags);
2475 tp->stats.rx_missed_errors += RTL_R32 (RxMissed); 2474 dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
2476 RTL_W32 (RxMissed, 0); 2475 RTL_W32 (RxMissed, 0);
2477 spin_unlock_irqrestore (&tp->lock, flags); 2476 spin_unlock_irqrestore (&tp->lock, flags);
2478 } 2477 }
2479 2478
2480 return &tp->stats; 2479 return &dev->stats;
2481} 2480}
2482 2481
2483/* Set or clear the multicast filter for this adaptor. 2482/* Set or clear the multicast filter for this adaptor.
@@ -2561,7 +2560,7 @@ static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
2561 RTL_W8 (ChipCmd, 0); 2560 RTL_W8 (ChipCmd, 0);
2562 2561
2563 /* Update the error counts. */ 2562 /* Update the error counts. */
2564 tp->stats.rx_missed_errors += RTL_R32 (RxMissed); 2563 dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
2565 RTL_W32 (RxMissed, 0); 2564 RTL_W32 (RxMissed, 0);
2566 2565
2567 spin_unlock_irqrestore (&tp->lock, flags); 2566 spin_unlock_irqrestore (&tp->lock, flags);
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index 04ddec0f4c61..cf020d45aea6 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -69,7 +69,6 @@ struct ei_device {
69 unsigned char reg0; /* Register '0' in a WD8013 */ 69 unsigned char reg0; /* Register '0' in a WD8013 */
70 unsigned char reg5; /* Register '5' in a WD8013 */ 70 unsigned char reg5; /* Register '5' in a WD8013 */
71 unsigned char saved_irq; /* Original dev->irq value. */ 71 unsigned char saved_irq; /* Original dev->irq value. */
72 struct net_device_stats stat; /* The new statistics table. */
73 u32 *reg_offset; /* Register mapping table */ 72 u32 *reg_offset; /* Register mapping table */
74 spinlock_t page_lock; /* Page register locks */ 73 spinlock_t page_lock; /* Page register locks */
75 unsigned long priv; /* Private field to store bus IDs etc. */ 74 unsigned long priv; /* Private field to store bus IDs etc. */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f4182cfffe9d..20b5367f7e0b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -524,6 +524,18 @@ config STNIC
524 524
525 If unsure, say N. 525 If unsure, say N.
526 526
527config SH_ETH
528 tristate "Renesas SuperH Ethernet support"
529 depends on SUPERH && \
530 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712)
531 select CRC32
532 select MII
533 select MDIO_BITBANG
534 select PHYLIB
535 help
536 Renesas SuperH Ethernet device driver.
537 This driver support SH7710 and SH7712.
538
527config SUNLANCE 539config SUNLANCE
528 tristate "Sun LANCE support" 540 tristate "Sun LANCE support"
529 depends on SBUS 541 depends on SBUS
@@ -955,7 +967,7 @@ config SMC911X
955 tristate "SMSC LAN911[5678] support" 967 tristate "SMSC LAN911[5678] support"
956 select CRC32 968 select CRC32
957 select MII 969 select MII
958 depends on ARCH_PXA || SH_MAGIC_PANEL_R2 970 depends on ARCH_PXA || SUPERH
959 help 971 help
960 This is a driver for SMSC's LAN911x series of Ethernet chipsets 972 This is a driver for SMSC's LAN911x series of Ethernet chipsets
961 including the new LAN9115, LAN9116, LAN9117, and LAN9118. 973 including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1670,7 +1682,7 @@ config SUNDANCE_MMIO
1670 1682
1671config TLAN 1683config TLAN
1672 tristate "TI ThunderLAN support" 1684 tristate "TI ThunderLAN support"
1673 depends on NET_PCI && (PCI || EISA) && !64BIT 1685 depends on NET_PCI && (PCI || EISA)
1674 ---help--- 1686 ---help---
1675 If you have a PCI Ethernet network card based on the ThunderLAN chip 1687 If you have a PCI Ethernet network card based on the ThunderLAN chip
1676 which is supported by this driver, say Y and read the 1688 which is supported by this driver, say Y and read the
@@ -2228,6 +2240,7 @@ config VIA_VELOCITY
2228config TIGON3 2240config TIGON3
2229 tristate "Broadcom Tigon3 support" 2241 tristate "Broadcom Tigon3 support"
2230 depends on PCI 2242 depends on PCI
2243 select PHYLIB
2231 help 2244 help
2232 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 2245 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
2233 2246
@@ -2283,6 +2296,19 @@ config GELIC_WIRELESS
2283 the driver automatically distinguishes the models, you can 2296 the driver automatically distinguishes the models, you can
2284 safely enable this option even if you have a wireless-less model. 2297 safely enable this option even if you have a wireless-less model.
2285 2298
2299config GELIC_WIRELESS_OLD_PSK_INTERFACE
2300 bool "PS3 Wireless private PSK interface (OBSOLETE)"
2301 depends on GELIC_WIRELESS
2302 help
2303 This option retains the obsolete private interface to pass
2304 the PSK from user space programs to the driver. The PSK
2305 stands for 'Pre Shared Key' and is used for WPA[2]-PSK
2306 (WPA-Personal) environment.
2307 If WPA[2]-PSK is used and you need to use old programs that
2308 support only this old interface, say Y. Otherwise N.
2309
2310 If unsure, say N.
2311
2286config GIANFAR 2312config GIANFAR
2287 tristate "Gianfar Ethernet" 2313 tristate "Gianfar Ethernet"
2288 depends on FSL_SOC 2314 depends on FSL_SOC
@@ -2407,8 +2433,9 @@ config CHELSIO_T1_NAPI
2407 2433
2408config CHELSIO_T3 2434config CHELSIO_T3
2409 tristate "Chelsio Communications T3 10Gb Ethernet support" 2435 tristate "Chelsio Communications T3 10Gb Ethernet support"
2410 depends on PCI 2436 depends on PCI && INET
2411 select FW_LOADER 2437 select FW_LOADER
2438 select INET_LRO
2412 help 2439 help
2413 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet 2440 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2414 adapters. 2441 adapters.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index dcbfe8421154..c96fe2036800 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_VIA_RHINE) += via-rhine.o
80obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 80obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
81obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 81obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
82obj-$(CONFIG_RIONET) += rionet.o 82obj-$(CONFIG_RIONET) += rionet.o
83obj-$(CONFIG_SH_ETH) += sh_eth.o
83 84
84# 85#
85# end link order section 86# end link order section
@@ -236,6 +237,7 @@ obj-$(CONFIG_USB_CATC) += usb/
236obj-$(CONFIG_USB_KAWETH) += usb/ 237obj-$(CONFIG_USB_KAWETH) += usb/
237obj-$(CONFIG_USB_PEGASUS) += usb/ 238obj-$(CONFIG_USB_PEGASUS) += usb/
238obj-$(CONFIG_USB_RTL8150) += usb/ 239obj-$(CONFIG_USB_RTL8150) += usb/
240obj-$(CONFIG_USB_HSO) += usb/
239obj-$(CONFIG_USB_USBNET) += usb/ 241obj-$(CONFIG_USB_USBNET) += usb/
240obj-$(CONFIG_USB_ZD1201) += usb/ 242obj-$(CONFIG_USB_ZD1201) += usb/
241 243
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 6c5719ae8cca..9c0837435b68 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -475,16 +475,12 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id)
475 return IRQ_HANDLED; 475 return IRQ_HANDLED;
476} 476}
477 477
478struct net_device *last_dev;
479
480static int lance_open (struct net_device *dev) 478static int lance_open (struct net_device *dev)
481{ 479{
482 struct lance_private *lp = netdev_priv(dev); 480 struct lance_private *lp = netdev_priv(dev);
483 volatile struct lance_regs *ll = lp->ll; 481 volatile struct lance_regs *ll = lp->ll;
484 int ret; 482 int ret;
485 483
486 last_dev = dev;
487
488 /* Stop the Lance */ 484 /* Stop the Lance */
489 ll->rap = LE_CSR0; 485 ll->rap = LE_CSR0;
490 ll->rdp = LE_C0_STOP; 486 ll->rdp = LE_C0_STOP;
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 6c192650d349..e4483de84e7f 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1457,11 +1457,6 @@ static int __devinit ace_init(struct net_device *dev)
1457 ace_set_txprd(regs, ap, 0); 1457 ace_set_txprd(regs, ap, 0);
1458 writel(0, &regs->RxRetCsm); 1458 writel(0, &regs->RxRetCsm);
1459 1459
1460 /*
1461 * Zero the stats before starting the interface
1462 */
1463 memset(&ap->stats, 0, sizeof(ap->stats));
1464
1465 /* 1460 /*
1466 * Enable DMA engine now. 1461 * Enable DMA engine now.
1467 * If we do this sooner, Mckinley box pukes. 1462 * If we do this sooner, Mckinley box pukes.
@@ -2041,8 +2036,8 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2041 netif_rx(skb); 2036 netif_rx(skb);
2042 2037
2043 dev->last_rx = jiffies; 2038 dev->last_rx = jiffies;
2044 ap->stats.rx_packets++; 2039 dev->stats.rx_packets++;
2045 ap->stats.rx_bytes += retdesc->size; 2040 dev->stats.rx_bytes += retdesc->size;
2046 2041
2047 idx = (idx + 1) % RX_RETURN_RING_ENTRIES; 2042 idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
2048 } 2043 }
@@ -2090,8 +2085,8 @@ static inline void ace_tx_int(struct net_device *dev,
2090 } 2085 }
2091 2086
2092 if (skb) { 2087 if (skb) {
2093 ap->stats.tx_packets++; 2088 dev->stats.tx_packets++;
2094 ap->stats.tx_bytes += skb->len; 2089 dev->stats.tx_bytes += skb->len;
2095 dev_kfree_skb_irq(skb); 2090 dev_kfree_skb_irq(skb);
2096 info->skb = NULL; 2091 info->skb = NULL;
2097 } 2092 }
@@ -2863,11 +2858,11 @@ static struct net_device_stats *ace_get_stats(struct net_device *dev)
2863 struct ace_mac_stats __iomem *mac_stats = 2858 struct ace_mac_stats __iomem *mac_stats =
2864 (struct ace_mac_stats __iomem *)ap->regs->Stats; 2859 (struct ace_mac_stats __iomem *)ap->regs->Stats;
2865 2860
2866 ap->stats.rx_missed_errors = readl(&mac_stats->drop_space); 2861 dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
2867 ap->stats.multicast = readl(&mac_stats->kept_mc); 2862 dev->stats.multicast = readl(&mac_stats->kept_mc);
2868 ap->stats.collisions = readl(&mac_stats->coll); 2863 dev->stats.collisions = readl(&mac_stats->coll);
2869 2864
2870 return &ap->stats; 2865 return &dev->stats;
2871} 2866}
2872 2867
2873 2868
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 60ed1837fa8f..4487f32759a4 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -693,7 +693,6 @@ struct ace_private
693 __attribute__ ((aligned (SMP_CACHE_BYTES))); 693 __attribute__ ((aligned (SMP_CACHE_BYTES)));
694 u32 last_tx, last_std_rx, last_mini_rx; 694 u32 last_tx, last_std_rx, last_mini_rx;
695#endif 695#endif
696 struct net_device_stats stats;
697 int pci_using_dac; 696 int pci_using_dac;
698}; 697};
699 698
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 4cceaac8863a..0860cc280b01 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -243,7 +243,7 @@ struct lance_private {
243 243
244/* Possible memory/IO addresses for probing */ 244/* Possible memory/IO addresses for probing */
245 245
246struct lance_addr { 246static struct lance_addr {
247 unsigned long memaddr; 247 unsigned long memaddr;
248 unsigned long ioaddr; 248 unsigned long ioaddr;
249 int slow_flag; 249 int slow_flag;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 99e0b4cdc56f..919ffb9bfa4e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1860,7 +1860,8 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1860 1860
1861 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); 1861 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1862 1862
1863 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); 1863 skb = netdev_alloc_skb(adapter->netdev,
1864 adapter->rx_buffer_len + NET_IP_ALIGN);
1864 if (unlikely(!skb)) { 1865 if (unlikely(!skb)) {
1865 /* Better luck next round */ 1866 /* Better luck next round */
1866 adapter->net_stats.rx_dropped++; 1867 adapter->net_stats.rx_dropped++;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 367b6d462708..2c52d2c7c495 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56 56
57#define DRV_MODULE_NAME "bnx2" 57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": " 58#define PFX DRV_MODULE_NAME ": "
59#define DRV_MODULE_VERSION "1.7.5" 59#define DRV_MODULE_VERSION "1.7.6"
60#define DRV_MODULE_RELDATE "April 29, 2008" 60#define DRV_MODULE_RELDATE "May 16, 2008"
61 61
62#define RUN_AT(x) (jiffies + (x)) 62#define RUN_AT(x) (jiffies + (x))
63 63
@@ -1875,7 +1875,7 @@ bnx2_setup_phy(struct bnx2 *bp, u8 port)
1875} 1875}
1876 1876
1877static int 1877static int
1878bnx2_init_5709s_phy(struct bnx2 *bp) 1878bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1879{ 1879{
1880 u32 val; 1880 u32 val;
1881 1881
@@ -1890,7 +1890,8 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
1890 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); 1890 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1891 1891
1892 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1892 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1893 bnx2_reset_phy(bp); 1893 if (reset_phy)
1894 bnx2_reset_phy(bp);
1894 1895
1895 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); 1896 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1896 1897
@@ -1924,11 +1925,12 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
1924} 1925}
1925 1926
1926static int 1927static int
1927bnx2_init_5708s_phy(struct bnx2 *bp) 1928bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
1928{ 1929{
1929 u32 val; 1930 u32 val;
1930 1931
1931 bnx2_reset_phy(bp); 1932 if (reset_phy)
1933 bnx2_reset_phy(bp);
1932 1934
1933 bp->mii_up1 = BCM5708S_UP1; 1935 bp->mii_up1 = BCM5708S_UP1;
1934 1936
@@ -1981,9 +1983,10 @@ bnx2_init_5708s_phy(struct bnx2 *bp)
1981} 1983}
1982 1984
1983static int 1985static int
1984bnx2_init_5706s_phy(struct bnx2 *bp) 1986bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
1985{ 1987{
1986 bnx2_reset_phy(bp); 1988 if (reset_phy)
1989 bnx2_reset_phy(bp);
1987 1990
1988 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; 1991 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1989 1992
@@ -2018,11 +2021,12 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
2018} 2021}
2019 2022
2020static int 2023static int
2021bnx2_init_copper_phy(struct bnx2 *bp) 2024bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2022{ 2025{
2023 u32 val; 2026 u32 val;
2024 2027
2025 bnx2_reset_phy(bp); 2028 if (reset_phy)
2029 bnx2_reset_phy(bp);
2026 2030
2027 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) { 2031 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2028 bnx2_write_phy(bp, 0x18, 0x0c00); 2032 bnx2_write_phy(bp, 0x18, 0x0c00);
@@ -2070,7 +2074,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
2070 2074
2071 2075
2072static int 2076static int
2073bnx2_init_phy(struct bnx2 *bp) 2077bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2074{ 2078{
2075 u32 val; 2079 u32 val;
2076 int rc = 0; 2080 int rc = 0;
@@ -2096,14 +2100,14 @@ bnx2_init_phy(struct bnx2 *bp)
2096 2100
2097 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 2101 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2098 if (CHIP_NUM(bp) == CHIP_NUM_5706) 2102 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2099 rc = bnx2_init_5706s_phy(bp); 2103 rc = bnx2_init_5706s_phy(bp, reset_phy);
2100 else if (CHIP_NUM(bp) == CHIP_NUM_5708) 2104 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2101 rc = bnx2_init_5708s_phy(bp); 2105 rc = bnx2_init_5708s_phy(bp, reset_phy);
2102 else if (CHIP_NUM(bp) == CHIP_NUM_5709) 2106 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2103 rc = bnx2_init_5709s_phy(bp); 2107 rc = bnx2_init_5709s_phy(bp, reset_phy);
2104 } 2108 }
2105 else { 2109 else {
2106 rc = bnx2_init_copper_phy(bp); 2110 rc = bnx2_init_copper_phy(bp, reset_phy);
2107 } 2111 }
2108 2112
2109setup_phy: 2113setup_phy:
@@ -2620,7 +2624,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2620 2624
2621 pci_dma_sync_single_for_device(bp->pdev, 2625 pci_dma_sync_single_for_device(bp->pdev,
2622 pci_unmap_addr(cons_rx_buf, mapping), 2626 pci_unmap_addr(cons_rx_buf, mapping),
2623 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2627 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2624 2628
2625 bnapi->rx_prod_bseq += bp->rx_buf_use_size; 2629 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2626 2630
@@ -2658,7 +2662,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2658 return err; 2662 return err;
2659 } 2663 }
2660 2664
2661 skb_reserve(skb, bp->rx_offset); 2665 skb_reserve(skb, BNX2_RX_OFFSET);
2662 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size, 2666 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2663 PCI_DMA_FROMDEVICE); 2667 PCI_DMA_FROMDEVICE);
2664 2668
@@ -2773,7 +2777,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2773 dma_addr = pci_unmap_addr(rx_buf, mapping); 2777 dma_addr = pci_unmap_addr(rx_buf, mapping);
2774 2778
2775 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 2779 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2776 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2780 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2781 PCI_DMA_FROMDEVICE);
2777 2782
2778 rx_hdr = (struct l2_fhdr *) skb->data; 2783 rx_hdr = (struct l2_fhdr *) skb->data;
2779 len = rx_hdr->l2_fhdr_pkt_len; 2784 len = rx_hdr->l2_fhdr_pkt_len;
@@ -2811,7 +2816,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2811 } 2816 }
2812 2817
2813 /* aligned copy */ 2818 /* aligned copy */
2814 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, 2819 skb_copy_from_linear_data_offset(skb,
2820 BNX2_RX_OFFSET - 2,
2815 new_skb->data, len + 2); 2821 new_skb->data, len + 2);
2816 skb_reserve(new_skb, 2); 2822 skb_reserve(new_skb, 2);
2817 skb_put(new_skb, len); 2823 skb_put(new_skb, len);
@@ -3213,7 +3219,7 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3213} 3219}
3214 3220
3215static int 3221static int
3216load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) 3222load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3217{ 3223{
3218 u32 offset; 3224 u32 offset;
3219 u32 val; 3225 u32 val;
@@ -3297,7 +3303,6 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3297static int 3303static int
3298bnx2_init_cpus(struct bnx2 *bp) 3304bnx2_init_cpus(struct bnx2 *bp)
3299{ 3305{
3300 struct cpu_reg cpu_reg;
3301 struct fw_info *fw; 3306 struct fw_info *fw;
3302 int rc, rv2p_len; 3307 int rc, rv2p_len;
3303 void *text, *rv2p; 3308 void *text, *rv2p;
@@ -3333,122 +3338,57 @@ bnx2_init_cpus(struct bnx2 *bp)
3333 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2); 3338 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3334 3339
3335 /* Initialize the RX Processor. */ 3340 /* Initialize the RX Processor. */
3336 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3337 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3338 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3339 cpu_reg.state = BNX2_RXP_CPU_STATE;
3340 cpu_reg.state_value_clear = 0xffffff;
3341 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3342 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3343 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3344 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3345 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3346 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3347 cpu_reg.mips_view_base = 0x8000000;
3348
3349 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3341 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3350 fw = &bnx2_rxp_fw_09; 3342 fw = &bnx2_rxp_fw_09;
3351 else 3343 else
3352 fw = &bnx2_rxp_fw_06; 3344 fw = &bnx2_rxp_fw_06;
3353 3345
3354 fw->text = text; 3346 fw->text = text;
3355 rc = load_cpu_fw(bp, &cpu_reg, fw); 3347 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3356 if (rc) 3348 if (rc)
3357 goto init_cpu_err; 3349 goto init_cpu_err;
3358 3350
3359 /* Initialize the TX Processor. */ 3351 /* Initialize the TX Processor. */
3360 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3361 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3362 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3363 cpu_reg.state = BNX2_TXP_CPU_STATE;
3364 cpu_reg.state_value_clear = 0xffffff;
3365 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3366 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3367 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3368 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3369 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3370 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3371 cpu_reg.mips_view_base = 0x8000000;
3372
3373 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3352 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3374 fw = &bnx2_txp_fw_09; 3353 fw = &bnx2_txp_fw_09;
3375 else 3354 else
3376 fw = &bnx2_txp_fw_06; 3355 fw = &bnx2_txp_fw_06;
3377 3356
3378 fw->text = text; 3357 fw->text = text;
3379 rc = load_cpu_fw(bp, &cpu_reg, fw); 3358 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3380 if (rc) 3359 if (rc)
3381 goto init_cpu_err; 3360 goto init_cpu_err;
3382 3361
3383 /* Initialize the TX Patch-up Processor. */ 3362 /* Initialize the TX Patch-up Processor. */
3384 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3385 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3386 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3387 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3388 cpu_reg.state_value_clear = 0xffffff;
3389 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3390 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3391 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3392 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3393 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3394 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3395 cpu_reg.mips_view_base = 0x8000000;
3396
3397 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3363 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3398 fw = &bnx2_tpat_fw_09; 3364 fw = &bnx2_tpat_fw_09;
3399 else 3365 else
3400 fw = &bnx2_tpat_fw_06; 3366 fw = &bnx2_tpat_fw_06;
3401 3367
3402 fw->text = text; 3368 fw->text = text;
3403 rc = load_cpu_fw(bp, &cpu_reg, fw); 3369 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3404 if (rc) 3370 if (rc)
3405 goto init_cpu_err; 3371 goto init_cpu_err;
3406 3372
3407 /* Initialize the Completion Processor. */ 3373 /* Initialize the Completion Processor. */
3408 cpu_reg.mode = BNX2_COM_CPU_MODE;
3409 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3410 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3411 cpu_reg.state = BNX2_COM_CPU_STATE;
3412 cpu_reg.state_value_clear = 0xffffff;
3413 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3414 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3415 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3416 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3417 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3418 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3419 cpu_reg.mips_view_base = 0x8000000;
3420
3421 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3374 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3422 fw = &bnx2_com_fw_09; 3375 fw = &bnx2_com_fw_09;
3423 else 3376 else
3424 fw = &bnx2_com_fw_06; 3377 fw = &bnx2_com_fw_06;
3425 3378
3426 fw->text = text; 3379 fw->text = text;
3427 rc = load_cpu_fw(bp, &cpu_reg, fw); 3380 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3428 if (rc) 3381 if (rc)
3429 goto init_cpu_err; 3382 goto init_cpu_err;
3430 3383
3431 /* Initialize the Command Processor. */ 3384 /* Initialize the Command Processor. */
3432 cpu_reg.mode = BNX2_CP_CPU_MODE;
3433 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3434 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3435 cpu_reg.state = BNX2_CP_CPU_STATE;
3436 cpu_reg.state_value_clear = 0xffffff;
3437 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3438 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3439 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3440 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3441 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3442 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3443 cpu_reg.mips_view_base = 0x8000000;
3444
3445 if (CHIP_NUM(bp) == CHIP_NUM_5709) 3385 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446 fw = &bnx2_cp_fw_09; 3386 fw = &bnx2_cp_fw_09;
3447 else 3387 else
3448 fw = &bnx2_cp_fw_06; 3388 fw = &bnx2_cp_fw_06;
3449 3389
3450 fw->text = text; 3390 fw->text = text;
3451 rc = load_cpu_fw(bp, &cpu_reg, fw); 3391 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3452 3392
3453init_cpu_err: 3393init_cpu_err:
3454 vfree(text); 3394 vfree(text);
@@ -4750,12 +4690,12 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4750 u32 rx_size, rx_space, jumbo_size; 4690 u32 rx_size, rx_space, jumbo_size;
4751 4691
4752 /* 8 for CRC and VLAN */ 4692 /* 8 for CRC and VLAN */
4753 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8; 4693 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4754 4694
4755 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + 4695 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4756 sizeof(struct skb_shared_info); 4696 sizeof(struct skb_shared_info);
4757 4697
4758 bp->rx_copy_thresh = RX_COPY_THRESH; 4698 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4759 bp->rx_pg_ring_size = 0; 4699 bp->rx_pg_ring_size = 0;
4760 bp->rx_max_pg_ring = 0; 4700 bp->rx_max_pg_ring = 0;
4761 bp->rx_max_pg_ring_idx = 0; 4701 bp->rx_max_pg_ring_idx = 0;
@@ -4770,14 +4710,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4770 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, 4710 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4771 MAX_RX_PG_RINGS); 4711 MAX_RX_PG_RINGS);
4772 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; 4712 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4773 rx_size = RX_COPY_THRESH + bp->rx_offset; 4713 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4774 bp->rx_copy_thresh = 0; 4714 bp->rx_copy_thresh = 0;
4775 } 4715 }
4776 4716
4777 bp->rx_buf_use_size = rx_size; 4717 bp->rx_buf_use_size = rx_size;
4778 /* hw alignment */ 4718 /* hw alignment */
4779 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN; 4719 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4780 bp->rx_jumbo_thresh = rx_size - bp->rx_offset; 4720 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4781 bp->rx_ring_size = size; 4721 bp->rx_ring_size = size;
4782 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); 4722 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4783 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; 4723 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
@@ -4873,7 +4813,7 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4873} 4813}
4874 4814
4875static int 4815static int
4876bnx2_init_nic(struct bnx2 *bp) 4816bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4877{ 4817{
4878 int rc; 4818 int rc;
4879 4819
@@ -4881,7 +4821,7 @@ bnx2_init_nic(struct bnx2 *bp)
4881 return rc; 4821 return rc;
4882 4822
4883 spin_lock_bh(&bp->phy_lock); 4823 spin_lock_bh(&bp->phy_lock);
4884 bnx2_init_phy(bp); 4824 bnx2_init_phy(bp, reset_phy);
4885 bnx2_set_link(bp); 4825 bnx2_set_link(bp);
4886 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 4826 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4887 bnx2_remote_phy_event(bp); 4827 bnx2_remote_phy_event(bp);
@@ -5221,7 +5161,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5221 rx_skb = rx_buf->skb; 5161 rx_skb = rx_buf->skb;
5222 5162
5223 rx_hdr = (struct l2_fhdr *) rx_skb->data; 5163 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5224 skb_reserve(rx_skb, bp->rx_offset); 5164 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5225 5165
5226 pci_dma_sync_single_for_cpu(bp->pdev, 5166 pci_dma_sync_single_for_cpu(bp->pdev,
5227 pci_unmap_addr(rx_buf, mapping), 5167 pci_unmap_addr(rx_buf, mapping),
@@ -5269,7 +5209,7 @@ bnx2_test_loopback(struct bnx2 *bp)
5269 5209
5270 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 5210 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5271 spin_lock_bh(&bp->phy_lock); 5211 spin_lock_bh(&bp->phy_lock);
5272 bnx2_init_phy(bp); 5212 bnx2_init_phy(bp, 1);
5273 spin_unlock_bh(&bp->phy_lock); 5213 spin_unlock_bh(&bp->phy_lock);
5274 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) 5214 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5275 rc |= BNX2_MAC_LOOPBACK_FAILED; 5215 rc |= BNX2_MAC_LOOPBACK_FAILED;
@@ -5659,7 +5599,7 @@ bnx2_open(struct net_device *dev)
5659 return rc; 5599 return rc;
5660 } 5600 }
5661 5601
5662 rc = bnx2_init_nic(bp); 5602 rc = bnx2_init_nic(bp, 1);
5663 5603
5664 if (rc) { 5604 if (rc) {
5665 bnx2_napi_disable(bp); 5605 bnx2_napi_disable(bp);
@@ -5691,7 +5631,7 @@ bnx2_open(struct net_device *dev)
5691 5631
5692 bnx2_setup_int_mode(bp, 1); 5632 bnx2_setup_int_mode(bp, 1);
5693 5633
5694 rc = bnx2_init_nic(bp); 5634 rc = bnx2_init_nic(bp, 0);
5695 5635
5696 if (!rc) 5636 if (!rc)
5697 rc = bnx2_request_irq(bp); 5637 rc = bnx2_request_irq(bp);
@@ -5726,7 +5666,7 @@ bnx2_reset_task(struct work_struct *work)
5726 5666
5727 bnx2_netif_stop(bp); 5667 bnx2_netif_stop(bp);
5728 5668
5729 bnx2_init_nic(bp); 5669 bnx2_init_nic(bp, 1);
5730 5670
5731 atomic_set(&bp->intr_sem, 1); 5671 atomic_set(&bp->intr_sem, 1);
5732 bnx2_netif_start(bp); 5672 bnx2_netif_start(bp);
@@ -6414,7 +6354,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6414 6354
6415 if (netif_running(bp->dev)) { 6355 if (netif_running(bp->dev)) {
6416 bnx2_netif_stop(bp); 6356 bnx2_netif_stop(bp);
6417 bnx2_init_nic(bp); 6357 bnx2_init_nic(bp, 0);
6418 bnx2_netif_start(bp); 6358 bnx2_netif_start(bp);
6419 } 6359 }
6420 6360
@@ -6457,7 +6397,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6457 rc = bnx2_alloc_mem(bp); 6397 rc = bnx2_alloc_mem(bp);
6458 if (rc) 6398 if (rc)
6459 return rc; 6399 return rc;
6460 bnx2_init_nic(bp); 6400 bnx2_init_nic(bp, 0);
6461 bnx2_netif_start(bp); 6401 bnx2_netif_start(bp);
6462 } 6402 }
6463 return 0; 6403 return 0;
@@ -6725,7 +6665,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6725 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 6665 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6726 } 6666 }
6727 else { 6667 else {
6728 bnx2_init_nic(bp); 6668 bnx2_init_nic(bp, 1);
6729 bnx2_netif_start(bp); 6669 bnx2_netif_start(bp);
6730 } 6670 }
6731 6671
@@ -7108,6 +7048,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7108 } 7048 }
7109 7049
7110 pci_set_master(pdev); 7050 pci_set_master(pdev);
7051 pci_save_state(pdev);
7111 7052
7112 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 7053 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7113 if (bp->pm_cap == 0) { 7054 if (bp->pm_cap == 0) {
@@ -7294,8 +7235,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7294 bp->mac_addr[4] = (u8) (reg >> 8); 7235 bp->mac_addr[4] = (u8) (reg >> 8);
7295 bp->mac_addr[5] = (u8) reg; 7236 bp->mac_addr[5] = (u8) reg;
7296 7237
7297 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7298
7299 bp->tx_ring_size = MAX_TX_DESC_CNT; 7238 bp->tx_ring_size = MAX_TX_DESC_CNT;
7300 bnx2_set_rx_ring_size(bp, 255); 7239 bnx2_set_rx_ring_size(bp, 255);
7301 7240
@@ -7612,11 +7551,97 @@ bnx2_resume(struct pci_dev *pdev)
7612 7551
7613 bnx2_set_power_state(bp, PCI_D0); 7552 bnx2_set_power_state(bp, PCI_D0);
7614 netif_device_attach(dev); 7553 netif_device_attach(dev);
7615 bnx2_init_nic(bp); 7554 bnx2_init_nic(bp, 1);
7616 bnx2_netif_start(bp); 7555 bnx2_netif_start(bp);
7617 return 0; 7556 return 0;
7618} 7557}
7619 7558
7559/**
7560 * bnx2_io_error_detected - called when PCI error is detected
7561 * @pdev: Pointer to PCI device
7562 * @state: The current pci connection state
7563 *
7564 * This function is called after a PCI bus error affecting
7565 * this device has been detected.
7566 */
7567static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7568 pci_channel_state_t state)
7569{
7570 struct net_device *dev = pci_get_drvdata(pdev);
7571 struct bnx2 *bp = netdev_priv(dev);
7572
7573 rtnl_lock();
7574 netif_device_detach(dev);
7575
7576 if (netif_running(dev)) {
7577 bnx2_netif_stop(bp);
7578 del_timer_sync(&bp->timer);
7579 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7580 }
7581
7582 pci_disable_device(pdev);
7583 rtnl_unlock();
7584
7585 /* Request a slot slot reset. */
7586 return PCI_ERS_RESULT_NEED_RESET;
7587}
7588
7589/**
7590 * bnx2_io_slot_reset - called after the pci bus has been reset.
7591 * @pdev: Pointer to PCI device
7592 *
7593 * Restart the card from scratch, as if from a cold-boot.
7594 */
7595static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7596{
7597 struct net_device *dev = pci_get_drvdata(pdev);
7598 struct bnx2 *bp = netdev_priv(dev);
7599
7600 rtnl_lock();
7601 if (pci_enable_device(pdev)) {
7602 dev_err(&pdev->dev,
7603 "Cannot re-enable PCI device after reset.\n");
7604 rtnl_unlock();
7605 return PCI_ERS_RESULT_DISCONNECT;
7606 }
7607 pci_set_master(pdev);
7608 pci_restore_state(pdev);
7609
7610 if (netif_running(dev)) {
7611 bnx2_set_power_state(bp, PCI_D0);
7612 bnx2_init_nic(bp, 1);
7613 }
7614
7615 rtnl_unlock();
7616 return PCI_ERS_RESULT_RECOVERED;
7617}
7618
7619/**
7620 * bnx2_io_resume - called when traffic can start flowing again.
7621 * @pdev: Pointer to PCI device
7622 *
7623 * This callback is called when the error recovery driver tells us that
7624 * its OK to resume normal operation.
7625 */
7626static void bnx2_io_resume(struct pci_dev *pdev)
7627{
7628 struct net_device *dev = pci_get_drvdata(pdev);
7629 struct bnx2 *bp = netdev_priv(dev);
7630
7631 rtnl_lock();
7632 if (netif_running(dev))
7633 bnx2_netif_start(bp);
7634
7635 netif_device_attach(dev);
7636 rtnl_unlock();
7637}
7638
7639static struct pci_error_handlers bnx2_err_handler = {
7640 .error_detected = bnx2_io_error_detected,
7641 .slot_reset = bnx2_io_slot_reset,
7642 .resume = bnx2_io_resume,
7643};
7644
7620static struct pci_driver bnx2_pci_driver = { 7645static struct pci_driver bnx2_pci_driver = {
7621 .name = DRV_MODULE_NAME, 7646 .name = DRV_MODULE_NAME,
7622 .id_table = bnx2_pci_tbl, 7647 .id_table = bnx2_pci_tbl,
@@ -7624,6 +7649,7 @@ static struct pci_driver bnx2_pci_driver = {
7624 .remove = __devexit_p(bnx2_remove_one), 7649 .remove = __devexit_p(bnx2_remove_one),
7625 .suspend = bnx2_suspend, 7650 .suspend = bnx2_suspend,
7626 .resume = bnx2_resume, 7651 .resume = bnx2_resume,
7652 .err_handler = &bnx2_err_handler,
7627}; 7653};
7628 7654
7629static int __init bnx2_init(void) 7655static int __init bnx2_init(void)
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 2377cc13bf61..be7ccb5b77da 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -309,6 +309,7 @@ struct l2_fhdr {
309#endif 309#endif
310}; 310};
311 311
312#define BNX2_RX_OFFSET (sizeof(struct l2_fhdr) + 2)
312 313
313/* 314/*
314 * l2_context definition 315 * l2_context definition
@@ -6412,7 +6413,7 @@ struct l2_fhdr {
6412#define MAX_ETHERNET_PACKET_SIZE 1514 6413#define MAX_ETHERNET_PACKET_SIZE 1514
6413#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 6414#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014
6414 6415
6415#define RX_COPY_THRESH 128 6416#define BNX2_RX_COPY_THRESH 128
6416 6417
6417#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff 6418#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff
6418 6419
@@ -6627,7 +6628,6 @@ struct bnx2 {
6627 struct vlan_group *vlgrp; 6628 struct vlan_group *vlgrp;
6628#endif 6629#endif
6629 6630
6630 u32 rx_offset;
6631 u32 rx_buf_use_size; /* useable size */ 6631 u32 rx_buf_use_size; /* useable size */
6632 u32 rx_buf_size; /* with alignment */ 6632 u32 rx_buf_size; /* with alignment */
6633 u32 rx_copy_thresh; 6633 u32 rx_copy_thresh;
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h
index 3b839d4626fe..e4b1de435567 100644
--- a/drivers/net/bnx2_fw.h
+++ b/drivers/net/bnx2_fw.h
@@ -886,6 +886,23 @@ static struct fw_info bnx2_com_fw_06 = {
886 .rodata = bnx2_COM_b06FwRodata, 886 .rodata = bnx2_COM_b06FwRodata,
887}; 887};
888 888
889/* Initialized Values for the Completion Processor. */
890static const struct cpu_reg cpu_reg_com = {
891 .mode = BNX2_COM_CPU_MODE,
892 .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
893 .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
894 .state = BNX2_COM_CPU_STATE,
895 .state_value_clear = 0xffffff,
896 .gpr0 = BNX2_COM_CPU_REG_FILE,
897 .evmask = BNX2_COM_CPU_EVENT_MASK,
898 .pc = BNX2_COM_CPU_PROGRAM_COUNTER,
899 .inst = BNX2_COM_CPU_INSTRUCTION,
900 .bp = BNX2_COM_CPU_HW_BREAKPOINT,
901 .spad_base = BNX2_COM_SCRATCH,
902 .mips_view_base = 0x8000000,
903};
904
905
889static u8 bnx2_CP_b06FwText[] = { 906static u8 bnx2_CP_b06FwText[] = {
890 0x9d, 0xbc, 0x0d, 0x78, 0x13, 0xe7, 0x99, 0x2e, 0x7c, 0xcf, 0x48, 0xb2, 907 0x9d, 0xbc, 0x0d, 0x78, 0x13, 0xe7, 0x99, 0x2e, 0x7c, 0xcf, 0x48, 0xb2,
891 0x65, 0x5b, 0xb6, 0xc7, 0xb6, 0x0c, 0x22, 0x65, 0x41, 0x83, 0x47, 0x20, 908 0x65, 0x5b, 0xb6, 0xc7, 0xb6, 0x0c, 0x22, 0x65, 0x41, 0x83, 0x47, 0x20,
@@ -2167,6 +2184,22 @@ static struct fw_info bnx2_cp_fw_06 = {
2167 .rodata = bnx2_CP_b06FwRodata, 2184 .rodata = bnx2_CP_b06FwRodata,
2168}; 2185};
2169 2186
2187/* Initialized Values the Command Processor. */
2188static const struct cpu_reg cpu_reg_cp = {
2189 .mode = BNX2_CP_CPU_MODE,
2190 .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
2191 .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
2192 .state = BNX2_CP_CPU_STATE,
2193 .state_value_clear = 0xffffff,
2194 .gpr0 = BNX2_CP_CPU_REG_FILE,
2195 .evmask = BNX2_CP_CPU_EVENT_MASK,
2196 .pc = BNX2_CP_CPU_PROGRAM_COUNTER,
2197 .inst = BNX2_CP_CPU_INSTRUCTION,
2198 .bp = BNX2_CP_CPU_HW_BREAKPOINT,
2199 .spad_base = BNX2_CP_SCRATCH,
2200 .mips_view_base = 0x8000000,
2201};
2202
2170static u8 bnx2_RXP_b06FwText[] = { 2203static u8 bnx2_RXP_b06FwText[] = {
2171 0xec, 0x5b, 0x5d, 0x70, 0x5c, 0xd7, 0x5d, 0xff, 0xdf, 0xb3, 0x2b, 0x69, 2204 0xec, 0x5b, 0x5d, 0x70, 0x5c, 0xd7, 0x5d, 0xff, 0xdf, 0xb3, 0x2b, 0x69,
2172 0x2d, 0x4b, 0xf2, 0x95, 0xbc, 0x71, 0x56, 0xa9, 0x92, 0xec, 0x5a, 0x57, 2205 0x2d, 0x4b, 0xf2, 0x95, 0xbc, 0x71, 0x56, 0xa9, 0x92, 0xec, 0x5a, 0x57,
@@ -2946,6 +2979,22 @@ static struct fw_info bnx2_rxp_fw_06 = {
2946 .rodata = bnx2_RXP_b06FwRodata, 2979 .rodata = bnx2_RXP_b06FwRodata,
2947}; 2980};
2948 2981
2982/* Initialized Values for the RX Processor. */
2983static const struct cpu_reg cpu_reg_rxp = {
2984 .mode = BNX2_RXP_CPU_MODE,
2985 .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
2986 .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
2987 .state = BNX2_RXP_CPU_STATE,
2988 .state_value_clear = 0xffffff,
2989 .gpr0 = BNX2_RXP_CPU_REG_FILE,
2990 .evmask = BNX2_RXP_CPU_EVENT_MASK,
2991 .pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
2992 .inst = BNX2_RXP_CPU_INSTRUCTION,
2993 .bp = BNX2_RXP_CPU_HW_BREAKPOINT,
2994 .spad_base = BNX2_RXP_SCRATCH,
2995 .mips_view_base = 0x8000000,
2996};
2997
2949static u8 bnx2_rv2p_proc1[] = { 2998static u8 bnx2_rv2p_proc1[] = {
2950 /* Date: 12/07/2007 15:02 */ 2999 /* Date: 12/07/2007 15:02 */
2951 0xd5, 0x56, 0x41, 0x6b, 0x13, 0x51, 0x10, 0x9e, 0xdd, 0x6c, 0xbb, 0xdb, 3000 0xd5, 0x56, 0x41, 0x6b, 0x13, 0x51, 0x10, 0x9e, 0xdd, 0x6c, 0xbb, 0xdb,
@@ -3651,6 +3700,22 @@ static struct fw_info bnx2_tpat_fw_06 = {
3651 .rodata = bnx2_TPAT_b06FwRodata, 3700 .rodata = bnx2_TPAT_b06FwRodata,
3652}; 3701};
3653 3702
3703/* Initialized Values for the TX Patch-up Processor. */
3704static const struct cpu_reg cpu_reg_tpat = {
3705 .mode = BNX2_TPAT_CPU_MODE,
3706 .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
3707 .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
3708 .state = BNX2_TPAT_CPU_STATE,
3709 .state_value_clear = 0xffffff,
3710 .gpr0 = BNX2_TPAT_CPU_REG_FILE,
3711 .evmask = BNX2_TPAT_CPU_EVENT_MASK,
3712 .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
3713 .inst = BNX2_TPAT_CPU_INSTRUCTION,
3714 .bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
3715 .spad_base = BNX2_TPAT_SCRATCH,
3716 .mips_view_base = 0x8000000,
3717};
3718
3654static u8 bnx2_TXP_b06FwText[] = { 3719static u8 bnx2_TXP_b06FwText[] = {
3655 0xad, 0x7b, 0x7f, 0x70, 0x9b, 0x75, 0x7a, 0xe7, 0xe7, 0xd5, 0x0f, 0x5b, 3720 0xad, 0x7b, 0x7f, 0x70, 0x9b, 0x75, 0x7a, 0xe7, 0xe7, 0xd5, 0x0f, 0x5b,
3656 0xb2, 0x65, 0x59, 0x0e, 0x4a, 0x90, 0x77, 0xbd, 0x8d, 0x5e, 0xf4, 0xca, 3721 0xb2, 0x65, 0x59, 0x0e, 0x4a, 0x90, 0x77, 0xbd, 0x8d, 0x5e, 0xf4, 0xca,
@@ -4531,3 +4596,18 @@ static struct fw_info bnx2_txp_fw_06 = {
4531 .rodata = bnx2_TXP_b06FwRodata, 4596 .rodata = bnx2_TXP_b06FwRodata,
4532}; 4597};
4533 4598
4599/* Initialized Values for the TX Processor. */
4600static const struct cpu_reg cpu_reg_txp = {
4601 .mode = BNX2_TXP_CPU_MODE,
4602 .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
4603 .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
4604 .state = BNX2_TXP_CPU_STATE,
4605 .state_value_clear = 0xffffff,
4606 .gpr0 = BNX2_TXP_CPU_REG_FILE,
4607 .evmask = BNX2_TXP_CPU_EVENT_MASK,
4608 .pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
4609 .inst = BNX2_TXP_CPU_INSTRUCTION,
4610 .bp = BNX2_TXP_CPU_HW_BREAKPOINT,
4611 .spad_base = BNX2_TXP_SCRATCH,
4612 .mips_view_base = 0x8000000,
4613};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 50a40e433154..5b4af3cc2a44 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
88#define BOND_LINK_ARP_INTERV 0 88#define BOND_LINK_ARP_INTERV 0
89 89
90static int max_bonds = BOND_DEFAULT_MAX_BONDS; 90static int max_bonds = BOND_DEFAULT_MAX_BONDS;
91static int num_grat_arp = 1;
91static int miimon = BOND_LINK_MON_INTERV; 92static int miimon = BOND_LINK_MON_INTERV;
92static int updelay = 0; 93static int updelay = 0;
93static int downdelay = 0; 94static int downdelay = 0;
@@ -99,11 +100,13 @@ static char *xmit_hash_policy = NULL;
99static int arp_interval = BOND_LINK_ARP_INTERV; 100static int arp_interval = BOND_LINK_ARP_INTERV;
100static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, }; 101static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
101static char *arp_validate = NULL; 102static char *arp_validate = NULL;
102static int fail_over_mac = 0; 103static char *fail_over_mac = NULL;
103struct bond_params bonding_defaults; 104struct bond_params bonding_defaults;
104 105
105module_param(max_bonds, int, 0); 106module_param(max_bonds, int, 0);
106MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 107MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
108module_param(num_grat_arp, int, 0644);
109MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
107module_param(miimon, int, 0); 110module_param(miimon, int, 0);
108MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); 111MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
109module_param(updelay, int, 0); 112module_param(updelay, int, 0);
@@ -133,8 +136,8 @@ module_param_array(arp_ip_target, charp, NULL, 0);
133MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); 136MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
134module_param(arp_validate, charp, 0); 137module_param(arp_validate, charp, 0);
135MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); 138MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
136module_param(fail_over_mac, int, 0); 139module_param(fail_over_mac, charp, 0);
137MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. 0 of off (default), 1 for on."); 140MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
138 141
139/*----------------------------- Global variables ----------------------------*/ 142/*----------------------------- Global variables ----------------------------*/
140 143
@@ -187,6 +190,13 @@ struct bond_parm_tbl arp_validate_tbl[] = {
187{ NULL, -1}, 190{ NULL, -1},
188}; 191};
189 192
193struct bond_parm_tbl fail_over_mac_tbl[] = {
194{ "none", BOND_FOM_NONE},
195{ "active", BOND_FOM_ACTIVE},
196{ "follow", BOND_FOM_FOLLOW},
197{ NULL, -1},
198};
199
190/*-------------------------- Forward declarations ---------------------------*/ 200/*-------------------------- Forward declarations ---------------------------*/
191 201
192static void bond_send_gratuitous_arp(struct bonding *bond); 202static void bond_send_gratuitous_arp(struct bonding *bond);
@@ -261,14 +271,14 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
261 */ 271 */
262static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) 272static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
263{ 273{
264 struct vlan_entry *vlan, *next; 274 struct vlan_entry *vlan;
265 int res = -ENODEV; 275 int res = -ENODEV;
266 276
267 dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); 277 dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
268 278
269 write_lock_bh(&bond->lock); 279 write_lock_bh(&bond->lock);
270 280
271 list_for_each_entry_safe(vlan, next, &bond->vlan_list, vlan_list) { 281 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
272 if (vlan->vlan_id == vlan_id) { 282 if (vlan->vlan_id == vlan_id) {
273 list_del(&vlan->vlan_list); 283 list_del(&vlan->vlan_list);
274 284
@@ -970,6 +980,82 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct
970 } 980 }
971} 981}
972 982
983/*
984 * bond_do_fail_over_mac
985 *
986 * Perform special MAC address swapping for fail_over_mac settings
987 *
988 * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
989 */
990static void bond_do_fail_over_mac(struct bonding *bond,
991 struct slave *new_active,
992 struct slave *old_active)
993{
994 u8 tmp_mac[ETH_ALEN];
995 struct sockaddr saddr;
996 int rv;
997
998 switch (bond->params.fail_over_mac) {
999 case BOND_FOM_ACTIVE:
1000 if (new_active)
1001 memcpy(bond->dev->dev_addr, new_active->dev->dev_addr,
1002 new_active->dev->addr_len);
1003 break;
1004 case BOND_FOM_FOLLOW:
1005 /*
1006 * if new_active && old_active, swap them
1007 * if just old_active, do nothing (going to no active slave)
1008 * if just new_active, set new_active to bond's MAC
1009 */
1010 if (!new_active)
1011 return;
1012
1013 write_unlock_bh(&bond->curr_slave_lock);
1014 read_unlock(&bond->lock);
1015
1016 if (old_active) {
1017 memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
1018 memcpy(saddr.sa_data, old_active->dev->dev_addr,
1019 ETH_ALEN);
1020 saddr.sa_family = new_active->dev->type;
1021 } else {
1022 memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
1023 saddr.sa_family = bond->dev->type;
1024 }
1025
1026 rv = dev_set_mac_address(new_active->dev, &saddr);
1027 if (rv) {
1028 printk(KERN_ERR DRV_NAME
1029 ": %s: Error %d setting MAC of slave %s\n",
1030 bond->dev->name, -rv, new_active->dev->name);
1031 goto out;
1032 }
1033
1034 if (!old_active)
1035 goto out;
1036
1037 memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
1038 saddr.sa_family = old_active->dev->type;
1039
1040 rv = dev_set_mac_address(old_active->dev, &saddr);
1041 if (rv)
1042 printk(KERN_ERR DRV_NAME
1043 ": %s: Error %d setting MAC of slave %s\n",
1044 bond->dev->name, -rv, new_active->dev->name);
1045out:
1046 read_lock(&bond->lock);
1047 write_lock_bh(&bond->curr_slave_lock);
1048 break;
1049 default:
1050 printk(KERN_ERR DRV_NAME
1051 ": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
1052 bond->dev->name, bond->params.fail_over_mac);
1053 break;
1054 }
1055
1056}
1057
1058
973/** 1059/**
974 * find_best_interface - select the best available slave to be the active one 1060 * find_best_interface - select the best available slave to be the active one
975 * @bond: our bonding struct 1061 * @bond: our bonding struct
@@ -1037,7 +1123,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1037 * because it is apparently the best available slave we have, even though its 1123 * because it is apparently the best available slave we have, even though its
1038 * updelay hasn't timed out yet. 1124 * updelay hasn't timed out yet.
1039 * 1125 *
1040 * Warning: Caller must hold curr_slave_lock for writing. 1126 * If new_active is not NULL, caller must hold bond->lock for read and
1127 * curr_slave_lock for write_bh.
1041 */ 1128 */
1042void bond_change_active_slave(struct bonding *bond, struct slave *new_active) 1129void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1043{ 1130{
@@ -1048,6 +1135,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1048 } 1135 }
1049 1136
1050 if (new_active) { 1137 if (new_active) {
1138 new_active->jiffies = jiffies;
1139
1051 if (new_active->link == BOND_LINK_BACK) { 1140 if (new_active->link == BOND_LINK_BACK) {
1052 if (USES_PRIMARY(bond->params.mode)) { 1141 if (USES_PRIMARY(bond->params.mode)) {
1053 printk(KERN_INFO DRV_NAME 1142 printk(KERN_INFO DRV_NAME
@@ -1059,7 +1148,6 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1059 1148
1060 new_active->delay = 0; 1149 new_active->delay = 0;
1061 new_active->link = BOND_LINK_UP; 1150 new_active->link = BOND_LINK_UP;
1062 new_active->jiffies = jiffies;
1063 1151
1064 if (bond->params.mode == BOND_MODE_8023AD) { 1152 if (bond->params.mode == BOND_MODE_8023AD) {
1065 bond_3ad_handle_link_change(new_active, BOND_LINK_UP); 1153 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -1103,20 +1191,21 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1103 bond_set_slave_active_flags(new_active); 1191 bond_set_slave_active_flags(new_active);
1104 } 1192 }
1105 1193
1106 /* when bonding does not set the slave MAC address, the bond MAC
1107 * address is the one of the active slave.
1108 */
1109 if (new_active && bond->params.fail_over_mac) 1194 if (new_active && bond->params.fail_over_mac)
1110 memcpy(bond->dev->dev_addr, new_active->dev->dev_addr, 1195 bond_do_fail_over_mac(bond, new_active, old_active);
1111 new_active->dev->addr_len); 1196
1197 bond->send_grat_arp = bond->params.num_grat_arp;
1112 if (bond->curr_active_slave && 1198 if (bond->curr_active_slave &&
1113 test_bit(__LINK_STATE_LINKWATCH_PENDING, 1199 test_bit(__LINK_STATE_LINKWATCH_PENDING,
1114 &bond->curr_active_slave->dev->state)) { 1200 &bond->curr_active_slave->dev->state)) {
1115 dprintk("delaying gratuitous arp on %s\n", 1201 dprintk("delaying gratuitous arp on %s\n",
1116 bond->curr_active_slave->dev->name); 1202 bond->curr_active_slave->dev->name);
1117 bond->send_grat_arp = 1; 1203 } else {
1118 } else 1204 if (bond->send_grat_arp > 0) {
1119 bond_send_gratuitous_arp(bond); 1205 bond_send_gratuitous_arp(bond);
1206 bond->send_grat_arp--;
1207 }
1208 }
1120 } 1209 }
1121} 1210}
1122 1211
@@ -1129,7 +1218,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1129 * - The primary_slave has got its link back. 1218 * - The primary_slave has got its link back.
1130 * - A slave has got its link back and there's no old curr_active_slave. 1219 * - A slave has got its link back and there's no old curr_active_slave.
1131 * 1220 *
1132 * Warning: Caller must hold curr_slave_lock for writing. 1221 * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
1133 */ 1222 */
1134void bond_select_active_slave(struct bonding *bond) 1223void bond_select_active_slave(struct bonding *bond)
1135{ 1224{
@@ -1376,14 +1465,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1376 printk(KERN_WARNING DRV_NAME 1465 printk(KERN_WARNING DRV_NAME
1377 ": %s: Warning: The first slave device " 1466 ": %s: Warning: The first slave device "
1378 "specified does not support setting the MAC " 1467 "specified does not support setting the MAC "
1379 "address. Enabling the fail_over_mac option.", 1468 "address. Setting fail_over_mac to active.",
1380 bond_dev->name); 1469 bond_dev->name);
1381 bond->params.fail_over_mac = 1; 1470 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1382 } else if (!bond->params.fail_over_mac) { 1471 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1383 printk(KERN_ERR DRV_NAME 1472 printk(KERN_ERR DRV_NAME
1384 ": %s: Error: The slave device specified " 1473 ": %s: Error: The slave device specified "
1385 "does not support setting the MAC address, " 1474 "does not support setting the MAC address, "
1386 "but fail_over_mac is not enabled.\n" 1475 "but fail_over_mac is not set to active.\n"
1387 , bond_dev->name); 1476 , bond_dev->name);
1388 res = -EOPNOTSUPP; 1477 res = -EOPNOTSUPP;
1389 goto err_undo_flags; 1478 goto err_undo_flags;
@@ -1490,6 +1579,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1490 1579
1491 bond_compute_features(bond); 1580 bond_compute_features(bond);
1492 1581
1582 write_unlock_bh(&bond->lock);
1583
1584 read_lock(&bond->lock);
1585
1493 new_slave->last_arp_rx = jiffies; 1586 new_slave->last_arp_rx = jiffies;
1494 1587
1495 if (bond->params.miimon && !bond->params.use_carrier) { 1588 if (bond->params.miimon && !bond->params.use_carrier) {
@@ -1566,6 +1659,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1566 } 1659 }
1567 } 1660 }
1568 1661
1662 write_lock_bh(&bond->curr_slave_lock);
1663
1569 switch (bond->params.mode) { 1664 switch (bond->params.mode) {
1570 case BOND_MODE_ACTIVEBACKUP: 1665 case BOND_MODE_ACTIVEBACKUP:
1571 bond_set_slave_inactive_flags(new_slave); 1666 bond_set_slave_inactive_flags(new_slave);
@@ -1613,9 +1708,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1613 break; 1708 break;
1614 } /* switch(bond_mode) */ 1709 } /* switch(bond_mode) */
1615 1710
1711 write_unlock_bh(&bond->curr_slave_lock);
1712
1616 bond_set_carrier(bond); 1713 bond_set_carrier(bond);
1617 1714
1618 write_unlock_bh(&bond->lock); 1715 read_unlock(&bond->lock);
1619 1716
1620 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1717 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1621 if (res) 1718 if (res)
@@ -1639,6 +1736,10 @@ err_unset_master:
1639 1736
1640err_restore_mac: 1737err_restore_mac:
1641 if (!bond->params.fail_over_mac) { 1738 if (!bond->params.fail_over_mac) {
1739 /* XXX TODO - fom follow mode needs to change master's
1740 * MAC if this slave's MAC is in use by the bond, or at
1741 * least print a warning.
1742 */
1642 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); 1743 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
1643 addr.sa_family = slave_dev->type; 1744 addr.sa_family = slave_dev->type;
1644 dev_set_mac_address(slave_dev, &addr); 1745 dev_set_mac_address(slave_dev, &addr);
@@ -1693,20 +1794,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1693 return -EINVAL; 1794 return -EINVAL;
1694 } 1795 }
1695 1796
1696 mac_addr_differ = memcmp(bond_dev->dev_addr, 1797 if (!bond->params.fail_over_mac) {
1697 slave->perm_hwaddr, 1798 mac_addr_differ = memcmp(bond_dev->dev_addr, slave->perm_hwaddr,
1698 ETH_ALEN); 1799 ETH_ALEN);
1699 if (!mac_addr_differ && (bond->slave_cnt > 1)) { 1800 if (!mac_addr_differ && (bond->slave_cnt > 1))
1700 printk(KERN_WARNING DRV_NAME 1801 printk(KERN_WARNING DRV_NAME
1701 ": %s: Warning: the permanent HWaddr of %s - " 1802 ": %s: Warning: the permanent HWaddr of %s - "
1702 "%s - is still in use by %s. " 1803 "%s - is still in use by %s. "
1703 "Set the HWaddr of %s to a different address " 1804 "Set the HWaddr of %s to a different address "
1704 "to avoid conflicts.\n", 1805 "to avoid conflicts.\n",
1705 bond_dev->name, 1806 bond_dev->name, slave_dev->name,
1706 slave_dev->name, 1807 print_mac(mac, slave->perm_hwaddr),
1707 print_mac(mac, slave->perm_hwaddr), 1808 bond_dev->name, slave_dev->name);
1708 bond_dev->name,
1709 slave_dev->name);
1710 } 1809 }
1711 1810
1712 /* Inform AD package of unbinding of slave. */ 1811 /* Inform AD package of unbinding of slave. */
@@ -1833,7 +1932,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1833 /* close slave before restoring its mac address */ 1932 /* close slave before restoring its mac address */
1834 dev_close(slave_dev); 1933 dev_close(slave_dev);
1835 1934
1836 if (!bond->params.fail_over_mac) { 1935 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1837 /* restore original ("permanent") mac address */ 1936 /* restore original ("permanent") mac address */
1838 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 1937 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
1839 addr.sa_family = slave_dev->type; 1938 addr.sa_family = slave_dev->type;
@@ -2144,7 +2243,7 @@ static int __bond_mii_monitor(struct bonding *bond, int have_locks)
2144 dprintk("sending delayed gratuitous arp on on %s\n", 2243 dprintk("sending delayed gratuitous arp on on %s\n",
2145 bond->curr_active_slave->dev->name); 2244 bond->curr_active_slave->dev->name);
2146 bond_send_gratuitous_arp(bond); 2245 bond_send_gratuitous_arp(bond);
2147 bond->send_grat_arp = 0; 2246 bond->send_grat_arp--;
2148 } 2247 }
2149 } 2248 }
2150 read_lock(&bond->curr_slave_lock); 2249 read_lock(&bond->curr_slave_lock);
@@ -2397,7 +2496,7 @@ void bond_mii_monitor(struct work_struct *work)
2397 read_lock(&bond->lock); 2496 read_lock(&bond->lock);
2398 } 2497 }
2399 2498
2400 delay = ((bond->params.miimon * HZ) / 1000) ? : 1; 2499 delay = msecs_to_jiffies(bond->params.miimon);
2401 read_unlock(&bond->lock); 2500 read_unlock(&bond->lock);
2402 queue_delayed_work(bond->wq, &bond->mii_work, delay); 2501 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2403} 2502}
@@ -2426,37 +2525,14 @@ out:
2426 return addr; 2525 return addr;
2427} 2526}
2428 2527
2429static int bond_has_ip(struct bonding *bond)
2430{
2431 struct vlan_entry *vlan, *vlan_next;
2432
2433 if (bond->master_ip)
2434 return 1;
2435
2436 if (list_empty(&bond->vlan_list))
2437 return 0;
2438
2439 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
2440 vlan_list) {
2441 if (vlan->vlan_ip)
2442 return 1;
2443 }
2444
2445 return 0;
2446}
2447
2448static int bond_has_this_ip(struct bonding *bond, __be32 ip) 2528static int bond_has_this_ip(struct bonding *bond, __be32 ip)
2449{ 2529{
2450 struct vlan_entry *vlan, *vlan_next; 2530 struct vlan_entry *vlan;
2451 2531
2452 if (ip == bond->master_ip) 2532 if (ip == bond->master_ip)
2453 return 1; 2533 return 1;
2454 2534
2455 if (list_empty(&bond->vlan_list)) 2535 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2456 return 0;
2457
2458 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
2459 vlan_list) {
2460 if (ip == vlan->vlan_ip) 2536 if (ip == vlan->vlan_ip)
2461 return 1; 2537 return 1;
2462 } 2538 }
@@ -2498,7 +2574,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2498{ 2574{
2499 int i, vlan_id, rv; 2575 int i, vlan_id, rv;
2500 __be32 *targets = bond->params.arp_targets; 2576 __be32 *targets = bond->params.arp_targets;
2501 struct vlan_entry *vlan, *vlan_next; 2577 struct vlan_entry *vlan;
2502 struct net_device *vlan_dev; 2578 struct net_device *vlan_dev;
2503 struct flowi fl; 2579 struct flowi fl;
2504 struct rtable *rt; 2580 struct rtable *rt;
@@ -2545,8 +2621,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2545 } 2621 }
2546 2622
2547 vlan_id = 0; 2623 vlan_id = 0;
2548 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list, 2624 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2549 vlan_list) {
2550 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2625 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2551 if (vlan_dev == rt->u.dst.dev) { 2626 if (vlan_dev == rt->u.dst.dev) {
2552 vlan_id = vlan->vlan_id; 2627 vlan_id = vlan->vlan_id;
@@ -2707,7 +2782,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2707 2782
2708 read_lock(&bond->lock); 2783 read_lock(&bond->lock);
2709 2784
2710 delta_in_ticks = (bond->params.arp_interval * HZ) / 1000; 2785 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2711 2786
2712 if (bond->kill_timers) { 2787 if (bond->kill_timers) {
2713 goto out; 2788 goto out;
@@ -2764,8 +2839,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2764 * if we don't know our ip yet 2839 * if we don't know our ip yet
2765 */ 2840 */
2766 if (time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) || 2841 if (time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) ||
2767 (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks) && 2842 (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
2768 bond_has_ip(bond))) {
2769 2843
2770 slave->link = BOND_LINK_DOWN; 2844 slave->link = BOND_LINK_DOWN;
2771 slave->state = BOND_STATE_BACKUP; 2845 slave->state = BOND_STATE_BACKUP;
@@ -2813,246 +2887,299 @@ out:
2813} 2887}
2814 2888
2815/* 2889/*
2816 * When using arp monitoring in active-backup mode, this function is 2890 * Called to inspect slaves for active-backup mode ARP monitor link state
2817 * called to determine if any backup slaves have went down or a new 2891 * changes. Sets new_link in slaves to specify what action should take
2818 * current slave needs to be found. 2892 * place for the slave. Returns 0 if no changes are found, >0 if changes
2819 * The backup slaves never generate traffic, they are considered up by merely 2893 * to link states must be committed.
2820 * receiving traffic. If the current slave goes down, each backup slave will 2894 *
2821 * be given the opportunity to tx/rx an arp before being taken down - this 2895 * Called with bond->lock held for read.
2822 * prevents all slaves from being taken down due to the current slave not
2823 * sending any traffic for the backups to receive. The arps are not necessarily
2824 * necessary, any tx and rx traffic will keep the current slave up. While any
2825 * rx traffic will keep the backup slaves up, the current slave is responsible
2826 * for generating traffic to keep them up regardless of any other traffic they
2827 * may have received.
2828 * see loadbalance_arp_monitor for arp monitoring in load balancing mode
2829 */ 2896 */
2830void bond_activebackup_arp_mon(struct work_struct *work) 2897static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2831{ 2898{
2832 struct bonding *bond = container_of(work, struct bonding,
2833 arp_work.work);
2834 struct slave *slave; 2899 struct slave *slave;
2835 int delta_in_ticks; 2900 int i, commit = 0;
2836 int i;
2837 2901
2838 read_lock(&bond->lock); 2902 bond_for_each_slave(bond, slave, i) {
2903 slave->new_link = BOND_LINK_NOCHANGE;
2839 2904
2840 delta_in_ticks = (bond->params.arp_interval * HZ) / 1000; 2905 if (slave->link != BOND_LINK_UP) {
2906 if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
2907 delta_in_ticks)) {
2908 slave->new_link = BOND_LINK_UP;
2909 commit++;
2910 }
2841 2911
2842 if (bond->kill_timers) { 2912 continue;
2843 goto out; 2913 }
2844 }
2845 2914
2846 if (bond->slave_cnt == 0) { 2915 /*
2847 goto re_arm; 2916 * Give slaves 2*delta after being enslaved or made
2917 * active. This avoids bouncing, as the last receive
2918 * times need a full ARP monitor cycle to be updated.
2919 */
2920 if (!time_after_eq(jiffies, slave->jiffies +
2921 2 * delta_in_ticks))
2922 continue;
2923
2924 /*
2925 * Backup slave is down if:
2926 * - No current_arp_slave AND
2927 * - more than 3*delta since last receive AND
2928 * - the bond has an IP address
2929 *
2930 * Note: a non-null current_arp_slave indicates
2931 * the curr_active_slave went down and we are
2932 * searching for a new one; under this condition
2933 * we only take the curr_active_slave down - this
2934 * gives each slave a chance to tx/rx traffic
2935 * before being taken out
2936 */
2937 if (slave->state == BOND_STATE_BACKUP &&
2938 !bond->current_arp_slave &&
2939 time_after(jiffies, slave_last_rx(bond, slave) +
2940 3 * delta_in_ticks)) {
2941 slave->new_link = BOND_LINK_DOWN;
2942 commit++;
2943 }
2944
2945 /*
2946 * Active slave is down if:
2947 * - more than 2*delta since transmitting OR
2948 * - (more than 2*delta since receive AND
2949 * the bond has an IP address)
2950 */
2951 if ((slave->state == BOND_STATE_ACTIVE) &&
2952 (time_after_eq(jiffies, slave->dev->trans_start +
2953 2 * delta_in_ticks) ||
2954 (time_after_eq(jiffies, slave_last_rx(bond, slave)
2955 + 2 * delta_in_ticks)))) {
2956 slave->new_link = BOND_LINK_DOWN;
2957 commit++;
2958 }
2848 } 2959 }
2849 2960
2850 /* determine if any slave has come up or any backup slave has 2961 read_lock(&bond->curr_slave_lock);
2851 * gone down 2962
2852 * TODO: what about up/down delay in arp mode? it wasn't here before 2963 /*
2853 * so it can wait 2964 * Trigger a commit if the primary option setting has changed.
2854 */ 2965 */
2855 bond_for_each_slave(bond, slave, i) { 2966 if (bond->primary_slave &&
2856 if (slave->link != BOND_LINK_UP) { 2967 (bond->primary_slave != bond->curr_active_slave) &&
2857 if (time_before_eq(jiffies, 2968 (bond->primary_slave->link == BOND_LINK_UP))
2858 slave_last_rx(bond, slave) + delta_in_ticks)) { 2969 commit++;
2859 2970
2860 slave->link = BOND_LINK_UP; 2971 read_unlock(&bond->curr_slave_lock);
2861 2972
2862 write_lock_bh(&bond->curr_slave_lock); 2973 return commit;
2974}
2863 2975
2864 if ((!bond->curr_active_slave) && 2976/*
2865 time_before_eq(jiffies, slave->dev->trans_start + delta_in_ticks)) { 2977 * Called to commit link state changes noted by inspection step of
2866 bond_change_active_slave(bond, slave); 2978 * active-backup mode ARP monitor.
2867 bond->current_arp_slave = NULL; 2979 *
2868 } else if (bond->curr_active_slave != slave) { 2980 * Called with RTNL and bond->lock for read.
2869 /* this slave has just come up but we 2981 */
2870 * already have a current slave; this 2982static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2871 * can also happen if bond_enslave adds 2983{
2872 * a new slave that is up while we are 2984 struct slave *slave;
2873 * searching for a new slave 2985 int i;
2874 */
2875 bond_set_slave_inactive_flags(slave);
2876 bond->current_arp_slave = NULL;
2877 }
2878 2986
2879 bond_set_carrier(bond); 2987 bond_for_each_slave(bond, slave, i) {
2988 switch (slave->new_link) {
2989 case BOND_LINK_NOCHANGE:
2990 continue;
2880 2991
2881 if (slave == bond->curr_active_slave) { 2992 case BOND_LINK_UP:
2882 printk(KERN_INFO DRV_NAME 2993 write_lock_bh(&bond->curr_slave_lock);
2883 ": %s: %s is up and now the "
2884 "active interface\n",
2885 bond->dev->name,
2886 slave->dev->name);
2887 netif_carrier_on(bond->dev);
2888 } else {
2889 printk(KERN_INFO DRV_NAME
2890 ": %s: backup interface %s is "
2891 "now up\n",
2892 bond->dev->name,
2893 slave->dev->name);
2894 }
2895 2994
2896 write_unlock_bh(&bond->curr_slave_lock); 2995 if (!bond->curr_active_slave &&
2897 } 2996 time_before_eq(jiffies, slave->dev->trans_start +
2898 } else { 2997 delta_in_ticks)) {
2899 read_lock(&bond->curr_slave_lock); 2998 slave->link = BOND_LINK_UP;
2999 bond_change_active_slave(bond, slave);
3000 bond->current_arp_slave = NULL;
2900 3001
2901 if ((slave != bond->curr_active_slave) && 3002 printk(KERN_INFO DRV_NAME
2902 (!bond->current_arp_slave) && 3003 ": %s: %s is up and now the "
2903 (time_after_eq(jiffies, slave_last_rx(bond, slave) + 3*delta_in_ticks) && 3004 "active interface\n",
2904 bond_has_ip(bond))) { 3005 bond->dev->name, slave->dev->name);
2905 /* a backup slave has gone down; three times 3006
2906 * the delta allows the current slave to be 3007 } else if (bond->curr_active_slave != slave) {
2907 * taken out before the backup slave. 3008 /* this slave has just come up but we
2908 * note: a non-null current_arp_slave indicates 3009 * already have a current slave; this can
2909 * the curr_active_slave went down and we are 3010 * also happen if bond_enslave adds a new
2910 * searching for a new one; under this 3011 * slave that is up while we are searching
2911 * condition we only take the curr_active_slave 3012 * for a new slave
2912 * down - this gives each slave a chance to
2913 * tx/rx traffic before being taken out
2914 */ 3013 */
3014 slave->link = BOND_LINK_UP;
3015 bond_set_slave_inactive_flags(slave);
3016 bond->current_arp_slave = NULL;
2915 3017
2916 read_unlock(&bond->curr_slave_lock); 3018 printk(KERN_INFO DRV_NAME
3019 ": %s: backup interface %s is now up\n",
3020 bond->dev->name, slave->dev->name);
3021 }
2917 3022
2918 slave->link = BOND_LINK_DOWN; 3023 write_unlock_bh(&bond->curr_slave_lock);
2919 3024
2920 if (slave->link_failure_count < UINT_MAX) { 3025 break;
2921 slave->link_failure_count++; 3026
2922 } 3027 case BOND_LINK_DOWN:
3028 if (slave->link_failure_count < UINT_MAX)
3029 slave->link_failure_count++;
3030
3031 slave->link = BOND_LINK_DOWN;
3032
3033 if (slave == bond->curr_active_slave) {
3034 printk(KERN_INFO DRV_NAME
3035 ": %s: link status down for active "
3036 "interface %s, disabling it\n",
3037 bond->dev->name, slave->dev->name);
2923 3038
2924 bond_set_slave_inactive_flags(slave); 3039 bond_set_slave_inactive_flags(slave);
2925 3040
3041 write_lock_bh(&bond->curr_slave_lock);
3042
3043 bond_select_active_slave(bond);
3044 if (bond->curr_active_slave)
3045 bond->curr_active_slave->jiffies =
3046 jiffies;
3047
3048 write_unlock_bh(&bond->curr_slave_lock);
3049
3050 bond->current_arp_slave = NULL;
3051
3052 } else if (slave->state == BOND_STATE_BACKUP) {
2926 printk(KERN_INFO DRV_NAME 3053 printk(KERN_INFO DRV_NAME
2927 ": %s: backup interface %s is now down\n", 3054 ": %s: backup interface %s is now down\n",
2928 bond->dev->name, 3055 bond->dev->name, slave->dev->name);
2929 slave->dev->name); 3056
2930 } else { 3057 bond_set_slave_inactive_flags(slave);
2931 read_unlock(&bond->curr_slave_lock);
2932 } 3058 }
3059 break;
3060
3061 default:
3062 printk(KERN_ERR DRV_NAME
3063 ": %s: impossible: new_link %d on slave %s\n",
3064 bond->dev->name, slave->new_link,
3065 slave->dev->name);
2933 } 3066 }
2934 } 3067 }
2935 3068
2936 read_lock(&bond->curr_slave_lock); 3069 /*
2937 slave = bond->curr_active_slave; 3070 * No race with changes to primary via sysfs, as we hold rtnl.
2938 read_unlock(&bond->curr_slave_lock); 3071 */
2939 3072 if (bond->primary_slave &&
2940 if (slave) { 3073 (bond->primary_slave != bond->curr_active_slave) &&
2941 /* if we have sent traffic in the past 2*arp_intervals but 3074 (bond->primary_slave->link == BOND_LINK_UP)) {
2942 * haven't xmit and rx traffic in that time interval, select 3075 write_lock_bh(&bond->curr_slave_lock);
2943 * a different slave. slave->jiffies is only updated when 3076 bond_change_active_slave(bond, bond->primary_slave);
2944 * a slave first becomes the curr_active_slave - not necessarily 3077 write_unlock_bh(&bond->curr_slave_lock);
2945 * after every arp; this ensures the slave has a full 2*delta 3078 }
2946 * before being taken out. if a primary is being used, check
2947 * if it is up and needs to take over as the curr_active_slave
2948 */
2949 if ((time_after_eq(jiffies, slave->dev->trans_start + 2*delta_in_ticks) ||
2950 (time_after_eq(jiffies, slave_last_rx(bond, slave) + 2*delta_in_ticks) &&
2951 bond_has_ip(bond))) &&
2952 time_after_eq(jiffies, slave->jiffies + 2*delta_in_ticks)) {
2953 3079
2954 slave->link = BOND_LINK_DOWN; 3080 bond_set_carrier(bond);
3081}
2955 3082
2956 if (slave->link_failure_count < UINT_MAX) { 3083/*
2957 slave->link_failure_count++; 3084 * Send ARP probes for active-backup mode ARP monitor.
2958 } 3085 *
3086 * Called with bond->lock held for read.
3087 */
3088static void bond_ab_arp_probe(struct bonding *bond)
3089{
3090 struct slave *slave;
3091 int i;
2959 3092
2960 printk(KERN_INFO DRV_NAME 3093 read_lock(&bond->curr_slave_lock);
2961 ": %s: link status down for active interface "
2962 "%s, disabling it\n",
2963 bond->dev->name,
2964 slave->dev->name);
2965 3094
2966 write_lock_bh(&bond->curr_slave_lock); 3095 if (bond->current_arp_slave && bond->curr_active_slave)
3096 printk("PROBE: c_arp %s && cas %s BAD\n",
3097 bond->current_arp_slave->dev->name,
3098 bond->curr_active_slave->dev->name);
2967 3099
2968 bond_select_active_slave(bond); 3100 if (bond->curr_active_slave) {
2969 slave = bond->curr_active_slave; 3101 bond_arp_send_all(bond, bond->curr_active_slave);
3102 read_unlock(&bond->curr_slave_lock);
3103 return;
3104 }
2970 3105
2971 write_unlock_bh(&bond->curr_slave_lock); 3106 read_unlock(&bond->curr_slave_lock);
2972 3107
2973 bond->current_arp_slave = slave; 3108 /* if we don't have a curr_active_slave, search for the next available
3109 * backup slave from the current_arp_slave and make it the candidate
3110 * for becoming the curr_active_slave
3111 */
2974 3112
2975 if (slave) { 3113 if (!bond->current_arp_slave) {
2976 slave->jiffies = jiffies; 3114 bond->current_arp_slave = bond->first_slave;
2977 } 3115 if (!bond->current_arp_slave)
2978 } else if ((bond->primary_slave) && 3116 return;
2979 (bond->primary_slave != slave) && 3117 }
2980 (bond->primary_slave->link == BOND_LINK_UP)) {
2981 /* at this point, slave is the curr_active_slave */
2982 printk(KERN_INFO DRV_NAME
2983 ": %s: changing from interface %s to primary "
2984 "interface %s\n",
2985 bond->dev->name,
2986 slave->dev->name,
2987 bond->primary_slave->dev->name);
2988 3118
2989 /* primary is up so switch to it */ 3119 bond_set_slave_inactive_flags(bond->current_arp_slave);
2990 write_lock_bh(&bond->curr_slave_lock);
2991 bond_change_active_slave(bond, bond->primary_slave);
2992 write_unlock_bh(&bond->curr_slave_lock);
2993 3120
2994 slave = bond->primary_slave; 3121 /* search for next candidate */
3122 bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
3123 if (IS_UP(slave->dev)) {
3124 slave->link = BOND_LINK_BACK;
3125 bond_set_slave_active_flags(slave);
3126 bond_arp_send_all(bond, slave);
2995 slave->jiffies = jiffies; 3127 slave->jiffies = jiffies;
2996 } else { 3128 bond->current_arp_slave = slave;
2997 bond->current_arp_slave = NULL; 3129 break;
2998 } 3130 }
2999 3131
3000 /* the current slave must tx an arp to ensure backup slaves 3132 /* if the link state is up at this point, we
3001 * rx traffic 3133 * mark it down - this can happen if we have
3134 * simultaneous link failures and
3135 * reselect_active_interface doesn't make this
3136 * one the current slave so it is still marked
3137 * up when it is actually down
3002 */ 3138 */
3003 if (slave && bond_has_ip(bond)) { 3139 if (slave->link == BOND_LINK_UP) {
3004 bond_arp_send_all(bond, slave); 3140 slave->link = BOND_LINK_DOWN;
3141 if (slave->link_failure_count < UINT_MAX)
3142 slave->link_failure_count++;
3143
3144 bond_set_slave_inactive_flags(slave);
3145
3146 printk(KERN_INFO DRV_NAME
3147 ": %s: backup interface %s is now down.\n",
3148 bond->dev->name, slave->dev->name);
3005 } 3149 }
3006 } 3150 }
3151}
3007 3152
3008 /* if we don't have a curr_active_slave, search for the next available 3153void bond_activebackup_arp_mon(struct work_struct *work)
3009 * backup slave from the current_arp_slave and make it the candidate 3154{
3010 * for becoming the curr_active_slave 3155 struct bonding *bond = container_of(work, struct bonding,
3011 */ 3156 arp_work.work);
3012 if (!slave) { 3157 int delta_in_ticks;
3013 if (!bond->current_arp_slave) {
3014 bond->current_arp_slave = bond->first_slave;
3015 }
3016 3158
3017 if (bond->current_arp_slave) { 3159 read_lock(&bond->lock);
3018 bond_set_slave_inactive_flags(bond->current_arp_slave);
3019 3160
3020 /* search for next candidate */ 3161 if (bond->kill_timers)
3021 bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) { 3162 goto out;
3022 if (IS_UP(slave->dev)) {
3023 slave->link = BOND_LINK_BACK;
3024 bond_set_slave_active_flags(slave);
3025 bond_arp_send_all(bond, slave);
3026 slave->jiffies = jiffies;
3027 bond->current_arp_slave = slave;
3028 break;
3029 }
3030 3163
3031 /* if the link state is up at this point, we 3164 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3032 * mark it down - this can happen if we have
3033 * simultaneous link failures and
3034 * reselect_active_interface doesn't make this
3035 * one the current slave so it is still marked
3036 * up when it is actually down
3037 */
3038 if (slave->link == BOND_LINK_UP) {
3039 slave->link = BOND_LINK_DOWN;
3040 if (slave->link_failure_count < UINT_MAX) {
3041 slave->link_failure_count++;
3042 }
3043 3165
3044 bond_set_slave_inactive_flags(slave); 3166 if (bond->slave_cnt == 0)
3167 goto re_arm;
3045 3168
3046 printk(KERN_INFO DRV_NAME 3169 if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
3047 ": %s: backup interface %s is " 3170 read_unlock(&bond->lock);
3048 "now down.\n", 3171 rtnl_lock();
3049 bond->dev->name, 3172 read_lock(&bond->lock);
3050 slave->dev->name); 3173
3051 } 3174 bond_ab_arp_commit(bond, delta_in_ticks);
3052 } 3175
3053 } 3176 read_unlock(&bond->lock);
3177 rtnl_unlock();
3178 read_lock(&bond->lock);
3054 } 3179 }
3055 3180
3181 bond_ab_arp_probe(bond);
3182
3056re_arm: 3183re_arm:
3057 if (bond->params.arp_interval) { 3184 if (bond->params.arp_interval) {
3058 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3185 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
@@ -3128,7 +3255,8 @@ static void bond_info_show_master(struct seq_file *seq)
3128 3255
3129 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && 3256 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
3130 bond->params.fail_over_mac) 3257 bond->params.fail_over_mac)
3131 seq_printf(seq, " (fail_over_mac)"); 3258 seq_printf(seq, " (fail_over_mac %s)",
3259 fail_over_mac_tbl[bond->params.fail_over_mac].modename);
3132 3260
3133 seq_printf(seq, "\n"); 3261 seq_printf(seq, "\n");
3134 3262
@@ -3500,13 +3628,13 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3500{ 3628{
3501 struct in_ifaddr *ifa = ptr; 3629 struct in_ifaddr *ifa = ptr;
3502 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev; 3630 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
3503 struct bonding *bond, *bond_next; 3631 struct bonding *bond;
3504 struct vlan_entry *vlan, *vlan_next; 3632 struct vlan_entry *vlan;
3505 3633
3506 if (dev_net(ifa->ifa_dev->dev) != &init_net) 3634 if (dev_net(ifa->ifa_dev->dev) != &init_net)
3507 return NOTIFY_DONE; 3635 return NOTIFY_DONE;
3508 3636
3509 list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) { 3637 list_for_each_entry(bond, &bond_dev_list, bond_list) {
3510 if (bond->dev == event_dev) { 3638 if (bond->dev == event_dev) {
3511 switch (event) { 3639 switch (event) {
3512 case NETDEV_UP: 3640 case NETDEV_UP:
@@ -3520,11 +3648,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3520 } 3648 }
3521 } 3649 }
3522 3650
3523 if (list_empty(&bond->vlan_list)) 3651 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
3524 continue;
3525
3526 list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
3527 vlan_list) {
3528 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 3652 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
3529 if (vlan_dev == event_dev) { 3653 if (vlan_dev == event_dev) {
3530 switch (event) { 3654 switch (event) {
@@ -4060,10 +4184,10 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4060 dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None")); 4184 dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
4061 4185
4062 /* 4186 /*
4063 * If fail_over_mac is enabled, do nothing and return success. 4187 * If fail_over_mac is set to active, do nothing and return
4064 * Returning an error causes ifenslave to fail. 4188 * success. Returning an error causes ifenslave to fail.
4065 */ 4189 */
4066 if (bond->params.fail_over_mac) 4190 if (bond->params.fail_over_mac == BOND_FOM_ACTIVE)
4067 return 0; 4191 return 0;
4068 4192
4069 if (!is_valid_ether_addr(sa->sa_data)) { 4193 if (!is_valid_ether_addr(sa->sa_data)) {
@@ -4568,7 +4692,7 @@ int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
4568 4692
4569static int bond_check_params(struct bond_params *params) 4693static int bond_check_params(struct bond_params *params)
4570{ 4694{
4571 int arp_validate_value; 4695 int arp_validate_value, fail_over_mac_value;
4572 4696
4573 /* 4697 /*
4574 * Convert string parameters. 4698 * Convert string parameters.
@@ -4658,6 +4782,13 @@ static int bond_check_params(struct bond_params *params)
4658 use_carrier = 1; 4782 use_carrier = 1;
4659 } 4783 }
4660 4784
4785 if (num_grat_arp < 0 || num_grat_arp > 255) {
4786 printk(KERN_WARNING DRV_NAME
4787 ": Warning: num_grat_arp (%d) not in range 0-255 so it "
4788 "was reset to 1 \n", num_grat_arp);
4789 num_grat_arp = 1;
4790 }
4791
4661 /* reset values for 802.3ad */ 4792 /* reset values for 802.3ad */
4662 if (bond_mode == BOND_MODE_8023AD) { 4793 if (bond_mode == BOND_MODE_8023AD) {
4663 if (!miimon) { 4794 if (!miimon) {
@@ -4836,15 +4967,29 @@ static int bond_check_params(struct bond_params *params)
4836 primary = NULL; 4967 primary = NULL;
4837 } 4968 }
4838 4969
4839 if (fail_over_mac && (bond_mode != BOND_MODE_ACTIVEBACKUP)) 4970 if (fail_over_mac) {
4840 printk(KERN_WARNING DRV_NAME 4971 fail_over_mac_value = bond_parse_parm(fail_over_mac,
4841 ": Warning: fail_over_mac only affects " 4972 fail_over_mac_tbl);
4842 "active-backup mode.\n"); 4973 if (fail_over_mac_value == -1) {
4974 printk(KERN_ERR DRV_NAME
4975 ": Error: invalid fail_over_mac \"%s\"\n",
4976 arp_validate == NULL ? "NULL" : arp_validate);
4977 return -EINVAL;
4978 }
4979
4980 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4981 printk(KERN_WARNING DRV_NAME
4982 ": Warning: fail_over_mac only affects "
4983 "active-backup mode.\n");
4984 } else {
4985 fail_over_mac_value = BOND_FOM_NONE;
4986 }
4843 4987
4844 /* fill params struct with the proper values */ 4988 /* fill params struct with the proper values */
4845 params->mode = bond_mode; 4989 params->mode = bond_mode;
4846 params->xmit_policy = xmit_hashtype; 4990 params->xmit_policy = xmit_hashtype;
4847 params->miimon = miimon; 4991 params->miimon = miimon;
4992 params->num_grat_arp = num_grat_arp;
4848 params->arp_interval = arp_interval; 4993 params->arp_interval = arp_interval;
4849 params->arp_validate = arp_validate_value; 4994 params->arp_validate = arp_validate_value;
4850 params->updelay = updelay; 4995 params->updelay = updelay;
@@ -4852,7 +4997,7 @@ static int bond_check_params(struct bond_params *params)
4852 params->use_carrier = use_carrier; 4997 params->use_carrier = use_carrier;
4853 params->lacp_fast = lacp_fast; 4998 params->lacp_fast = lacp_fast;
4854 params->primary[0] = 0; 4999 params->primary[0] = 0;
4855 params->fail_over_mac = fail_over_mac; 5000 params->fail_over_mac = fail_over_mac_value;
4856 5001
4857 if (primary) { 5002 if (primary) {
4858 strncpy(params->primary, primary, IFNAMSIZ); 5003 strncpy(params->primary, primary, IFNAMSIZ);
@@ -4871,10 +5016,10 @@ static struct lock_class_key bonding_netdev_xmit_lock_key;
4871 * Caller must NOT hold rtnl_lock; we need to release it here before we 5016 * Caller must NOT hold rtnl_lock; we need to release it here before we
4872 * set up our sysfs entries. 5017 * set up our sysfs entries.
4873 */ 5018 */
4874int bond_create(char *name, struct bond_params *params, struct bonding **newbond) 5019int bond_create(char *name, struct bond_params *params)
4875{ 5020{
4876 struct net_device *bond_dev; 5021 struct net_device *bond_dev;
4877 struct bonding *bond, *nxt; 5022 struct bonding *bond;
4878 int res; 5023 int res;
4879 5024
4880 rtnl_lock(); 5025 rtnl_lock();
@@ -4882,7 +5027,7 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4882 5027
4883 /* Check to see if the bond already exists. */ 5028 /* Check to see if the bond already exists. */
4884 if (name) { 5029 if (name) {
4885 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 5030 list_for_each_entry(bond, &bond_dev_list, bond_list)
4886 if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { 5031 if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
4887 printk(KERN_ERR DRV_NAME 5032 printk(KERN_ERR DRV_NAME
4888 ": cannot add bond %s; it already exists\n", 5033 ": cannot add bond %s; it already exists\n",
@@ -4925,9 +5070,6 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4925 5070
4926 lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key); 5071 lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key);
4927 5072
4928 if (newbond)
4929 *newbond = bond_dev->priv;
4930
4931 netif_carrier_off(bond_dev); 5073 netif_carrier_off(bond_dev);
4932 5074
4933 up_write(&bonding_rwsem); 5075 up_write(&bonding_rwsem);
@@ -4957,7 +5099,7 @@ static int __init bonding_init(void)
4957{ 5099{
4958 int i; 5100 int i;
4959 int res; 5101 int res;
4960 struct bonding *bond, *nxt; 5102 struct bonding *bond;
4961 5103
4962 printk(KERN_INFO "%s", version); 5104 printk(KERN_INFO "%s", version);
4963 5105
@@ -4973,7 +5115,7 @@ static int __init bonding_init(void)
4973 init_rwsem(&bonding_rwsem); 5115 init_rwsem(&bonding_rwsem);
4974 5116
4975 for (i = 0; i < max_bonds; i++) { 5117 for (i = 0; i < max_bonds; i++) {
4976 res = bond_create(NULL, &bonding_defaults, NULL); 5118 res = bond_create(NULL, &bonding_defaults);
4977 if (res) 5119 if (res)
4978 goto err; 5120 goto err;
4979 } 5121 }
@@ -4987,7 +5129,7 @@ static int __init bonding_init(void)
4987 5129
4988 goto out; 5130 goto out;
4989err: 5131err:
4990 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) { 5132 list_for_each_entry(bond, &bond_dev_list, bond_list) {
4991 bond_work_cancel_all(bond); 5133 bond_work_cancel_all(bond);
4992 destroy_workqueue(bond->wq); 5134 destroy_workqueue(bond->wq);
4993 } 5135 }
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 08f3d396bcd6..dd265c69b0df 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -50,6 +50,7 @@ extern struct bond_parm_tbl bond_mode_tbl[];
50extern struct bond_parm_tbl bond_lacp_tbl[]; 50extern struct bond_parm_tbl bond_lacp_tbl[];
51extern struct bond_parm_tbl xmit_hashtype_tbl[]; 51extern struct bond_parm_tbl xmit_hashtype_tbl[];
52extern struct bond_parm_tbl arp_validate_tbl[]; 52extern struct bond_parm_tbl arp_validate_tbl[];
53extern struct bond_parm_tbl fail_over_mac_tbl[];
53 54
54static int expected_refcount = -1; 55static int expected_refcount = -1;
55static struct class *netdev_class; 56static struct class *netdev_class;
@@ -111,7 +112,6 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
111 char *ifname; 112 char *ifname;
112 int rv, res = count; 113 int rv, res = count;
113 struct bonding *bond; 114 struct bonding *bond;
114 struct bonding *nxt;
115 115
116 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/ 116 sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
117 ifname = command + 1; 117 ifname = command + 1;
@@ -122,7 +122,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
122 if (command[0] == '+') { 122 if (command[0] == '+') {
123 printk(KERN_INFO DRV_NAME 123 printk(KERN_INFO DRV_NAME
124 ": %s is being created...\n", ifname); 124 ": %s is being created...\n", ifname);
125 rv = bond_create(ifname, &bonding_defaults, &bond); 125 rv = bond_create(ifname, &bonding_defaults);
126 if (rv) { 126 if (rv) {
127 printk(KERN_INFO DRV_NAME ": Bond creation failed.\n"); 127 printk(KERN_INFO DRV_NAME ": Bond creation failed.\n");
128 res = rv; 128 res = rv;
@@ -134,7 +134,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
134 rtnl_lock(); 134 rtnl_lock();
135 down_write(&bonding_rwsem); 135 down_write(&bonding_rwsem);
136 136
137 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 137 list_for_each_entry(bond, &bond_dev_list, bond_list)
138 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) { 138 if (strnicmp(bond->dev->name, ifname, IFNAMSIZ) == 0) {
139 /* check the ref count on the bond's kobject. 139 /* check the ref count on the bond's kobject.
140 * If it's > expected, then there's a file open, 140 * If it's > expected, then there's a file open,
@@ -548,42 +548,37 @@ static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attrib
548{ 548{
549 struct bonding *bond = to_bond(d); 549 struct bonding *bond = to_bond(d);
550 550
551 return sprintf(buf, "%d\n", bond->params.fail_over_mac) + 1; 551 return sprintf(buf, "%s %d\n",
552 fail_over_mac_tbl[bond->params.fail_over_mac].modename,
553 bond->params.fail_over_mac);
552} 554}
553 555
554static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count) 556static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count)
555{ 557{
556 int new_value; 558 int new_value;
557 int ret = count;
558 struct bonding *bond = to_bond(d); 559 struct bonding *bond = to_bond(d);
559 560
560 if (bond->slave_cnt != 0) { 561 if (bond->slave_cnt != 0) {
561 printk(KERN_ERR DRV_NAME 562 printk(KERN_ERR DRV_NAME
562 ": %s: Can't alter fail_over_mac with slaves in bond.\n", 563 ": %s: Can't alter fail_over_mac with slaves in bond.\n",
563 bond->dev->name); 564 bond->dev->name);
564 ret = -EPERM; 565 return -EPERM;
565 goto out;
566 } 566 }
567 567
568 if (sscanf(buf, "%d", &new_value) != 1) { 568 new_value = bond_parse_parm(buf, fail_over_mac_tbl);
569 if (new_value < 0) {
569 printk(KERN_ERR DRV_NAME 570 printk(KERN_ERR DRV_NAME
570 ": %s: no fail_over_mac value specified.\n", 571 ": %s: Ignoring invalid fail_over_mac value %s.\n",
571 bond->dev->name); 572 bond->dev->name, buf);
572 ret = -EINVAL; 573 return -EINVAL;
573 goto out;
574 } 574 }
575 575
576 if ((new_value == 0) || (new_value == 1)) { 576 bond->params.fail_over_mac = new_value;
577 bond->params.fail_over_mac = new_value; 577 printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
578 printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %d.\n", 578 bond->dev->name, fail_over_mac_tbl[new_value].modename,
579 bond->dev->name, new_value); 579 new_value);
580 } else { 580
581 printk(KERN_INFO DRV_NAME 581 return count;
582 ": %s: Ignoring invalid fail_over_mac value %d.\n",
583 bond->dev->name, new_value);
584 }
585out:
586 return ret;
587} 582}
588 583
589static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac); 584static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac);
@@ -952,6 +947,45 @@ out:
952static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); 947static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
953 948
954/* 949/*
950 * Show and set the number of grat ARP to send after a failover event.
951 */
952static ssize_t bonding_show_n_grat_arp(struct device *d,
953 struct device_attribute *attr,
954 char *buf)
955{
956 struct bonding *bond = to_bond(d);
957
958 return sprintf(buf, "%d\n", bond->params.num_grat_arp);
959}
960
961static ssize_t bonding_store_n_grat_arp(struct device *d,
962 struct device_attribute *attr,
963 const char *buf, size_t count)
964{
965 int new_value, ret = count;
966 struct bonding *bond = to_bond(d);
967
968 if (sscanf(buf, "%d", &new_value) != 1) {
969 printk(KERN_ERR DRV_NAME
970 ": %s: no num_grat_arp value specified.\n",
971 bond->dev->name);
972 ret = -EINVAL;
973 goto out;
974 }
975 if (new_value < 0 || new_value > 255) {
976 printk(KERN_ERR DRV_NAME
977 ": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
978 bond->dev->name, new_value);
979 ret = -EINVAL;
980 goto out;
981 } else {
982 bond->params.num_grat_arp = new_value;
983 }
984out:
985 return ret;
986}
987static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_n_grat_arp, bonding_store_n_grat_arp);
988/*
955 * Show and set the MII monitor interval. There are two tricky bits 989 * Show and set the MII monitor interval. There are two tricky bits
956 * here. First, if MII monitoring is activated, then we must disable 990 * here. First, if MII monitoring is activated, then we must disable
957 * ARP monitoring. Second, if the timer isn't running, we must 991 * ARP monitoring. Second, if the timer isn't running, we must
@@ -1388,6 +1422,7 @@ static struct attribute *per_bond_attrs[] = {
1388 &dev_attr_updelay.attr, 1422 &dev_attr_updelay.attr,
1389 &dev_attr_lacp_rate.attr, 1423 &dev_attr_lacp_rate.attr,
1390 &dev_attr_xmit_hash_policy.attr, 1424 &dev_attr_xmit_hash_policy.attr,
1425 &dev_attr_num_grat_arp.attr,
1391 &dev_attr_miimon.attr, 1426 &dev_attr_miimon.attr,
1392 &dev_attr_primary.attr, 1427 &dev_attr_primary.attr,
1393 &dev_attr_use_carrier.attr, 1428 &dev_attr_use_carrier.attr,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a3c74e20aa53..89fd9963db7a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -125,6 +125,7 @@ struct bond_params {
125 int mode; 125 int mode;
126 int xmit_policy; 126 int xmit_policy;
127 int miimon; 127 int miimon;
128 int num_grat_arp;
128 int arp_interval; 129 int arp_interval;
129 int arp_validate; 130 int arp_validate;
130 int use_carrier; 131 int use_carrier;
@@ -157,6 +158,7 @@ struct slave {
157 unsigned long jiffies; 158 unsigned long jiffies;
158 unsigned long last_arp_rx; 159 unsigned long last_arp_rx;
159 s8 link; /* one of BOND_LINK_XXXX */ 160 s8 link; /* one of BOND_LINK_XXXX */
161 s8 new_link;
160 s8 state; /* one of BOND_STATE_XXXX */ 162 s8 state; /* one of BOND_STATE_XXXX */
161 u32 original_flags; 163 u32 original_flags;
162 u32 original_mtu; 164 u32 original_mtu;
@@ -169,6 +171,11 @@ struct slave {
169}; 171};
170 172
171/* 173/*
174 * Link pseudo-state only used internally by monitors
175 */
176#define BOND_LINK_NOCHANGE -1
177
178/*
172 * Here are the locking policies for the two bonding locks: 179 * Here are the locking policies for the two bonding locks:
173 * 180 *
174 * 1) Get bond->lock when reading/writing slave list. 181 * 1) Get bond->lock when reading/writing slave list.
@@ -241,6 +248,10 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
241 return (struct bonding *)slave->dev->master->priv; 248 return (struct bonding *)slave->dev->master->priv;
242} 249}
243 250
251#define BOND_FOM_NONE 0
252#define BOND_FOM_ACTIVE 1
253#define BOND_FOM_FOLLOW 2
254
244#define BOND_ARP_VALIDATE_NONE 0 255#define BOND_ARP_VALIDATE_NONE 0
245#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) 256#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE)
246#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) 257#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
@@ -301,7 +312,7 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
301 312
302struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 313struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
303int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 314int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
304int bond_create(char *name, struct bond_params *params, struct bonding **newbond); 315int bond_create(char *name, struct bond_params *params);
305void bond_destroy(struct bonding *bond); 316void bond_destroy(struct bonding *bond);
306int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); 317int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
307int bond_create_sysfs(void); 318int bond_create_sysfs(void);
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index acebe431d068..271140433b09 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,6 +42,7 @@
42#include <linux/cache.h> 42#include <linux/cache.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/bitops.h> 44#include <linux/bitops.h>
45#include <linux/inet_lro.h>
45#include "t3cdev.h" 46#include "t3cdev.h"
46#include <asm/io.h> 47#include <asm/io.h>
47 48
@@ -92,6 +93,7 @@ struct sge_fl { /* SGE per free-buffer list state */
92 unsigned int gen; /* free list generation */ 93 unsigned int gen; /* free list generation */
93 struct fl_pg_chunk pg_chunk;/* page chunk cache */ 94 struct fl_pg_chunk pg_chunk;/* page chunk cache */
94 unsigned int use_pages; /* whether FL uses pages or sk_buffs */ 95 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
96 unsigned int order; /* order of page allocations */
95 struct rx_desc *desc; /* address of HW Rx descriptor ring */ 97 struct rx_desc *desc; /* address of HW Rx descriptor ring */
96 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ 98 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
97 dma_addr_t phys_addr; /* physical address of HW ring start */ 99 dma_addr_t phys_addr; /* physical address of HW ring start */
@@ -116,12 +118,15 @@ struct sge_rspq { /* state for an SGE response queue */
116 unsigned int polling; /* is the queue serviced through NAPI? */ 118 unsigned int polling; /* is the queue serviced through NAPI? */
117 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */ 119 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
118 unsigned int next_holdoff; /* holdoff time for next interrupt */ 120 unsigned int next_holdoff; /* holdoff time for next interrupt */
121 unsigned int rx_recycle_buf; /* whether recycling occurred
122 within current sop-eop */
119 struct rsp_desc *desc; /* address of HW response ring */ 123 struct rsp_desc *desc; /* address of HW response ring */
120 dma_addr_t phys_addr; /* physical address of the ring */ 124 dma_addr_t phys_addr; /* physical address of the ring */
121 unsigned int cntxt_id; /* SGE context id for the response q */ 125 unsigned int cntxt_id; /* SGE context id for the response q */
122 spinlock_t lock; /* guards response processing */ 126 spinlock_t lock; /* guards response processing */
123 struct sk_buff *rx_head; /* offload packet receive queue head */ 127 struct sk_buff *rx_head; /* offload packet receive queue head */
124 struct sk_buff *rx_tail; /* offload packet receive queue tail */ 128 struct sk_buff *rx_tail; /* offload packet receive queue tail */
129 struct sk_buff *pg_skb; /* used to build frag list in napi handler */
125 130
126 unsigned long offload_pkts; 131 unsigned long offload_pkts;
127 unsigned long offload_bundles; 132 unsigned long offload_bundles;
@@ -169,16 +174,29 @@ enum { /* per port SGE statistics */
169 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ 174 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
170 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ 175 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
171 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ 176 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
177 SGE_PSTAT_LRO_AGGR, /* # of page chunks added to LRO sessions */
178 SGE_PSTAT_LRO_FLUSHED, /* # of flushed LRO sessions */
179 SGE_PSTAT_LRO_NO_DESC, /* # of overflown LRO sessions */
172 180
173 SGE_PSTAT_MAX /* must be last */ 181 SGE_PSTAT_MAX /* must be last */
174}; 182};
175 183
184#define T3_MAX_LRO_SES 8
185#define T3_MAX_LRO_MAX_PKTS 64
186
176struct sge_qset { /* an SGE queue set */ 187struct sge_qset { /* an SGE queue set */
177 struct adapter *adap; 188 struct adapter *adap;
178 struct napi_struct napi; 189 struct napi_struct napi;
179 struct sge_rspq rspq; 190 struct sge_rspq rspq;
180 struct sge_fl fl[SGE_RXQ_PER_SET]; 191 struct sge_fl fl[SGE_RXQ_PER_SET];
181 struct sge_txq txq[SGE_TXQ_PER_SET]; 192 struct sge_txq txq[SGE_TXQ_PER_SET];
193 struct net_lro_mgr lro_mgr;
194 struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
195 struct skb_frag_struct *lro_frag_tbl;
196 int lro_nfrags;
197 int lro_enabled;
198 int lro_frag_len;
199 void *lro_va;
182 struct net_device *netdev; 200 struct net_device *netdev;
183 unsigned long txq_stopped; /* which Tx queues are stopped */ 201 unsigned long txq_stopped; /* which Tx queues are stopped */
184 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ 202 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 579bee42a5cb..d444f5881f56 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -351,6 +351,7 @@ struct tp_params {
351 351
352struct qset_params { /* SGE queue set parameters */ 352struct qset_params { /* SGE queue set parameters */
353 unsigned int polling; /* polling/interrupt service for rspq */ 353 unsigned int polling; /* polling/interrupt service for rspq */
354 unsigned int lro; /* large receive offload */
354 unsigned int coalesce_usecs; /* irq coalescing timer */ 355 unsigned int coalesce_usecs; /* irq coalescing timer */
355 unsigned int rspq_size; /* # of entries in response queue */ 356 unsigned int rspq_size; /* # of entries in response queue */
356 unsigned int fl_size; /* # of entries in regular free list */ 357 unsigned int fl_size; /* # of entries in regular free list */
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index 0a82fcddf2d8..68200a14065e 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -90,6 +90,7 @@ struct ch_qset_params {
90 int32_t fl_size[2]; 90 int32_t fl_size[2];
91 int32_t intr_lat; 91 int32_t intr_lat;
92 int32_t polling; 92 int32_t polling;
93 int32_t lro;
93 int32_t cong_thres; 94 int32_t cong_thres;
94}; 95};
95 96
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 3a3127216791..5447f3e60f07 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1212,6 +1212,9 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
1212 "VLANinsertions ", 1212 "VLANinsertions ",
1213 "TxCsumOffload ", 1213 "TxCsumOffload ",
1214 "RxCsumGood ", 1214 "RxCsumGood ",
1215 "LroAggregated ",
1216 "LroFlushed ",
1217 "LroNoDesc ",
1215 "RxDrops ", 1218 "RxDrops ",
1216 1219
1217 "CheckTXEnToggled ", 1220 "CheckTXEnToggled ",
@@ -1340,6 +1343,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1340 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); 1343 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1341 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); 1344 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1342 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); 1345 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1346 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1347 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1348 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1343 *data++ = s->rx_cong_drops; 1349 *data++ = s->rx_cong_drops;
1344 1350
1345 *data++ = s->num_toggled; 1351 *data++ = s->num_toggled;
@@ -1558,6 +1564,13 @@ static int set_rx_csum(struct net_device *dev, u32 data)
1558 struct port_info *p = netdev_priv(dev); 1564 struct port_info *p = netdev_priv(dev);
1559 1565
1560 p->rx_csum_offload = data; 1566 p->rx_csum_offload = data;
1567 if (!data) {
1568 struct adapter *adap = p->adapter;
1569 int i;
1570
1571 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1572 adap->sge.qs[i].lro_enabled = 0;
1573 }
1561 return 0; 1574 return 0;
1562} 1575}
1563 1576
@@ -1830,6 +1843,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1830 } 1843 }
1831 } 1844 }
1832 } 1845 }
1846 if (t.lro >= 0) {
1847 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1848 q->lro = t.lro;
1849 qs->lro_enabled = t.lro;
1850 }
1833 break; 1851 break;
1834 } 1852 }
1835 case CHELSIO_GET_QSET_PARAMS:{ 1853 case CHELSIO_GET_QSET_PARAMS:{
@@ -1849,6 +1867,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1849 t.fl_size[0] = q->fl_size; 1867 t.fl_size[0] = q->fl_size;
1850 t.fl_size[1] = q->jumbo_size; 1868 t.fl_size[1] = q->jumbo_size;
1851 t.polling = q->polling; 1869 t.polling = q->polling;
1870 t.lro = q->lro;
1852 t.intr_lat = q->coalesce_usecs; 1871 t.intr_lat = q->coalesce_usecs;
1853 t.cong_thres = q->cong_thres; 1872 t.cong_thres = q->cong_thres;
1854 1873
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 796eb305cdc3..a96331c875e6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -55,6 +55,9 @@
55 * directly. 55 * directly.
56 */ 56 */
57#define FL0_PG_CHUNK_SIZE 2048 57#define FL0_PG_CHUNK_SIZE 2048
58#define FL0_PG_ORDER 0
59#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
60#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
58 61
59#define SGE_RX_DROP_THRES 16 62#define SGE_RX_DROP_THRES 16
60 63
@@ -359,7 +362,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
359 } 362 }
360 363
361 if (q->pg_chunk.page) { 364 if (q->pg_chunk.page) {
362 __free_page(q->pg_chunk.page); 365 __free_pages(q->pg_chunk.page, q->order);
363 q->pg_chunk.page = NULL; 366 q->pg_chunk.page = NULL;
364 } 367 }
365} 368}
@@ -376,13 +379,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
376 * Add a buffer of the given length to the supplied HW and SW Rx 379 * Add a buffer of the given length to the supplied HW and SW Rx
377 * descriptors. 380 * descriptors.
378 */ 381 */
379static inline void add_one_rx_buf(void *va, unsigned int len, 382static inline int add_one_rx_buf(void *va, unsigned int len,
380 struct rx_desc *d, struct rx_sw_desc *sd, 383 struct rx_desc *d, struct rx_sw_desc *sd,
381 unsigned int gen, struct pci_dev *pdev) 384 unsigned int gen, struct pci_dev *pdev)
382{ 385{
383 dma_addr_t mapping; 386 dma_addr_t mapping;
384 387
385 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); 388 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
389 if (unlikely(pci_dma_mapping_error(mapping)))
390 return -ENOMEM;
391
386 pci_unmap_addr_set(sd, dma_addr, mapping); 392 pci_unmap_addr_set(sd, dma_addr, mapping);
387 393
388 d->addr_lo = cpu_to_be32(mapping); 394 d->addr_lo = cpu_to_be32(mapping);
@@ -390,12 +396,14 @@ static inline void add_one_rx_buf(void *va, unsigned int len,
390 wmb(); 396 wmb();
391 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); 397 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
392 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); 398 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
399 return 0;
393} 400}
394 401
395static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp) 402static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
403 unsigned int order)
396{ 404{
397 if (!q->pg_chunk.page) { 405 if (!q->pg_chunk.page) {
398 q->pg_chunk.page = alloc_page(gfp); 406 q->pg_chunk.page = alloc_pages(gfp, order);
399 if (unlikely(!q->pg_chunk.page)) 407 if (unlikely(!q->pg_chunk.page))
400 return -ENOMEM; 408 return -ENOMEM;
401 q->pg_chunk.va = page_address(q->pg_chunk.page); 409 q->pg_chunk.va = page_address(q->pg_chunk.page);
@@ -404,7 +412,7 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
404 sd->pg_chunk = q->pg_chunk; 412 sd->pg_chunk = q->pg_chunk;
405 413
406 q->pg_chunk.offset += q->buf_size; 414 q->pg_chunk.offset += q->buf_size;
407 if (q->pg_chunk.offset == PAGE_SIZE) 415 if (q->pg_chunk.offset == (PAGE_SIZE << order))
408 q->pg_chunk.page = NULL; 416 q->pg_chunk.page = NULL;
409 else { 417 else {
410 q->pg_chunk.va += q->buf_size; 418 q->pg_chunk.va += q->buf_size;
@@ -424,15 +432,18 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
424 * allocated with the supplied gfp flags. The caller must assure that 432 * allocated with the supplied gfp flags. The caller must assure that
425 * @n does not exceed the queue's capacity. 433 * @n does not exceed the queue's capacity.
426 */ 434 */
427static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 435static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
428{ 436{
429 void *buf_start; 437 void *buf_start;
430 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 438 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
431 struct rx_desc *d = &q->desc[q->pidx]; 439 struct rx_desc *d = &q->desc[q->pidx];
440 unsigned int count = 0;
432 441
433 while (n--) { 442 while (n--) {
443 int err;
444
434 if (q->use_pages) { 445 if (q->use_pages) {
435 if (unlikely(alloc_pg_chunk(q, sd, gfp))) { 446 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
436nomem: q->alloc_failed++; 447nomem: q->alloc_failed++;
437 break; 448 break;
438 } 449 }
@@ -447,8 +458,16 @@ nomem: q->alloc_failed++;
447 buf_start = skb->data; 458 buf_start = skb->data;
448 } 459 }
449 460
450 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, 461 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
451 adap->pdev); 462 adap->pdev);
463 if (unlikely(err)) {
464 if (!q->use_pages) {
465 kfree_skb(sd->skb);
466 sd->skb = NULL;
467 }
468 break;
469 }
470
452 d++; 471 d++;
453 sd++; 472 sd++;
454 if (++q->pidx == q->size) { 473 if (++q->pidx == q->size) {
@@ -458,14 +477,19 @@ nomem: q->alloc_failed++;
458 d = q->desc; 477 d = q->desc;
459 } 478 }
460 q->credits++; 479 q->credits++;
480 count++;
461 } 481 }
462 wmb(); 482 wmb();
463 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 483 if (likely(count))
484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
485
486 return count;
464} 487}
465 488
466static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) 489static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
467{ 490{
468 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC); 491 refill_fl(adap, fl, min(16U, fl->size - fl->credits),
492 GFP_ATOMIC | __GFP_COMP);
469} 493}
470 494
471/** 495/**
@@ -560,6 +584,8 @@ static void t3_reset_qset(struct sge_qset *q)
560 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 584 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
561 q->txq_stopped = 0; 585 q->txq_stopped = 0;
562 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer)); 586 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
587 kfree(q->lro_frag_tbl);
588 q->lro_nfrags = q->lro_frag_len = 0;
563} 589}
564 590
565 591
@@ -740,19 +766,22 @@ use_orig_buf:
740 * that are page chunks rather than sk_buffs. 766 * that are page chunks rather than sk_buffs.
741 */ 767 */
742static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, 768static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
743 unsigned int len, unsigned int drop_thres) 769 struct sge_rspq *q, unsigned int len,
770 unsigned int drop_thres)
744{ 771{
745 struct sk_buff *skb = NULL; 772 struct sk_buff *newskb, *skb;
746 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 773 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
747 774
748 if (len <= SGE_RX_COPY_THRES) { 775 newskb = skb = q->pg_skb;
749 skb = alloc_skb(len, GFP_ATOMIC); 776
750 if (likely(skb != NULL)) { 777 if (!skb && (len <= SGE_RX_COPY_THRES)) {
751 __skb_put(skb, len); 778 newskb = alloc_skb(len, GFP_ATOMIC);
779 if (likely(newskb != NULL)) {
780 __skb_put(newskb, len);
752 pci_dma_sync_single_for_cpu(adap->pdev, 781 pci_dma_sync_single_for_cpu(adap->pdev,
753 pci_unmap_addr(sd, dma_addr), len, 782 pci_unmap_addr(sd, dma_addr), len,
754 PCI_DMA_FROMDEVICE); 783 PCI_DMA_FROMDEVICE);
755 memcpy(skb->data, sd->pg_chunk.va, len); 784 memcpy(newskb->data, sd->pg_chunk.va, len);
756 pci_dma_sync_single_for_device(adap->pdev, 785 pci_dma_sync_single_for_device(adap->pdev,
757 pci_unmap_addr(sd, dma_addr), len, 786 pci_unmap_addr(sd, dma_addr), len,
758 PCI_DMA_FROMDEVICE); 787 PCI_DMA_FROMDEVICE);
@@ -761,14 +790,16 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
761recycle: 790recycle:
762 fl->credits--; 791 fl->credits--;
763 recycle_rx_buf(adap, fl, fl->cidx); 792 recycle_rx_buf(adap, fl, fl->cidx);
764 return skb; 793 q->rx_recycle_buf++;
794 return newskb;
765 } 795 }
766 796
767 if (unlikely(fl->credits <= drop_thres)) 797 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
768 goto recycle; 798 goto recycle;
769 799
770 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); 800 if (!skb)
771 if (unlikely(!skb)) { 801 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
802 if (unlikely(!newskb)) {
772 if (!drop_thres) 803 if (!drop_thres)
773 return NULL; 804 return NULL;
774 goto recycle; 805 goto recycle;
@@ -776,21 +807,29 @@ recycle:
776 807
777 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 808 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
778 fl->buf_size, PCI_DMA_FROMDEVICE); 809 fl->buf_size, PCI_DMA_FROMDEVICE);
779 __skb_put(skb, SGE_RX_PULL_LEN); 810 if (!skb) {
780 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); 811 __skb_put(newskb, SGE_RX_PULL_LEN);
781 skb_fill_page_desc(skb, 0, sd->pg_chunk.page, 812 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
782 sd->pg_chunk.offset + SGE_RX_PULL_LEN, 813 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
783 len - SGE_RX_PULL_LEN); 814 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
784 skb->len = len; 815 len - SGE_RX_PULL_LEN);
785 skb->data_len = len - SGE_RX_PULL_LEN; 816 newskb->len = len;
786 skb->truesize += skb->data_len; 817 newskb->data_len = len - SGE_RX_PULL_LEN;
818 } else {
819 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
820 sd->pg_chunk.page,
821 sd->pg_chunk.offset, len);
822 newskb->len += len;
823 newskb->data_len += len;
824 }
825 newskb->truesize += newskb->data_len;
787 826
788 fl->credits--; 827 fl->credits--;
789 /* 828 /*
790 * We do not refill FLs here, we let the caller do it to overlap a 829 * We do not refill FLs here, we let the caller do it to overlap a
791 * prefetch. 830 * prefetch.
792 */ 831 */
793 return skb; 832 return newskb;
794} 833}
795 834
796/** 835/**
@@ -1831,9 +1870,10 @@ static void restart_tx(struct sge_qset *qs)
1831 * if it was immediate data in a response. 1870 * if it was immediate data in a response.
1832 */ 1871 */
1833static void rx_eth(struct adapter *adap, struct sge_rspq *rq, 1872static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1834 struct sk_buff *skb, int pad) 1873 struct sk_buff *skb, int pad, int lro)
1835{ 1874{
1836 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); 1875 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1876 struct sge_qset *qs = rspq_to_qset(rq);
1837 struct port_info *pi; 1877 struct port_info *pi;
1838 1878
1839 skb_pull(skb, sizeof(*p) + pad); 1879 skb_pull(skb, sizeof(*p) + pad);
@@ -1850,18 +1890,202 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1850 if (unlikely(p->vlan_valid)) { 1890 if (unlikely(p->vlan_valid)) {
1851 struct vlan_group *grp = pi->vlan_grp; 1891 struct vlan_group *grp = pi->vlan_grp;
1852 1892
1853 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++; 1893 qs->port_stats[SGE_PSTAT_VLANEX]++;
1854 if (likely(grp)) 1894 if (likely(grp))
1855 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 1895 if (lro)
1856 rq->polling); 1896 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1897 grp,
1898 ntohs(p->vlan),
1899 p);
1900 else
1901 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1902 rq->polling);
1857 else 1903 else
1858 dev_kfree_skb_any(skb); 1904 dev_kfree_skb_any(skb);
1859 } else if (rq->polling) 1905 } else if (rq->polling) {
1860 netif_receive_skb(skb); 1906 if (lro)
1861 else 1907 lro_receive_skb(&qs->lro_mgr, skb, p);
1908 else
1909 netif_receive_skb(skb);
1910 } else
1862 netif_rx(skb); 1911 netif_rx(skb);
1863} 1912}
1864 1913
1914static inline int is_eth_tcp(u32 rss)
1915{
1916 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1917}
1918
1919/**
1920 * lro_frame_ok - check if an ingress packet is eligible for LRO
1921 * @p: the CPL header of the packet
1922 *
1923 * Returns true if a received packet is eligible for LRO.
1924 * The following conditions must be true:
1925 * - packet is TCP/IP Ethernet II (checked elsewhere)
1926 * - not an IP fragment
1927 * - no IP options
1928 * - TCP/IP checksums are correct
1929 * - the packet is for this host
1930 */
1931static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1932{
1933 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1934 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1935
1936 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1938}
1939
1940#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
1941 TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
1942 TCP_FLAG_SYN | TCP_FLAG_FIN)
1943#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
1944 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
1945
1946/**
1947 * lro_segment_ok - check if a TCP segment is eligible for LRO
1948 * @tcph: the TCP header of the packet
1949 *
1950 * Returns true if a TCP packet is eligible for LRO. This requires that
1951 * the packet have only the ACK flag set and no TCP options besides
1952 * time stamps.
1953 */
1954static inline int lro_segment_ok(const struct tcphdr *tcph)
1955{
1956 int optlen;
1957
1958 if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
1959 return 0;
1960
1961 optlen = (tcph->doff << 2) - sizeof(*tcph);
1962 if (optlen) {
1963 const u32 *opt = (const u32 *)(tcph + 1);
1964
1965 if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
1966 *opt != htonl(TSTAMP_WORD) || !opt[2])
1967 return 0;
1968 }
1969 return 1;
1970}
1971
1972static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1973 u64 *hdr_flags, void *priv)
1974{
1975 const struct cpl_rx_pkt *cpl = priv;
1976
1977 if (!lro_frame_ok(cpl))
1978 return -1;
1979
1980 *eh = (struct ethhdr *)(cpl + 1);
1981 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1982 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1983
1984 if (!lro_segment_ok(*tcph))
1985 return -1;
1986
1987 *hdr_flags = LRO_IPV4 | LRO_TCP;
1988 return 0;
1989}
1990
1991static int t3_get_skb_header(struct sk_buff *skb,
1992 void **iph, void **tcph, u64 *hdr_flags,
1993 void *priv)
1994{
1995 void *eh;
1996
1997 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1998}
1999
2000static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
2001 void **iph, void **tcph, u64 *hdr_flags,
2002 void *priv)
2003{
2004 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
2005}
2006
2007/**
2008 * lro_add_page - add a page chunk to an LRO session
2009 * @adap: the adapter
2010 * @qs: the associated queue set
2011 * @fl: the free list containing the page chunk to add
2012 * @len: packet length
2013 * @complete: Indicates the last fragment of a frame
2014 *
2015 * Add a received packet contained in a page chunk to an existing LRO
2016 * session.
2017 */
2018static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2019 struct sge_fl *fl, int len, int complete)
2020{
2021 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2022 struct cpl_rx_pkt *cpl;
2023 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
2024 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
2025 int offset = 0;
2026
2027 if (!nr_frags) {
2028 offset = 2 + sizeof(struct cpl_rx_pkt);
2029 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2030 }
2031
2032 fl->credits--;
2033
2034 len -= offset;
2035 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2036 fl->buf_size, PCI_DMA_FROMDEVICE);
2037
2038 rx_frag += nr_frags;
2039 rx_frag->page = sd->pg_chunk.page;
2040 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2041 rx_frag->size = len;
2042 frag_len += len;
2043 qs->lro_nfrags++;
2044 qs->lro_frag_len = frag_len;
2045
2046 if (!complete)
2047 return;
2048
2049 qs->lro_nfrags = qs->lro_frag_len = 0;
2050 cpl = qs->lro_va;
2051
2052 if (unlikely(cpl->vlan_valid)) {
2053 struct net_device *dev = qs->netdev;
2054 struct port_info *pi = netdev_priv(dev);
2055 struct vlan_group *grp = pi->vlan_grp;
2056
2057 if (likely(grp != NULL)) {
2058 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2059 qs->lro_frag_tbl,
2060 frag_len, frag_len,
2061 grp, ntohs(cpl->vlan),
2062 cpl, 0);
2063 return;
2064 }
2065 }
2066 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2067 frag_len, frag_len, cpl, 0);
2068}
2069
2070/**
2071 * init_lro_mgr - initialize a LRO manager object
2072 * @lro_mgr: the LRO manager object
2073 */
2074static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2075{
2076 lro_mgr->dev = qs->netdev;
2077 lro_mgr->features = LRO_F_NAPI;
2078 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2079 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2080 lro_mgr->max_desc = T3_MAX_LRO_SES;
2081 lro_mgr->lro_arr = qs->lro_desc;
2082 lro_mgr->get_frag_header = t3_get_frag_header;
2083 lro_mgr->get_skb_header = t3_get_skb_header;
2084 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2085 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2086 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2087}
2088
1865/** 2089/**
1866 * handle_rsp_cntrl_info - handles control information in a response 2090 * handle_rsp_cntrl_info - handles control information in a response
1867 * @qs: the queue set corresponding to the response 2091 * @qs: the queue set corresponding to the response
@@ -1947,6 +2171,12 @@ static inline int is_new_response(const struct rsp_desc *r,
1947 return (r->intr_gen & F_RSPD_GEN2) == q->gen; 2171 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1948} 2172}
1949 2173
2174static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2175{
2176 q->pg_skb = NULL;
2177 q->rx_recycle_buf = 0;
2178}
2179
1950#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) 2180#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1951#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ 2181#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1952 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ 2182 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
@@ -1984,10 +2214,11 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1984 q->next_holdoff = q->holdoff_tmr; 2214 q->next_holdoff = q->holdoff_tmr;
1985 2215
1986 while (likely(budget_left && is_new_response(r, q))) { 2216 while (likely(budget_left && is_new_response(r, q))) {
1987 int eth, ethpad = 2; 2217 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
1988 struct sk_buff *skb = NULL; 2218 struct sk_buff *skb = NULL;
1989 u32 len, flags = ntohl(r->flags); 2219 u32 len, flags = ntohl(r->flags);
1990 __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val; 2220 __be32 rss_hi = *(const __be32 *)r,
2221 rss_lo = r->rss_hdr.rss_hash_val;
1991 2222
1992 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2223 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1993 2224
@@ -2015,6 +2246,9 @@ no_mem:
2015 } else if ((len = ntohl(r->len_cq)) != 0) { 2246 } else if ((len = ntohl(r->len_cq)) != 0) {
2016 struct sge_fl *fl; 2247 struct sge_fl *fl;
2017 2248
2249 if (eth)
2250 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2251
2018 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2252 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2019 if (fl->use_pages) { 2253 if (fl->use_pages) {
2020 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2254 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
@@ -2024,9 +2258,18 @@ no_mem:
2024 prefetch(addr + L1_CACHE_BYTES); 2258 prefetch(addr + L1_CACHE_BYTES);
2025#endif 2259#endif
2026 __refill_fl(adap, fl); 2260 __refill_fl(adap, fl);
2261 if (lro > 0) {
2262 lro_add_page(adap, qs, fl,
2263 G_RSPD_LEN(len),
2264 flags & F_RSPD_EOP);
2265 goto next_fl;
2266 }
2027 2267
2028 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len), 2268 skb = get_packet_pg(adap, fl, q,
2029 eth ? SGE_RX_DROP_THRES : 0); 2269 G_RSPD_LEN(len),
2270 eth ?
2271 SGE_RX_DROP_THRES : 0);
2272 q->pg_skb = skb;
2030 } else 2273 } else
2031 skb = get_packet(adap, fl, G_RSPD_LEN(len), 2274 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2032 eth ? SGE_RX_DROP_THRES : 0); 2275 eth ? SGE_RX_DROP_THRES : 0);
@@ -2036,7 +2279,7 @@ no_mem:
2036 q->rx_drops++; 2279 q->rx_drops++;
2037 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) 2280 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2038 __skb_pull(skb, 2); 2281 __skb_pull(skb, 2);
2039 2282next_fl:
2040 if (++fl->cidx == fl->size) 2283 if (++fl->cidx == fl->size)
2041 fl->cidx = 0; 2284 fl->cidx = 0;
2042 } else 2285 } else
@@ -2060,9 +2303,13 @@ no_mem:
2060 q->credits = 0; 2303 q->credits = 0;
2061 } 2304 }
2062 2305
2063 if (likely(skb != NULL)) { 2306 packet_complete = flags &
2307 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2308 F_RSPD_ASYNC_NOTIF);
2309
2310 if (skb != NULL && packet_complete) {
2064 if (eth) 2311 if (eth)
2065 rx_eth(adap, q, skb, ethpad); 2312 rx_eth(adap, q, skb, ethpad, lro);
2066 else { 2313 else {
2067 q->offload_pkts++; 2314 q->offload_pkts++;
2068 /* Preserve the RSS info in csum & priority */ 2315 /* Preserve the RSS info in csum & priority */
@@ -2072,11 +2319,19 @@ no_mem:
2072 offload_skbs, 2319 offload_skbs,
2073 ngathered); 2320 ngathered);
2074 } 2321 }
2322
2323 if (flags & F_RSPD_EOP)
2324 clear_rspq_bufstate(q);
2075 } 2325 }
2076 --budget_left; 2326 --budget_left;
2077 } 2327 }
2078 2328
2079 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2329 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2330 lro_flush_all(&qs->lro_mgr);
2331 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2332 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2333 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2334
2080 if (sleeping) 2335 if (sleeping)
2081 check_ring_db(adap, qs, sleeping); 2336 check_ring_db(adap, qs, sleeping);
2082 2337
@@ -2618,8 +2873,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2618 int irq_vec_idx, const struct qset_params *p, 2873 int irq_vec_idx, const struct qset_params *p,
2619 int ntxq, struct net_device *dev) 2874 int ntxq, struct net_device *dev)
2620{ 2875{
2621 int i, ret = -ENOMEM; 2876 int i, avail, ret = -ENOMEM;
2622 struct sge_qset *q = &adapter->sge.qs[id]; 2877 struct sge_qset *q = &adapter->sge.qs[id];
2878 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2623 2879
2624 init_qset_cntxt(q, id); 2880 init_qset_cntxt(q, id);
2625 init_timer(&q->tx_reclaim_timer); 2881 init_timer(&q->tx_reclaim_timer);
@@ -2687,11 +2943,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2687#else 2943#else
2688 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); 2944 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2689#endif 2945#endif
2690 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; 2946#if FL1_PG_CHUNK_SIZE > 0
2947 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2948#else
2691 q->fl[1].buf_size = is_offload(adapter) ? 2949 q->fl[1].buf_size = is_offload(adapter) ?
2692 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 2950 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2693 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); 2951 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2952#endif
2694 2953
2954 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2955 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2956 q->fl[0].order = FL0_PG_ORDER;
2957 q->fl[1].order = FL1_PG_ORDER;
2958
2959 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2960 sizeof(struct skb_frag_struct),
2961 GFP_KERNEL);
2962 q->lro_nfrags = q->lro_frag_len = 0;
2695 spin_lock_irq(&adapter->sge.reg_lock); 2963 spin_lock_irq(&adapter->sge.reg_lock);
2696 2964
2697 /* FL threshold comparison uses < */ 2965 /* FL threshold comparison uses < */
@@ -2742,8 +3010,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2742 q->netdev = dev; 3010 q->netdev = dev;
2743 t3_update_qset_coalesce(q, p); 3011 t3_update_qset_coalesce(q, p);
2744 3012
2745 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); 3013 init_lro_mgr(q, lro_mgr);
2746 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); 3014
3015 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3016 GFP_KERNEL | __GFP_COMP);
3017 if (!avail) {
3018 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3019 goto err;
3020 }
3021 if (avail < q->fl[0].size)
3022 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3023 avail);
3024
3025 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3026 GFP_KERNEL | __GFP_COMP);
3027 if (avail < q->fl[1].size)
3028 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3029 avail);
2747 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); 3030 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2748 3031
2749 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 3032 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
@@ -2752,9 +3035,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2752 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3035 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2753 return 0; 3036 return 0;
2754 3037
2755 err_unlock: 3038err_unlock:
2756 spin_unlock_irq(&adapter->sge.reg_lock); 3039 spin_unlock_irq(&adapter->sge.reg_lock);
2757 err: 3040err:
2758 t3_free_qset(adapter, q); 3041 t3_free_qset(adapter, q);
2759 return ret; 3042 return ret;
2760} 3043}
@@ -2876,7 +3159,7 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
2876 q->coalesce_usecs = 5; 3159 q->coalesce_usecs = 5;
2877 q->rspq_size = 1024; 3160 q->rspq_size = 1024;
2878 q->fl_size = 1024; 3161 q->fl_size = 1024;
2879 q->jumbo_size = 512; 3162 q->jumbo_size = 512;
2880 q->txq_size[TXQ_ETH] = 1024; 3163 q->txq_size[TXQ_ETH] = 1024;
2881 q->txq_size[TXQ_OFLD] = 1024; 3164 q->txq_size[TXQ_OFLD] = 1024;
2882 q->txq_size[TXQ_CTRL] = 256; 3165 q->txq_size[TXQ_CTRL] = 256;
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
index b7a1a310dfd4..a666c5d51cc0 100644
--- a/drivers/net/cxgb3/t3_cpl.h
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -174,6 +174,13 @@ enum { /* TCP congestion control algorithms */
174 CONG_ALG_HIGHSPEED 174 CONG_ALG_HIGHSPEED
175}; 175};
176 176
177enum { /* RSS hash type */
178 RSS_HASH_NONE = 0,
179 RSS_HASH_2_TUPLE = 1,
180 RSS_HASH_4_TUPLE = 2,
181 RSS_HASH_TCPV6 = 3
182};
183
177union opcode_tid { 184union opcode_tid {
178 __be32 opcode_tid; 185 __be32 opcode_tid;
179 __u8 opcode; 186 __u8 opcode;
@@ -184,6 +191,10 @@ union opcode_tid {
184#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) 191#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
185#define G_TID(x) ((x) & 0xFFFFFF) 192#define G_TID(x) ((x) & 0xFFFFFF)
186 193
194#define S_HASHTYPE 22
195#define M_HASHTYPE 0x3
196#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
197
187/* tid is assumed to be 24-bits */ 198/* tid is assumed to be 24-bits */
188#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) 199#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
189 200
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 6b1e77cc069e..3e3506411ac0 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -773,8 +773,6 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
773 return IRQ_HANDLED; 773 return IRQ_HANDLED;
774} 774}
775 775
776struct net_device *last_dev = 0;
777
778static int lance_open(struct net_device *dev) 776static int lance_open(struct net_device *dev)
779{ 777{
780 volatile u16 *ib = (volatile u16 *)dev->mem_start; 778 volatile u16 *ib = (volatile u16 *)dev->mem_start;
@@ -782,8 +780,6 @@ static int lance_open(struct net_device *dev)
782 volatile struct lance_regs *ll = lp->ll; 780 volatile struct lance_regs *ll = lp->ll;
783 int status = 0; 781 int status = 0;
784 782
785 last_dev = dev;
786
787 /* Stop the Lance */ 783 /* Stop the Lance */
788 writereg(&ll->rap, LE_CSR0); 784 writereg(&ll->rap, LE_CSR0);
789 writereg(&ll->rdp, LE_C0_STOP); 785 writereg(&ll->rdp, LE_C0_STOP);
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e233d04a2132..8277e89e552d 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -499,7 +499,7 @@ rio_timer (unsigned long data)
499 entry = np->old_rx % RX_RING_SIZE; 499 entry = np->old_rx % RX_RING_SIZE;
500 /* Dropped packets don't need to re-allocate */ 500 /* Dropped packets don't need to re-allocate */
501 if (np->rx_skbuff[entry] == NULL) { 501 if (np->rx_skbuff[entry] == NULL) {
502 skb = dev_alloc_skb (np->rx_buf_sz); 502 skb = netdev_alloc_skb (dev, np->rx_buf_sz);
503 if (skb == NULL) { 503 if (skb == NULL) {
504 np->rx_ring[entry].fraginfo = 0; 504 np->rx_ring[entry].fraginfo = 0;
505 printk (KERN_INFO 505 printk (KERN_INFO
@@ -570,7 +570,7 @@ alloc_list (struct net_device *dev)
570 /* Allocate the rx buffers */ 570 /* Allocate the rx buffers */
571 for (i = 0; i < RX_RING_SIZE; i++) { 571 for (i = 0; i < RX_RING_SIZE; i++) {
572 /* Allocated fixed size of skbuff */ 572 /* Allocated fixed size of skbuff */
573 struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz); 573 struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
574 np->rx_skbuff[i] = skb; 574 np->rx_skbuff[i] = skb;
575 if (skb == NULL) { 575 if (skb == NULL) {
576 printk (KERN_ERR 576 printk (KERN_ERR
@@ -867,7 +867,7 @@ receive_packet (struct net_device *dev)
867 PCI_DMA_FROMDEVICE); 867 PCI_DMA_FROMDEVICE);
868 skb_put (skb = np->rx_skbuff[entry], pkt_len); 868 skb_put (skb = np->rx_skbuff[entry], pkt_len);
869 np->rx_skbuff[entry] = NULL; 869 np->rx_skbuff[entry] = NULL;
870 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { 870 } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
871 pci_dma_sync_single_for_cpu(np->pdev, 871 pci_dma_sync_single_for_cpu(np->pdev,
872 desc_to_dma(desc), 872 desc_to_dma(desc),
873 np->rx_buf_sz, 873 np->rx_buf_sz,
@@ -904,7 +904,7 @@ receive_packet (struct net_device *dev)
904 struct sk_buff *skb; 904 struct sk_buff *skb;
905 /* Dropped packets don't need to re-allocate */ 905 /* Dropped packets don't need to re-allocate */
906 if (np->rx_skbuff[entry] == NULL) { 906 if (np->rx_skbuff[entry] == NULL) {
907 skb = dev_alloc_skb (np->rx_buf_sz); 907 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
908 if (skb == NULL) { 908 if (skb == NULL) {
909 np->rx_ring[entry].fraginfo = 0; 909 np->rx_ring[entry].fraginfo = 0;
910 printk (KERN_INFO 910 printk (KERN_INFO
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 864295e081b6..08a7365a7d10 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -718,7 +718,7 @@ dm9000_probe(struct platform_device *pdev)
718 718
719 if (!is_valid_ether_addr(ndev->dev_addr)) { 719 if (!is_valid_ether_addr(ndev->dev_addr)) {
720 /* try reading from mac */ 720 /* try reading from mac */
721 721
722 mac_src = "chip"; 722 mac_src = "chip";
723 for (i = 0; i < 6; i++) 723 for (i = 0; i < 6; i++)
724 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 724 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
@@ -768,7 +768,7 @@ dm9000_open(struct net_device *dev)
768 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 768 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
769 irqflags = DEFAULT_TRIGGER; 769 irqflags = DEFAULT_TRIGGER;
770 } 770 }
771 771
772 irqflags |= IRQF_SHARED; 772 irqflags |= IRQF_SHARED;
773 773
774 if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev)) 774 if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
@@ -1115,7 +1115,7 @@ static int dm9000_wait_eeprom(board_info_t *db)
1115 /* The DM9000 data sheets say we should be able to 1115 /* The DM9000 data sheets say we should be able to
1116 * poll the ERRE bit in EPCR to wait for the EEPROM 1116 * poll the ERRE bit in EPCR to wait for the EEPROM
1117 * operation. From testing several chips, this bit 1117 * operation. From testing several chips, this bit
1118 * does not seem to work. 1118 * does not seem to work.
1119 * 1119 *
1120 * We attempt to use the bit, but fall back to the 1120 * We attempt to use the bit, but fall back to the
1121 * timeout (which is why we do not return an error 1121 * timeout (which is why we do not return an error
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index cab1835173cd..ccb8ca2cbb2b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4343,6 +4343,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4343 netdev->features |= NETIF_F_TSO; 4343 netdev->features |= NETIF_F_TSO;
4344 netdev->features |= NETIF_F_TSO6; 4344 netdev->features |= NETIF_F_TSO6;
4345 4345
4346 netdev->vlan_features |= NETIF_F_TSO;
4347 netdev->vlan_features |= NETIF_F_TSO6;
4348 netdev->vlan_features |= NETIF_F_HW_CSUM;
4349 netdev->vlan_features |= NETIF_F_SG;
4350
4346 if (pci_using_dac) 4351 if (pci_using_dac)
4347 netdev->features |= NETIF_F_HIGHDMA; 4352 netdev->features |= NETIF_F_HIGHDMA;
4348 4353
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 2cb244763292..c980ce9719af 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -426,6 +426,7 @@ union ring_type {
426#define NV_PCI_REGSZ_VER1 0x270 426#define NV_PCI_REGSZ_VER1 0x270
427#define NV_PCI_REGSZ_VER2 0x2d4 427#define NV_PCI_REGSZ_VER2 0x2d4
428#define NV_PCI_REGSZ_VER3 0x604 428#define NV_PCI_REGSZ_VER3 0x604
429#define NV_PCI_REGSZ_MAX 0x604
429 430
430/* various timeout delays: all in usec */ 431/* various timeout delays: all in usec */
431#define NV_TXRX_RESET_DELAY 4 432#define NV_TXRX_RESET_DELAY 4
@@ -784,6 +785,9 @@ struct fe_priv {
784 785
785 /* flow control */ 786 /* flow control */
786 u32 pause_flags; 787 u32 pause_flags;
788
789 /* power saved state */
790 u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
787}; 791};
788 792
789/* 793/*
@@ -5805,50 +5809,66 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5805{ 5809{
5806 struct net_device *dev = pci_get_drvdata(pdev); 5810 struct net_device *dev = pci_get_drvdata(pdev);
5807 struct fe_priv *np = netdev_priv(dev); 5811 struct fe_priv *np = netdev_priv(dev);
5812 u8 __iomem *base = get_hwbase(dev);
5813 int i;
5808 5814
5809 if (!netif_running(dev)) 5815 if (netif_running(dev)) {
5810 goto out; 5816 // Gross.
5811 5817 nv_close(dev);
5818 }
5812 netif_device_detach(dev); 5819 netif_device_detach(dev);
5813 5820
5814 // Gross. 5821 /* save non-pci configuration space */
5815 nv_close(dev); 5822 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5823 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5816 5824
5817 pci_save_state(pdev); 5825 pci_save_state(pdev);
5818 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5826 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5827 pci_disable_device(pdev);
5819 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5828 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5820out:
5821 return 0; 5829 return 0;
5822} 5830}
5823 5831
5824static int nv_resume(struct pci_dev *pdev) 5832static int nv_resume(struct pci_dev *pdev)
5825{ 5833{
5826 struct net_device *dev = pci_get_drvdata(pdev); 5834 struct net_device *dev = pci_get_drvdata(pdev);
5835 struct fe_priv *np = netdev_priv(dev);
5827 u8 __iomem *base = get_hwbase(dev); 5836 u8 __iomem *base = get_hwbase(dev);
5828 int rc = 0; 5837 int i, rc = 0;
5829 u32 txreg;
5830
5831 if (!netif_running(dev))
5832 goto out;
5833
5834 netif_device_attach(dev);
5835 5838
5836 pci_set_power_state(pdev, PCI_D0); 5839 pci_set_power_state(pdev, PCI_D0);
5837 pci_restore_state(pdev); 5840 pci_restore_state(pdev);
5841 /* ack any pending wake events, disable PME */
5838 pci_enable_wake(pdev, PCI_D0, 0); 5842 pci_enable_wake(pdev, PCI_D0, 0);
5839 5843
5840 /* restore mac address reverse flag */ 5844 /* restore non-pci configuration space */
5841 txreg = readl(base + NvRegTransmitPoll); 5845 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5842 txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV; 5846 writel(np->saved_config_space[i], base+i*sizeof(u32));
5843 writel(txreg, base + NvRegTransmitPoll);
5844 5847
5845 rc = nv_open(dev); 5848 netif_device_attach(dev);
5846 nv_set_multicast(dev); 5849 if (netif_running(dev)) {
5847out: 5850 rc = nv_open(dev);
5851 nv_set_multicast(dev);
5852 }
5848 return rc; 5853 return rc;
5849} 5854}
5855
5856static void nv_shutdown(struct pci_dev *pdev)
5857{
5858 struct net_device *dev = pci_get_drvdata(pdev);
5859 struct fe_priv *np = netdev_priv(dev);
5860
5861 if (netif_running(dev))
5862 nv_close(dev);
5863
5864 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5865 pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
5866 pci_disable_device(pdev);
5867 pci_set_power_state(pdev, PCI_D3hot);
5868}
5850#else 5869#else
5851#define nv_suspend NULL 5870#define nv_suspend NULL
5871#define nv_shutdown NULL
5852#define nv_resume NULL 5872#define nv_resume NULL
5853#endif /* CONFIG_PM */ 5873#endif /* CONFIG_PM */
5854 5874
@@ -6019,6 +6039,7 @@ static struct pci_driver driver = {
6019 .remove = __devexit_p(nv_remove), 6039 .remove = __devexit_p(nv_remove),
6020 .suspend = nv_suspend, 6040 .suspend = nv_suspend,
6021 .resume = nv_resume, 6041 .resume = nv_resume,
6042 .shutdown = nv_shutdown,
6022}; 6043};
6023 6044
6024static int __init init_nic(void) 6045static int __init init_nic(void)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index a5baaf59ff66..fb7c47790bd6 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -43,6 +43,7 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#ifdef CONFIG_PPC_CPM_NEW_BINDING 45#ifdef CONFIG_PPC_CPM_NEW_BINDING
46#include <linux/of_gpio.h>
46#include <asm/of_platform.h> 47#include <asm/of_platform.h>
47#endif 48#endif
48 49
@@ -1172,8 +1173,7 @@ static int __devinit find_phy(struct device_node *np,
1172 struct fs_platform_info *fpi) 1173 struct fs_platform_info *fpi)
1173{ 1174{
1174 struct device_node *phynode, *mdionode; 1175 struct device_node *phynode, *mdionode;
1175 struct resource res; 1176 int ret = 0, len, bus_id;
1176 int ret = 0, len;
1177 const u32 *data; 1177 const u32 *data;
1178 1178
1179 data = of_get_property(np, "fixed-link", NULL); 1179 data = of_get_property(np, "fixed-link", NULL);
@@ -1190,19 +1190,28 @@ static int __devinit find_phy(struct device_node *np,
1190 if (!phynode) 1190 if (!phynode)
1191 return -EINVAL; 1191 return -EINVAL;
1192 1192
1193 mdionode = of_get_parent(phynode); 1193 data = of_get_property(phynode, "reg", &len);
1194 if (!mdionode) 1194 if (!data || len != 4) {
1195 ret = -EINVAL;
1195 goto out_put_phy; 1196 goto out_put_phy;
1197 }
1196 1198
1197 ret = of_address_to_resource(mdionode, 0, &res); 1199 mdionode = of_get_parent(phynode);
1198 if (ret) 1200 if (!mdionode) {
1199 goto out_put_mdio; 1201 ret = -EINVAL;
1202 goto out_put_phy;
1203 }
1200 1204
1201 data = of_get_property(phynode, "reg", &len); 1205 bus_id = of_get_gpio(mdionode, 0);
1202 if (!data || len != 4) 1206 if (bus_id < 0) {
1203 goto out_put_mdio; 1207 struct resource res;
1208 ret = of_address_to_resource(mdionode, 0, &res);
1209 if (ret)
1210 goto out_put_mdio;
1211 bus_id = res.start;
1212 }
1204 1213
1205 snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data); 1214 snprintf(fpi->bus_id, 16, "%x:%02x", bus_id, *data);
1206 1215
1207out_put_mdio: 1216out_put_mdio:
1208 of_node_put(mdionode); 1217 of_node_put(mdionode);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 25bdd0832df5..393a0f175302 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -928,7 +928,7 @@ rx_irq_fail:
928tx_irq_fail: 928tx_irq_fail:
929 free_irq(priv->interruptError, dev); 929 free_irq(priv->interruptError, dev);
930err_irq_fail: 930err_irq_fail:
931err_rxalloc_fail: 931err_rxalloc_fail:
932rx_skb_fail: 932rx_skb_fail:
933 free_skb_resources(priv); 933 free_skb_resources(priv);
934tx_skb_fail: 934tx_skb_fail:
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index e5c2380f50ca..3199526bcecb 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1140,11 +1140,11 @@ static void hamachi_tx_timeout(struct net_device *dev)
1140 } 1140 }
1141 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1141 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1142 for (i = 0; i < RX_RING_SIZE; i++) { 1142 for (i = 0; i < RX_RING_SIZE; i++) {
1143 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1143 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
1144 hmp->rx_skbuff[i] = skb; 1144 hmp->rx_skbuff[i] = skb;
1145 if (skb == NULL) 1145 if (skb == NULL)
1146 break; 1146 break;
1147 skb->dev = dev; /* Mark as being used by this device. */ 1147
1148 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 1148 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1149 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 1149 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1150 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1150 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1178,14 +1178,6 @@ static void hamachi_init_ring(struct net_device *dev)
1178 hmp->cur_rx = hmp->cur_tx = 0; 1178 hmp->cur_rx = hmp->cur_tx = 0;
1179 hmp->dirty_rx = hmp->dirty_tx = 0; 1179 hmp->dirty_rx = hmp->dirty_tx = 0;
1180 1180
1181#if 0
1182 /* This is wrong. I'm not sure what the original plan was, but this
1183 * is wrong. An MTU of 1 gets you a buffer of 1536, while an MTU
1184 * of 1501 gets a buffer of 1533? -KDU
1185 */
1186 hmp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1187#endif
1188 /* My attempt at a reasonable correction */
1189 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 1181 /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1190 * card needs room to do 8 byte alignment, +2 so we can reserve 1182 * card needs room to do 8 byte alignment, +2 so we can reserve
1191 * the first 2 bytes, and +16 gets room for the status word from the 1183 * the first 2 bytes, and +16 gets room for the status word from the
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 9d5721287d6f..06ad9f302b5a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -99,9 +99,6 @@ struct sixpack {
99 unsigned int rx_count; 99 unsigned int rx_count;
100 unsigned int rx_count_cooked; 100 unsigned int rx_count_cooked;
101 101
102 /* 6pack interface statistics. */
103 struct net_device_stats stats;
104
105 int mtu; /* Our mtu (to spot changes!) */ 102 int mtu; /* Our mtu (to spot changes!) */
106 int buffsize; /* Max buffers sizes */ 103 int buffsize; /* Max buffers sizes */
107 104
@@ -237,7 +234,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
237 return; 234 return;
238 235
239out_drop: 236out_drop:
240 sp->stats.tx_dropped++; 237 sp->dev->stats.tx_dropped++;
241 netif_start_queue(sp->dev); 238 netif_start_queue(sp->dev);
242 if (net_ratelimit()) 239 if (net_ratelimit())
243 printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg); 240 printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg);
@@ -252,7 +249,7 @@ static int sp_xmit(struct sk_buff *skb, struct net_device *dev)
252 spin_lock_bh(&sp->lock); 249 spin_lock_bh(&sp->lock);
253 /* We were not busy, so we are now... :-) */ 250 /* We were not busy, so we are now... :-) */
254 netif_stop_queue(dev); 251 netif_stop_queue(dev);
255 sp->stats.tx_bytes += skb->len; 252 dev->stats.tx_bytes += skb->len;
256 sp_encaps(sp, skb->data, skb->len); 253 sp_encaps(sp, skb->data, skb->len);
257 spin_unlock_bh(&sp->lock); 254 spin_unlock_bh(&sp->lock);
258 255
@@ -298,12 +295,6 @@ static int sp_header(struct sk_buff *skb, struct net_device *dev,
298 return 0; 295 return 0;
299} 296}
300 297
301static struct net_device_stats *sp_get_stats(struct net_device *dev)
302{
303 struct sixpack *sp = netdev_priv(dev);
304 return &sp->stats;
305}
306
307static int sp_set_mac_address(struct net_device *dev, void *addr) 298static int sp_set_mac_address(struct net_device *dev, void *addr)
308{ 299{
309 struct sockaddr_ax25 *sa = addr; 300 struct sockaddr_ax25 *sa = addr;
@@ -338,7 +329,6 @@ static void sp_setup(struct net_device *dev)
338 dev->destructor = free_netdev; 329 dev->destructor = free_netdev;
339 dev->stop = sp_close; 330 dev->stop = sp_close;
340 331
341 dev->get_stats = sp_get_stats;
342 dev->set_mac_address = sp_set_mac_address; 332 dev->set_mac_address = sp_set_mac_address;
343 dev->hard_header_len = AX25_MAX_HEADER_LEN; 333 dev->hard_header_len = AX25_MAX_HEADER_LEN;
344 dev->header_ops = &sp_header_ops; 334 dev->header_ops = &sp_header_ops;
@@ -370,7 +360,7 @@ static void sp_bump(struct sixpack *sp, char cmd)
370 360
371 count = sp->rcount + 1; 361 count = sp->rcount + 1;
372 362
373 sp->stats.rx_bytes += count; 363 sp->dev->stats.rx_bytes += count;
374 364
375 if ((skb = dev_alloc_skb(count)) == NULL) 365 if ((skb = dev_alloc_skb(count)) == NULL)
376 goto out_mem; 366 goto out_mem;
@@ -382,12 +372,12 @@ static void sp_bump(struct sixpack *sp, char cmd)
382 skb->protocol = ax25_type_trans(skb, sp->dev); 372 skb->protocol = ax25_type_trans(skb, sp->dev);
383 netif_rx(skb); 373 netif_rx(skb);
384 sp->dev->last_rx = jiffies; 374 sp->dev->last_rx = jiffies;
385 sp->stats.rx_packets++; 375 sp->dev->stats.rx_packets++;
386 376
387 return; 377 return;
388 378
389out_mem: 379out_mem:
390 sp->stats.rx_dropped++; 380 sp->dev->stats.rx_dropped++;
391} 381}
392 382
393 383
@@ -436,7 +426,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
436 if (sp->xleft <= 0) { 426 if (sp->xleft <= 0) {
437 /* Now serial buffer is almost free & we can start 427 /* Now serial buffer is almost free & we can start
438 * transmission of another packet */ 428 * transmission of another packet */
439 sp->stats.tx_packets++; 429 sp->dev->stats.tx_packets++;
440 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 430 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
441 sp->tx_enable = 0; 431 sp->tx_enable = 0;
442 netif_wake_queue(sp->dev); 432 netif_wake_queue(sp->dev);
@@ -484,7 +474,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
484 count--; 474 count--;
485 if (fp && *fp++) { 475 if (fp && *fp++) {
486 if (!test_and_set_bit(SIXPF_ERROR, &sp->flags)) 476 if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
487 sp->stats.rx_errors++; 477 sp->dev->stats.rx_errors++;
488 continue; 478 continue;
489 } 479 }
490 } 480 }
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index be6e5bc7c881..2e802634d366 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -220,12 +220,12 @@ static int hplance_close(struct net_device *dev)
220 return 0; 220 return 0;
221} 221}
222 222
223int __init hplance_init_module(void) 223static int __init hplance_init_module(void)
224{ 224{
225 return dio_register_driver(&hplance_driver); 225 return dio_register_driver(&hplance_driver);
226} 226}
227 227
228void __exit hplance_cleanup_module(void) 228static void __exit hplance_cleanup_module(void)
229{ 229{
230 dio_unregister_driver(&hplance_driver); 230 dio_unregister_driver(&hplance_driver);
231} 231}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ae398f04c7b4..7b6b780dc8a6 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -967,8 +967,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
967 NETIF_F_HW_VLAN_FILTER; 967 NETIF_F_HW_VLAN_FILTER;
968 968
969 netdev->features |= NETIF_F_TSO; 969 netdev->features |= NETIF_F_TSO;
970
971 netdev->features |= NETIF_F_TSO6; 970 netdev->features |= NETIF_F_TSO6;
971
972 netdev->vlan_features |= NETIF_F_TSO;
973 netdev->vlan_features |= NETIF_F_TSO6;
974 netdev->vlan_features |= NETIF_F_HW_CSUM;
975 netdev->vlan_features |= NETIF_F_SG;
976
972 if (pci_using_dac) 977 if (pci_using_dac)
973 netdev->features |= NETIF_F_HIGHDMA; 978 netdev->features |= NETIF_F_HIGHDMA;
974 979
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 1257e1a7e819..34ad189fff67 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -49,10 +49,6 @@
49/* Look at toshoboe.h (currently in include/net/irda) for details of */ 49/* Look at toshoboe.h (currently in include/net/irda) for details of */
50/* Where to get documentation on the chip */ 50/* Where to get documentation on the chip */
51 51
52
53static char *rcsid =
54 "$Id: donauboe.c V2.18 ven jan 10 03:14:16 2003$";
55
56/* See below for a description of the logic in this driver */ 52/* See below for a description of the logic in this driver */
57 53
58/* User servicable parts */ 54/* User servicable parts */
@@ -1677,7 +1673,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1677 1673
1678 pci_set_drvdata(pci_dev,self); 1674 pci_set_drvdata(pci_dev,self);
1679 1675
1680 printk (KERN_INFO DRIVER_NAME ": Using multiple tasks, version %s\n", rcsid); 1676 printk (KERN_INFO DRIVER_NAME ": Using multiple tasks\n");
1681 1677
1682 return 0; 1678 return 0;
1683 1679
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index cfe0194fef71..78dc8e7837f0 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -1,5 +1,4 @@
1/********************************************************************* 1/*********************************************************************
2 * $Id: smsc-ircc2.c,v 1.19.2.5 2002/10/27 11:34:26 dip Exp $
3 * 2 *
4 * Description: Driver for the SMC Infrared Communications Controller 3 * Description: Driver for the SMC Infrared Communications Controller
5 * Status: Experimental. 4 * Status: Experimental.
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 0c36286d87f7..317b7fd69bb3 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -1,5 +1,4 @@
1/********************************************************************* 1/*********************************************************************
2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $
3 * 2 *
4 * Description: Definitions for the SMC IrCC chipset 3 * Description: Definitions for the SMC IrCC chipset
5 * Status: Experimental. 4 * Status: Experimental.
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7b859220c255..0d37c9025be4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3518,8 +3518,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3518 NETIF_F_HW_VLAN_FILTER; 3518 NETIF_F_HW_VLAN_FILTER;
3519 3519
3520 netdev->features |= NETIF_F_TSO; 3520 netdev->features |= NETIF_F_TSO;
3521
3522 netdev->features |= NETIF_F_TSO6; 3521 netdev->features |= NETIF_F_TSO6;
3522
3523 netdev->vlan_features |= NETIF_F_TSO;
3524 netdev->vlan_features |= NETIF_F_TSO6;
3525 netdev->vlan_features |= NETIF_F_HW_CSUM;
3526 netdev->vlan_features |= NETIF_F_SG;
3527
3523 if (pci_using_dac) 3528 if (pci_using_dac)
3524 netdev->features |= NETIF_F_HIGHDMA; 3529 netdev->features |= NETIF_F_HIGHDMA;
3525 3530
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 484cb2ba717f..7111c65f0b30 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -108,14 +108,14 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
108 if (unlikely(!netif_running(nds[desc->channel]))) 108 if (unlikely(!netif_running(nds[desc->channel])))
109 goto err; 109 goto err;
110 110
111 skb = dev_alloc_skb(desc->pkt_length + 2); 111 skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
112 if (likely(skb != NULL)) { 112 if (likely(skb != NULL)) {
113 skb_reserve(skb, 2); 113 skb_reserve(skb, 2);
114 skb_copy_to_linear_data(skb, buf, desc->pkt_length); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117 117
118 skb->dev->last_rx = jiffies; 118 dev->last_rx = jiffies;
119 119
120 netif_receive_skb(skb); 120 netif_receive_skb(skb);
121 } 121 }
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 0c5447dac03b..00d59ab2f8ac 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -150,19 +150,19 @@ static void __NS8390_init(struct net_device *dev, int startp);
150 * card means that approach caused horrible problems like losing serial data 150 * card means that approach caused horrible problems like losing serial data
151 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA 151 * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
152 * chips with FPGA front ends. 152 * chips with FPGA front ends.
153 * 153 *
154 * Ok the logic behind the 8390 is very simple: 154 * Ok the logic behind the 8390 is very simple:
155 * 155 *
156 * Things to know 156 * Things to know
157 * - IRQ delivery is asynchronous to the PCI bus 157 * - IRQ delivery is asynchronous to the PCI bus
158 * - Blocking the local CPU IRQ via spin locks was too slow 158 * - Blocking the local CPU IRQ via spin locks was too slow
159 * - The chip has register windows needing locking work 159 * - The chip has register windows needing locking work
160 * 160 *
161 * So the path was once (I say once as people appear to have changed it 161 * So the path was once (I say once as people appear to have changed it
162 * in the mean time and it now looks rather bogus if the changes to use 162 * in the mean time and it now looks rather bogus if the changes to use
163 * disable_irq_nosync_irqsave are disabling the local IRQ) 163 * disable_irq_nosync_irqsave are disabling the local IRQ)
164 * 164 *
165 * 165 *
166 * Take the page lock 166 * Take the page lock
167 * Mask the IRQ on chip 167 * Mask the IRQ on chip
168 * Disable the IRQ (but not mask locally- someone seems to have 168 * Disable the IRQ (but not mask locally- someone seems to have
@@ -170,22 +170,22 @@ static void __NS8390_init(struct net_device *dev, int startp);
170 * [This must be _nosync as the page lock may otherwise 170 * [This must be _nosync as the page lock may otherwise
171 * deadlock us] 171 * deadlock us]
172 * Drop the page lock and turn IRQs back on 172 * Drop the page lock and turn IRQs back on
173 * 173 *
174 * At this point an existing IRQ may still be running but we can't 174 * At this point an existing IRQ may still be running but we can't
175 * get a new one 175 * get a new one
176 * 176 *
177 * Take the lock (so we know the IRQ has terminated) but don't mask 177 * Take the lock (so we know the IRQ has terminated) but don't mask
178 * the IRQs on the processor 178 * the IRQs on the processor
179 * Set irqlock [for debug] 179 * Set irqlock [for debug]
180 * 180 *
181 * Transmit (slow as ****) 181 * Transmit (slow as ****)
182 * 182 *
183 * re-enable the IRQ 183 * re-enable the IRQ
184 * 184 *
185 * 185 *
186 * We have to use disable_irq because otherwise you will get delayed 186 * We have to use disable_irq because otherwise you will get delayed
187 * interrupts on the APIC bus deadlocking the transmit path. 187 * interrupts on the APIC bus deadlocking the transmit path.
188 * 188 *
189 * Quite hairy but the chip simply wasn't designed for SMP and you can't 189 * Quite hairy but the chip simply wasn't designed for SMP and you can't
190 * even ACK an interrupt without risking corrupting other parallel 190 * even ACK an interrupt without risking corrupting other parallel
191 * activities on the chip." [lkml, 25 Jul 2007] 191 * activities on the chip." [lkml, 25 Jul 2007]
@@ -265,7 +265,7 @@ static void ei_tx_timeout(struct net_device *dev)
265 int txsr, isr, tickssofar = jiffies - dev->trans_start; 265 int txsr, isr, tickssofar = jiffies - dev->trans_start;
266 unsigned long flags; 266 unsigned long flags;
267 267
268 ei_local->stat.tx_errors++; 268 dev->stats.tx_errors++;
269 269
270 spin_lock_irqsave(&ei_local->page_lock, flags); 270 spin_lock_irqsave(&ei_local->page_lock, flags);
271 txsr = ei_inb(e8390_base+EN0_TSR); 271 txsr = ei_inb(e8390_base+EN0_TSR);
@@ -276,7 +276,7 @@ static void ei_tx_timeout(struct net_device *dev)
276 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : 276 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
277 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); 277 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
278 278
279 if (!isr && !ei_local->stat.tx_packets) 279 if (!isr && !dev->stats.tx_packets)
280 { 280 {
281 /* The 8390 probably hasn't gotten on the cable yet. */ 281 /* The 8390 probably hasn't gotten on the cable yet. */
282 ei_local->interface_num ^= 1; /* Try a different xcvr. */ 282 ei_local->interface_num ^= 1; /* Try a different xcvr. */
@@ -374,7 +374,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
374 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); 374 ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
375 spin_unlock(&ei_local->page_lock); 375 spin_unlock(&ei_local->page_lock);
376 enable_irq_lockdep_irqrestore(dev->irq, &flags); 376 enable_irq_lockdep_irqrestore(dev->irq, &flags);
377 ei_local->stat.tx_errors++; 377 dev->stats.tx_errors++;
378 return 1; 378 return 1;
379 } 379 }
380 380
@@ -417,7 +417,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
417 enable_irq_lockdep_irqrestore(dev->irq, &flags); 417 enable_irq_lockdep_irqrestore(dev->irq, &flags);
418 418
419 dev_kfree_skb (skb); 419 dev_kfree_skb (skb);
420 ei_local->stat.tx_bytes += send_length; 420 dev->stats.tx_bytes += send_length;
421 421
422 return 0; 422 return 0;
423} 423}
@@ -493,9 +493,9 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
493 493
494 if (interrupts & ENISR_COUNTERS) 494 if (interrupts & ENISR_COUNTERS)
495 { 495 {
496 ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); 496 dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
497 ei_local->stat.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); 497 dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
498 ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2); 498 dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
499 ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ 499 ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
500 } 500 }
501 501
@@ -553,7 +553,8 @@ static void __ei_poll(struct net_device *dev)
553static void ei_tx_err(struct net_device *dev) 553static void ei_tx_err(struct net_device *dev)
554{ 554{
555 unsigned long e8390_base = dev->base_addr; 555 unsigned long e8390_base = dev->base_addr;
556 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 556 /* ei_local is used on some platforms via the EI_SHIFT macro */
557 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
557 unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); 558 unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
558 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); 559 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
559 560
@@ -578,10 +579,10 @@ static void ei_tx_err(struct net_device *dev)
578 ei_tx_intr(dev); 579 ei_tx_intr(dev);
579 else 580 else
580 { 581 {
581 ei_local->stat.tx_errors++; 582 dev->stats.tx_errors++;
582 if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; 583 if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
583 if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; 584 if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
584 if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++; 585 if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
585 } 586 }
586} 587}
587 588
@@ -645,25 +646,25 @@ static void ei_tx_intr(struct net_device *dev)
645 646
646 /* Minimize Tx latency: update the statistics after we restart TXing. */ 647 /* Minimize Tx latency: update the statistics after we restart TXing. */
647 if (status & ENTSR_COL) 648 if (status & ENTSR_COL)
648 ei_local->stat.collisions++; 649 dev->stats.collisions++;
649 if (status & ENTSR_PTX) 650 if (status & ENTSR_PTX)
650 ei_local->stat.tx_packets++; 651 dev->stats.tx_packets++;
651 else 652 else
652 { 653 {
653 ei_local->stat.tx_errors++; 654 dev->stats.tx_errors++;
654 if (status & ENTSR_ABT) 655 if (status & ENTSR_ABT)
655 { 656 {
656 ei_local->stat.tx_aborted_errors++; 657 dev->stats.tx_aborted_errors++;
657 ei_local->stat.collisions += 16; 658 dev->stats.collisions += 16;
658 } 659 }
659 if (status & ENTSR_CRS) 660 if (status & ENTSR_CRS)
660 ei_local->stat.tx_carrier_errors++; 661 dev->stats.tx_carrier_errors++;
661 if (status & ENTSR_FU) 662 if (status & ENTSR_FU)
662 ei_local->stat.tx_fifo_errors++; 663 dev->stats.tx_fifo_errors++;
663 if (status & ENTSR_CDH) 664 if (status & ENTSR_CDH)
664 ei_local->stat.tx_heartbeat_errors++; 665 dev->stats.tx_heartbeat_errors++;
665 if (status & ENTSR_OWC) 666 if (status & ENTSR_OWC)
666 ei_local->stat.tx_window_errors++; 667 dev->stats.tx_window_errors++;
667 } 668 }
668 netif_wake_queue(dev); 669 netif_wake_queue(dev);
669} 670}
@@ -730,7 +731,7 @@ static void ei_receive(struct net_device *dev)
730 && rx_frame.next != next_frame + 1 - num_rx_pages) { 731 && rx_frame.next != next_frame + 1 - num_rx_pages) {
731 ei_local->current_page = rxing_page; 732 ei_local->current_page = rxing_page;
732 ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); 733 ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
733 ei_local->stat.rx_errors++; 734 dev->stats.rx_errors++;
734 continue; 735 continue;
735 } 736 }
736 737
@@ -740,8 +741,8 @@ static void ei_receive(struct net_device *dev)
740 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", 741 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
741 dev->name, rx_frame.count, rx_frame.status, 742 dev->name, rx_frame.count, rx_frame.status,
742 rx_frame.next); 743 rx_frame.next);
743 ei_local->stat.rx_errors++; 744 dev->stats.rx_errors++;
744 ei_local->stat.rx_length_errors++; 745 dev->stats.rx_length_errors++;
745 } 746 }
746 else if ((pkt_stat & 0x0F) == ENRSR_RXOK) 747 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
747 { 748 {
@@ -753,7 +754,7 @@ static void ei_receive(struct net_device *dev)
753 if (ei_debug > 1) 754 if (ei_debug > 1)
754 printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", 755 printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
755 dev->name, pkt_len); 756 dev->name, pkt_len);
756 ei_local->stat.rx_dropped++; 757 dev->stats.rx_dropped++;
757 break; 758 break;
758 } 759 }
759 else 760 else
@@ -764,10 +765,10 @@ static void ei_receive(struct net_device *dev)
764 skb->protocol=eth_type_trans(skb,dev); 765 skb->protocol=eth_type_trans(skb,dev);
765 netif_rx(skb); 766 netif_rx(skb);
766 dev->last_rx = jiffies; 767 dev->last_rx = jiffies;
767 ei_local->stat.rx_packets++; 768 dev->stats.rx_packets++;
768 ei_local->stat.rx_bytes += pkt_len; 769 dev->stats.rx_bytes += pkt_len;
769 if (pkt_stat & ENRSR_PHY) 770 if (pkt_stat & ENRSR_PHY)
770 ei_local->stat.multicast++; 771 dev->stats.multicast++;
771 } 772 }
772 } 773 }
773 else 774 else
@@ -776,10 +777,10 @@ static void ei_receive(struct net_device *dev)
776 printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", 777 printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
777 dev->name, rx_frame.status, rx_frame.next, 778 dev->name, rx_frame.status, rx_frame.next,
778 rx_frame.count); 779 rx_frame.count);
779 ei_local->stat.rx_errors++; 780 dev->stats.rx_errors++;
780 /* NB: The NIC counts CRC, frame and missed errors. */ 781 /* NB: The NIC counts CRC, frame and missed errors. */
781 if (pkt_stat & ENRSR_FO) 782 if (pkt_stat & ENRSR_FO)
782 ei_local->stat.rx_fifo_errors++; 783 dev->stats.rx_fifo_errors++;
783 } 784 }
784 next_frame = rx_frame.next; 785 next_frame = rx_frame.next;
785 786
@@ -816,7 +817,8 @@ static void ei_rx_overrun(struct net_device *dev)
816{ 817{
817 unsigned long e8390_base = dev->base_addr; 818 unsigned long e8390_base = dev->base_addr;
818 unsigned char was_txing, must_resend = 0; 819 unsigned char was_txing, must_resend = 0;
819 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 820 /* ei_local is used on some platforms via the EI_SHIFT macro */
821 struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
820 822
821 /* 823 /*
822 * Record whether a Tx was in progress and then issue the 824 * Record whether a Tx was in progress and then issue the
@@ -827,7 +829,7 @@ static void ei_rx_overrun(struct net_device *dev)
827 829
828 if (ei_debug > 1) 830 if (ei_debug > 1)
829 printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); 831 printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
830 ei_local->stat.rx_over_errors++; 832 dev->stats.rx_over_errors++;
831 833
832 /* 834 /*
833 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 835 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
@@ -889,16 +891,16 @@ static struct net_device_stats *get_stats(struct net_device *dev)
889 891
890 /* If the card is stopped, just return the present stats. */ 892 /* If the card is stopped, just return the present stats. */
891 if (!netif_running(dev)) 893 if (!netif_running(dev))
892 return &ei_local->stat; 894 return &dev->stats;
893 895
894 spin_lock_irqsave(&ei_local->page_lock,flags); 896 spin_lock_irqsave(&ei_local->page_lock,flags);
895 /* Read the counter registers, assuming we are in page 0. */ 897 /* Read the counter registers, assuming we are in page 0. */
896 ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); 898 dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
897 ei_local->stat.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); 899 dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
898 ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2); 900 dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
899 spin_unlock_irqrestore(&ei_local->page_lock, flags); 901 spin_unlock_irqrestore(&ei_local->page_lock, flags);
900 902
901 return &ei_local->stat; 903 return &dev->stats;
902} 904}
903 905
904/* 906/*
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 9e700749bb31..98e3eb2697c9 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -117,8 +117,6 @@ enum mac8390_access {
117 ACCESS_16, 117 ACCESS_16,
118}; 118};
119 119
120extern enum mac8390_type mac8390_ident(struct nubus_dev * dev);
121extern int mac8390_memsize(unsigned long membase);
122extern int mac8390_memtest(struct net_device * dev); 120extern int mac8390_memtest(struct net_device * dev);
123static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 121static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
124 enum mac8390_type type); 122 enum mac8390_type type);
@@ -163,7 +161,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
163static void word_memcpy_tocard(void *tp, const void *fp, int count); 161static void word_memcpy_tocard(void *tp, const void *fp, int count);
164static void word_memcpy_fromcard(void *tp, const void *fp, int count); 162static void word_memcpy_fromcard(void *tp, const void *fp, int count);
165 163
166enum mac8390_type __init mac8390_ident(struct nubus_dev * dev) 164static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
167{ 165{
168 switch (dev->dr_sw) { 166 switch (dev->dr_sw) {
169 case NUBUS_DRSW_3COM: 167 case NUBUS_DRSW_3COM:
@@ -234,7 +232,7 @@ enum mac8390_type __init mac8390_ident(struct nubus_dev * dev)
234 return MAC8390_NONE; 232 return MAC8390_NONE;
235} 233}
236 234
237enum mac8390_access __init mac8390_testio(volatile unsigned long membase) 235static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
238{ 236{
239 unsigned long outdata = 0xA5A0B5B0; 237 unsigned long outdata = 0xA5A0B5B0;
240 unsigned long indata = 0x00000000; 238 unsigned long indata = 0x00000000;
@@ -252,7 +250,7 @@ enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
252 return ACCESS_UNKNOWN; 250 return ACCESS_UNKNOWN;
253} 251}
254 252
255int __init mac8390_memsize(unsigned long membase) 253static int __init mac8390_memsize(unsigned long membase)
256{ 254{
257 unsigned long flags; 255 unsigned long flags;
258 int i, j; 256 int i, j;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 92dccd43bdca..e34630252cef 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -80,8 +80,12 @@ static void __init macb_get_hwaddr(struct macb *bp)
80 addr[4] = top & 0xff; 80 addr[4] = top & 0xff;
81 addr[5] = (top >> 8) & 0xff; 81 addr[5] = (top >> 8) & 0xff;
82 82
83 if (is_valid_ether_addr(addr)) 83 if (is_valid_ether_addr(addr)) {
84 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 84 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
85 } else {
86 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
87 random_ether_addr(bp->dev->dev_addr);
88 }
85} 89}
86 90
87static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 91static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index b267161418ea..e64c2086d33c 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -83,9 +83,6 @@ static unsigned int sonic_debug = 1;
83 83
84static int sonic_version_printed; 84static int sonic_version_printed;
85 85
86extern int mac_onboard_sonic_probe(struct net_device* dev);
87extern int mac_nubus_sonic_probe(struct net_device* dev);
88
89/* For onboard SONIC */ 86/* For onboard SONIC */
90#define ONBOARD_SONIC_REGISTERS 0x50F0A000 87#define ONBOARD_SONIC_REGISTERS 0x50F0A000
91#define ONBOARD_SONIC_PROM_BASE 0x50f08000 88#define ONBOARD_SONIC_PROM_BASE 0x50f08000
@@ -170,7 +167,7 @@ static int macsonic_close(struct net_device* dev)
170 return err; 167 return err;
171} 168}
172 169
173int __init macsonic_init(struct net_device* dev) 170static int __init macsonic_init(struct net_device *dev)
174{ 171{
175 struct sonic_local* lp = netdev_priv(dev); 172 struct sonic_local* lp = netdev_priv(dev);
176 173
@@ -218,7 +215,7 @@ int __init macsonic_init(struct net_device* dev)
218 return 0; 215 return 0;
219} 216}
220 217
221int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) 218static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
222{ 219{
223 struct sonic_local *lp = netdev_priv(dev); 220 struct sonic_local *lp = netdev_priv(dev);
224 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 221 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -284,7 +281,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
284 } else return 0; 281 } else return 0;
285} 282}
286 283
287int __init mac_onboard_sonic_probe(struct net_device* dev) 284static int __init mac_onboard_sonic_probe(struct net_device *dev)
288{ 285{
289 /* Bwahahaha */ 286 /* Bwahahaha */
290 static int once_is_more_than_enough; 287 static int once_is_more_than_enough;
@@ -405,9 +402,9 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
405 return macsonic_init(dev); 402 return macsonic_init(dev);
406} 403}
407 404
408int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev, 405static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev,
409 unsigned long prom_addr, 406 unsigned long prom_addr,
410 int id) 407 int id)
411{ 408{
412 int i; 409 int i;
413 for(i = 0; i < 6; i++) 410 for(i = 0; i < 6; i++)
@@ -420,7 +417,7 @@ int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev,
420 return 0; 417 return 0;
421} 418}
422 419
423int __init macsonic_ident(struct nubus_dev* ndev) 420static int __init macsonic_ident(struct nubus_dev *ndev)
424{ 421{
425 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && 422 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
426 ndev->dr_sw == NUBUS_DRSW_SONIC_LC) 423 ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -445,7 +442,7 @@ int __init macsonic_ident(struct nubus_dev* ndev)
445 return -1; 442 return -1;
446} 443}
447 444
448int __init mac_nubus_sonic_probe(struct net_device* dev) 445static int __init mac_nubus_sonic_probe(struct net_device *dev)
449{ 446{
450 static int slots; 447 static int slots;
451 struct nubus_dev* ndev = NULL; 448 struct nubus_dev* ndev = NULL;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e0d76c75aea0..9a68d2ea5f3e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -49,6 +49,7 @@
49#include <linux/if_ether.h> 49#include <linux/if_ether.h>
50#include <linux/if_vlan.h> 50#include <linux/if_vlan.h>
51#include <linux/inet_lro.h> 51#include <linux/inet_lro.h>
52#include <linux/dca.h>
52#include <linux/ip.h> 53#include <linux/ip.h>
53#include <linux/inet.h> 54#include <linux/inet.h>
54#include <linux/in.h> 55#include <linux/in.h>
@@ -185,11 +186,18 @@ struct myri10ge_slice_state {
185 dma_addr_t fw_stats_bus; 186 dma_addr_t fw_stats_bus;
186 int watchdog_tx_done; 187 int watchdog_tx_done;
187 int watchdog_tx_req; 188 int watchdog_tx_req;
189#ifdef CONFIG_DCA
190 int cached_dca_tag;
191 int cpu;
192 __be32 __iomem *dca_tag;
193#endif
194 char irq_desc[32];
188}; 195};
189 196
190struct myri10ge_priv { 197struct myri10ge_priv {
191 struct myri10ge_slice_state ss; 198 struct myri10ge_slice_state *ss;
192 int tx_boundary; /* boundary transmits cannot cross */ 199 int tx_boundary; /* boundary transmits cannot cross */
200 int num_slices;
193 int running; /* running? */ 201 int running; /* running? */
194 int csum_flag; /* rx_csums? */ 202 int csum_flag; /* rx_csums? */
195 int small_bytes; 203 int small_bytes;
@@ -208,6 +216,11 @@ struct myri10ge_priv {
208 dma_addr_t cmd_bus; 216 dma_addr_t cmd_bus;
209 struct pci_dev *pdev; 217 struct pci_dev *pdev;
210 int msi_enabled; 218 int msi_enabled;
219 int msix_enabled;
220 struct msix_entry *msix_vectors;
221#ifdef CONFIG_DCA
222 int dca_enabled;
223#endif
211 u32 link_state; 224 u32 link_state;
212 unsigned int rdma_tags_available; 225 unsigned int rdma_tags_available;
213 int intr_coal_delay; 226 int intr_coal_delay;
@@ -244,6 +257,8 @@ struct myri10ge_priv {
244 257
245static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat"; 258static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
246static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; 259static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
260static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
261static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
247 262
248static char *myri10ge_fw_name = NULL; 263static char *myri10ge_fw_name = NULL;
249module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 264module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -321,6 +336,18 @@ static int myri10ge_wcfifo = 0;
321module_param(myri10ge_wcfifo, int, S_IRUGO); 336module_param(myri10ge_wcfifo, int, S_IRUGO);
322MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); 337MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
323 338
339static int myri10ge_max_slices = 1;
340module_param(myri10ge_max_slices, int, S_IRUGO);
341MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
342
343static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
344module_param(myri10ge_rss_hash, int, S_IRUGO);
345MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
346
347static int myri10ge_dca = 1;
348module_param(myri10ge_dca, int, S_IRUGO);
349MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
350
324#define MYRI10GE_FW_OFFSET 1024*1024 351#define MYRI10GE_FW_OFFSET 1024*1024
325#define MYRI10GE_HIGHPART_TO_U32(X) \ 352#define MYRI10GE_HIGHPART_TO_U32(X) \
326(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0) 353(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -657,7 +684,7 @@ static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
657 return 0; 684 return 0;
658} 685}
659 686
660static int myri10ge_load_firmware(struct myri10ge_priv *mgp) 687static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
661{ 688{
662 char __iomem *submit; 689 char __iomem *submit;
663 __be32 buf[16] __attribute__ ((__aligned__(8))); 690 __be32 buf[16] __attribute__ ((__aligned__(8)));
@@ -667,6 +694,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
667 size = 0; 694 size = 0;
668 status = myri10ge_load_hotplug_firmware(mgp, &size); 695 status = myri10ge_load_hotplug_firmware(mgp, &size);
669 if (status) { 696 if (status) {
697 if (!adopt)
698 return status;
670 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n"); 699 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
671 700
672 /* Do not attempt to adopt firmware if there 701 /* Do not attempt to adopt firmware if there
@@ -859,8 +888,12 @@ abort:
859static int myri10ge_reset(struct myri10ge_priv *mgp) 888static int myri10ge_reset(struct myri10ge_priv *mgp)
860{ 889{
861 struct myri10ge_cmd cmd; 890 struct myri10ge_cmd cmd;
862 int status; 891 struct myri10ge_slice_state *ss;
892 int i, status;
863 size_t bytes; 893 size_t bytes;
894#ifdef CONFIG_DCA
895 unsigned long dca_tag_off;
896#endif
864 897
865 /* try to send a reset command to the card to see if it 898 /* try to send a reset command to the card to see if it
866 * is alive */ 899 * is alive */
@@ -872,20 +905,74 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
872 } 905 }
873 906
874 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST); 907 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
908 /*
909 * Use non-ndis mcp_slot (eg, 4 bytes total,
910 * no toeplitz hash value returned. Older firmware will
911 * not understand this command, but will use the correct
912 * sized mcp_slot, so we ignore error returns
913 */
914 cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
915 (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
875 916
876 /* Now exchange information about interrupts */ 917 /* Now exchange information about interrupts */
877 918
878 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 919 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
879 memset(mgp->ss.rx_done.entry, 0, bytes);
880 cmd.data0 = (u32) bytes; 920 cmd.data0 = (u32) bytes;
881 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 921 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
882 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); 922
883 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); 923 /*
884 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); 924 * Even though we already know how many slices are supported
925 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
926 * has magic side effects, and must be called after a reset.
927 * It must be called prior to calling any RSS related cmds,
928 * including assigning an interrupt queue for anything but
929 * slice 0. It must also be called *after*
930 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
931 * the firmware to compute offsets.
932 */
933
934 if (mgp->num_slices > 1) {
935
936 /* ask the maximum number of slices it supports */
937 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
938 &cmd, 0);
939 if (status != 0) {
940 dev_err(&mgp->pdev->dev,
941 "failed to get number of slices\n");
942 }
943
944 /*
945 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
946 * to setting up the interrupt queue DMA
947 */
948
949 cmd.data0 = mgp->num_slices;
950 cmd.data1 = 1; /* use MSI-X */
951 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
952 &cmd, 0);
953 if (status != 0) {
954 dev_err(&mgp->pdev->dev,
955 "failed to set number of slices\n");
956
957 return status;
958 }
959 }
960 for (i = 0; i < mgp->num_slices; i++) {
961 ss = &mgp->ss[i];
962 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
963 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
964 cmd.data2 = i;
965 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
966 &cmd, 0);
967 };
885 968
886 status |= 969 status |=
887 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); 970 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
888 mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); 971 for (i = 0; i < mgp->num_slices; i++) {
972 ss = &mgp->ss[i];
973 ss->irq_claim =
974 (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
975 }
889 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 976 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
890 &cmd, 0); 977 &cmd, 0);
891 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); 978 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -899,24 +986,116 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
899 } 986 }
900 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
901 988
902 memset(mgp->ss.rx_done.entry, 0, bytes); 989#ifdef CONFIG_DCA
990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
991 dca_tag_off = cmd.data0;
992 for (i = 0; i < mgp->num_slices; i++) {
993 ss = &mgp->ss[i];
994 if (status == 0) {
995 ss->dca_tag = (__iomem __be32 *)
996 (mgp->sram + dca_tag_off + 4 * i);
997 } else {
998 ss->dca_tag = NULL;
999 }
1000 }
1001#endif /* CONFIG_DCA */
903 1002
904 /* reset mcp/driver shared state back to 0 */ 1003 /* reset mcp/driver shared state back to 0 */
905 mgp->ss.tx.req = 0; 1004
906 mgp->ss.tx.done = 0;
907 mgp->ss.tx.pkt_start = 0;
908 mgp->ss.tx.pkt_done = 0;
909 mgp->ss.rx_big.cnt = 0;
910 mgp->ss.rx_small.cnt = 0;
911 mgp->ss.rx_done.idx = 0;
912 mgp->ss.rx_done.cnt = 0;
913 mgp->link_changes = 0; 1005 mgp->link_changes = 0;
1006 for (i = 0; i < mgp->num_slices; i++) {
1007 ss = &mgp->ss[i];
1008
1009 memset(ss->rx_done.entry, 0, bytes);
1010 ss->tx.req = 0;
1011 ss->tx.done = 0;
1012 ss->tx.pkt_start = 0;
1013 ss->tx.pkt_done = 0;
1014 ss->rx_big.cnt = 0;
1015 ss->rx_small.cnt = 0;
1016 ss->rx_done.idx = 0;
1017 ss->rx_done.cnt = 0;
1018 ss->tx.wake_queue = 0;
1019 ss->tx.stop_queue = 0;
1020 }
1021
914 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 1022 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
915 myri10ge_change_pause(mgp, mgp->pause); 1023 myri10ge_change_pause(mgp, mgp->pause);
916 myri10ge_set_multicast_list(mgp->dev); 1024 myri10ge_set_multicast_list(mgp->dev);
917 return status; 1025 return status;
918} 1026}
919 1027
1028#ifdef CONFIG_DCA
1029static void
1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1031{
1032 ss->cpu = cpu;
1033 ss->cached_dca_tag = tag;
1034 put_be32(htonl(tag), ss->dca_tag);
1035}
1036
1037static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1038{
1039 int cpu = get_cpu();
1040 int tag;
1041
1042 if (cpu != ss->cpu) {
1043 tag = dca_get_tag(cpu);
1044 if (ss->cached_dca_tag != tag)
1045 myri10ge_write_dca(ss, cpu, tag);
1046 }
1047 put_cpu();
1048}
1049
1050static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1051{
1052 int err, i;
1053 struct pci_dev *pdev = mgp->pdev;
1054
1055 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1056 return;
1057 if (!myri10ge_dca) {
1058 dev_err(&pdev->dev, "dca disabled by administrator\n");
1059 return;
1060 }
1061 err = dca_add_requester(&pdev->dev);
1062 if (err) {
1063 dev_err(&pdev->dev,
1064 "dca_add_requester() failed, err=%d\n", err);
1065 return;
1066 }
1067 mgp->dca_enabled = 1;
1068 for (i = 0; i < mgp->num_slices; i++)
1069 myri10ge_write_dca(&mgp->ss[i], -1, 0);
1070}
1071
1072static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
1073{
1074 struct pci_dev *pdev = mgp->pdev;
1075 int err;
1076
1077 if (!mgp->dca_enabled)
1078 return;
1079 mgp->dca_enabled = 0;
1080 err = dca_remove_requester(&pdev->dev);
1081}
1082
1083static int myri10ge_notify_dca_device(struct device *dev, void *data)
1084{
1085 struct myri10ge_priv *mgp;
1086 unsigned long event;
1087
1088 mgp = dev_get_drvdata(dev);
1089 event = *(unsigned long *)data;
1090
1091 if (event == DCA_PROVIDER_ADD)
1092 myri10ge_setup_dca(mgp);
1093 else if (event == DCA_PROVIDER_REMOVE)
1094 myri10ge_teardown_dca(mgp);
1095 return 0;
1096}
1097#endif /* CONFIG_DCA */
1098
920static inline void 1099static inline void
921myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst, 1100myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
922 struct mcp_kreq_ether_recv *src) 1101 struct mcp_kreq_ether_recv *src)
@@ -1095,9 +1274,10 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1095 rx_frags[0].size -= MXGEFW_PAD; 1274 rx_frags[0].size -= MXGEFW_PAD;
1096 len -= MXGEFW_PAD; 1275 len -= MXGEFW_PAD;
1097 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, 1276 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
1098 len, len,
1099 /* opaque, will come back in get_frag_header */ 1277 /* opaque, will come back in get_frag_header */
1278 len, len,
1100 (void *)(__force unsigned long)csum, csum); 1279 (void *)(__force unsigned long)csum, csum);
1280
1101 return 1; 1281 return 1;
1102 } 1282 }
1103 1283
@@ -1236,7 +1416,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1236 1416
1237static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1417static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1238{ 1418{
1239 struct mcp_irq_data *stats = mgp->ss.fw_stats; 1419 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1240 1420
1241 if (unlikely(stats->stats_updated)) { 1421 if (unlikely(stats->stats_updated)) {
1242 unsigned link_up = ntohl(stats->link_up); 1422 unsigned link_up = ntohl(stats->link_up);
@@ -1283,6 +1463,11 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1283 struct net_device *netdev = ss->mgp->dev; 1463 struct net_device *netdev = ss->mgp->dev;
1284 int work_done; 1464 int work_done;
1285 1465
1466#ifdef CONFIG_DCA
1467 if (ss->mgp->dca_enabled)
1468 myri10ge_update_dca(ss);
1469#endif
1470
1286 /* process as many rx events as NAPI will allow */ 1471 /* process as many rx events as NAPI will allow */
1287 work_done = myri10ge_clean_rx_done(ss, budget); 1472 work_done = myri10ge_clean_rx_done(ss, budget);
1288 1473
@@ -1302,6 +1487,13 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1302 u32 send_done_count; 1487 u32 send_done_count;
1303 int i; 1488 int i;
1304 1489
1490 /* an interrupt on a non-zero slice is implicitly valid
1491 * since MSI-X irqs are not shared */
1492 if (ss != mgp->ss) {
1493 netif_rx_schedule(ss->dev, &ss->napi);
1494 return (IRQ_HANDLED);
1495 }
1496
1305 /* make sure it is our IRQ, and that the DMA has finished */ 1497 /* make sure it is our IRQ, and that the DMA has finished */
1306 if (unlikely(!stats->valid)) 1498 if (unlikely(!stats->valid))
1307 return (IRQ_NONE); 1499 return (IRQ_NONE);
@@ -1311,7 +1503,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1311 if (stats->valid & 1) 1503 if (stats->valid & 1)
1312 netif_rx_schedule(ss->dev, &ss->napi); 1504 netif_rx_schedule(ss->dev, &ss->napi);
1313 1505
1314 if (!mgp->msi_enabled) { 1506 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1315 put_be32(0, mgp->irq_deassert); 1507 put_be32(0, mgp->irq_deassert);
1316 if (!myri10ge_deassert_wait) 1508 if (!myri10ge_deassert_wait)
1317 stats->valid = 0; 1509 stats->valid = 0;
@@ -1446,10 +1638,10 @@ myri10ge_get_ringparam(struct net_device *netdev,
1446{ 1638{
1447 struct myri10ge_priv *mgp = netdev_priv(netdev); 1639 struct myri10ge_priv *mgp = netdev_priv(netdev);
1448 1640
1449 ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; 1641 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1450 ring->rx_max_pending = mgp->ss.rx_big.mask + 1; 1642 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1451 ring->rx_jumbo_max_pending = 0; 1643 ring->rx_jumbo_max_pending = 0;
1452 ring->tx_max_pending = mgp->ss.rx_small.mask + 1; 1644 ring->tx_max_pending = mgp->ss[0].rx_small.mask + 1;
1453 ring->rx_mini_pending = ring->rx_mini_max_pending; 1645 ring->rx_mini_pending = ring->rx_mini_max_pending;
1454 ring->rx_pending = ring->rx_max_pending; 1646 ring->rx_pending = ring->rx_max_pending;
1455 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; 1647 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1497,9 +1689,12 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1497 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", 1689 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1498 "tx_heartbeat_errors", "tx_window_errors", 1690 "tx_heartbeat_errors", "tx_window_errors",
1499 /* device-specific stats */ 1691 /* device-specific stats */
1500 "tx_boundary", "WC", "irq", "MSI", 1692 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1501 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1693 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1502 "serial_number", "watchdog_resets", 1694 "serial_number", "watchdog_resets",
1695#ifdef CONFIG_DCA
1696 "dca_capable", "dca_enabled",
1697#endif
1503 "link_changes", "link_up", "dropped_link_overflow", 1698 "link_changes", "link_up", "dropped_link_overflow",
1504 "dropped_link_error_or_filtered", 1699 "dropped_link_error_or_filtered",
1505 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1700 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
@@ -1524,23 +1719,31 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1524static void 1719static void
1525myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1720myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
1526{ 1721{
1722 struct myri10ge_priv *mgp = netdev_priv(netdev);
1723 int i;
1724
1527 switch (stringset) { 1725 switch (stringset) {
1528 case ETH_SS_STATS: 1726 case ETH_SS_STATS:
1529 memcpy(data, *myri10ge_gstrings_main_stats, 1727 memcpy(data, *myri10ge_gstrings_main_stats,
1530 sizeof(myri10ge_gstrings_main_stats)); 1728 sizeof(myri10ge_gstrings_main_stats));
1531 data += sizeof(myri10ge_gstrings_main_stats); 1729 data += sizeof(myri10ge_gstrings_main_stats);
1532 memcpy(data, *myri10ge_gstrings_slice_stats, 1730 for (i = 0; i < mgp->num_slices; i++) {
1533 sizeof(myri10ge_gstrings_slice_stats)); 1731 memcpy(data, *myri10ge_gstrings_slice_stats,
1534 data += sizeof(myri10ge_gstrings_slice_stats); 1732 sizeof(myri10ge_gstrings_slice_stats));
1733 data += sizeof(myri10ge_gstrings_slice_stats);
1734 }
1535 break; 1735 break;
1536 } 1736 }
1537} 1737}
1538 1738
1539static int myri10ge_get_sset_count(struct net_device *netdev, int sset) 1739static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
1540{ 1740{
1741 struct myri10ge_priv *mgp = netdev_priv(netdev);
1742
1541 switch (sset) { 1743 switch (sset) {
1542 case ETH_SS_STATS: 1744 case ETH_SS_STATS:
1543 return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; 1745 return MYRI10GE_MAIN_STATS_LEN +
1746 mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
1544 default: 1747 default:
1545 return -EOPNOTSUPP; 1748 return -EOPNOTSUPP;
1546 } 1749 }
@@ -1552,6 +1755,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1552{ 1755{
1553 struct myri10ge_priv *mgp = netdev_priv(netdev); 1756 struct myri10ge_priv *mgp = netdev_priv(netdev);
1554 struct myri10ge_slice_state *ss; 1757 struct myri10ge_slice_state *ss;
1758 int slice;
1555 int i; 1759 int i;
1556 1760
1557 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1761 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
@@ -1561,15 +1765,20 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1561 data[i++] = (unsigned int)mgp->wc_enabled; 1765 data[i++] = (unsigned int)mgp->wc_enabled;
1562 data[i++] = (unsigned int)mgp->pdev->irq; 1766 data[i++] = (unsigned int)mgp->pdev->irq;
1563 data[i++] = (unsigned int)mgp->msi_enabled; 1767 data[i++] = (unsigned int)mgp->msi_enabled;
1768 data[i++] = (unsigned int)mgp->msix_enabled;
1564 data[i++] = (unsigned int)mgp->read_dma; 1769 data[i++] = (unsigned int)mgp->read_dma;
1565 data[i++] = (unsigned int)mgp->write_dma; 1770 data[i++] = (unsigned int)mgp->write_dma;
1566 data[i++] = (unsigned int)mgp->read_write_dma; 1771 data[i++] = (unsigned int)mgp->read_write_dma;
1567 data[i++] = (unsigned int)mgp->serial_number; 1772 data[i++] = (unsigned int)mgp->serial_number;
1568 data[i++] = (unsigned int)mgp->watchdog_resets; 1773 data[i++] = (unsigned int)mgp->watchdog_resets;
1774#ifdef CONFIG_DCA
1775 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1776 data[i++] = (unsigned int)(mgp->dca_enabled);
1777#endif
1569 data[i++] = (unsigned int)mgp->link_changes; 1778 data[i++] = (unsigned int)mgp->link_changes;
1570 1779
1571 /* firmware stats are useful only in the first slice */ 1780 /* firmware stats are useful only in the first slice */
1572 ss = &mgp->ss; 1781 ss = &mgp->ss[0];
1573 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); 1782 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1574 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); 1783 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1575 data[i++] = 1784 data[i++] =
@@ -1585,24 +1794,27 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1585 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); 1794 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1586 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); 1795 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1587 1796
1588 data[i++] = 0; 1797 for (slice = 0; slice < mgp->num_slices; slice++) {
1589 data[i++] = (unsigned int)ss->tx.pkt_start; 1798 ss = &mgp->ss[slice];
1590 data[i++] = (unsigned int)ss->tx.pkt_done; 1799 data[i++] = slice;
1591 data[i++] = (unsigned int)ss->tx.req; 1800 data[i++] = (unsigned int)ss->tx.pkt_start;
1592 data[i++] = (unsigned int)ss->tx.done; 1801 data[i++] = (unsigned int)ss->tx.pkt_done;
1593 data[i++] = (unsigned int)ss->rx_small.cnt; 1802 data[i++] = (unsigned int)ss->tx.req;
1594 data[i++] = (unsigned int)ss->rx_big.cnt; 1803 data[i++] = (unsigned int)ss->tx.done;
1595 data[i++] = (unsigned int)ss->tx.wake_queue; 1804 data[i++] = (unsigned int)ss->rx_small.cnt;
1596 data[i++] = (unsigned int)ss->tx.stop_queue; 1805 data[i++] = (unsigned int)ss->rx_big.cnt;
1597 data[i++] = (unsigned int)ss->tx.linearized; 1806 data[i++] = (unsigned int)ss->tx.wake_queue;
1598 data[i++] = ss->rx_done.lro_mgr.stats.aggregated; 1807 data[i++] = (unsigned int)ss->tx.stop_queue;
1599 data[i++] = ss->rx_done.lro_mgr.stats.flushed; 1808 data[i++] = (unsigned int)ss->tx.linearized;
1600 if (ss->rx_done.lro_mgr.stats.flushed) 1809 data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
1601 data[i++] = ss->rx_done.lro_mgr.stats.aggregated / 1810 data[i++] = ss->rx_done.lro_mgr.stats.flushed;
1602 ss->rx_done.lro_mgr.stats.flushed; 1811 if (ss->rx_done.lro_mgr.stats.flushed)
1603 else 1812 data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
1604 data[i++] = 0; 1813 ss->rx_done.lro_mgr.stats.flushed;
1605 data[i++] = ss->rx_done.lro_mgr.stats.no_desc; 1814 else
1815 data[i++] = 0;
1816 data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
1817 }
1606} 1818}
1607 1819
1608static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) 1820static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1645,12 +1857,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1645 struct net_device *dev = mgp->dev; 1857 struct net_device *dev = mgp->dev;
1646 int tx_ring_size, rx_ring_size; 1858 int tx_ring_size, rx_ring_size;
1647 int tx_ring_entries, rx_ring_entries; 1859 int tx_ring_entries, rx_ring_entries;
1648 int i, status; 1860 int i, slice, status;
1649 size_t bytes; 1861 size_t bytes;
1650 1862
1651 /* get ring sizes */ 1863 /* get ring sizes */
1864 slice = ss - mgp->ss;
1865 cmd.data0 = slice;
1652 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1866 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
1653 tx_ring_size = cmd.data0; 1867 tx_ring_size = cmd.data0;
1868 cmd.data0 = slice;
1654 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 1869 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
1655 if (status != 0) 1870 if (status != 0)
1656 return status; 1871 return status;
@@ -1715,15 +1930,17 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1715 mgp->small_bytes + MXGEFW_PAD, 0); 1930 mgp->small_bytes + MXGEFW_PAD, 0);
1716 1931
1717 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 1932 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
1718 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1933 printk(KERN_ERR
1719 dev->name, ss->rx_small.fill_cnt); 1934 "myri10ge: %s:slice-%d: alloced only %d small bufs\n",
1935 dev->name, slice, ss->rx_small.fill_cnt);
1720 goto abort_with_rx_small_ring; 1936 goto abort_with_rx_small_ring;
1721 } 1937 }
1722 1938
1723 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 1939 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1724 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 1940 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
1725 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1941 printk(KERN_ERR
1726 dev->name, ss->rx_big.fill_cnt); 1942 "myri10ge: %s:slice-%d: alloced only %d big bufs\n",
1943 dev->name, slice, ss->rx_big.fill_cnt);
1727 goto abort_with_rx_big_ring; 1944 goto abort_with_rx_big_ring;
1728 } 1945 }
1729 1946
@@ -1775,6 +1992,10 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
1775 struct myri10ge_tx_buf *tx; 1992 struct myri10ge_tx_buf *tx;
1776 int i, len, idx; 1993 int i, len, idx;
1777 1994
1995 /* If not allocated, skip it */
1996 if (ss->tx.req_list == NULL)
1997 return;
1998
1778 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1999 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
1779 idx = i & ss->rx_big.mask; 2000 idx = i & ss->rx_big.mask;
1780 if (i == ss->rx_big.fill_cnt - 1) 2001 if (i == ss->rx_big.fill_cnt - 1)
@@ -1837,25 +2058,67 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
1837static int myri10ge_request_irq(struct myri10ge_priv *mgp) 2058static int myri10ge_request_irq(struct myri10ge_priv *mgp)
1838{ 2059{
1839 struct pci_dev *pdev = mgp->pdev; 2060 struct pci_dev *pdev = mgp->pdev;
2061 struct myri10ge_slice_state *ss;
2062 struct net_device *netdev = mgp->dev;
2063 int i;
1840 int status; 2064 int status;
1841 2065
2066 mgp->msi_enabled = 0;
2067 mgp->msix_enabled = 0;
2068 status = 0;
1842 if (myri10ge_msi) { 2069 if (myri10ge_msi) {
1843 status = pci_enable_msi(pdev); 2070 if (mgp->num_slices > 1) {
1844 if (status != 0) 2071 status =
1845 dev_err(&pdev->dev, 2072 pci_enable_msix(pdev, mgp->msix_vectors,
1846 "Error %d setting up MSI; falling back to xPIC\n", 2073 mgp->num_slices);
1847 status); 2074 if (status == 0) {
1848 else 2075 mgp->msix_enabled = 1;
1849 mgp->msi_enabled = 1; 2076 } else {
1850 } else { 2077 dev_err(&pdev->dev,
1851 mgp->msi_enabled = 0; 2078 "Error %d setting up MSI-X\n", status);
2079 return status;
2080 }
2081 }
2082 if (mgp->msix_enabled == 0) {
2083 status = pci_enable_msi(pdev);
2084 if (status != 0) {
2085 dev_err(&pdev->dev,
2086 "Error %d setting up MSI; falling back to xPIC\n",
2087 status);
2088 } else {
2089 mgp->msi_enabled = 1;
2090 }
2091 }
1852 } 2092 }
1853 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, 2093 if (mgp->msix_enabled) {
1854 mgp->dev->name, mgp); 2094 for (i = 0; i < mgp->num_slices; i++) {
1855 if (status != 0) { 2095 ss = &mgp->ss[i];
1856 dev_err(&pdev->dev, "failed to allocate IRQ\n"); 2096 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
1857 if (mgp->msi_enabled) 2097 "%s:slice-%d", netdev->name, i);
1858 pci_disable_msi(pdev); 2098 status = request_irq(mgp->msix_vectors[i].vector,
2099 myri10ge_intr, 0, ss->irq_desc,
2100 ss);
2101 if (status != 0) {
2102 dev_err(&pdev->dev,
2103 "slice %d failed to allocate IRQ\n", i);
2104 i--;
2105 while (i >= 0) {
2106 free_irq(mgp->msix_vectors[i].vector,
2107 &mgp->ss[i]);
2108 i--;
2109 }
2110 pci_disable_msix(pdev);
2111 return status;
2112 }
2113 }
2114 } else {
2115 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
2116 mgp->dev->name, &mgp->ss[0]);
2117 if (status != 0) {
2118 dev_err(&pdev->dev, "failed to allocate IRQ\n");
2119 if (mgp->msi_enabled)
2120 pci_disable_msi(pdev);
2121 }
1859 } 2122 }
1860 return status; 2123 return status;
1861} 2124}
@@ -1863,10 +2126,18 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
1863static void myri10ge_free_irq(struct myri10ge_priv *mgp) 2126static void myri10ge_free_irq(struct myri10ge_priv *mgp)
1864{ 2127{
1865 struct pci_dev *pdev = mgp->pdev; 2128 struct pci_dev *pdev = mgp->pdev;
2129 int i;
1866 2130
1867 free_irq(pdev->irq, mgp); 2131 if (mgp->msix_enabled) {
2132 for (i = 0; i < mgp->num_slices; i++)
2133 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2134 } else {
2135 free_irq(pdev->irq, &mgp->ss[0]);
2136 }
1868 if (mgp->msi_enabled) 2137 if (mgp->msi_enabled)
1869 pci_disable_msi(pdev); 2138 pci_disable_msi(pdev);
2139 if (mgp->msix_enabled)
2140 pci_disable_msix(pdev);
1870} 2141}
1871 2142
1872static int 2143static int
@@ -1928,12 +2199,82 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
1928 return 0; 2199 return 0;
1929} 2200}
1930 2201
2202static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2203{
2204 struct myri10ge_cmd cmd;
2205 struct myri10ge_slice_state *ss;
2206 int status;
2207
2208 ss = &mgp->ss[slice];
2209 cmd.data0 = 0; /* single slice for now */
2210 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
2211 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2212 (mgp->sram + cmd.data0);
2213
2214 cmd.data0 = slice;
2215 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
2216 &cmd, 0);
2217 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2218 (mgp->sram + cmd.data0);
2219
2220 cmd.data0 = slice;
2221 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
2222 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2223 (mgp->sram + cmd.data0);
2224
2225 if (myri10ge_wcfifo && mgp->wc_enabled) {
2226 ss->tx.wc_fifo = (u8 __iomem *)
2227 mgp->sram + MXGEFW_ETH_SEND_4 + 64 * slice;
2228 ss->rx_small.wc_fifo = (u8 __iomem *)
2229 mgp->sram + MXGEFW_ETH_RECV_SMALL + 64 * slice;
2230 ss->rx_big.wc_fifo = (u8 __iomem *)
2231 mgp->sram + MXGEFW_ETH_RECV_BIG + 64 * slice;
2232 } else {
2233 ss->tx.wc_fifo = NULL;
2234 ss->rx_small.wc_fifo = NULL;
2235 ss->rx_big.wc_fifo = NULL;
2236 }
2237 return status;
2238
2239}
2240
2241static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
2242{
2243 struct myri10ge_cmd cmd;
2244 struct myri10ge_slice_state *ss;
2245 int status;
2246
2247 ss = &mgp->ss[slice];
2248 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2249 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2250 cmd.data2 = sizeof(struct mcp_irq_data);
2251 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
2252 if (status == -ENOSYS) {
2253 dma_addr_t bus = ss->fw_stats_bus;
2254 if (slice != 0)
2255 return -EINVAL;
2256 bus += offsetof(struct mcp_irq_data, send_done_count);
2257 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
2258 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
2259 status = myri10ge_send_cmd(mgp,
2260 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2261 &cmd, 0);
2262 /* Firmware cannot support multicast without STATS_DMA_V2 */
2263 mgp->fw_multicast_support = 0;
2264 } else {
2265 mgp->fw_multicast_support = 1;
2266 }
2267 return 0;
2268}
2269
1931static int myri10ge_open(struct net_device *dev) 2270static int myri10ge_open(struct net_device *dev)
1932{ 2271{
2272 struct myri10ge_slice_state *ss;
1933 struct myri10ge_priv *mgp = netdev_priv(dev); 2273 struct myri10ge_priv *mgp = netdev_priv(dev);
1934 struct myri10ge_cmd cmd; 2274 struct myri10ge_cmd cmd;
2275 int i, status, big_pow2, slice;
2276 u8 *itable;
1935 struct net_lro_mgr *lro_mgr; 2277 struct net_lro_mgr *lro_mgr;
1936 int status, big_pow2;
1937 2278
1938 if (mgp->running != MYRI10GE_ETH_STOPPED) 2279 if (mgp->running != MYRI10GE_ETH_STOPPED)
1939 return -EBUSY; 2280 return -EBUSY;
@@ -1945,6 +2286,48 @@ static int myri10ge_open(struct net_device *dev)
1945 goto abort_with_nothing; 2286 goto abort_with_nothing;
1946 } 2287 }
1947 2288
2289 if (mgp->num_slices > 1) {
2290 cmd.data0 = mgp->num_slices;
2291 cmd.data1 = 1; /* use MSI-X */
2292 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2293 &cmd, 0);
2294 if (status != 0) {
2295 printk(KERN_ERR
2296 "myri10ge: %s: failed to set number of slices\n",
2297 dev->name);
2298 goto abort_with_nothing;
2299 }
2300 /* setup the indirection table */
2301 cmd.data0 = mgp->num_slices;
2302 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
2303 &cmd, 0);
2304
2305 status |= myri10ge_send_cmd(mgp,
2306 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2307 &cmd, 0);
2308 if (status != 0) {
2309 printk(KERN_ERR
2310 "myri10ge: %s: failed to setup rss tables\n",
2311 dev->name);
2312 }
2313
2314 /* just enable an identity mapping */
2315 itable = mgp->sram + cmd.data0;
2316 for (i = 0; i < mgp->num_slices; i++)
2317 __raw_writeb(i, &itable[i]);
2318
2319 cmd.data0 = 1;
2320 cmd.data1 = myri10ge_rss_hash;
2321 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2322 &cmd, 0);
2323 if (status != 0) {
2324 printk(KERN_ERR
2325 "myri10ge: %s: failed to enable slices\n",
2326 dev->name);
2327 goto abort_with_nothing;
2328 }
2329 }
2330
1948 status = myri10ge_request_irq(mgp); 2331 status = myri10ge_request_irq(mgp);
1949 if (status != 0) 2332 if (status != 0)
1950 goto abort_with_nothing; 2333 goto abort_with_nothing;
@@ -1968,41 +2351,6 @@ static int myri10ge_open(struct net_device *dev)
1968 if (myri10ge_small_bytes > 0) 2351 if (myri10ge_small_bytes > 0)
1969 mgp->small_bytes = myri10ge_small_bytes; 2352 mgp->small_bytes = myri10ge_small_bytes;
1970 2353
1971 /* get the lanai pointers to the send and receive rings */
1972
1973 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
1974 mgp->ss.tx.lanai =
1975 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
1976
1977 status |=
1978 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
1979 mgp->ss.rx_small.lanai =
1980 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1981
1982 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
1983 mgp->ss.rx_big.lanai =
1984 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
1985
1986 if (status != 0) {
1987 printk(KERN_ERR
1988 "myri10ge: %s: failed to get ring sizes or locations\n",
1989 dev->name);
1990 mgp->running = MYRI10GE_ETH_STOPPED;
1991 goto abort_with_irq;
1992 }
1993
1994 if (myri10ge_wcfifo && mgp->wc_enabled) {
1995 mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
1996 mgp->ss.rx_small.wc_fifo =
1997 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
1998 mgp->ss.rx_big.wc_fifo =
1999 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
2000 } else {
2001 mgp->ss.tx.wc_fifo = NULL;
2002 mgp->ss.rx_small.wc_fifo = NULL;
2003 mgp->ss.rx_big.wc_fifo = NULL;
2004 }
2005
2006 /* Firmware needs the big buff size as a power of 2. Lie and 2354 /* Firmware needs the big buff size as a power of 2. Lie and
2007 * tell him the buffer is larger, because we only use 1 2355 * tell him the buffer is larger, because we only use 1
2008 * buffer/pkt, and the mtu will prevent overruns. 2356 * buffer/pkt, and the mtu will prevent overruns.
@@ -2017,9 +2365,44 @@ static int myri10ge_open(struct net_device *dev)
2017 mgp->big_bytes = big_pow2; 2365 mgp->big_bytes = big_pow2;
2018 } 2366 }
2019 2367
2020 status = myri10ge_allocate_rings(&mgp->ss); 2368 /* setup the per-slice data structures */
2021 if (status != 0) 2369 for (slice = 0; slice < mgp->num_slices; slice++) {
2022 goto abort_with_irq; 2370 ss = &mgp->ss[slice];
2371
2372 status = myri10ge_get_txrx(mgp, slice);
2373 if (status != 0) {
2374 printk(KERN_ERR
2375 "myri10ge: %s: failed to get ring sizes or locations\n",
2376 dev->name);
2377 goto abort_with_rings;
2378 }
2379 status = myri10ge_allocate_rings(ss);
2380 if (status != 0)
2381 goto abort_with_rings;
2382 if (slice == 0)
2383 status = myri10ge_set_stats(mgp, slice);
2384 if (status) {
2385 printk(KERN_ERR
2386 "myri10ge: %s: Couldn't set stats DMA\n",
2387 dev->name);
2388 goto abort_with_rings;
2389 }
2390
2391 lro_mgr = &ss->rx_done.lro_mgr;
2392 lro_mgr->dev = dev;
2393 lro_mgr->features = LRO_F_NAPI;
2394 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2395 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2396 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2397 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2398 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2399 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2400 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2401 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2402
2403 /* must happen prior to any irq */
2404 napi_enable(&(ss)->napi);
2405 }
2023 2406
2024 /* now give firmware buffers sizes, and MTU */ 2407 /* now give firmware buffers sizes, and MTU */
2025 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; 2408 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2036,25 +2419,15 @@ static int myri10ge_open(struct net_device *dev)
2036 goto abort_with_rings; 2419 goto abort_with_rings;
2037 } 2420 }
2038 2421
2039 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); 2422 /*
2040 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); 2423 * Set Linux style TSO mode; this is needed only on newer
2041 cmd.data2 = sizeof(struct mcp_irq_data); 2424 * firmware versions. Older versions default to Linux
2042 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2425 * style TSO
2043 if (status == -ENOSYS) { 2426 */
2044 dma_addr_t bus = mgp->ss.fw_stats_bus; 2427 cmd.data0 = 0;
2045 bus += offsetof(struct mcp_irq_data, send_done_count); 2428 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2046 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2429 if (status && status != -ENOSYS) {
2047 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); 2430 printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n",
2048 status = myri10ge_send_cmd(mgp,
2049 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2050 &cmd, 0);
2051 /* Firmware cannot support multicast without STATS_DMA_V2 */
2052 mgp->fw_multicast_support = 0;
2053 } else {
2054 mgp->fw_multicast_support = 1;
2055 }
2056 if (status) {
2057 printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
2058 dev->name); 2431 dev->name);
2059 goto abort_with_rings; 2432 goto abort_with_rings;
2060 } 2433 }
@@ -2062,21 +2435,6 @@ static int myri10ge_open(struct net_device *dev)
2062 mgp->link_state = ~0U; 2435 mgp->link_state = ~0U;
2063 mgp->rdma_tags_available = 15; 2436 mgp->rdma_tags_available = 15;
2064 2437
2065 lro_mgr = &mgp->ss.rx_done.lro_mgr;
2066 lro_mgr->dev = dev;
2067 lro_mgr->features = LRO_F_NAPI;
2068 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2069 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2070 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2071 lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
2072 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2073 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2074 lro_mgr->frag_align_pad = 2;
2075 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2076 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2077
2078 napi_enable(&mgp->ss.napi); /* must happen prior to any irq */
2079
2080 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2438 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2081 if (status) { 2439 if (status) {
2082 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n", 2440 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
@@ -2084,8 +2442,6 @@ static int myri10ge_open(struct net_device *dev)
2084 goto abort_with_rings; 2442 goto abort_with_rings;
2085 } 2443 }
2086 2444
2087 mgp->ss.tx.wake_queue = 0;
2088 mgp->ss.tx.stop_queue = 0;
2089 mgp->running = MYRI10GE_ETH_RUNNING; 2445 mgp->running = MYRI10GE_ETH_RUNNING;
2090 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2446 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2091 add_timer(&mgp->watchdog_timer); 2447 add_timer(&mgp->watchdog_timer);
@@ -2093,9 +2449,9 @@ static int myri10ge_open(struct net_device *dev)
2093 return 0; 2449 return 0;
2094 2450
2095abort_with_rings: 2451abort_with_rings:
2096 myri10ge_free_rings(&mgp->ss); 2452 for (i = 0; i < mgp->num_slices; i++)
2453 myri10ge_free_rings(&mgp->ss[i]);
2097 2454
2098abort_with_irq:
2099 myri10ge_free_irq(mgp); 2455 myri10ge_free_irq(mgp);
2100 2456
2101abort_with_nothing: 2457abort_with_nothing:
@@ -2108,16 +2464,19 @@ static int myri10ge_close(struct net_device *dev)
2108 struct myri10ge_priv *mgp = netdev_priv(dev); 2464 struct myri10ge_priv *mgp = netdev_priv(dev);
2109 struct myri10ge_cmd cmd; 2465 struct myri10ge_cmd cmd;
2110 int status, old_down_cnt; 2466 int status, old_down_cnt;
2467 int i;
2111 2468
2112 if (mgp->running != MYRI10GE_ETH_RUNNING) 2469 if (mgp->running != MYRI10GE_ETH_RUNNING)
2113 return 0; 2470 return 0;
2114 2471
2115 if (mgp->ss.tx.req_bytes == NULL) 2472 if (mgp->ss[0].tx.req_bytes == NULL)
2116 return 0; 2473 return 0;
2117 2474
2118 del_timer_sync(&mgp->watchdog_timer); 2475 del_timer_sync(&mgp->watchdog_timer);
2119 mgp->running = MYRI10GE_ETH_STOPPING; 2476 mgp->running = MYRI10GE_ETH_STOPPING;
2120 napi_disable(&mgp->ss.napi); 2477 for (i = 0; i < mgp->num_slices; i++) {
2478 napi_disable(&mgp->ss[i].napi);
2479 }
2121 netif_carrier_off(dev); 2480 netif_carrier_off(dev);
2122 netif_stop_queue(dev); 2481 netif_stop_queue(dev);
2123 old_down_cnt = mgp->down_cnt; 2482 old_down_cnt = mgp->down_cnt;
@@ -2133,7 +2492,8 @@ static int myri10ge_close(struct net_device *dev)
2133 2492
2134 netif_tx_disable(dev); 2493 netif_tx_disable(dev);
2135 myri10ge_free_irq(mgp); 2494 myri10ge_free_irq(mgp);
2136 myri10ge_free_rings(&mgp->ss); 2495 for (i = 0; i < mgp->num_slices; i++)
2496 myri10ge_free_rings(&mgp->ss[i]);
2137 2497
2138 mgp->running = MYRI10GE_ETH_STOPPED; 2498 mgp->running = MYRI10GE_ETH_STOPPED;
2139 return 0; 2499 return 0;
@@ -2254,7 +2614,7 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
2254 u8 flags, odd_flag; 2614 u8 flags, odd_flag;
2255 2615
2256 /* always transmit through slot 0 */ 2616 /* always transmit through slot 0 */
2257 ss = &mgp->ss; 2617 ss = mgp->ss;
2258 tx = &ss->tx; 2618 tx = &ss->tx;
2259again: 2619again:
2260 req = tx->req_list; 2620 req = tx->req_list;
@@ -2559,7 +2919,21 @@ drop:
2559static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) 2919static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2560{ 2920{
2561 struct myri10ge_priv *mgp = netdev_priv(dev); 2921 struct myri10ge_priv *mgp = netdev_priv(dev);
2562 return &mgp->stats; 2922 struct myri10ge_slice_netstats *slice_stats;
2923 struct net_device_stats *stats = &mgp->stats;
2924 int i;
2925
2926 memset(stats, 0, sizeof(*stats));
2927 for (i = 0; i < mgp->num_slices; i++) {
2928 slice_stats = &mgp->ss[i].stats;
2929 stats->rx_packets += slice_stats->rx_packets;
2930 stats->tx_packets += slice_stats->tx_packets;
2931 stats->rx_bytes += slice_stats->rx_bytes;
2932 stats->tx_bytes += slice_stats->tx_bytes;
2933 stats->rx_dropped += slice_stats->rx_dropped;
2934 stats->tx_dropped += slice_stats->tx_dropped;
2935 }
2936 return stats;
2563} 2937}
2564 2938
2565static void myri10ge_set_multicast_list(struct net_device *dev) 2939static void myri10ge_set_multicast_list(struct net_device *dev)
@@ -2770,10 +3144,10 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2770 * 3144 *
2771 * If the driver can neither enable ECRC nor verify that it has 3145 * If the driver can neither enable ECRC nor verify that it has
2772 * already been enabled, then it must use a firmware image which works 3146 * already been enabled, then it must use a firmware image which works
2773 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 3147 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
2774 * should also ensure that it never gives the device a Read-DMA which is 3148 * should also ensure that it never gives the device a Read-DMA which is
2775 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 3149 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
2776 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 3150 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
2777 * firmware image, and set tx_boundary to 4KB. 3151 * firmware image, and set tx_boundary to 4KB.
2778 */ 3152 */
2779 3153
@@ -2802,7 +3176,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
2802 * completions) in order to see if it works on this host. 3176 * completions) in order to see if it works on this host.
2803 */ 3177 */
2804 mgp->fw_name = myri10ge_fw_aligned; 3178 mgp->fw_name = myri10ge_fw_aligned;
2805 status = myri10ge_load_firmware(mgp); 3179 status = myri10ge_load_firmware(mgp, 1);
2806 if (status != 0) { 3180 if (status != 0) {
2807 goto abort; 3181 goto abort;
2808 } 3182 }
@@ -2983,6 +3357,7 @@ static void myri10ge_watchdog(struct work_struct *work)
2983 struct myri10ge_tx_buf *tx; 3357 struct myri10ge_tx_buf *tx;
2984 u32 reboot; 3358 u32 reboot;
2985 int status; 3359 int status;
3360 int i;
2986 u16 cmd, vendor; 3361 u16 cmd, vendor;
2987 3362
2988 mgp->watchdog_resets++; 3363 mgp->watchdog_resets++;
@@ -3030,20 +3405,26 @@ static void myri10ge_watchdog(struct work_struct *work)
3030 3405
3031 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3406 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
3032 mgp->dev->name); 3407 mgp->dev->name);
3033 tx = &mgp->ss.tx; 3408 for (i = 0; i < mgp->num_slices; i++) {
3034 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3409 tx = &mgp->ss[i].tx;
3035 mgp->dev->name, tx->req, tx->done, 3410 printk(KERN_INFO
3036 tx->pkt_start, tx->pkt_done, 3411 "myri10ge: %s: (%d): %d %d %d %d %d\n",
3037 (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3412 mgp->dev->name, i, tx->req, tx->done,
3038 msleep(2000); 3413 tx->pkt_start, tx->pkt_done,
3039 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3414 (int)ntohl(mgp->ss[i].fw_stats->
3040 mgp->dev->name, tx->req, tx->done, 3415 send_done_count));
3041 tx->pkt_start, tx->pkt_done, 3416 msleep(2000);
3042 (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3417 printk(KERN_INFO
3418 "myri10ge: %s: (%d): %d %d %d %d %d\n",
3419 mgp->dev->name, i, tx->req, tx->done,
3420 tx->pkt_start, tx->pkt_done,
3421 (int)ntohl(mgp->ss[i].fw_stats->
3422 send_done_count));
3423 }
3043 } 3424 }
3044 rtnl_lock(); 3425 rtnl_lock();
3045 myri10ge_close(mgp->dev); 3426 myri10ge_close(mgp->dev);
3046 status = myri10ge_load_firmware(mgp); 3427 status = myri10ge_load_firmware(mgp, 1);
3047 if (status != 0) 3428 if (status != 0)
3048 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n", 3429 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
3049 mgp->dev->name); 3430 mgp->dev->name);
@@ -3063,47 +3444,241 @@ static void myri10ge_watchdog_timer(unsigned long arg)
3063{ 3444{
3064 struct myri10ge_priv *mgp; 3445 struct myri10ge_priv *mgp;
3065 struct myri10ge_slice_state *ss; 3446 struct myri10ge_slice_state *ss;
3447 int i, reset_needed;
3066 u32 rx_pause_cnt; 3448 u32 rx_pause_cnt;
3067 3449
3068 mgp = (struct myri10ge_priv *)arg; 3450 mgp = (struct myri10ge_priv *)arg;
3069 3451
3070 rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); 3452 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3453 for (i = 0, reset_needed = 0;
3454 i < mgp->num_slices && reset_needed == 0; ++i) {
3455
3456 ss = &mgp->ss[i];
3457 if (ss->rx_small.watchdog_needed) {
3458 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3459 mgp->small_bytes + MXGEFW_PAD,
3460 1);
3461 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3462 myri10ge_fill_thresh)
3463 ss->rx_small.watchdog_needed = 0;
3464 }
3465 if (ss->rx_big.watchdog_needed) {
3466 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3467 mgp->big_bytes, 1);
3468 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3469 myri10ge_fill_thresh)
3470 ss->rx_big.watchdog_needed = 0;
3471 }
3071 3472
3072 ss = &mgp->ss; 3473 if (ss->tx.req != ss->tx.done &&
3073 if (ss->rx_small.watchdog_needed) { 3474 ss->tx.done == ss->watchdog_tx_done &&
3074 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 3475 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3075 mgp->small_bytes + MXGEFW_PAD, 1); 3476 /* nic seems like it might be stuck.. */
3076 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= 3477 if (rx_pause_cnt != mgp->watchdog_pause) {
3077 myri10ge_fill_thresh) 3478 if (net_ratelimit())
3078 ss->rx_small.watchdog_needed = 0; 3479 printk(KERN_WARNING "myri10ge %s:"
3079 } 3480 "TX paused, check link partner\n",
3080 if (ss->rx_big.watchdog_needed) { 3481 mgp->dev->name);
3081 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); 3482 } else {
3082 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= 3483 reset_needed = 1;
3083 myri10ge_fill_thresh) 3484 }
3084 ss->rx_big.watchdog_needed = 0;
3085 }
3086
3087 if (ss->tx.req != ss->tx.done &&
3088 ss->tx.done == ss->watchdog_tx_done &&
3089 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3090 /* nic seems like it might be stuck.. */
3091 if (rx_pause_cnt != mgp->watchdog_pause) {
3092 if (net_ratelimit())
3093 printk(KERN_WARNING "myri10ge %s:"
3094 "TX paused, check link partner\n",
3095 mgp->dev->name);
3096 } else {
3097 schedule_work(&mgp->watchdog_work);
3098 return;
3099 } 3485 }
3486 ss->watchdog_tx_done = ss->tx.done;
3487 ss->watchdog_tx_req = ss->tx.req;
3100 } 3488 }
3101 /* rearm timer */
3102 mod_timer(&mgp->watchdog_timer,
3103 jiffies + myri10ge_watchdog_timeout * HZ);
3104 ss->watchdog_tx_done = ss->tx.done;
3105 ss->watchdog_tx_req = ss->tx.req;
3106 mgp->watchdog_pause = rx_pause_cnt; 3489 mgp->watchdog_pause = rx_pause_cnt;
3490
3491 if (reset_needed) {
3492 schedule_work(&mgp->watchdog_work);
3493 } else {
3494 /* rearm timer */
3495 mod_timer(&mgp->watchdog_timer,
3496 jiffies + myri10ge_watchdog_timeout * HZ);
3497 }
3498}
3499
3500static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3501{
3502 struct myri10ge_slice_state *ss;
3503 struct pci_dev *pdev = mgp->pdev;
3504 size_t bytes;
3505 int i;
3506
3507 if (mgp->ss == NULL)
3508 return;
3509
3510 for (i = 0; i < mgp->num_slices; i++) {
3511 ss = &mgp->ss[i];
3512 if (ss->rx_done.entry != NULL) {
3513 bytes = mgp->max_intr_slots *
3514 sizeof(*ss->rx_done.entry);
3515 dma_free_coherent(&pdev->dev, bytes,
3516 ss->rx_done.entry, ss->rx_done.bus);
3517 ss->rx_done.entry = NULL;
3518 }
3519 if (ss->fw_stats != NULL) {
3520 bytes = sizeof(*ss->fw_stats);
3521 dma_free_coherent(&pdev->dev, bytes,
3522 ss->fw_stats, ss->fw_stats_bus);
3523 ss->fw_stats = NULL;
3524 }
3525 }
3526 kfree(mgp->ss);
3527 mgp->ss = NULL;
3528}
3529
3530static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3531{
3532 struct myri10ge_slice_state *ss;
3533 struct pci_dev *pdev = mgp->pdev;
3534 size_t bytes;
3535 int i;
3536
3537 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3538 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3539 if (mgp->ss == NULL) {
3540 return -ENOMEM;
3541 }
3542
3543 for (i = 0; i < mgp->num_slices; i++) {
3544 ss = &mgp->ss[i];
3545 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3546 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3547 &ss->rx_done.bus,
3548 GFP_KERNEL);
3549 if (ss->rx_done.entry == NULL)
3550 goto abort;
3551 memset(ss->rx_done.entry, 0, bytes);
3552 bytes = sizeof(*ss->fw_stats);
3553 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3554 &ss->fw_stats_bus,
3555 GFP_KERNEL);
3556 if (ss->fw_stats == NULL)
3557 goto abort;
3558 ss->mgp = mgp;
3559 ss->dev = mgp->dev;
3560 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3561 myri10ge_napi_weight);
3562 }
3563 return 0;
3564abort:
3565 myri10ge_free_slices(mgp);
3566 return -ENOMEM;
3567}
3568
3569/*
3570 * This function determines the number of slices supported.
3571 * The number slices is the minumum of the number of CPUS,
3572 * the number of MSI-X irqs supported, the number of slices
3573 * supported by the firmware
3574 */
3575static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3576{
3577 struct myri10ge_cmd cmd;
3578 struct pci_dev *pdev = mgp->pdev;
3579 char *old_fw;
3580 int i, status, ncpus, msix_cap;
3581
3582 mgp->num_slices = 1;
3583 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3584 ncpus = num_online_cpus();
3585
3586 if (myri10ge_max_slices == 1 || msix_cap == 0 ||
3587 (myri10ge_max_slices == -1 && ncpus < 2))
3588 return;
3589
3590 /* try to load the slice aware rss firmware */
3591 old_fw = mgp->fw_name;
3592 if (old_fw == myri10ge_fw_aligned)
3593 mgp->fw_name = myri10ge_fw_rss_aligned;
3594 else
3595 mgp->fw_name = myri10ge_fw_rss_unaligned;
3596 status = myri10ge_load_firmware(mgp, 0);
3597 if (status != 0) {
3598 dev_info(&pdev->dev, "Rss firmware not found\n");
3599 return;
3600 }
3601
3602 /* hit the board with a reset to ensure it is alive */
3603 memset(&cmd, 0, sizeof(cmd));
3604 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
3605 if (status != 0) {
3606 dev_err(&mgp->pdev->dev, "failed reset\n");
3607 goto abort_with_fw;
3608 return;
3609 }
3610
3611 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
3612
3613 /* tell it the size of the interrupt queues */
3614 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
3615 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
3616 if (status != 0) {
3617 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
3618 goto abort_with_fw;
3619 }
3620
3621 /* ask the maximum number of slices it supports */
3622 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
3623 if (status != 0)
3624 goto abort_with_fw;
3625 else
3626 mgp->num_slices = cmd.data0;
3627
3628 /* Only allow multiple slices if MSI-X is usable */
3629 if (!myri10ge_msi) {
3630 goto abort_with_fw;
3631 }
3632
3633 /* if the admin did not specify a limit to how many
3634 * slices we should use, cap it automatically to the
3635 * number of CPUs currently online */
3636 if (myri10ge_max_slices == -1)
3637 myri10ge_max_slices = ncpus;
3638
3639 if (mgp->num_slices > myri10ge_max_slices)
3640 mgp->num_slices = myri10ge_max_slices;
3641
3642 /* Now try to allocate as many MSI-X vectors as we have
3643 * slices. We give up on MSI-X if we can only get a single
3644 * vector. */
3645
3646 mgp->msix_vectors = kzalloc(mgp->num_slices *
3647 sizeof(*mgp->msix_vectors), GFP_KERNEL);
3648 if (mgp->msix_vectors == NULL)
3649 goto disable_msix;
3650 for (i = 0; i < mgp->num_slices; i++) {
3651 mgp->msix_vectors[i].entry = i;
3652 }
3653
3654 while (mgp->num_slices > 1) {
3655 /* make sure it is a power of two */
3656 while (!is_power_of_2(mgp->num_slices))
3657 mgp->num_slices--;
3658 if (mgp->num_slices == 1)
3659 goto disable_msix;
3660 status = pci_enable_msix(pdev, mgp->msix_vectors,
3661 mgp->num_slices);
3662 if (status == 0) {
3663 pci_disable_msix(pdev);
3664 return;
3665 }
3666 if (status > 0)
3667 mgp->num_slices = status;
3668 else
3669 goto disable_msix;
3670 }
3671
3672disable_msix:
3673 if (mgp->msix_vectors != NULL) {
3674 kfree(mgp->msix_vectors);
3675 mgp->msix_vectors = NULL;
3676 }
3677
3678abort_with_fw:
3679 mgp->num_slices = 1;
3680 mgp->fw_name = old_fw;
3681 myri10ge_load_firmware(mgp, 0);
3107} 3682}
3108 3683
3109static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3684static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3111,7 +3686,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3111 struct net_device *netdev; 3686 struct net_device *netdev;
3112 struct myri10ge_priv *mgp; 3687 struct myri10ge_priv *mgp;
3113 struct device *dev = &pdev->dev; 3688 struct device *dev = &pdev->dev;
3114 size_t bytes;
3115 int i; 3689 int i;
3116 int status = -ENXIO; 3690 int status = -ENXIO;
3117 int dac_enabled; 3691 int dac_enabled;
@@ -3126,7 +3700,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3126 3700
3127 mgp = netdev_priv(netdev); 3701 mgp = netdev_priv(netdev);
3128 mgp->dev = netdev; 3702 mgp->dev = netdev;
3129 netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
3130 mgp->pdev = pdev; 3703 mgp->pdev = pdev;
3131 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3704 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
3132 mgp->pause = myri10ge_flow_control; 3705 mgp->pause = myri10ge_flow_control;
@@ -3172,11 +3745,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3172 if (mgp->cmd == NULL) 3745 if (mgp->cmd == NULL)
3173 goto abort_with_netdev; 3746 goto abort_with_netdev;
3174 3747
3175 mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3176 &mgp->ss.fw_stats_bus, GFP_KERNEL);
3177 if (mgp->ss.fw_stats == NULL)
3178 goto abort_with_cmd;
3179
3180 mgp->board_span = pci_resource_len(pdev, 0); 3748 mgp->board_span = pci_resource_len(pdev, 0);
3181 mgp->iomem_base = pci_resource_start(pdev, 0); 3749 mgp->iomem_base = pci_resource_start(pdev, 0);
3182 mgp->mtrr = -1; 3750 mgp->mtrr = -1;
@@ -3213,28 +3781,28 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3213 for (i = 0; i < ETH_ALEN; i++) 3781 for (i = 0; i < ETH_ALEN; i++)
3214 netdev->dev_addr[i] = mgp->mac_addr[i]; 3782 netdev->dev_addr[i] = mgp->mac_addr[i];
3215 3783
3216 /* allocate rx done ring */
3217 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3218 mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3219 &mgp->ss.rx_done.bus, GFP_KERNEL);
3220 if (mgp->ss.rx_done.entry == NULL)
3221 goto abort_with_ioremap;
3222 memset(mgp->ss.rx_done.entry, 0, bytes);
3223
3224 myri10ge_select_firmware(mgp); 3784 myri10ge_select_firmware(mgp);
3225 3785
3226 status = myri10ge_load_firmware(mgp); 3786 status = myri10ge_load_firmware(mgp, 1);
3227 if (status != 0) { 3787 if (status != 0) {
3228 dev_err(&pdev->dev, "failed to load firmware\n"); 3788 dev_err(&pdev->dev, "failed to load firmware\n");
3229 goto abort_with_rx_done; 3789 goto abort_with_ioremap;
3790 }
3791 myri10ge_probe_slices(mgp);
3792 status = myri10ge_alloc_slices(mgp);
3793 if (status != 0) {
3794 dev_err(&pdev->dev, "failed to alloc slice state\n");
3795 goto abort_with_firmware;
3230 } 3796 }
3231 3797
3232 status = myri10ge_reset(mgp); 3798 status = myri10ge_reset(mgp);
3233 if (status != 0) { 3799 if (status != 0) {
3234 dev_err(&pdev->dev, "failed reset\n"); 3800 dev_err(&pdev->dev, "failed reset\n");
3235 goto abort_with_firmware; 3801 goto abort_with_slices;
3236 } 3802 }
3237 3803#ifdef CONFIG_DCA
3804 myri10ge_setup_dca(mgp);
3805#endif
3238 pci_set_drvdata(pdev, mgp); 3806 pci_set_drvdata(pdev, mgp);
3239 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) 3807 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
3240 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 3808 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
@@ -3277,24 +3845,27 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3277 dev_err(&pdev->dev, "register_netdev failed: %d\n", status); 3845 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
3278 goto abort_with_state; 3846 goto abort_with_state;
3279 } 3847 }
3280 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3848 if (mgp->msix_enabled)
3281 (mgp->msi_enabled ? "MSI" : "xPIC"), 3849 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
3282 netdev->irq, mgp->tx_boundary, mgp->fw_name, 3850 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
3283 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3851 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3852 else
3853 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
3854 mgp->msi_enabled ? "MSI" : "xPIC",
3855 netdev->irq, mgp->tx_boundary, mgp->fw_name,
3856 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3284 3857
3285 return 0; 3858 return 0;
3286 3859
3287abort_with_state: 3860abort_with_state:
3288 pci_restore_state(pdev); 3861 pci_restore_state(pdev);
3289 3862
3863abort_with_slices:
3864 myri10ge_free_slices(mgp);
3865
3290abort_with_firmware: 3866abort_with_firmware:
3291 myri10ge_dummy_rdma(mgp, 0); 3867 myri10ge_dummy_rdma(mgp, 0);
3292 3868
3293abort_with_rx_done:
3294 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3295 dma_free_coherent(&pdev->dev, bytes,
3296 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3297
3298abort_with_ioremap: 3869abort_with_ioremap:
3299 iounmap(mgp->sram); 3870 iounmap(mgp->sram);
3300 3871
@@ -3303,10 +3874,6 @@ abort_with_wc:
3303 if (mgp->mtrr >= 0) 3874 if (mgp->mtrr >= 0)
3304 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3875 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3305#endif 3876#endif
3306 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
3307 mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
3308
3309abort_with_cmd:
3310 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3877 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3311 mgp->cmd, mgp->cmd_bus); 3878 mgp->cmd, mgp->cmd_bus);
3312 3879
@@ -3327,7 +3894,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
3327{ 3894{
3328 struct myri10ge_priv *mgp; 3895 struct myri10ge_priv *mgp;
3329 struct net_device *netdev; 3896 struct net_device *netdev;
3330 size_t bytes;
3331 3897
3332 mgp = pci_get_drvdata(pdev); 3898 mgp = pci_get_drvdata(pdev);
3333 if (mgp == NULL) 3899 if (mgp == NULL)
@@ -3337,24 +3903,23 @@ static void myri10ge_remove(struct pci_dev *pdev)
3337 netdev = mgp->dev; 3903 netdev = mgp->dev;
3338 unregister_netdev(netdev); 3904 unregister_netdev(netdev);
3339 3905
3906#ifdef CONFIG_DCA
3907 myri10ge_teardown_dca(mgp);
3908#endif
3340 myri10ge_dummy_rdma(mgp, 0); 3909 myri10ge_dummy_rdma(mgp, 0);
3341 3910
3342 /* avoid a memory leak */ 3911 /* avoid a memory leak */
3343 pci_restore_state(pdev); 3912 pci_restore_state(pdev);
3344 3913
3345 bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
3346 dma_free_coherent(&pdev->dev, bytes,
3347 mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
3348
3349 iounmap(mgp->sram); 3914 iounmap(mgp->sram);
3350 3915
3351#ifdef CONFIG_MTRR 3916#ifdef CONFIG_MTRR
3352 if (mgp->mtrr >= 0) 3917 if (mgp->mtrr >= 0)
3353 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3918 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
3354#endif 3919#endif
3355 dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3920 myri10ge_free_slices(mgp);
3356 mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3921 if (mgp->msix_vectors != NULL)
3357 3922 kfree(mgp->msix_vectors);
3358 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3923 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3359 mgp->cmd, mgp->cmd_bus); 3924 mgp->cmd, mgp->cmd_bus);
3360 3925
@@ -3383,10 +3948,42 @@ static struct pci_driver myri10ge_driver = {
3383#endif 3948#endif
3384}; 3949};
3385 3950
3951#ifdef CONFIG_DCA
3952static int
3953myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
3954{
3955 int err = driver_for_each_device(&myri10ge_driver.driver,
3956 NULL, &event,
3957 myri10ge_notify_dca_device);
3958
3959 if (err)
3960 return NOTIFY_BAD;
3961 return NOTIFY_DONE;
3962}
3963
3964static struct notifier_block myri10ge_dca_notifier = {
3965 .notifier_call = myri10ge_notify_dca,
3966 .next = NULL,
3967 .priority = 0,
3968};
3969#endif /* CONFIG_DCA */
3970
3386static __init int myri10ge_init_module(void) 3971static __init int myri10ge_init_module(void)
3387{ 3972{
3388 printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name, 3973 printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
3389 MYRI10GE_VERSION_STR); 3974 MYRI10GE_VERSION_STR);
3975
3976 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_SRC_PORT ||
3977 myri10ge_rss_hash < MXGEFW_RSS_HASH_TYPE_IPV4) {
3978 printk(KERN_ERR
3979 "%s: Illegal rssh hash type %d, defaulting to source port\n",
3980 myri10ge_driver.name, myri10ge_rss_hash);
3981 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
3982 }
3983#ifdef CONFIG_DCA
3984 dca_register_notify(&myri10ge_dca_notifier);
3985#endif
3986
3390 return pci_register_driver(&myri10ge_driver); 3987 return pci_register_driver(&myri10ge_driver);
3391} 3988}
3392 3989
@@ -3394,6 +3991,9 @@ module_init(myri10ge_init_module);
3394 3991
3395static __exit void myri10ge_cleanup_module(void) 3992static __exit void myri10ge_cleanup_module(void)
3396{ 3993{
3994#ifdef CONFIG_DCA
3995 dca_unregister_notify(&myri10ge_dca_notifier);
3996#endif
3397 pci_unregister_driver(&myri10ge_driver); 3997 pci_unregister_driver(&myri10ge_driver);
3398} 3998}
3399 3999
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 46119bb3770a..b238ed0e8ace 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -664,7 +664,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
664NATSEMI_ATTR(dspcfg_workaround); 664NATSEMI_ATTR(dspcfg_workaround);
665 665
666static ssize_t natsemi_show_dspcfg_workaround(struct device *dev, 666static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
667 struct device_attribute *attr, 667 struct device_attribute *attr,
668 char *buf) 668 char *buf)
669{ 669{
670 struct netdev_private *np = netdev_priv(to_net_dev(dev)); 670 struct netdev_private *np = netdev_priv(to_net_dev(dev));
@@ -687,7 +687,7 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
687 || !strncmp("0", buf, count - 1)) 687 || !strncmp("0", buf, count - 1))
688 new_setting = 0; 688 new_setting = 0;
689 else 689 else
690 return count; 690 return count;
691 691
692 spin_lock_irqsave(&np->lock, flags); 692 spin_lock_irqsave(&np->lock, flags);
693 693
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 12fd570b9423..c6fa883daa22 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -281,7 +281,7 @@
281#define XMAC_ADDR1 0x000a8UL 281#define XMAC_ADDR1 0x000a8UL
282#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL 282#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL
283 283
284#define XMAC_ADDR2 0x000b0UL 284#define XMAC_ADDR2 0x000b0UL
285#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL 285#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL
286 286
287#define XMAC_ADDR_CMPEN 0x00208UL 287#define XMAC_ADDR_CMPEN 0x00208UL
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b42c05f84be1..ff449619f047 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -585,16 +585,13 @@ static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
585 for (i=0; i<NR_RX_DESC; i++) { 585 for (i=0; i<NR_RX_DESC; i++) {
586 struct sk_buff *skb; 586 struct sk_buff *skb;
587 long res; 587 long res;
588
588 /* extra 16 bytes for alignment */ 589 /* extra 16 bytes for alignment */
589 skb = __dev_alloc_skb(REAL_RX_BUF_SIZE+16, gfp); 590 skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
590 if (unlikely(!skb)) 591 if (unlikely(!skb))
591 break; 592 break;
592 593
593 res = (long)skb->data & 0xf; 594 skb_reserve(skb, skb->data - PTR_ALIGN(skb->data, 16));
594 res = 0x10 - res;
595 res &= 0xf;
596 skb_reserve(skb, res);
597
598 if (gfp != GFP_ATOMIC) 595 if (gfp != GFP_ATOMIC)
599 spin_lock_irqsave(&dev->rx_info.lock, flags); 596 spin_lock_irqsave(&dev->rx_info.lock, flags);
600 res = ns83820_add_rx_skb(dev, skb); 597 res = ns83820_add_rx_skb(dev, skb);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 3b78a3819bb3..7112fd5e0e1b 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -208,7 +208,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
208struct el3_private { 208struct el3_private {
209 struct pcmcia_device *p_dev; 209 struct pcmcia_device *p_dev;
210 dev_node_t node; 210 dev_node_t node;
211 struct net_device_stats stats;
212 u16 advertising, partner; /* NWay media advertisement */ 211 u16 advertising, partner; /* NWay media advertisement */
213 unsigned char phys; /* MII device address */ 212 unsigned char phys; /* MII device address */
214 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */ 213 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -741,12 +740,11 @@ static int el3_open(struct net_device *dev)
741 740
742static void el3_tx_timeout(struct net_device *dev) 741static void el3_tx_timeout(struct net_device *dev)
743{ 742{
744 struct el3_private *lp = netdev_priv(dev);
745 unsigned int ioaddr = dev->base_addr; 743 unsigned int ioaddr = dev->base_addr;
746 744
747 printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name); 745 printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
748 dump_status(dev); 746 dump_status(dev);
749 lp->stats.tx_errors++; 747 dev->stats.tx_errors++;
750 dev->trans_start = jiffies; 748 dev->trans_start = jiffies;
751 /* Issue TX_RESET and TX_START commands. */ 749 /* Issue TX_RESET and TX_START commands. */
752 tc574_wait_for_completion(dev, TxReset); 750 tc574_wait_for_completion(dev, TxReset);
@@ -756,7 +754,6 @@ static void el3_tx_timeout(struct net_device *dev)
756 754
757static void pop_tx_status(struct net_device *dev) 755static void pop_tx_status(struct net_device *dev)
758{ 756{
759 struct el3_private *lp = netdev_priv(dev);
760 unsigned int ioaddr = dev->base_addr; 757 unsigned int ioaddr = dev->base_addr;
761 int i; 758 int i;
762 759
@@ -772,7 +769,7 @@ static void pop_tx_status(struct net_device *dev)
772 DEBUG(1, "%s: transmit error: status 0x%02x\n", 769 DEBUG(1, "%s: transmit error: status 0x%02x\n",
773 dev->name, tx_status); 770 dev->name, tx_status);
774 outw(TxEnable, ioaddr + EL3_CMD); 771 outw(TxEnable, ioaddr + EL3_CMD);
775 lp->stats.tx_aborted_errors++; 772 dev->stats.tx_aborted_errors++;
776 } 773 }
777 outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ 774 outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
778 } 775 }
@@ -987,7 +984,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
987 update_stats(dev); 984 update_stats(dev);
988 spin_unlock_irqrestore(&lp->window_lock, flags); 985 spin_unlock_irqrestore(&lp->window_lock, flags);
989 } 986 }
990 return &lp->stats; 987 return &dev->stats;
991} 988}
992 989
993/* Update statistics. 990/* Update statistics.
@@ -996,7 +993,6 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
996 */ 993 */
997static void update_stats(struct net_device *dev) 994static void update_stats(struct net_device *dev)
998{ 995{
999 struct el3_private *lp = netdev_priv(dev);
1000 unsigned int ioaddr = dev->base_addr; 996 unsigned int ioaddr = dev->base_addr;
1001 u8 rx, tx, up; 997 u8 rx, tx, up;
1002 998
@@ -1008,15 +1004,15 @@ static void update_stats(struct net_device *dev)
1008 /* Unlike the 3c509 we need not turn off stats updates while reading. */ 1004 /* Unlike the 3c509 we need not turn off stats updates while reading. */
1009 /* Switch to the stats window, and read everything. */ 1005 /* Switch to the stats window, and read everything. */
1010 EL3WINDOW(6); 1006 EL3WINDOW(6);
1011 lp->stats.tx_carrier_errors += inb(ioaddr + 0); 1007 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
1012 lp->stats.tx_heartbeat_errors += inb(ioaddr + 1); 1008 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
1013 /* Multiple collisions. */ inb(ioaddr + 2); 1009 /* Multiple collisions. */ inb(ioaddr + 2);
1014 lp->stats.collisions += inb(ioaddr + 3); 1010 dev->stats.collisions += inb(ioaddr + 3);
1015 lp->stats.tx_window_errors += inb(ioaddr + 4); 1011 dev->stats.tx_window_errors += inb(ioaddr + 4);
1016 lp->stats.rx_fifo_errors += inb(ioaddr + 5); 1012 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
1017 lp->stats.tx_packets += inb(ioaddr + 6); 1013 dev->stats.tx_packets += inb(ioaddr + 6);
1018 up = inb(ioaddr + 9); 1014 up = inb(ioaddr + 9);
1019 lp->stats.tx_packets += (up&0x30) << 4; 1015 dev->stats.tx_packets += (up&0x30) << 4;
1020 /* Rx packets */ inb(ioaddr + 7); 1016 /* Rx packets */ inb(ioaddr + 7);
1021 /* Tx deferrals */ inb(ioaddr + 8); 1017 /* Tx deferrals */ inb(ioaddr + 8);
1022 rx = inw(ioaddr + 10); 1018 rx = inw(ioaddr + 10);
@@ -1026,14 +1022,13 @@ static void update_stats(struct net_device *dev)
1026 /* BadSSD */ inb(ioaddr + 12); 1022 /* BadSSD */ inb(ioaddr + 12);
1027 up = inb(ioaddr + 13); 1023 up = inb(ioaddr + 13);
1028 1024
1029 lp->stats.tx_bytes += tx + ((up & 0xf0) << 12); 1025 dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
1030 1026
1031 EL3WINDOW(1); 1027 EL3WINDOW(1);
1032} 1028}
1033 1029
1034static int el3_rx(struct net_device *dev, int worklimit) 1030static int el3_rx(struct net_device *dev, int worklimit)
1035{ 1031{
1036 struct el3_private *lp = netdev_priv(dev);
1037 unsigned int ioaddr = dev->base_addr; 1032 unsigned int ioaddr = dev->base_addr;
1038 short rx_status; 1033 short rx_status;
1039 1034
@@ -1043,14 +1038,14 @@ static int el3_rx(struct net_device *dev, int worklimit)
1043 (--worklimit >= 0)) { 1038 (--worklimit >= 0)) {
1044 if (rx_status & 0x4000) { /* Error, update stats. */ 1039 if (rx_status & 0x4000) { /* Error, update stats. */
1045 short error = rx_status & 0x3800; 1040 short error = rx_status & 0x3800;
1046 lp->stats.rx_errors++; 1041 dev->stats.rx_errors++;
1047 switch (error) { 1042 switch (error) {
1048 case 0x0000: lp->stats.rx_over_errors++; break; 1043 case 0x0000: dev->stats.rx_over_errors++; break;
1049 case 0x0800: lp->stats.rx_length_errors++; break; 1044 case 0x0800: dev->stats.rx_length_errors++; break;
1050 case 0x1000: lp->stats.rx_frame_errors++; break; 1045 case 0x1000: dev->stats.rx_frame_errors++; break;
1051 case 0x1800: lp->stats.rx_length_errors++; break; 1046 case 0x1800: dev->stats.rx_length_errors++; break;
1052 case 0x2000: lp->stats.rx_frame_errors++; break; 1047 case 0x2000: dev->stats.rx_frame_errors++; break;
1053 case 0x2800: lp->stats.rx_crc_errors++; break; 1048 case 0x2800: dev->stats.rx_crc_errors++; break;
1054 } 1049 }
1055 } else { 1050 } else {
1056 short pkt_len = rx_status & 0x7ff; 1051 short pkt_len = rx_status & 0x7ff;
@@ -1067,12 +1062,12 @@ static int el3_rx(struct net_device *dev, int worklimit)
1067 skb->protocol = eth_type_trans(skb, dev); 1062 skb->protocol = eth_type_trans(skb, dev);
1068 netif_rx(skb); 1063 netif_rx(skb);
1069 dev->last_rx = jiffies; 1064 dev->last_rx = jiffies;
1070 lp->stats.rx_packets++; 1065 dev->stats.rx_packets++;
1071 lp->stats.rx_bytes += pkt_len; 1066 dev->stats.rx_bytes += pkt_len;
1072 } else { 1067 } else {
1073 DEBUG(1, "%s: couldn't allocate a sk_buff of" 1068 DEBUG(1, "%s: couldn't allocate a sk_buff of"
1074 " size %d.\n", dev->name, pkt_len); 1069 " size %d.\n", dev->name, pkt_len);
1075 lp->stats.rx_dropped++; 1070 dev->stats.rx_dropped++;
1076 } 1071 }
1077 } 1072 }
1078 tc574_wait_for_completion(dev, RxDiscard); 1073 tc574_wait_for_completion(dev, RxDiscard);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 1b1abb19c911..549a64558420 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -107,7 +107,6 @@ enum RxFilter {
107struct el3_private { 107struct el3_private {
108 struct pcmcia_device *p_dev; 108 struct pcmcia_device *p_dev;
109 dev_node_t node; 109 dev_node_t node;
110 struct net_device_stats stats;
111 /* For transceiver monitoring */ 110 /* For transceiver monitoring */
112 struct timer_list media; 111 struct timer_list media;
113 u16 media_status; 112 u16 media_status;
@@ -566,12 +565,11 @@ static int el3_open(struct net_device *dev)
566 565
567static void el3_tx_timeout(struct net_device *dev) 566static void el3_tx_timeout(struct net_device *dev)
568{ 567{
569 struct el3_private *lp = netdev_priv(dev);
570 unsigned int ioaddr = dev->base_addr; 568 unsigned int ioaddr = dev->base_addr;
571 569
572 printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); 570 printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
573 dump_status(dev); 571 dump_status(dev);
574 lp->stats.tx_errors++; 572 dev->stats.tx_errors++;
575 dev->trans_start = jiffies; 573 dev->trans_start = jiffies;
576 /* Issue TX_RESET and TX_START commands. */ 574 /* Issue TX_RESET and TX_START commands. */
577 tc589_wait_for_completion(dev, TxReset); 575 tc589_wait_for_completion(dev, TxReset);
@@ -581,7 +579,6 @@ static void el3_tx_timeout(struct net_device *dev)
581 579
582static void pop_tx_status(struct net_device *dev) 580static void pop_tx_status(struct net_device *dev)
583{ 581{
584 struct el3_private *lp = netdev_priv(dev);
585 unsigned int ioaddr = dev->base_addr; 582 unsigned int ioaddr = dev->base_addr;
586 int i; 583 int i;
587 584
@@ -596,7 +593,7 @@ static void pop_tx_status(struct net_device *dev)
596 DEBUG(1, "%s: transmit error: status 0x%02x\n", 593 DEBUG(1, "%s: transmit error: status 0x%02x\n",
597 dev->name, tx_status); 594 dev->name, tx_status);
598 outw(TxEnable, ioaddr + EL3_CMD); 595 outw(TxEnable, ioaddr + EL3_CMD);
599 lp->stats.tx_aborted_errors++; 596 dev->stats.tx_aborted_errors++;
600 } 597 }
601 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ 598 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
602 } 599 }
@@ -614,7 +611,7 @@ static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
614 611
615 spin_lock_irqsave(&priv->lock, flags); 612 spin_lock_irqsave(&priv->lock, flags);
616 613
617 priv->stats.tx_bytes += skb->len; 614 dev->stats.tx_bytes += skb->len;
618 615
619 /* Put out the doubleword header... */ 616 /* Put out the doubleword header... */
620 outw(skb->len, ioaddr + TX_FIFO); 617 outw(skb->len, ioaddr + TX_FIFO);
@@ -764,7 +761,7 @@ static void media_check(unsigned long arg)
764 outw(StatsDisable, ioaddr + EL3_CMD); 761 outw(StatsDisable, ioaddr + EL3_CMD);
765 errs = inb(ioaddr + 0); 762 errs = inb(ioaddr + 0);
766 outw(StatsEnable, ioaddr + EL3_CMD); 763 outw(StatsEnable, ioaddr + EL3_CMD);
767 lp->stats.tx_carrier_errors += errs; 764 dev->stats.tx_carrier_errors += errs;
768 if (errs || (lp->media_status & 0x0010)) media |= 0x0010; 765 if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
769 } 766 }
770 767
@@ -814,7 +811,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
814 update_stats(dev); 811 update_stats(dev);
815 spin_unlock_irqrestore(&lp->lock, flags); 812 spin_unlock_irqrestore(&lp->lock, flags);
816 } 813 }
817 return &lp->stats; 814 return &dev->stats;
818} 815}
819 816
820/* 817/*
@@ -827,7 +824,6 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
827*/ 824*/
828static void update_stats(struct net_device *dev) 825static void update_stats(struct net_device *dev)
829{ 826{
830 struct el3_private *lp = netdev_priv(dev);
831 unsigned int ioaddr = dev->base_addr; 827 unsigned int ioaddr = dev->base_addr;
832 828
833 DEBUG(2, "%s: updating the statistics.\n", dev->name); 829 DEBUG(2, "%s: updating the statistics.\n", dev->name);
@@ -835,13 +831,13 @@ static void update_stats(struct net_device *dev)
835 outw(StatsDisable, ioaddr + EL3_CMD); 831 outw(StatsDisable, ioaddr + EL3_CMD);
836 /* Switch to the stats window, and read everything. */ 832 /* Switch to the stats window, and read everything. */
837 EL3WINDOW(6); 833 EL3WINDOW(6);
838 lp->stats.tx_carrier_errors += inb(ioaddr + 0); 834 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
839 lp->stats.tx_heartbeat_errors += inb(ioaddr + 1); 835 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
840 /* Multiple collisions. */ inb(ioaddr + 2); 836 /* Multiple collisions. */ inb(ioaddr + 2);
841 lp->stats.collisions += inb(ioaddr + 3); 837 dev->stats.collisions += inb(ioaddr + 3);
842 lp->stats.tx_window_errors += inb(ioaddr + 4); 838 dev->stats.tx_window_errors += inb(ioaddr + 4);
843 lp->stats.rx_fifo_errors += inb(ioaddr + 5); 839 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
844 lp->stats.tx_packets += inb(ioaddr + 6); 840 dev->stats.tx_packets += inb(ioaddr + 6);
845 /* Rx packets */ inb(ioaddr + 7); 841 /* Rx packets */ inb(ioaddr + 7);
846 /* Tx deferrals */ inb(ioaddr + 8); 842 /* Tx deferrals */ inb(ioaddr + 8);
847 /* Rx octets */ inw(ioaddr + 10); 843 /* Rx octets */ inw(ioaddr + 10);
@@ -854,7 +850,6 @@ static void update_stats(struct net_device *dev)
854 850
855static int el3_rx(struct net_device *dev) 851static int el3_rx(struct net_device *dev)
856{ 852{
857 struct el3_private *lp = netdev_priv(dev);
858 unsigned int ioaddr = dev->base_addr; 853 unsigned int ioaddr = dev->base_addr;
859 int worklimit = 32; 854 int worklimit = 32;
860 short rx_status; 855 short rx_status;
@@ -865,14 +860,14 @@ static int el3_rx(struct net_device *dev)
865 (--worklimit >= 0)) { 860 (--worklimit >= 0)) {
866 if (rx_status & 0x4000) { /* Error, update stats. */ 861 if (rx_status & 0x4000) { /* Error, update stats. */
867 short error = rx_status & 0x3800; 862 short error = rx_status & 0x3800;
868 lp->stats.rx_errors++; 863 dev->stats.rx_errors++;
869 switch (error) { 864 switch (error) {
870 case 0x0000: lp->stats.rx_over_errors++; break; 865 case 0x0000: dev->stats.rx_over_errors++; break;
871 case 0x0800: lp->stats.rx_length_errors++; break; 866 case 0x0800: dev->stats.rx_length_errors++; break;
872 case 0x1000: lp->stats.rx_frame_errors++; break; 867 case 0x1000: dev->stats.rx_frame_errors++; break;
873 case 0x1800: lp->stats.rx_length_errors++; break; 868 case 0x1800: dev->stats.rx_length_errors++; break;
874 case 0x2000: lp->stats.rx_frame_errors++; break; 869 case 0x2000: dev->stats.rx_frame_errors++; break;
875 case 0x2800: lp->stats.rx_crc_errors++; break; 870 case 0x2800: dev->stats.rx_crc_errors++; break;
876 } 871 }
877 } else { 872 } else {
878 short pkt_len = rx_status & 0x7ff; 873 short pkt_len = rx_status & 0x7ff;
@@ -889,12 +884,12 @@ static int el3_rx(struct net_device *dev)
889 skb->protocol = eth_type_trans(skb, dev); 884 skb->protocol = eth_type_trans(skb, dev);
890 netif_rx(skb); 885 netif_rx(skb);
891 dev->last_rx = jiffies; 886 dev->last_rx = jiffies;
892 lp->stats.rx_packets++; 887 dev->stats.rx_packets++;
893 lp->stats.rx_bytes += pkt_len; 888 dev->stats.rx_bytes += pkt_len;
894 } else { 889 } else {
895 DEBUG(1, "%s: couldn't allocate a sk_buff of" 890 DEBUG(1, "%s: couldn't allocate a sk_buff of"
896 " size %d.\n", dev->name, pkt_len); 891 " size %d.\n", dev->name, pkt_len);
897 lp->stats.rx_dropped++; 892 dev->stats.rx_dropped++;
898 } 893 }
899 } 894 }
900 /* Pop the top of the Rx FIFO */ 895 /* Pop the top of the Rx FIFO */
@@ -929,7 +924,7 @@ static int el3_close(struct net_device *dev)
929 DEBUG(1, "%s: shutting down ethercard.\n", dev->name); 924 DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
930 925
931 if (pcmcia_dev_present(link)) { 926 if (pcmcia_dev_present(link)) {
932 /* Turn off statistics ASAP. We update lp->stats below. */ 927 /* Turn off statistics ASAP. We update dev->stats below. */
933 outw(StatsDisable, ioaddr + EL3_CMD); 928 outw(StatsDisable, ioaddr + EL3_CMD);
934 929
935 /* Disable the receiver and transmitter. */ 930 /* Disable the receiver and transmitter. */
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index ce95c5d168fe..d7018ff9e171 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1021,7 +1021,7 @@ static void ei_tx_timeout(struct net_device *dev)
1021 int txsr, isr, tickssofar = jiffies - dev->trans_start; 1021 int txsr, isr, tickssofar = jiffies - dev->trans_start;
1022 unsigned long flags; 1022 unsigned long flags;
1023 1023
1024 ei_local->stat.tx_errors++; 1024 dev->stats.tx_errors++;
1025 1025
1026 spin_lock_irqsave(&ei_local->page_lock, flags); 1026 spin_lock_irqsave(&ei_local->page_lock, flags);
1027 txsr = inb(e8390_base+EN0_TSR); 1027 txsr = inb(e8390_base+EN0_TSR);
@@ -1032,7 +1032,7 @@ static void ei_tx_timeout(struct net_device *dev)
1032 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : 1032 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
1033 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); 1033 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
1034 1034
1035 if (!isr && !ei_local->stat.tx_packets) 1035 if (!isr && !dev->stats.tx_packets)
1036 { 1036 {
1037 /* The 8390 probably hasn't gotten on the cable yet. */ 1037 /* The 8390 probably hasn't gotten on the cable yet. */
1038 ei_local->interface_num ^= 1; /* Try a different xcvr. */ 1038 ei_local->interface_num ^= 1; /* Try a different xcvr. */
@@ -1122,7 +1122,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1122 netif_stop_queue(dev); 1122 netif_stop_queue(dev);
1123 outb_p(ENISR_ALL, e8390_base + EN0_IMR); 1123 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
1124 spin_unlock_irqrestore(&ei_local->page_lock, flags); 1124 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1125 ei_local->stat.tx_errors++; 1125 dev->stats.tx_errors++;
1126 return 1; 1126 return 1;
1127 } 1127 }
1128 1128
@@ -1170,7 +1170,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1170 spin_unlock_irqrestore(&ei_local->page_lock, flags); 1170 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1171 1171
1172 dev_kfree_skb (skb); 1172 dev_kfree_skb (skb);
1173 ei_local->stat.tx_bytes += send_length; 1173 dev->stats.tx_bytes += send_length;
1174 1174
1175 return 0; 1175 return 0;
1176} 1176}
@@ -1262,9 +1262,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1262 1262
1263 if (interrupts & ENISR_COUNTERS) 1263 if (interrupts & ENISR_COUNTERS)
1264 { 1264 {
1265 ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); 1265 dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
1266 ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); 1266 dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
1267 ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); 1267 dev->stats.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
1268 } 1268 }
1269 } 1269 }
1270 1270
@@ -1309,7 +1309,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1309static void ei_tx_err(struct net_device *dev) 1309static void ei_tx_err(struct net_device *dev)
1310{ 1310{
1311 long e8390_base = dev->base_addr; 1311 long e8390_base = dev->base_addr;
1312 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
1313 unsigned char txsr = inb_p(e8390_base+EN0_TSR); 1312 unsigned char txsr = inb_p(e8390_base+EN0_TSR);
1314 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); 1313 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
1315 1314
@@ -1332,10 +1331,10 @@ static void ei_tx_err(struct net_device *dev)
1332 ei_tx_intr(dev); 1331 ei_tx_intr(dev);
1333 else 1332 else
1334 { 1333 {
1335 ei_local->stat.tx_errors++; 1334 dev->stats.tx_errors++;
1336 if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++; 1335 if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
1337 if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++; 1336 if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
1338 if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++; 1337 if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
1339 } 1338 }
1340} 1339}
1341 1340
@@ -1397,25 +1396,25 @@ static void ei_tx_intr(struct net_device *dev)
1397 1396
1398 /* Minimize Tx latency: update the statistics after we restart TXing. */ 1397 /* Minimize Tx latency: update the statistics after we restart TXing. */
1399 if (status & ENTSR_COL) 1398 if (status & ENTSR_COL)
1400 ei_local->stat.collisions++; 1399 dev->stats.collisions++;
1401 if (status & ENTSR_PTX) 1400 if (status & ENTSR_PTX)
1402 ei_local->stat.tx_packets++; 1401 dev->stats.tx_packets++;
1403 else 1402 else
1404 { 1403 {
1405 ei_local->stat.tx_errors++; 1404 dev->stats.tx_errors++;
1406 if (status & ENTSR_ABT) 1405 if (status & ENTSR_ABT)
1407 { 1406 {
1408 ei_local->stat.tx_aborted_errors++; 1407 dev->stats.tx_aborted_errors++;
1409 ei_local->stat.collisions += 16; 1408 dev->stats.collisions += 16;
1410 } 1409 }
1411 if (status & ENTSR_CRS) 1410 if (status & ENTSR_CRS)
1412 ei_local->stat.tx_carrier_errors++; 1411 dev->stats.tx_carrier_errors++;
1413 if (status & ENTSR_FU) 1412 if (status & ENTSR_FU)
1414 ei_local->stat.tx_fifo_errors++; 1413 dev->stats.tx_fifo_errors++;
1415 if (status & ENTSR_CDH) 1414 if (status & ENTSR_CDH)
1416 ei_local->stat.tx_heartbeat_errors++; 1415 dev->stats.tx_heartbeat_errors++;
1417 if (status & ENTSR_OWC) 1416 if (status & ENTSR_OWC)
1418 ei_local->stat.tx_window_errors++; 1417 dev->stats.tx_window_errors++;
1419 } 1418 }
1420 netif_wake_queue(dev); 1419 netif_wake_queue(dev);
1421} 1420}
@@ -1476,8 +1475,8 @@ static void ei_receive(struct net_device *dev)
1476 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", 1475 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
1477 dev->name, rx_frame.count, rx_frame.status, 1476 dev->name, rx_frame.count, rx_frame.status,
1478 rx_frame.next); 1477 rx_frame.next);
1479 ei_local->stat.rx_errors++; 1478 dev->stats.rx_errors++;
1480 ei_local->stat.rx_length_errors++; 1479 dev->stats.rx_length_errors++;
1481 } 1480 }
1482 else if ((pkt_stat & 0x0F) == ENRSR_RXOK) 1481 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
1483 { 1482 {
@@ -1489,7 +1488,7 @@ static void ei_receive(struct net_device *dev)
1489 if (ei_debug > 1) 1488 if (ei_debug > 1)
1490 printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", 1489 printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
1491 dev->name, pkt_len); 1490 dev->name, pkt_len);
1492 ei_local->stat.rx_dropped++; 1491 dev->stats.rx_dropped++;
1493 break; 1492 break;
1494 } 1493 }
1495 else 1494 else
@@ -1500,10 +1499,10 @@ static void ei_receive(struct net_device *dev)
1500 skb->protocol=eth_type_trans(skb,dev); 1499 skb->protocol=eth_type_trans(skb,dev);
1501 netif_rx(skb); 1500 netif_rx(skb);
1502 dev->last_rx = jiffies; 1501 dev->last_rx = jiffies;
1503 ei_local->stat.rx_packets++; 1502 dev->stats.rx_packets++;
1504 ei_local->stat.rx_bytes += pkt_len; 1503 dev->stats.rx_bytes += pkt_len;
1505 if (pkt_stat & ENRSR_PHY) 1504 if (pkt_stat & ENRSR_PHY)
1506 ei_local->stat.multicast++; 1505 dev->stats.multicast++;
1507 } 1506 }
1508 } 1507 }
1509 else 1508 else
@@ -1512,10 +1511,10 @@ static void ei_receive(struct net_device *dev)
1512 printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", 1511 printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
1513 dev->name, rx_frame.status, rx_frame.next, 1512 dev->name, rx_frame.status, rx_frame.next,
1514 rx_frame.count); 1513 rx_frame.count);
1515 ei_local->stat.rx_errors++; 1514 dev->stats.rx_errors++;
1516 /* NB: The NIC counts CRC, frame and missed errors. */ 1515 /* NB: The NIC counts CRC, frame and missed errors. */
1517 if (pkt_stat & ENRSR_FO) 1516 if (pkt_stat & ENRSR_FO)
1518 ei_local->stat.rx_fifo_errors++; 1517 dev->stats.rx_fifo_errors++;
1519 } 1518 }
1520 next_frame = rx_frame.next; 1519 next_frame = rx_frame.next;
1521 1520
@@ -1550,7 +1549,6 @@ static void ei_rx_overrun(struct net_device *dev)
1550 axnet_dev_t *info = PRIV(dev); 1549 axnet_dev_t *info = PRIV(dev);
1551 long e8390_base = dev->base_addr; 1550 long e8390_base = dev->base_addr;
1552 unsigned char was_txing, must_resend = 0; 1551 unsigned char was_txing, must_resend = 0;
1553 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
1554 1552
1555 /* 1553 /*
1556 * Record whether a Tx was in progress and then issue the 1554 * Record whether a Tx was in progress and then issue the
@@ -1561,7 +1559,7 @@ static void ei_rx_overrun(struct net_device *dev)
1561 1559
1562 if (ei_debug > 1) 1560 if (ei_debug > 1)
1563 printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); 1561 printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
1564 ei_local->stat.rx_over_errors++; 1562 dev->stats.rx_over_errors++;
1565 1563
1566 /* 1564 /*
1567 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 1565 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
@@ -1622,16 +1620,16 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1622 1620
1623 /* If the card is stopped, just return the present stats. */ 1621 /* If the card is stopped, just return the present stats. */
1624 if (!netif_running(dev)) 1622 if (!netif_running(dev))
1625 return &ei_local->stat; 1623 return &dev->stats;
1626 1624
1627 spin_lock_irqsave(&ei_local->page_lock,flags); 1625 spin_lock_irqsave(&ei_local->page_lock,flags);
1628 /* Read the counter registers, assuming we are in page 0. */ 1626 /* Read the counter registers, assuming we are in page 0. */
1629 ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); 1627 dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
1630 ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); 1628 dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
1631 ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); 1629 dev->stats.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
1632 spin_unlock_irqrestore(&ei_local->page_lock, flags); 1630 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1633 1631
1634 return &ei_local->stat; 1632 return &dev->stats;
1635} 1633}
1636 1634
1637/* 1635/*
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 1c89b97f4e09..ca8c0e037400 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1973,7 +1973,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1973 err_free_ring: 1973 err_free_ring:
1974 pcnet32_free_ring(dev); 1974 pcnet32_free_ring(dev);
1975 err_free_consistent: 1975 err_free_consistent:
1976 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 1976 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
1977 lp->init_block, lp->init_dma_addr); 1977 lp->init_block, lp->init_dma_addr);
1978 err_free_netdev: 1978 err_free_netdev:
1979 free_netdev(dev); 1979 free_netdev(dev);
@@ -2953,7 +2953,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2953 unregister_netdev(dev); 2953 unregister_netdev(dev);
2954 pcnet32_free_ring(dev); 2954 pcnet32_free_ring(dev);
2955 release_region(dev->base_addr, PCNET32_TOTAL_SIZE); 2955 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2956 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 2956 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2957 lp->init_block, lp->init_dma_addr); 2957 lp->init_block, lp->init_dma_addr);
2958 free_netdev(dev); 2958 free_netdev(dev);
2959 pci_disable_device(pdev); 2959 pci_disable_device(pdev);
@@ -3036,7 +3036,7 @@ static void __exit pcnet32_cleanup_module(void)
3036 unregister_netdev(pcnet32_dev); 3036 unregister_netdev(pcnet32_dev);
3037 pcnet32_free_ring(pcnet32_dev); 3037 pcnet32_free_ring(pcnet32_dev);
3038 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); 3038 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
3039 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 3039 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
3040 lp->init_block, lp->init_dma_addr); 3040 lp->init_block, lp->init_dma_addr);
3041 free_netdev(pcnet32_dev); 3041 free_netdev(pcnet32_dev);
3042 pcnet32_dev = next_dev; 3042 pcnet32_dev = next_dev;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6eb2d31d1e34..d55932acd887 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -53,7 +53,8 @@ config SMSC_PHY
53config BROADCOM_PHY 53config BROADCOM_PHY
54 tristate "Drivers for Broadcom PHYs" 54 tristate "Drivers for Broadcom PHYs"
55 ---help--- 55 ---help---
56 Currently supports the BCM5411, BCM5421 and BCM5461 PHYs. 56 Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481
57 and BCM5482 PHYs.
57 58
58config ICPLUS_PHY 59config ICPLUS_PHY
59 tristate "Drivers for ICPlus PHYs" 60 tristate "Drivers for ICPlus PHYs"
@@ -83,4 +84,10 @@ config MDIO_BITBANG
83 84
84 If in doubt, say N. 85 If in doubt, say N.
85 86
87config MDIO_OF_GPIO
88 tristate "Support for GPIO lib-based bitbanged MDIO buses"
89 depends on MDIO_BITBANG && OF_GPIO
90 ---help---
91 Supports GPIO lib-based MDIO busses.
92
86endif # PHYLIB 93endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 5997d6ef702b..eee329fa6f53 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
15obj-$(CONFIG_REALTEK_PHY) += realtek.o 15obj-$(CONFIG_REALTEK_PHY) += realtek.o
16obj-$(CONFIG_FIXED_PHY) += fixed.o 16obj-$(CONFIG_FIXED_PHY) += fixed.o
17obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o 17obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
18obj-$(CONFIG_MDIO_OF_GPIO) += mdio-ofgpio.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60c5cfe96918..4b4dc98ad165 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -24,6 +24,12 @@
24#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ 24#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
25#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ 25#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
26 26
27#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
28#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
29#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
30#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
31
32#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
27#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ 33#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
28#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ 34#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
29#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ 35#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
@@ -42,10 +48,120 @@
42#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ 48#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
43#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ 49#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
44 50
51#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
52#define MII_BCM54XX_SHD_WRITE 0x8000
53#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
54#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
55
56/*
57 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
58 * BCM5482, and possibly some others.
59 */
60#define BCM_LED_SRC_LINKSPD1 0x0
61#define BCM_LED_SRC_LINKSPD2 0x1
62#define BCM_LED_SRC_XMITLED 0x2
63#define BCM_LED_SRC_ACTIVITYLED 0x3
64#define BCM_LED_SRC_FDXLED 0x4
65#define BCM_LED_SRC_SLAVE 0x5
66#define BCM_LED_SRC_INTR 0x6
67#define BCM_LED_SRC_QUALITY 0x7
68#define BCM_LED_SRC_RCVLED 0x8
69#define BCM_LED_SRC_MULTICOLOR1 0xa
70#define BCM_LED_SRC_OPENSHORT 0xb
71#define BCM_LED_SRC_OFF 0xe /* Tied high */
72#define BCM_LED_SRC_ON 0xf /* Tied low */
73
74/*
75 * BCM5482: Shadow registers
76 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
77 * register to access.
78 */
79#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
80 /* LED3 / ~LINKSPD[2] selector */
81#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
82 /* LED1 / ~LINKSPD[1] selector */
83#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
84#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
85#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
86#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
87#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
88#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
89
90/*
91 * BCM5482: Secondary SerDes registers
92 */
93#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
94#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
95#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
96#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
97#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
98
99/*
100 * Device flags for PHYs that can be configured for different operating
101 * modes.
102 */
103#define PHY_BCM_FLAGS_VALID 0x80000000
104#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
105#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
106#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
107#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
108
45MODULE_DESCRIPTION("Broadcom PHY driver"); 109MODULE_DESCRIPTION("Broadcom PHY driver");
46MODULE_AUTHOR("Maciej W. Rozycki"); 110MODULE_AUTHOR("Maciej W. Rozycki");
47MODULE_LICENSE("GPL"); 111MODULE_LICENSE("GPL");
48 112
113/*
114 * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
115 * 0x1c shadow registers.
116 */
117static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
118{
119 phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
120 return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
121}
122
123static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val)
124{
125 return phy_write(phydev, MII_BCM54XX_SHD,
126 MII_BCM54XX_SHD_WRITE |
127 MII_BCM54XX_SHD_VAL(shadow) |
128 MII_BCM54XX_SHD_DATA(val));
129}
130
131/*
132 * Indirect register access functions for the Expansion Registers
133 * and Secondary SerDes registers (when sec_serdes=1).
134 */
135static int bcm54xx_exp_read(struct phy_device *phydev,
136 int sec_serdes, u8 regnum)
137{
138 int val;
139
140 phy_write(phydev, MII_BCM54XX_EXP_SEL,
141 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD :
142 MII_BCM54XX_EXP_SEL_ER) |
143 regnum);
144 val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
145 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
146
147 return val;
148}
149
150static int bcm54xx_exp_write(struct phy_device *phydev,
151 int sec_serdes, u8 regnum, u16 val)
152{
153 int ret;
154
155 phy_write(phydev, MII_BCM54XX_EXP_SEL,
156 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD :
157 MII_BCM54XX_EXP_SEL_ER) |
158 regnum);
159 ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
160 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
161
162 return ret;
163}
164
49static int bcm54xx_config_init(struct phy_device *phydev) 165static int bcm54xx_config_init(struct phy_device *phydev)
50{ 166{
51 int reg, err; 167 int reg, err;
@@ -70,6 +186,87 @@ static int bcm54xx_config_init(struct phy_device *phydev)
70 return 0; 186 return 0;
71} 187}
72 188
189static int bcm5482_config_init(struct phy_device *phydev)
190{
191 int err, reg;
192
193 err = bcm54xx_config_init(phydev);
194
195 if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
196 /*
197 * Enable secondary SerDes and its use as an LED source
198 */
199 reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_SSD);
200 bcm54xx_shadow_write(phydev, BCM5482_SHD_SSD,
201 reg |
202 BCM5482_SHD_SSD_LEDM |
203 BCM5482_SHD_SSD_EN);
204
205 /*
206 * Enable SGMII slave mode and auto-detection
207 */
208 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_SGMII_SLAVE);
209 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_SGMII_SLAVE,
210 reg |
211 BCM5482_SSD_SGMII_SLAVE_EN |
212 BCM5482_SSD_SGMII_SLAVE_AD);
213
214 /*
215 * Disable secondary SerDes powerdown
216 */
217 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_1000BX_CTL);
218 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_1000BX_CTL,
219 reg & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
220
221 /*
222 * Select 1000BASE-X register set (primary SerDes)
223 */
224 reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE);
225 bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE,
226 reg | BCM5482_SHD_MODE_1000BX);
227
228 /*
229 * LED1=ACTIVITYLED, LED3=LINKSPD[2]
230 * (Use LED1 as secondary SerDes ACTIVITY LED)
231 */
232 bcm54xx_shadow_write(phydev, BCM5482_SHD_LEDS1,
233 BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) |
234 BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2));
235
236 /*
237 * Auto-negotiation doesn't seem to work quite right
238 * in this mode, so we disable it and force it to the
239 * right speed/duplex setting. Only 'link status'
240 * is important.
241 */
242 phydev->autoneg = AUTONEG_DISABLE;
243 phydev->speed = SPEED_1000;
244 phydev->duplex = DUPLEX_FULL;
245 }
246
247 return err;
248}
249
250static int bcm5482_read_status(struct phy_device *phydev)
251{
252 int err;
253
254 err = genphy_read_status(phydev);
255
256 if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX) {
257 /*
258 * Only link status matters for 1000Base-X mode, so force
259 * 1000 Mbit/s full-duplex status
260 */
261 if (phydev->link) {
262 phydev->speed = SPEED_1000;
263 phydev->duplex = DUPLEX_FULL;
264 }
265 }
266
267 return err;
268}
269
73static int bcm54xx_ack_interrupt(struct phy_device *phydev) 270static int bcm54xx_ack_interrupt(struct phy_device *phydev)
74{ 271{
75 int reg; 272 int reg;
@@ -210,9 +407,9 @@ static struct phy_driver bcm5482_driver = {
210 .name = "Broadcom BCM5482", 407 .name = "Broadcom BCM5482",
211 .features = PHY_GBIT_FEATURES, 408 .features = PHY_GBIT_FEATURES,
212 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 409 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
213 .config_init = bcm54xx_config_init, 410 .config_init = bcm5482_config_init,
214 .config_aneg = genphy_config_aneg, 411 .config_aneg = genphy_config_aneg,
215 .read_status = genphy_read_status, 412 .read_status = bcm5482_read_status,
216 .ack_interrupt = bcm54xx_ack_interrupt, 413 .ack_interrupt = bcm54xx_ack_interrupt,
217 .config_intr = bcm54xx_config_intr, 414 .config_intr = bcm54xx_config_intr,
218 .driver = { .owner = THIS_MODULE }, 415 .driver = { .owner = THIS_MODULE },
diff --git a/drivers/net/phy/mdio-ofgpio.c b/drivers/net/phy/mdio-ofgpio.c
new file mode 100644
index 000000000000..7edfc0c34835
--- /dev/null
+++ b/drivers/net/phy/mdio-ofgpio.c
@@ -0,0 +1,205 @@
1/*
2 * OpenFirmware GPIO based MDIO bitbang driver.
3 *
4 * Copyright (c) 2008 CSE Semaphore Belgium.
5 * by Laurent Pinchart <laurentp@cse-semaphore.com>
6 *
7 * Based on earlier work by
8 *
9 * Copyright (c) 2003 Intracom S.A.
10 * by Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2005 (c) MontaVista Software, Inc.
13 * Vitaly Bordug <vbordug@ru.mvista.com>
14 *
15 * This file is licensed under the terms of the GNU General Public License
16 * version 2. This program is licensed "as is" without any warranty of any
17 * kind, whether express or implied.
18 */
19
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/mdio-bitbang.h>
25#include <linux/of_gpio.h>
26#include <linux/of_platform.h>
27
28struct mdio_gpio_info {
29 struct mdiobb_ctrl ctrl;
30 int mdc, mdio;
31};
32
33static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
34{
35 struct mdio_gpio_info *bitbang =
36 container_of(ctrl, struct mdio_gpio_info, ctrl);
37
38 if (dir)
39 gpio_direction_output(bitbang->mdio, 1);
40 else
41 gpio_direction_input(bitbang->mdio);
42}
43
44static int mdio_read(struct mdiobb_ctrl *ctrl)
45{
46 struct mdio_gpio_info *bitbang =
47 container_of(ctrl, struct mdio_gpio_info, ctrl);
48
49 return gpio_get_value(bitbang->mdio);
50}
51
52static void mdio(struct mdiobb_ctrl *ctrl, int what)
53{
54 struct mdio_gpio_info *bitbang =
55 container_of(ctrl, struct mdio_gpio_info, ctrl);
56
57 gpio_set_value(bitbang->mdio, what);
58}
59
60static void mdc(struct mdiobb_ctrl *ctrl, int what)
61{
62 struct mdio_gpio_info *bitbang =
63 container_of(ctrl, struct mdio_gpio_info, ctrl);
64
65 gpio_set_value(bitbang->mdc, what);
66}
67
68static struct mdiobb_ops mdio_gpio_ops = {
69 .owner = THIS_MODULE,
70 .set_mdc = mdc,
71 .set_mdio_dir = mdio_dir,
72 .set_mdio_data = mdio,
73 .get_mdio_data = mdio_read,
74};
75
76static int __devinit mdio_ofgpio_bitbang_init(struct mii_bus *bus,
77 struct device_node *np)
78{
79 struct mdio_gpio_info *bitbang = bus->priv;
80
81 bitbang->mdc = of_get_gpio(np, 0);
82 bitbang->mdio = of_get_gpio(np, 1);
83
84 if (bitbang->mdc < 0 || bitbang->mdio < 0)
85 return -ENODEV;
86
87 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", bitbang->mdc);
88 return 0;
89}
90
91static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
92{
93 const u32 *data;
94 int len, id, irq;
95
96 data = of_get_property(np, "reg", &len);
97 if (!data || len != 4)
98 return;
99
100 id = *data;
101 bus->phy_mask &= ~(1 << id);
102
103 irq = of_irq_to_resource(np, 0, NULL);
104 if (irq != NO_IRQ)
105 bus->irq[id] = irq;
106}
107
108static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
109 const struct of_device_id *match)
110{
111 struct device_node *np = NULL;
112 struct mii_bus *new_bus;
113 struct mdio_gpio_info *bitbang;
114 int ret = -ENOMEM;
115 int i;
116
117 bitbang = kzalloc(sizeof(struct mdio_gpio_info), GFP_KERNEL);
118 if (!bitbang)
119 goto out;
120
121 bitbang->ctrl.ops = &mdio_gpio_ops;
122
123 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
124 if (!new_bus)
125 goto out_free_priv;
126
127 new_bus->name = "GPIO Bitbanged MII",
128
129 ret = mdio_ofgpio_bitbang_init(new_bus, ofdev->node);
130 if (ret)
131 goto out_free_bus;
132
133 new_bus->phy_mask = ~0;
134 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
135 if (!new_bus->irq)
136 goto out_free_bus;
137
138 for (i = 0; i < PHY_MAX_ADDR; i++)
139 new_bus->irq[i] = -1;
140
141 while ((np = of_get_next_child(ofdev->node, np)))
142 if (!strcmp(np->type, "ethernet-phy"))
143 add_phy(new_bus, np);
144
145 new_bus->dev = &ofdev->dev;
146 dev_set_drvdata(&ofdev->dev, new_bus);
147
148 ret = mdiobus_register(new_bus);
149 if (ret)
150 goto out_free_irqs;
151
152 return 0;
153
154out_free_irqs:
155 dev_set_drvdata(&ofdev->dev, NULL);
156 kfree(new_bus->irq);
157out_free_bus:
158 kfree(new_bus);
159out_free_priv:
160 free_mdio_bitbang(new_bus);
161out:
162 return ret;
163}
164
165static int mdio_ofgpio_remove(struct of_device *ofdev)
166{
167 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
168 struct mdio_gpio_info *bitbang = bus->priv;
169
170 mdiobus_unregister(bus);
171 free_mdio_bitbang(bus);
172 dev_set_drvdata(&ofdev->dev, NULL);
173 kfree(bus->irq);
174 kfree(bitbang);
175 kfree(bus);
176
177 return 0;
178}
179
180static struct of_device_id mdio_ofgpio_match[] = {
181 {
182 .compatible = "virtual,mdio-gpio",
183 },
184 {},
185};
186
187static struct of_platform_driver mdio_ofgpio_driver = {
188 .name = "mdio-gpio",
189 .match_table = mdio_ofgpio_match,
190 .probe = mdio_ofgpio_probe,
191 .remove = mdio_ofgpio_remove,
192};
193
194static int mdio_ofgpio_init(void)
195{
196 return of_register_platform_driver(&mdio_ofgpio_driver);
197}
198
199static void mdio_ofgpio_exit(void)
200{
201 of_unregister_platform_driver(&mdio_ofgpio_driver);
202}
203
204module_init(mdio_ofgpio_init);
205module_exit(mdio_ofgpio_exit);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1f4ca2b54a73..c926bf0b190e 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -361,7 +361,7 @@ static int ppp_open(struct inode *inode, struct file *file)
361 return 0; 361 return 0;
362} 362}
363 363
364static int ppp_release(struct inode *inode, struct file *file) 364static int ppp_release(struct inode *unused, struct file *file)
365{ 365{
366 struct ppp_file *pf = file->private_data; 366 struct ppp_file *pf = file->private_data;
367 struct ppp *ppp; 367 struct ppp *ppp;
@@ -545,8 +545,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
545} 545}
546#endif /* CONFIG_PPP_FILTER */ 546#endif /* CONFIG_PPP_FILTER */
547 547
548static int ppp_ioctl(struct inode *inode, struct file *file, 548static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
549 unsigned int cmd, unsigned long arg)
550{ 549{
551 struct ppp_file *pf = file->private_data; 550 struct ppp_file *pf = file->private_data;
552 struct ppp *ppp; 551 struct ppp *ppp;
@@ -574,24 +573,29 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
574 * this fd and reopening /dev/ppp. 573 * this fd and reopening /dev/ppp.
575 */ 574 */
576 err = -EINVAL; 575 err = -EINVAL;
576 lock_kernel();
577 if (pf->kind == INTERFACE) { 577 if (pf->kind == INTERFACE) {
578 ppp = PF_TO_PPP(pf); 578 ppp = PF_TO_PPP(pf);
579 if (file == ppp->owner) 579 if (file == ppp->owner)
580 ppp_shutdown_interface(ppp); 580 ppp_shutdown_interface(ppp);
581 } 581 }
582 if (atomic_read(&file->f_count) <= 2) { 582 if (atomic_read(&file->f_count) <= 2) {
583 ppp_release(inode, file); 583 ppp_release(NULL, file);
584 err = 0; 584 err = 0;
585 } else 585 } else
586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 586 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
587 atomic_read(&file->f_count)); 587 atomic_read(&file->f_count));
588 unlock_kernel();
588 return err; 589 return err;
589 } 590 }
590 591
591 if (pf->kind == CHANNEL) { 592 if (pf->kind == CHANNEL) {
592 struct channel *pch = PF_TO_CHANNEL(pf); 593 struct channel *pch;
593 struct ppp_channel *chan; 594 struct ppp_channel *chan;
594 595
596 lock_kernel();
597 pch = PF_TO_CHANNEL(pf);
598
595 switch (cmd) { 599 switch (cmd) {
596 case PPPIOCCONNECT: 600 case PPPIOCCONNECT:
597 if (get_user(unit, p)) 601 if (get_user(unit, p))
@@ -611,6 +615,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
611 err = chan->ops->ioctl(chan, cmd, arg); 615 err = chan->ops->ioctl(chan, cmd, arg);
612 up_read(&pch->chan_sem); 616 up_read(&pch->chan_sem);
613 } 617 }
618 unlock_kernel();
614 return err; 619 return err;
615 } 620 }
616 621
@@ -620,6 +625,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
620 return -EINVAL; 625 return -EINVAL;
621 } 626 }
622 627
628 lock_kernel();
623 ppp = PF_TO_PPP(pf); 629 ppp = PF_TO_PPP(pf);
624 switch (cmd) { 630 switch (cmd) {
625 case PPPIOCSMRU: 631 case PPPIOCSMRU:
@@ -767,7 +773,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
767 default: 773 default:
768 err = -ENOTTY; 774 err = -ENOTTY;
769 } 775 }
770 776 unlock_kernel();
771 return err; 777 return err;
772} 778}
773 779
@@ -779,6 +785,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
779 struct channel *chan; 785 struct channel *chan;
780 int __user *p = (int __user *)arg; 786 int __user *p = (int __user *)arg;
781 787
788 lock_kernel();
782 switch (cmd) { 789 switch (cmd) {
783 case PPPIOCNEWUNIT: 790 case PPPIOCNEWUNIT:
784 /* Create a new ppp unit */ 791 /* Create a new ppp unit */
@@ -827,6 +834,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
827 default: 834 default:
828 err = -ENOTTY; 835 err = -ENOTTY;
829 } 836 }
837 unlock_kernel();
830 return err; 838 return err;
831} 839}
832 840
@@ -835,7 +843,7 @@ static const struct file_operations ppp_device_fops = {
835 .read = ppp_read, 843 .read = ppp_read,
836 .write = ppp_write, 844 .write = ppp_write,
837 .poll = ppp_poll, 845 .poll = ppp_poll,
838 .ioctl = ppp_ioctl, 846 .unlocked_ioctl = ppp_ioctl,
839 .open = ppp_open, 847 .open = ppp_open,
840 .release = ppp_release 848 .release = ppp_release
841}; 849};
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index e365efb3c627..2eb54fd7bed5 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -110,7 +110,7 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
110void gelic_card_up(struct gelic_card *card) 110void gelic_card_up(struct gelic_card *card)
111{ 111{
112 pr_debug("%s: called\n", __func__); 112 pr_debug("%s: called\n", __func__);
113 down(&card->updown_lock); 113 mutex_lock(&card->updown_lock);
114 if (atomic_inc_return(&card->users) == 1) { 114 if (atomic_inc_return(&card->users) == 1) {
115 pr_debug("%s: real do\n", __func__); 115 pr_debug("%s: real do\n", __func__);
116 /* enable irq */ 116 /* enable irq */
@@ -120,7 +120,7 @@ void gelic_card_up(struct gelic_card *card)
120 120
121 napi_enable(&card->napi); 121 napi_enable(&card->napi);
122 } 122 }
123 up(&card->updown_lock); 123 mutex_unlock(&card->updown_lock);
124 pr_debug("%s: done\n", __func__); 124 pr_debug("%s: done\n", __func__);
125} 125}
126 126
@@ -128,7 +128,7 @@ void gelic_card_down(struct gelic_card *card)
128{ 128{
129 u64 mask; 129 u64 mask;
130 pr_debug("%s: called\n", __func__); 130 pr_debug("%s: called\n", __func__);
131 down(&card->updown_lock); 131 mutex_lock(&card->updown_lock);
132 if (atomic_dec_if_positive(&card->users) == 0) { 132 if (atomic_dec_if_positive(&card->users) == 0) {
133 pr_debug("%s: real do\n", __func__); 133 pr_debug("%s: real do\n", __func__);
134 napi_disable(&card->napi); 134 napi_disable(&card->napi);
@@ -146,7 +146,7 @@ void gelic_card_down(struct gelic_card *card)
146 /* stop tx */ 146 /* stop tx */
147 gelic_card_disable_txdmac(card); 147 gelic_card_disable_txdmac(card);
148 } 148 }
149 up(&card->updown_lock); 149 mutex_unlock(&card->updown_lock);
150 pr_debug("%s: done\n", __func__); 150 pr_debug("%s: done\n", __func__);
151} 151}
152 152
@@ -1534,7 +1534,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
1534 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); 1534 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
1535 init_waitqueue_head(&card->waitq); 1535 init_waitqueue_head(&card->waitq);
1536 atomic_set(&card->tx_timeout_task_counter, 0); 1536 atomic_set(&card->tx_timeout_task_counter, 0);
1537 init_MUTEX(&card->updown_lock); 1537 mutex_init(&card->updown_lock);
1538 atomic_set(&card->users, 0); 1538 atomic_set(&card->users, 0);
1539 1539
1540 return card; 1540 return card;
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 520f143c2c09..8b413868bbe2 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -298,7 +298,7 @@ struct gelic_card {
298 wait_queue_head_t waitq; 298 wait_queue_head_t waitq;
299 299
300 /* only first user should up the card */ 300 /* only first user should up the card */
301 struct semaphore updown_lock; 301 struct mutex updown_lock;
302 atomic_t users; 302 atomic_t users;
303 303
304 u64 ether_port_status; 304 u64 ether_port_status;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 1dae1f2ed813..aa963ac1e37b 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -45,7 +45,8 @@
45#include "ps3_gelic_wireless.h" 45#include "ps3_gelic_wireless.h"
46 46
47 47
48static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan); 48static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
49 u8 *essid, size_t essid_len);
49static int gelic_wl_try_associate(struct net_device *netdev); 50static int gelic_wl_try_associate(struct net_device *netdev);
50 51
51/* 52/*
@@ -105,6 +106,7 @@ static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = {
105 [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1}, 106 [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1},
106 [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1}, 107 [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1},
107 [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1}, 108 [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1},
109 [GELIC_EURUS_CMD_START_SCAN] = { .pre_arg = 1},
108 [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1}, 110 [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1},
109}; 111};
110 112
@@ -163,7 +165,9 @@ static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
163 card = port_to_card(wl_port(wl)); 165 card = port_to_card(wl_port(wl));
164 166
165 if (cmd_info[cmd->cmd].pre_arg) { 167 if (cmd_info[cmd->cmd].pre_arg) {
166 arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer)); 168 arg1 = (cmd->buffer) ?
169 ps3_mm_phys_to_lpar(__pa(cmd->buffer)) :
170 0;
167 arg2 = cmd->buf_size; 171 arg2 = cmd->buf_size;
168 } else { 172 } else {
169 arg1 = 0; 173 arg1 = 0;
@@ -240,12 +244,12 @@ static u32 gelic_wl_get_link(struct net_device *netdev)
240 u32 ret; 244 u32 ret;
241 245
242 pr_debug("%s: <-\n", __func__); 246 pr_debug("%s: <-\n", __func__);
243 down(&wl->assoc_stat_lock); 247 mutex_lock(&wl->assoc_stat_lock);
244 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) 248 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
245 ret = 1; 249 ret = 1;
246 else 250 else
247 ret = 0; 251 ret = 0;
248 up(&wl->assoc_stat_lock); 252 mutex_unlock(&wl->assoc_stat_lock);
249 pr_debug("%s: ->\n", __func__); 253 pr_debug("%s: ->\n", __func__);
250 return ret; 254 return ret;
251} 255}
@@ -350,7 +354,8 @@ static int gelic_wl_get_range(struct net_device *netdev,
350 354
351 /* encryption capability */ 355 /* encryption capability */
352 range->enc_capa = IW_ENC_CAPA_WPA | 356 range->enc_capa = IW_ENC_CAPA_WPA |
353 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 357 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP |
358 IW_ENC_CAPA_4WAY_HANDSHAKE;
354 if (wpa2_capable()) 359 if (wpa2_capable())
355 range->enc_capa |= IW_ENC_CAPA_WPA2; 360 range->enc_capa |= IW_ENC_CAPA_WPA2;
356 range->encoding_size[0] = 5; /* 40bit WEP */ 361 range->encoding_size[0] = 5; /* 40bit WEP */
@@ -359,6 +364,9 @@ static int gelic_wl_get_range(struct net_device *netdev,
359 range->num_encoding_sizes = 3; 364 range->num_encoding_sizes = 3;
360 range->max_encoding_tokens = GELIC_WEP_KEYS; 365 range->max_encoding_tokens = GELIC_WEP_KEYS;
361 366
367 /* scan capability */
368 range->scan_capa = IW_SCAN_CAPA_ESSID;
369
362 pr_debug("%s: ->\n", __func__); 370 pr_debug("%s: ->\n", __func__);
363 return 0; 371 return 0;
364 372
@@ -370,8 +378,18 @@ static int gelic_wl_set_scan(struct net_device *netdev,
370 union iwreq_data *wrqu, char *extra) 378 union iwreq_data *wrqu, char *extra)
371{ 379{
372 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 380 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
373 381 struct iw_scan_req *req;
374 return gelic_wl_start_scan(wl, 1); 382 u8 *essid = NULL;
383 size_t essid_len = 0;
384
385 if (wrqu->data.length == sizeof(struct iw_scan_req) &&
386 wrqu->data.flags & IW_SCAN_THIS_ESSID) {
387 req = (struct iw_scan_req*)extra;
388 essid = req->essid;
389 essid_len = req->essid_len;
390 pr_debug("%s: ESSID scan =%s\n", __func__, essid);
391 }
392 return gelic_wl_start_scan(wl, 1, essid, essid_len);
375} 393}
376 394
377#define OUI_LEN 3 395#define OUI_LEN 3
@@ -695,7 +713,7 @@ static int gelic_wl_get_scan(struct net_device *netdev,
695 unsigned long this_time = jiffies; 713 unsigned long this_time = jiffies;
696 714
697 pr_debug("%s: <-\n", __func__); 715 pr_debug("%s: <-\n", __func__);
698 if (down_interruptible(&wl->scan_lock)) 716 if (mutex_lock_interruptible(&wl->scan_lock))
699 return -EAGAIN; 717 return -EAGAIN;
700 718
701 switch (wl->scan_stat) { 719 switch (wl->scan_stat) {
@@ -733,7 +751,7 @@ static int gelic_wl_get_scan(struct net_device *netdev,
733 wrqu->data.length = ev - extra; 751 wrqu->data.length = ev - extra;
734 wrqu->data.flags = 0; 752 wrqu->data.flags = 0;
735out: 753out:
736 up(&wl->scan_lock); 754 mutex_unlock(&wl->scan_lock);
737 pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length); 755 pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length);
738 return ret; 756 return ret;
739} 757}
@@ -979,7 +997,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
979 unsigned long irqflag; 997 unsigned long irqflag;
980 998
981 pr_debug("%s: <- \n", __func__); 999 pr_debug("%s: <- \n", __func__);
982 down(&wl->assoc_stat_lock); 1000 mutex_lock(&wl->assoc_stat_lock);
983 spin_lock_irqsave(&wl->lock, irqflag); 1001 spin_lock_irqsave(&wl->lock, irqflag);
984 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) || 1002 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
985 wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) { 1003 wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
@@ -989,7 +1007,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
989 } else 1007 } else
990 data->essid.flags = 0; 1008 data->essid.flags = 0;
991 1009
992 up(&wl->assoc_stat_lock); 1010 mutex_unlock(&wl->assoc_stat_lock);
993 spin_unlock_irqrestore(&wl->lock, irqflag); 1011 spin_unlock_irqrestore(&wl->lock, irqflag);
994 pr_debug("%s: -> len=%d \n", __func__, data->essid.length); 1012 pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
995 1013
@@ -1170,7 +1188,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
1170 unsigned long irqflag; 1188 unsigned long irqflag;
1171 1189
1172 pr_debug("%s: <-\n", __func__); 1190 pr_debug("%s: <-\n", __func__);
1173 down(&wl->assoc_stat_lock); 1191 mutex_lock(&wl->assoc_stat_lock);
1174 spin_lock_irqsave(&wl->lock, irqflag); 1192 spin_lock_irqsave(&wl->lock, irqflag);
1175 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) { 1193 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
1176 data->ap_addr.sa_family = ARPHRD_ETHER; 1194 data->ap_addr.sa_family = ARPHRD_ETHER;
@@ -1180,7 +1198,7 @@ static int gelic_wl_get_ap(struct net_device *netdev,
1180 memset(data->ap_addr.sa_data, 0, ETH_ALEN); 1198 memset(data->ap_addr.sa_data, 0, ETH_ALEN);
1181 1199
1182 spin_unlock_irqrestore(&wl->lock, irqflag); 1200 spin_unlock_irqrestore(&wl->lock, irqflag);
1183 up(&wl->assoc_stat_lock); 1201 mutex_unlock(&wl->assoc_stat_lock);
1184 pr_debug("%s: ->\n", __func__); 1202 pr_debug("%s: ->\n", __func__);
1185 return 0; 1203 return 0;
1186} 1204}
@@ -1256,42 +1274,19 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
1256 set_bit(key_index, &wl->key_enabled); 1274 set_bit(key_index, &wl->key_enabled);
1257 /* remember wep info changed */ 1275 /* remember wep info changed */
1258 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1276 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1259 } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { 1277 } else if (alg == IW_ENCODE_ALG_PMK) {
1260 pr_debug("%s: TKIP/CCMP requested alg=%d\n", __func__, alg); 1278 if (ext->key_len != WPA_PSK_LEN) {
1261 /* check key length */ 1279 pr_err("%s: PSK length wrong %d\n", __func__,
1262 if (IW_ENCODING_TOKEN_MAX < ext->key_len) { 1280 ext->key_len);
1263 pr_info("%s: key is too long %d\n", __func__,
1264 ext->key_len);
1265 ret = -EINVAL; 1281 ret = -EINVAL;
1266 goto done; 1282 goto done;
1267 } 1283 }
1268 if (alg == IW_ENCODE_ALG_CCMP) { 1284 memset(wl->psk, 0, sizeof(wl->psk));
1269 pr_debug("%s: AES selected\n", __func__); 1285 memcpy(wl->psk, ext->key, ext->key_len);
1270 wl->group_cipher_method = GELIC_WL_CIPHER_AES; 1286 wl->psk_len = ext->key_len;
1271 wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; 1287 wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
1272 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2; 1288 /* remember PSK configured */
1273 } else { 1289 set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
1274 pr_debug("%s: TKIP selected, WPA forced\n", __func__);
1275 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
1276 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
1277 /* FIXME: how do we do if WPA2 + TKIP? */
1278 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
1279 }
1280 if (flags & IW_ENCODE_RESTRICTED)
1281 BUG();
1282 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1283 /* We should use same key for both and unicast */
1284 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
1285 pr_debug("%s: group key \n", __func__);
1286 else
1287 pr_debug("%s: unicast key \n", __func__);
1288 /* OK, update the key */
1289 wl->key_len[key_index] = ext->key_len;
1290 memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
1291 memcpy(wl->key[key_index], ext->key, ext->key_len);
1292 set_bit(key_index, &wl->key_enabled);
1293 /* remember info changed */
1294 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1295 } 1290 }
1296done: 1291done:
1297 spin_unlock_irqrestore(&wl->lock, irqflag); 1292 spin_unlock_irqrestore(&wl->lock, irqflag);
@@ -1397,6 +1392,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
1397 return 0; 1392 return 0;
1398} 1393}
1399 1394
1395#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
1400/* SIOCIWFIRSTPRIV */ 1396/* SIOCIWFIRSTPRIV */
1401static int hex2bin(u8 *str, u8 *bin, unsigned int len) 1397static int hex2bin(u8 *str, u8 *bin, unsigned int len)
1402{ 1398{
@@ -1501,6 +1497,7 @@ static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1501 pr_debug("%s:-> %d\n", __func__, data->data.length); 1497 pr_debug("%s:-> %d\n", __func__, data->data.length);
1502 return 0; 1498 return 0;
1503} 1499}
1500#endif
1504 1501
1505/* SIOCGIWNICKN */ 1502/* SIOCGIWNICKN */
1506static int gelic_wl_get_nick(struct net_device *net_dev, 1503static int gelic_wl_get_nick(struct net_device *net_dev,
@@ -1524,15 +1521,20 @@ static struct iw_statistics *gelic_wl_get_wireless_stats(
1524 struct gelic_eurus_cmd *cmd; 1521 struct gelic_eurus_cmd *cmd;
1525 struct iw_statistics *is; 1522 struct iw_statistics *is;
1526 struct gelic_eurus_rssi_info *rssi; 1523 struct gelic_eurus_rssi_info *rssi;
1524 void *buf;
1527 1525
1528 pr_debug("%s: <-\n", __func__); 1526 pr_debug("%s: <-\n", __func__);
1529 1527
1528 buf = (void *)__get_free_page(GFP_KERNEL);
1529 if (!buf)
1530 return NULL;
1531
1530 is = &wl->iwstat; 1532 is = &wl->iwstat;
1531 memset(is, 0, sizeof(*is)); 1533 memset(is, 0, sizeof(*is));
1532 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG, 1534 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
1533 wl->buf, sizeof(*rssi)); 1535 buf, sizeof(*rssi));
1534 if (cmd && !cmd->status && !cmd->cmd_status) { 1536 if (cmd && !cmd->status && !cmd->cmd_status) {
1535 rssi = wl->buf; 1537 rssi = buf;
1536 is->qual.level = be16_to_cpu(rssi->rssi); 1538 is->qual.level = be16_to_cpu(rssi->rssi);
1537 is->qual.updated = IW_QUAL_LEVEL_UPDATED | 1539 is->qual.updated = IW_QUAL_LEVEL_UPDATED |
1538 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; 1540 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
@@ -1541,6 +1543,7 @@ static struct iw_statistics *gelic_wl_get_wireless_stats(
1541 is->qual.updated = IW_QUAL_ALL_INVALID; 1543 is->qual.updated = IW_QUAL_ALL_INVALID;
1542 1544
1543 kfree(cmd); 1545 kfree(cmd);
1546 free_page((unsigned long)buf);
1544 pr_debug("%s: ->\n", __func__); 1547 pr_debug("%s: ->\n", __func__);
1545 return is; 1548 return is;
1546} 1549}
@@ -1548,13 +1551,16 @@ static struct iw_statistics *gelic_wl_get_wireless_stats(
1548/* 1551/*
1549 * scanning helpers 1552 * scanning helpers
1550 */ 1553 */
1551static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan) 1554static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
1555 u8 *essid, size_t essid_len)
1552{ 1556{
1553 struct gelic_eurus_cmd *cmd; 1557 struct gelic_eurus_cmd *cmd;
1554 int ret = 0; 1558 int ret = 0;
1559 void *buf = NULL;
1560 size_t len;
1555 1561
1556 pr_debug("%s: <- always=%d\n", __func__, always_scan); 1562 pr_debug("%s: <- always=%d\n", __func__, always_scan);
1557 if (down_interruptible(&wl->scan_lock)) 1563 if (mutex_lock_interruptible(&wl->scan_lock))
1558 return -ERESTARTSYS; 1564 return -ERESTARTSYS;
1559 1565
1560 /* 1566 /*
@@ -1574,12 +1580,27 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan)
1574 complete(&wl->scan_done); 1580 complete(&wl->scan_done);
1575 goto out; 1581 goto out;
1576 } 1582 }
1583
1584 /* ESSID scan ? */
1585 if (essid_len && essid) {
1586 buf = (void *)__get_free_page(GFP_KERNEL);
1587 if (!buf) {
1588 ret = -ENOMEM;
1589 goto out;
1590 }
1591 len = IW_ESSID_MAX_SIZE; /* hypervisor always requires 32 */
1592 memset(buf, 0, len);
1593 memcpy(buf, essid, essid_len);
1594 pr_debug("%s: essid scan='%s'\n", __func__, (char *)buf);
1595 } else
1596 len = 0;
1597
1577 /* 1598 /*
1578 * issue start scan request 1599 * issue start scan request
1579 */ 1600 */
1580 wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING; 1601 wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING;
1581 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN, 1602 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN,
1582 NULL, 0); 1603 buf, len);
1583 if (!cmd || cmd->status || cmd->cmd_status) { 1604 if (!cmd || cmd->status || cmd->cmd_status) {
1584 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; 1605 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1585 complete(&wl->scan_done); 1606 complete(&wl->scan_done);
@@ -1588,7 +1609,8 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan)
1588 } 1609 }
1589 kfree(cmd); 1610 kfree(cmd);
1590out: 1611out:
1591 up(&wl->scan_lock); 1612 free_page((unsigned long)buf);
1613 mutex_unlock(&wl->scan_lock);
1592 pr_debug("%s: ->\n", __func__); 1614 pr_debug("%s: ->\n", __func__);
1593 return ret; 1615 return ret;
1594} 1616}
@@ -1607,10 +1629,17 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1607 union iwreq_data data; 1629 union iwreq_data data;
1608 unsigned long this_time = jiffies; 1630 unsigned long this_time = jiffies;
1609 unsigned int data_len, i, found, r; 1631 unsigned int data_len, i, found, r;
1632 void *buf;
1610 DECLARE_MAC_BUF(mac); 1633 DECLARE_MAC_BUF(mac);
1611 1634
1612 pr_debug("%s:start\n", __func__); 1635 pr_debug("%s:start\n", __func__);
1613 down(&wl->scan_lock); 1636 mutex_lock(&wl->scan_lock);
1637
1638 buf = (void *)__get_free_page(GFP_KERNEL);
1639 if (!buf) {
1640 pr_info("%s: scan buffer alloc failed\n", __func__);
1641 goto out;
1642 }
1614 1643
1615 if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) { 1644 if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) {
1616 /* 1645 /*
@@ -1622,7 +1651,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1622 } 1651 }
1623 1652
1624 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN, 1653 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN,
1625 wl->buf, PAGE_SIZE); 1654 buf, PAGE_SIZE);
1626 if (!cmd || cmd->status || cmd->cmd_status) { 1655 if (!cmd || cmd->status || cmd->cmd_status) {
1627 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; 1656 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1628 pr_info("%s:cmd failed\n", __func__); 1657 pr_info("%s:cmd failed\n", __func__);
@@ -1649,7 +1678,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1649 } 1678 }
1650 1679
1651 /* put them in the newtork_list */ 1680 /* put them in the newtork_list */
1652 for (i = 0, scan_info_size = 0, scan_info = wl->buf; 1681 for (i = 0, scan_info_size = 0, scan_info = buf;
1653 scan_info_size < data_len; 1682 scan_info_size < data_len;
1654 i++, scan_info_size += be16_to_cpu(scan_info->size), 1683 i++, scan_info_size += be16_to_cpu(scan_info->size),
1655 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) { 1684 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) {
@@ -1726,8 +1755,9 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1726 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data, 1755 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data,
1727 NULL); 1756 NULL);
1728out: 1757out:
1758 free_page((unsigned long)buf);
1729 complete(&wl->scan_done); 1759 complete(&wl->scan_done);
1730 up(&wl->scan_lock); 1760 mutex_unlock(&wl->scan_lock);
1731 pr_debug("%s:end\n", __func__); 1761 pr_debug("%s:end\n", __func__);
1732} 1762}
1733 1763
@@ -1848,7 +1878,10 @@ static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
1848 1878
1849 pr_debug("%s: <-\n", __func__); 1879 pr_debug("%s: <-\n", __func__);
1850 /* we can assume no one should uses the buffer */ 1880 /* we can assume no one should uses the buffer */
1851 wep = wl->buf; 1881 wep = (struct gelic_eurus_wep_cfg *)__get_free_page(GFP_KERNEL);
1882 if (!wep)
1883 return -ENOMEM;
1884
1852 memset(wep, 0, sizeof(*wep)); 1885 memset(wep, 0, sizeof(*wep));
1853 1886
1854 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { 1887 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
@@ -1898,6 +1931,7 @@ static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
1898 1931
1899 kfree(cmd); 1932 kfree(cmd);
1900out: 1933out:
1934 free_page((unsigned long)wep);
1901 pr_debug("%s: ->\n", __func__); 1935 pr_debug("%s: ->\n", __func__);
1902 return ret; 1936 return ret;
1903} 1937}
@@ -1941,7 +1975,10 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
1941 1975
1942 pr_debug("%s: <-\n", __func__); 1976 pr_debug("%s: <-\n", __func__);
1943 /* we can assume no one should uses the buffer */ 1977 /* we can assume no one should uses the buffer */
1944 wpa = wl->buf; 1978 wpa = (struct gelic_eurus_wpa_cfg *)__get_free_page(GFP_KERNEL);
1979 if (!wpa)
1980 return -ENOMEM;
1981
1945 memset(wpa, 0, sizeof(*wpa)); 1982 memset(wpa, 0, sizeof(*wpa));
1946 1983
1947 if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) 1984 if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat))
@@ -2000,6 +2037,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
2000 else if (cmd->status || cmd->cmd_status) 2037 else if (cmd->status || cmd->cmd_status)
2001 ret = -ENXIO; 2038 ret = -ENXIO;
2002 kfree(cmd); 2039 kfree(cmd);
2040 free_page((unsigned long)wpa);
2003 pr_debug("%s: --> %d\n", __func__, ret); 2041 pr_debug("%s: --> %d\n", __func__, ret);
2004 return ret; 2042 return ret;
2005} 2043}
@@ -2018,7 +2056,10 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
2018 pr_debug("%s: <-\n", __func__); 2056 pr_debug("%s: <-\n", __func__);
2019 2057
2020 /* do common config */ 2058 /* do common config */
2021 common = wl->buf; 2059 common = (struct gelic_eurus_common_cfg *)__get_free_page(GFP_KERNEL);
2060 if (!common)
2061 return -ENOMEM;
2062
2022 memset(common, 0, sizeof(*common)); 2063 memset(common, 0, sizeof(*common));
2023 common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA); 2064 common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
2024 common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG); 2065 common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);
@@ -2104,6 +2145,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
2104 pr_info("%s: connected\n", __func__); 2145 pr_info("%s: connected\n", __func__);
2105 } 2146 }
2106out: 2147out:
2148 free_page((unsigned long)common);
2107 pr_debug("%s: ->\n", __func__); 2149 pr_debug("%s: ->\n", __func__);
2108 return ret; 2150 return ret;
2109} 2151}
@@ -2151,7 +2193,7 @@ static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
2151 * As it waits with timeout, just leave assoc_done 2193 * As it waits with timeout, just leave assoc_done
2152 * uncompleted, then it terminates with timeout 2194 * uncompleted, then it terminates with timeout
2153 */ 2195 */
2154 if (down_trylock(&wl->assoc_stat_lock)) { 2196 if (!mutex_trylock(&wl->assoc_stat_lock)) {
2155 pr_debug("%s: already locked\n", __func__); 2197 pr_debug("%s: already locked\n", __func__);
2156 lock = 0; 2198 lock = 0;
2157 } else { 2199 } else {
@@ -2170,7 +2212,7 @@ static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
2170 netif_carrier_off(port_to_netdev(wl_port(wl))); 2212 netif_carrier_off(port_to_netdev(wl_port(wl)));
2171 2213
2172 if (lock) 2214 if (lock)
2173 up(&wl->assoc_stat_lock); 2215 mutex_unlock(&wl->assoc_stat_lock);
2174} 2216}
2175/* 2217/*
2176 * event worker 2218 * event worker
@@ -2255,15 +2297,30 @@ static void gelic_wl_assoc_worker(struct work_struct *work)
2255 2297
2256 struct gelic_wl_scan_info *best_bss; 2298 struct gelic_wl_scan_info *best_bss;
2257 int ret; 2299 int ret;
2300 unsigned long irqflag;
2301 u8 *essid;
2302 size_t essid_len;
2258 2303
2259 wl = container_of(work, struct gelic_wl_info, assoc_work.work); 2304 wl = container_of(work, struct gelic_wl_info, assoc_work.work);
2260 2305
2261 down(&wl->assoc_stat_lock); 2306 mutex_lock(&wl->assoc_stat_lock);
2262 2307
2263 if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN) 2308 if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN)
2264 goto out; 2309 goto out;
2265 2310
2266 ret = gelic_wl_start_scan(wl, 0); 2311 spin_lock_irqsave(&wl->lock, irqflag);
2312 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
2313 pr_debug("%s: assoc ESSID configured %s\n", __func__,
2314 wl->essid);
2315 essid = wl->essid;
2316 essid_len = wl->essid_len;
2317 } else {
2318 essid = NULL;
2319 essid_len = 0;
2320 }
2321 spin_unlock_irqrestore(&wl->lock, irqflag);
2322
2323 ret = gelic_wl_start_scan(wl, 0, essid, essid_len);
2267 if (ret == -ERESTARTSYS) { 2324 if (ret == -ERESTARTSYS) {
2268 pr_debug("%s: scan start failed association\n", __func__); 2325 pr_debug("%s: scan start failed association\n", __func__);
2269 schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/ 2326 schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/
@@ -2282,7 +2339,7 @@ static void gelic_wl_assoc_worker(struct work_struct *work)
2282 wait_for_completion(&wl->scan_done); 2339 wait_for_completion(&wl->scan_done);
2283 2340
2284 pr_debug("%s: scan done\n", __func__); 2341 pr_debug("%s: scan done\n", __func__);
2285 down(&wl->scan_lock); 2342 mutex_lock(&wl->scan_lock);
2286 if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) { 2343 if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) {
2287 gelic_wl_send_iwap_event(wl, NULL); 2344 gelic_wl_send_iwap_event(wl, NULL);
2288 pr_info("%s: no scan list. association failed\n", __func__); 2345 pr_info("%s: no scan list. association failed\n", __func__);
@@ -2302,9 +2359,9 @@ static void gelic_wl_assoc_worker(struct work_struct *work)
2302 if (ret) 2359 if (ret)
2303 pr_info("%s: association failed %d\n", __func__, ret); 2360 pr_info("%s: association failed %d\n", __func__, ret);
2304scan_lock_out: 2361scan_lock_out:
2305 up(&wl->scan_lock); 2362 mutex_unlock(&wl->scan_lock);
2306out: 2363out:
2307 up(&wl->assoc_stat_lock); 2364 mutex_unlock(&wl->assoc_stat_lock);
2308} 2365}
2309/* 2366/*
2310 * Interrupt handler 2367 * Interrupt handler
@@ -2351,6 +2408,7 @@ static const iw_handler gelic_wl_wext_handler[] =
2351 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick, 2408 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
2352}; 2409};
2353 2410
2411#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
2354static struct iw_priv_args gelic_wl_private_args[] = 2412static struct iw_priv_args gelic_wl_private_args[] =
2355{ 2413{
2356 { 2414 {
@@ -2372,15 +2430,18 @@ static const iw_handler gelic_wl_private_handler[] =
2372 gelic_wl_priv_set_psk, 2430 gelic_wl_priv_set_psk,
2373 gelic_wl_priv_get_psk, 2431 gelic_wl_priv_get_psk,
2374}; 2432};
2433#endif
2375 2434
2376static const struct iw_handler_def gelic_wl_wext_handler_def = { 2435static const struct iw_handler_def gelic_wl_wext_handler_def = {
2377 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler), 2436 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
2378 .standard = gelic_wl_wext_handler, 2437 .standard = gelic_wl_wext_handler,
2379 .get_wireless_stats = gelic_wl_get_wireless_stats, 2438 .get_wireless_stats = gelic_wl_get_wireless_stats,
2439#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
2380 .num_private = ARRAY_SIZE(gelic_wl_private_handler), 2440 .num_private = ARRAY_SIZE(gelic_wl_private_handler),
2381 .num_private_args = ARRAY_SIZE(gelic_wl_private_args), 2441 .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
2382 .private = gelic_wl_private_handler, 2442 .private = gelic_wl_private_handler,
2383 .private_args = gelic_wl_private_args, 2443 .private_args = gelic_wl_private_args,
2444#endif
2384}; 2445};
2385 2446
2386static struct net_device *gelic_wl_alloc(struct gelic_card *card) 2447static struct net_device *gelic_wl_alloc(struct gelic_card *card)
@@ -2431,8 +2492,8 @@ static struct net_device *gelic_wl_alloc(struct gelic_card *card)
2431 2492
2432 INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker); 2493 INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker);
2433 INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker); 2494 INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker);
2434 init_MUTEX(&wl->scan_lock); 2495 mutex_init(&wl->scan_lock);
2435 init_MUTEX(&wl->assoc_stat_lock); 2496 mutex_init(&wl->assoc_stat_lock);
2436 2497
2437 init_completion(&wl->scan_done); 2498 init_completion(&wl->scan_done);
2438 /* for the case that no scan request is issued and stop() is called */ 2499 /* for the case that no scan request is issued and stop() is called */
@@ -2446,16 +2507,9 @@ static struct net_device *gelic_wl_alloc(struct gelic_card *card)
2446 BUILD_BUG_ON(PAGE_SIZE < 2507 BUILD_BUG_ON(PAGE_SIZE <
2447 sizeof(struct gelic_eurus_scan_info) * 2508 sizeof(struct gelic_eurus_scan_info) *
2448 GELIC_EURUS_MAX_SCAN); 2509 GELIC_EURUS_MAX_SCAN);
2449 wl->buf = (void *)get_zeroed_page(GFP_KERNEL);
2450 if (!wl->buf) {
2451 pr_info("%s:buffer allocation failed\n", __func__);
2452 goto fail_getpage;
2453 }
2454 pr_debug("%s:end\n", __func__); 2510 pr_debug("%s:end\n", __func__);
2455 return netdev; 2511 return netdev;
2456 2512
2457fail_getpage:
2458 destroy_workqueue(wl->event_queue);
2459fail_event_workqueue: 2513fail_event_workqueue:
2460 destroy_workqueue(wl->eurus_cmd_queue); 2514 destroy_workqueue(wl->eurus_cmd_queue);
2461fail_cmd_workqueue: 2515fail_cmd_workqueue:
@@ -2474,8 +2528,6 @@ static void gelic_wl_free(struct gelic_wl_info *wl)
2474 2528
2475 pr_debug("%s: <-\n", __func__); 2529 pr_debug("%s: <-\n", __func__);
2476 2530
2477 free_page((unsigned long)wl->buf);
2478
2479 pr_debug("%s: destroy queues\n", __func__); 2531 pr_debug("%s: destroy queues\n", __func__);
2480 destroy_workqueue(wl->eurus_cmd_queue); 2532 destroy_workqueue(wl->eurus_cmd_queue);
2481 destroy_workqueue(wl->event_queue); 2533 destroy_workqueue(wl->event_queue);
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 103697166720..5339e0078d18 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -241,7 +241,7 @@ enum gelic_wl_assoc_state {
241#define GELIC_WEP_KEYS 4 241#define GELIC_WEP_KEYS 4
242struct gelic_wl_info { 242struct gelic_wl_info {
243 /* bss list */ 243 /* bss list */
244 struct semaphore scan_lock; 244 struct mutex scan_lock;
245 struct list_head network_list; 245 struct list_head network_list;
246 struct list_head network_free_list; 246 struct list_head network_free_list;
247 struct gelic_wl_scan_info *networks; 247 struct gelic_wl_scan_info *networks;
@@ -266,7 +266,7 @@ struct gelic_wl_info {
266 enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */ 266 enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
267 267
268 /* association handling */ 268 /* association handling */
269 struct semaphore assoc_stat_lock; 269 struct mutex assoc_stat_lock;
270 struct delayed_work assoc_work; 270 struct delayed_work assoc_work;
271 enum gelic_wl_assoc_state assoc_stat; 271 enum gelic_wl_assoc_state assoc_stat;
272 struct completion assoc_done; 272 struct completion assoc_done;
@@ -288,9 +288,6 @@ struct gelic_wl_info {
288 u8 active_bssid[ETH_ALEN]; /* associated bssid */ 288 u8 active_bssid[ETH_ALEN]; /* associated bssid */
289 unsigned int essid_len; 289 unsigned int essid_len;
290 290
291 /* buffer for hypervisor IO */
292 void *buf;
293
294 struct iw_public_data wireless_data; 291 struct iw_public_data wireless_data;
295 struct iw_statistics iwstat; 292 struct iw_statistics iwstat;
296}; 293};
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index b7f7b2227d56..5f608780c3e8 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1437,9 +1437,9 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1437 reg &= ~PHY_GIG_ALL_PARAMS; 1437 reg &= ~PHY_GIG_ALL_PARAMS;
1438 1438
1439 if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { 1439 if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1440 if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) 1440 if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1441 reg |= PHY_GIG_ADV_1000F; 1441 reg |= PHY_GIG_ADV_1000F;
1442 else 1442 else
1443 reg |= PHY_GIG_ADV_1000H; 1443 reg |= PHY_GIG_ADV_1000H;
1444 } 1444 }
1445 1445
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index b5c1e663417d..5694e894fc7a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2566,7 +2566,7 @@ static int fill_rx_buffers(struct ring_info *ring)
2566 if (block_no) 2566 if (block_no)
2567 rxd_index += (block_no * ring->rxd_count); 2567 rxd_index += (block_no * ring->rxd_count);
2568 2568
2569 if ((block_no == block_no1) && 2569 if ((block_no == block_no1) &&
2570 (off == ring->rx_curr_get_info.offset) && 2570 (off == ring->rx_curr_get_info.offset) &&
2571 (rxdp->Host_Control)) { 2571 (rxdp->Host_Control)) {
2572 DBG_PRINT(INTR_DBG, "%s: Get and Put", 2572 DBG_PRINT(INTR_DBG, "%s: Get and Put",
@@ -2612,7 +2612,7 @@ static int fill_rx_buffers(struct ring_info *ring)
2612 first_rxdp->Control_1 |= RXD_OWN_XENA; 2612 first_rxdp->Control_1 |= RXD_OWN_XENA;
2613 } 2613 }
2614 stats->mem_alloc_fail_cnt++; 2614 stats->mem_alloc_fail_cnt++;
2615 2615
2616 return -ENOMEM ; 2616 return -ENOMEM ;
2617 } 2617 }
2618 stats->mem_allocated += skb->truesize; 2618 stats->mem_allocated += skb->truesize;
@@ -6999,7 +6999,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6999 &skb,(u64 *)&temp0_64, 6999 &skb,(u64 *)&temp0_64,
7000 (u64 *)&temp1_64, 7000 (u64 *)&temp1_64,
7001 (u64 *)&temp2_64, 7001 (u64 *)&temp2_64,
7002 size) == ENOMEM) { 7002 size) == -ENOMEM) {
7003 return 0; 7003 return 0;
7004 } 7004 }
7005 7005
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 4706f7f9acb6..d0a84ba887a5 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -752,7 +752,7 @@ struct ring_info {
752 752
753 /* interface MTU value */ 753 /* interface MTU value */
754 unsigned mtu; 754 unsigned mtu;
755 755
756 /* Buffer Address store. */ 756 /* Buffer Address store. */
757 struct buffAdd **ba; 757 struct buffAdd **ba;
758 758
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 33bb18f810fb..fe41e4ec21ec 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1064,7 +1064,7 @@ static void sbmac_netpoll(struct net_device *netdev)
1064 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), 1064 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
1065 sc->sbm_imr); 1065 sc->sbm_imr);
1066#else 1066#else
1067 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | 1067 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1068 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); 1068 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
1069#endif 1069#endif
1070} 1070}
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index dbad95c295bd..3be13b592b4d 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -4,6 +4,8 @@ config SFC
4 select MII 4 select MII
5 select INET_LRO 5 select INET_LRO
6 select CRC32 6 select CRC32
7 select I2C
8 select I2C_ALGOBIT
7 help 9 help
8 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10-gigabit Ethernet cards based on
9 the Solarflare Communications Solarstorm SFC4000 controller. 11 the Solarflare Communications Solarstorm SFC4000 controller.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1d2daeec7ac1..c8f5704c8fb1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
2 i2c-direct.o selftest.o ethtool.o xfp_phy.o \ 2 selftest.o ethtool.o xfp_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o boards.o sfe4001.o
4 4
5obj-$(CONFIG_SFC) += sfc.o 5obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index 7fc0328dc055..d3d3dd0a1170 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -109,7 +109,7 @@ static struct efx_board_data board_data[] = {
109 [EFX_BOARD_INVALID] = 109 [EFX_BOARD_INVALID] =
110 {NULL, NULL, dummy_init}, 110 {NULL, NULL, dummy_init},
111 [EFX_BOARD_SFE4001] = 111 [EFX_BOARD_SFE4001] =
112 {"SFE4001", "10GBASE-T adapter", sfe4001_poweron}, 112 {"SFE4001", "10GBASE-T adapter", sfe4001_init},
113 [EFX_BOARD_SFE4002] = 113 [EFX_BOARD_SFE4002] =
114 {"SFE4002", "XFP adapter", sfe4002_init}, 114 {"SFE4002", "XFP adapter", sfe4002_init},
115}; 115};
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index 695764dc2e64..e5e844359ce7 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -20,8 +20,7 @@ enum efx_board_type {
20}; 20};
21 21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_poweron(struct efx_nic *efx); 23extern int sfe4001_init(struct efx_nic *efx);
24extern void sfe4001_poweroff(struct efx_nic *efx);
25/* Are we putting the PHY into flash config mode */ 24/* Are we putting the PHY into flash config mode */
26extern unsigned int sfe4001_phy_flash_cfg; 25extern unsigned int sfe4001_phy_flash_cfg;
27 26
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 449760642e31..74265d8553b8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1815,6 +1815,7 @@ static struct efx_board efx_dummy_board_info = {
1815 .init = efx_nic_dummy_op_int, 1815 .init = efx_nic_dummy_op_int,
1816 .init_leds = efx_port_dummy_op_int, 1816 .init_leds = efx_port_dummy_op_int,
1817 .set_fault_led = efx_port_dummy_op_blink, 1817 .set_fault_led = efx_port_dummy_op_blink,
1818 .fini = efx_port_dummy_op_void,
1818}; 1819};
1819 1820
1820/************************************************************************** 1821/**************************************************************************
@@ -1941,6 +1942,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
1941 efx_fini_port(efx); 1942 efx_fini_port(efx);
1942 1943
1943 /* Shutdown the board, then the NIC and board state */ 1944 /* Shutdown the board, then the NIC and board state */
1945 efx->board_info.fini(efx);
1944 falcon_fini_interrupt(efx); 1946 falcon_fini_interrupt(efx);
1945 1947
1946 efx_fini_napi(efx); 1948 efx_fini_napi(efx);
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 790db89db345..630406e142e5 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -13,6 +13,8 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h>
16#include "net_driver.h" 18#include "net_driver.h"
17#include "bitfield.h" 19#include "bitfield.h"
18#include "efx.h" 20#include "efx.h"
@@ -36,10 +38,12 @@
36 * struct falcon_nic_data - Falcon NIC state 38 * struct falcon_nic_data - Falcon NIC state
37 * @next_buffer_table: First available buffer table id 39 * @next_buffer_table: First available buffer table id
38 * @pci_dev2: The secondary PCI device if present 40 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm
39 */ 42 */
40struct falcon_nic_data { 43struct falcon_nic_data {
41 unsigned next_buffer_table; 44 unsigned next_buffer_table;
42 struct pci_dev *pci_dev2; 45 struct pci_dev *pci_dev2;
46 struct i2c_algo_bit_data i2c_data;
43}; 47};
44 48
45/************************************************************************** 49/**************************************************************************
@@ -175,39 +179,57 @@ static inline int falcon_event_present(efx_qword_t *event)
175 * 179 *
176 ************************************************************************** 180 **************************************************************************
177 */ 181 */
178static void falcon_setsdascl(struct efx_i2c_interface *i2c) 182static void falcon_setsda(void *data, int state)
179{ 183{
184 struct efx_nic *efx = (struct efx_nic *)data;
180 efx_oword_t reg; 185 efx_oword_t reg;
181 186
182 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 187 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
183 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1)); 188 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
184 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1)); 189 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
185 falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
186} 190}
187 191
188static int falcon_getsda(struct efx_i2c_interface *i2c) 192static void falcon_setscl(void *data, int state)
189{ 193{
194 struct efx_nic *efx = (struct efx_nic *)data;
190 efx_oword_t reg; 195 efx_oword_t reg;
191 196
192 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 197 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
198 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
199 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
200}
201
202static int falcon_getsda(void *data)
203{
204 struct efx_nic *efx = (struct efx_nic *)data;
205 efx_oword_t reg;
206
207 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
193 return EFX_OWORD_FIELD(reg, GPIO3_IN); 208 return EFX_OWORD_FIELD(reg, GPIO3_IN);
194} 209}
195 210
196static int falcon_getscl(struct efx_i2c_interface *i2c) 211static int falcon_getscl(void *data)
197{ 212{
213 struct efx_nic *efx = (struct efx_nic *)data;
198 efx_oword_t reg; 214 efx_oword_t reg;
199 215
200 falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER); 216 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
201 return EFX_DWORD_FIELD(reg, GPIO0_IN); 217 return EFX_OWORD_FIELD(reg, GPIO0_IN);
202} 218}
203 219
204static struct efx_i2c_bit_operations falcon_i2c_bit_operations = { 220static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
205 .setsda = falcon_setsdascl, 221 .setsda = falcon_setsda,
206 .setscl = falcon_setsdascl, 222 .setscl = falcon_setscl,
207 .getsda = falcon_getsda, 223 .getsda = falcon_getsda,
208 .getscl = falcon_getscl, 224 .getscl = falcon_getscl,
209 .udelay = 100, 225 .udelay = 5,
210 .mdelay = 10, 226 /*
227 * This is the number of system clock ticks after which
228 * i2c-algo-bit gives up waiting for SCL to become high.
229 * It must be at least 2 since the first tick can happen
230 * immediately after it starts waiting.
231 */
232 .timeout = 2,
211}; 233};
212 234
213/************************************************************************** 235/**************************************************************************
@@ -2405,12 +2427,6 @@ int falcon_probe_nic(struct efx_nic *efx)
2405 struct falcon_nic_data *nic_data; 2427 struct falcon_nic_data *nic_data;
2406 int rc; 2428 int rc;
2407 2429
2408 /* Initialise I2C interface state */
2409 efx->i2c.efx = efx;
2410 efx->i2c.op = &falcon_i2c_bit_operations;
2411 efx->i2c.sda = 1;
2412 efx->i2c.scl = 1;
2413
2414 /* Allocate storage for hardware specific data */ 2430 /* Allocate storage for hardware specific data */
2415 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2431 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2416 efx->nic_data = nic_data; 2432 efx->nic_data = nic_data;
@@ -2461,6 +2477,18 @@ int falcon_probe_nic(struct efx_nic *efx)
2461 if (rc) 2477 if (rc)
2462 goto fail5; 2478 goto fail5;
2463 2479
2480 /* Initialise I2C adapter */
2481 efx->i2c_adap.owner = THIS_MODULE;
2482 efx->i2c_adap.class = I2C_CLASS_HWMON;
2483 nic_data->i2c_data = falcon_i2c_bit_operations;
2484 nic_data->i2c_data.data = efx;
2485 efx->i2c_adap.algo_data = &nic_data->i2c_data;
2486 efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
2487 strcpy(efx->i2c_adap.name, "SFC4000 GPIO");
2488 rc = i2c_bit_add_bus(&efx->i2c_adap);
2489 if (rc)
2490 goto fail5;
2491
2464 return 0; 2492 return 0;
2465 2493
2466 fail5: 2494 fail5:
@@ -2635,6 +2663,10 @@ int falcon_init_nic(struct efx_nic *efx)
2635void falcon_remove_nic(struct efx_nic *efx) 2663void falcon_remove_nic(struct efx_nic *efx)
2636{ 2664{
2637 struct falcon_nic_data *nic_data = efx->nic_data; 2665 struct falcon_nic_data *nic_data = efx->nic_data;
2666 int rc;
2667
2668 rc = i2c_del_adapter(&efx->i2c_adap);
2669 BUG_ON(rc);
2638 2670
2639 falcon_free_buffer(efx, &efx->irq_status); 2671 falcon_free_buffer(efx, &efx->irq_status);
2640 2672
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
deleted file mode 100644
index b6c62d0ed9c2..000000000000
--- a/drivers/net/sfc/i2c-direct.c
+++ /dev/null
@@ -1,381 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "i2c-direct.h"
14
15/*
16 * I2C data (SDA) and clock (SCL) line read/writes with appropriate
17 * delays.
18 */
19
20static inline void setsda(struct efx_i2c_interface *i2c, int state)
21{
22 udelay(i2c->op->udelay);
23 i2c->sda = state;
24 i2c->op->setsda(i2c);
25 udelay(i2c->op->udelay);
26}
27
28static inline void setscl(struct efx_i2c_interface *i2c, int state)
29{
30 udelay(i2c->op->udelay);
31 i2c->scl = state;
32 i2c->op->setscl(i2c);
33 udelay(i2c->op->udelay);
34}
35
36static inline int getsda(struct efx_i2c_interface *i2c)
37{
38 int sda;
39
40 udelay(i2c->op->udelay);
41 sda = i2c->op->getsda(i2c);
42 udelay(i2c->op->udelay);
43 return sda;
44}
45
46static inline int getscl(struct efx_i2c_interface *i2c)
47{
48 int scl;
49
50 udelay(i2c->op->udelay);
51 scl = i2c->op->getscl(i2c);
52 udelay(i2c->op->udelay);
53 return scl;
54}
55
56/*
57 * I2C low-level protocol operations
58 *
59 */
60
61static inline void i2c_release(struct efx_i2c_interface *i2c)
62{
63 EFX_WARN_ON_PARANOID(!i2c->scl);
64 EFX_WARN_ON_PARANOID(!i2c->sda);
65 /* Devices may time out if operations do not end */
66 setscl(i2c, 1);
67 setsda(i2c, 1);
68 EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
69 EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
70}
71
72static inline void i2c_start(struct efx_i2c_interface *i2c)
73{
74 /* We may be restarting immediately after a {send,recv}_bit,
75 * so SCL will not necessarily already be high.
76 */
77 EFX_WARN_ON_PARANOID(!i2c->sda);
78 setscl(i2c, 1);
79 setsda(i2c, 0);
80 setscl(i2c, 0);
81 setsda(i2c, 1);
82}
83
84static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
85{
86 EFX_WARN_ON_PARANOID(i2c->scl != 0);
87 setsda(i2c, bit);
88 setscl(i2c, 1);
89 setscl(i2c, 0);
90 setsda(i2c, 1);
91}
92
93static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
94{
95 int bit;
96
97 EFX_WARN_ON_PARANOID(i2c->scl != 0);
98 EFX_WARN_ON_PARANOID(!i2c->sda);
99 setscl(i2c, 1);
100 bit = getsda(i2c);
101 setscl(i2c, 0);
102 return bit;
103}
104
105static inline void i2c_stop(struct efx_i2c_interface *i2c)
106{
107 EFX_WARN_ON_PARANOID(i2c->scl != 0);
108 setsda(i2c, 0);
109 setscl(i2c, 1);
110 setsda(i2c, 1);
111}
112
113/*
114 * I2C mid-level protocol operations
115 *
116 */
117
118/* Sends a byte via the I2C bus and checks for an acknowledgement from
119 * the slave device.
120 */
121static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
122{
123 int i;
124
125 /* Send byte */
126 for (i = 0; i < 8; i++) {
127 i2c_send_bit(i2c, !!(byte & 0x80));
128 byte <<= 1;
129 }
130
131 /* Check for acknowledgement from slave */
132 return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
133}
134
135/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
136static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
137{
138 u8 value = 0;
139 int i;
140
141 /* Receive byte */
142 for (i = 0; i < 8; i++)
143 value = (value << 1) | i2c_recv_bit(i2c);
144
145 /* Send ACK/NACK */
146 i2c_send_bit(i2c, (ack ? 0 : 1));
147
148 return value;
149}
150
151/* Calculate command byte for a read operation */
152static inline u8 i2c_read_cmd(u8 device_id)
153{
154 return ((device_id << 1) | 1);
155}
156
157/* Calculate command byte for a write operation */
158static inline u8 i2c_write_cmd(u8 device_id)
159{
160 return ((device_id << 1) | 0);
161}
162
163int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
164{
165 int rc;
166
167 /* If someone is driving the bus low we just give up. */
168 if (getsda(i2c) == 0 || getscl(i2c) == 0) {
169 EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
170 " Giving up.\n", __func__);
171 return -EFAULT;
172 }
173
174 /* Pretend to initiate a device write */
175 i2c_start(i2c);
176 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
177 if (rc)
178 goto out;
179
180 out:
181 i2c_stop(i2c);
182 i2c_release(i2c);
183
184 return rc;
185}
186
187/* This performs a fast read of one or more consecutive bytes from an
188 * I2C device. Not all devices support consecutive reads of more than
189 * one byte; for these devices use efx_i2c_read() instead.
190 */
191int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
192 u8 device_id, u8 offset, u8 *data, unsigned int len)
193{
194 int i;
195 int rc;
196
197 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
198 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
199 EFX_WARN_ON_PARANOID(data == NULL);
200 EFX_WARN_ON_PARANOID(len < 1);
201
202 /* Select device and starting offset */
203 i2c_start(i2c);
204 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
205 if (rc)
206 goto out;
207 rc = i2c_send_byte(i2c, offset);
208 if (rc)
209 goto out;
210
211 /* Read data from device */
212 i2c_start(i2c);
213 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
214 if (rc)
215 goto out;
216 for (i = 0; i < (len - 1); i++)
217 /* Read and acknowledge all but the last byte */
218 data[i] = i2c_recv_byte(i2c, 1);
219 /* Read last byte with no acknowledgement */
220 data[i] = i2c_recv_byte(i2c, 0);
221
222 out:
223 i2c_stop(i2c);
224 i2c_release(i2c);
225
226 return rc;
227}
228
229/* This performs a fast write of one or more consecutive bytes to an
230 * I2C device. Not all devices support consecutive writes of more
231 * than one byte; for these devices use efx_i2c_write() instead.
232 */
233int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
234 u8 device_id, u8 offset,
235 const u8 *data, unsigned int len)
236{
237 int i;
238 int rc;
239
240 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
241 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
242 EFX_WARN_ON_PARANOID(len < 1);
243
244 /* Select device and starting offset */
245 i2c_start(i2c);
246 rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
247 if (rc)
248 goto out;
249 rc = i2c_send_byte(i2c, offset);
250 if (rc)
251 goto out;
252
253 /* Write data to device */
254 for (i = 0; i < len; i++) {
255 rc = i2c_send_byte(i2c, data[i]);
256 if (rc)
257 goto out;
258 }
259
260 out:
261 i2c_stop(i2c);
262 i2c_release(i2c);
263
264 return rc;
265}
266
267/* I2C byte-by-byte read */
268int efx_i2c_read(struct efx_i2c_interface *i2c,
269 u8 device_id, u8 offset, u8 *data, unsigned int len)
270{
271 int rc;
272
273 /* i2c_fast_read with length 1 is a single byte read */
274 for (; len > 0; offset++, data++, len--) {
275 rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
276 if (rc)
277 return rc;
278 }
279
280 return 0;
281}
282
283/* I2C byte-by-byte write */
284int efx_i2c_write(struct efx_i2c_interface *i2c,
285 u8 device_id, u8 offset, const u8 *data, unsigned int len)
286{
287 int rc;
288
289 /* i2c_fast_write with length 1 is a single byte write */
290 for (; len > 0; offset++, data++, len--) {
291 rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
292 if (rc)
293 return rc;
294 mdelay(i2c->op->mdelay);
295 }
296
297 return 0;
298}
299
300
301/* This is just a slightly neater wrapper round efx_i2c_fast_write
302 * in the case where the target doesn't take an offset
303 */
304int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
305 u8 device_id, const u8 *data, unsigned int len)
306{
307 return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
308}
309
310/* I2C receiving of bytes - does not send an offset byte */
311int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
312 u8 *bytes, unsigned int len)
313{
314 int i;
315 int rc;
316
317 EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
318 EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
319 EFX_WARN_ON_PARANOID(len < 1);
320
321 /* Select device */
322 i2c_start(i2c);
323
324 /* Read data from device */
325 rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
326 if (rc)
327 goto out;
328
329 for (i = 0; i < (len - 1); i++)
330 /* Read and acknowledge all but the last byte */
331 bytes[i] = i2c_recv_byte(i2c, 1);
332 /* Read last byte with no acknowledgement */
333 bytes[i] = i2c_recv_byte(i2c, 0);
334
335 out:
336 i2c_stop(i2c);
337 i2c_release(i2c);
338
339 return rc;
340}
341
342/* SMBus and some I2C devices will time out if the I2C clock is
343 * held low for too long. This is most likely to happen in virtualised
344 * systems (when the entire domain is descheduled) but could in
345 * principle happen due to preemption on any busy system (and given the
346 * potential length of an I2C operation turning preemption off is not
347 * a sensible option). The following functions deal with the failure by
348 * retrying up to a fixed number of times.
349 */
350
351#define I2C_MAX_RETRIES (10)
352
353/* The timeout problem will result in -EIO. If the wrapped function
354 * returns any other error, pass this up and do not retry. */
355#define RETRY_WRAPPER(_f) \
356 int retries = I2C_MAX_RETRIES; \
357 int rc; \
358 while (retries) { \
359 rc = _f; \
360 if (rc != -EIO) \
361 return rc; \
362 retries--; \
363 } \
364 return rc; \
365
366int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
367{
368 RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
369}
370
371int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
372 u8 device_id, u8 offset, u8 *data, unsigned int len)
373{
374 RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
375}
376
377int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
378 u8 device_id, u8 offset, const u8 *data, unsigned int len)
379{
380 RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
381}
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
deleted file mode 100644
index 291e561071f5..000000000000
--- a/drivers/net/sfc/i2c-direct.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_I2C_DIRECT_H
12#define EFX_I2C_DIRECT_H
13
14#include "net_driver.h"
15
16/*
17 * Direct control of an I2C bus
18 */
19
20struct efx_i2c_interface;
21
22/**
23 * struct efx_i2c_bit_operations - I2C bus direct control methods
24 *
25 * I2C bus direct control methods.
26 *
27 * @setsda: Set state of SDA line
28 * @setscl: Set state of SCL line
29 * @getsda: Get state of SDA line
30 * @getscl: Get state of SCL line
31 * @udelay: Delay between each bit operation
32 * @mdelay: Delay between each byte write
33 */
34struct efx_i2c_bit_operations {
35 void (*setsda) (struct efx_i2c_interface *i2c);
36 void (*setscl) (struct efx_i2c_interface *i2c);
37 int (*getsda) (struct efx_i2c_interface *i2c);
38 int (*getscl) (struct efx_i2c_interface *i2c);
39 unsigned int udelay;
40 unsigned int mdelay;
41};
42
43/**
44 * struct efx_i2c_interface - an I2C interface
45 *
46 * An I2C interface.
47 *
48 * @efx: Attached Efx NIC
49 * @op: I2C bus control methods
50 * @sda: Current output state of SDA line
51 * @scl: Current output state of SCL line
52 */
53struct efx_i2c_interface {
54 struct efx_nic *efx;
55 struct efx_i2c_bit_operations *op;
56 unsigned int sda:1;
57 unsigned int scl:1;
58};
59
60extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
61extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
62 u8 device_id, u8 offset,
63 u8 *data, unsigned int len);
64extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
65 u8 device_id, u8 offset,
66 const u8 *data, unsigned int len);
67extern int efx_i2c_read(struct efx_i2c_interface *i2c,
68 u8 device_id, u8 offset, u8 *data, unsigned int len);
69extern int efx_i2c_write(struct efx_i2c_interface *i2c,
70 u8 device_id, u8 offset,
71 const u8 *data, unsigned int len);
72
73extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
74 const u8 *bytes, unsigned int len);
75
76extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
77 u8 *bytes, unsigned int len);
78
79
80/* Versions of the API that retry on failure. */
81extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
82 u8 device_id);
83
84extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
85 u8 device_id, u8 offset, u8 *data, unsigned int len);
86
87extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
88 u8 device_id, u8 offset,
89 const u8 *data, unsigned int len);
90
91#endif /* EFX_I2C_DIRECT_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5e20e7551dae..d803b86c647c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -26,10 +26,10 @@
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/inet_lro.h> 28#include <linux/inet_lro.h>
29#include <linux/i2c.h>
29 30
30#include "enum.h" 31#include "enum.h"
31#include "bitfield.h" 32#include "bitfield.h"
32#include "i2c-direct.h"
33 33
34#define EFX_MAX_LRO_DESCRIPTORS 8 34#define EFX_MAX_LRO_DESCRIPTORS 8
35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS 35#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
@@ -418,7 +418,10 @@ struct efx_blinker {
418 * @init_leds: Sets up board LEDs 418 * @init_leds: Sets up board LEDs
419 * @set_fault_led: Turns the fault LED on or off 419 * @set_fault_led: Turns the fault LED on or off
420 * @blink: Starts/stops blinking 420 * @blink: Starts/stops blinking
421 * @fini: Cleanup function
421 * @blinker: used to blink LEDs in software 422 * @blinker: used to blink LEDs in software
423 * @hwmon_client: I2C client for hardware monitor
424 * @ioexp_client: I2C client for power/port control
422 */ 425 */
423struct efx_board { 426struct efx_board {
424 int type; 427 int type;
@@ -431,7 +434,9 @@ struct efx_board {
431 int (*init_leds)(struct efx_nic *efx); 434 int (*init_leds)(struct efx_nic *efx);
432 void (*set_fault_led) (struct efx_nic *efx, int state); 435 void (*set_fault_led) (struct efx_nic *efx, int state);
433 void (*blink) (struct efx_nic *efx, int start); 436 void (*blink) (struct efx_nic *efx, int start);
437 void (*fini) (struct efx_nic *nic);
434 struct efx_blinker blinker; 438 struct efx_blinker blinker;
439 struct i2c_client *hwmon_client, *ioexp_client;
435}; 440};
436 441
437#define STRING_TABLE_LOOKUP(val, member) \ 442#define STRING_TABLE_LOOKUP(val, member) \
@@ -618,7 +623,7 @@ union efx_multicast_hash {
618 * @membase: Memory BAR value 623 * @membase: Memory BAR value
619 * @biu_lock: BIU (bus interface unit) lock 624 * @biu_lock: BIU (bus interface unit) lock
620 * @interrupt_mode: Interrupt mode 625 * @interrupt_mode: Interrupt mode
621 * @i2c: I2C interface 626 * @i2c_adap: I2C adapter
622 * @board_info: Board-level information 627 * @board_info: Board-level information
623 * @state: Device state flag. Serialised by the rtnl_lock. 628 * @state: Device state flag. Serialised by the rtnl_lock.
624 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) 629 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
@@ -686,7 +691,7 @@ struct efx_nic {
686 spinlock_t biu_lock; 691 spinlock_t biu_lock;
687 enum efx_int_mode interrupt_mode; 692 enum efx_int_mode interrupt_mode;
688 693
689 struct efx_i2c_interface i2c; 694 struct i2c_adapter i2c_adap;
690 struct efx_board board_info; 695 struct efx_board board_info;
691 696
692 enum nic_state state; 697 enum nic_state state;
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 66a0d1442aba..b27849523990 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -106,28 +106,27 @@
106 106
107static const u8 xgphy_max_temperature = 90; 107static const u8 xgphy_max_temperature = 90;
108 108
109void sfe4001_poweroff(struct efx_nic *efx) 109static void sfe4001_poweroff(struct efx_nic *efx)
110{ 110{
111 struct efx_i2c_interface *i2c = &efx->i2c; 111 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
112 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
112 113
113 u8 cfg, out, in; 114 /* Turn off all power rails and disable outputs */
115 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
116 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
117 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
114 118
115 EFX_INFO(efx, "%s\n", __func__); 119 /* Clear any over-temperature alert */
116 120 i2c_smbus_read_byte_data(hwmon_client, RSL);
117 /* Turn off all power rails */ 121}
118 out = 0xff;
119 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120
121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff;
123 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124 122
125 /* Disable port 0 outputs on IO expander */ 123static void sfe4001_fini(struct efx_nic *efx)
126 cfg = 0xff; 124{
127 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 125 EFX_INFO(efx, "%s\n", __func__);
128 126
129 /* Clear any over-temperature alert */ 127 sfe4001_poweroff(efx);
130 efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 128 i2c_unregister_device(efx->board_info.ioexp_client);
129 i2c_unregister_device(efx->board_info.hwmon_client);
131} 130}
132 131
133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 132/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -143,14 +142,26 @@ MODULE_PARM_DESC(phy_flash_cfg,
143 * be turned on before the PHY can be used. 142 * be turned on before the PHY can be used.
144 * Context: Process context, rtnl lock held 143 * Context: Process context, rtnl lock held
145 */ 144 */
146int sfe4001_poweron(struct efx_nic *efx) 145int sfe4001_init(struct efx_nic *efx)
147{ 146{
148 struct efx_i2c_interface *i2c = &efx->i2c; 147 struct i2c_client *hwmon_client, *ioexp_client;
149 unsigned int count; 148 unsigned int count;
150 int rc; 149 int rc;
151 u8 out, in, cfg; 150 u8 out;
152 efx_dword_t reg; 151 efx_dword_t reg;
153 152
153 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
154 if (!hwmon_client)
155 return -EIO;
156 efx->board_info.hwmon_client = hwmon_client;
157
158 ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
159 if (!ioexp_client) {
160 rc = -EIO;
161 goto fail_hwmon;
162 }
163 efx->board_info.ioexp_client = ioexp_client;
164
154 /* 10Xpress has fixed-function LED pins, so there is no board-specific 165 /* 10Xpress has fixed-function LED pins, so there is no board-specific
155 * blink code. */ 166 * blink code. */
156 efx->board_info.blink = tenxpress_phy_blink; 167 efx->board_info.blink = tenxpress_phy_blink;
@@ -166,44 +177,45 @@ int sfe4001_poweron(struct efx_nic *efx)
166 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 177 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
167 udelay(10); 178 udelay(10);
168 179
180 efx->board_info.fini = sfe4001_fini;
181
169 /* Set DSP over-temperature alert threshold */ 182 /* Set DSP over-temperature alert threshold */
170 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); 183 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
171 rc = efx_i2c_write(i2c, MAX6647, WLHO, 184 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
172 &xgphy_max_temperature, 1); 185 xgphy_max_temperature);
173 if (rc) 186 if (rc)
174 goto fail1; 187 goto fail_ioexp;
175 188
176 /* Read it back and verify */ 189 /* Read it back and verify */
177 rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1); 190 rc = i2c_smbus_read_byte_data(hwmon_client, RLHN);
178 if (rc) 191 if (rc < 0)
179 goto fail1; 192 goto fail_ioexp;
180 if (in != xgphy_max_temperature) { 193 if (rc != xgphy_max_temperature) {
181 rc = -EFAULT; 194 rc = -EFAULT;
182 goto fail1; 195 goto fail_ioexp;
183 } 196 }
184 197
185 /* Clear any previous over-temperature alert */ 198 /* Clear any previous over-temperature alert */
186 rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 199 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
187 if (rc) 200 if (rc < 0)
188 goto fail1; 201 goto fail_ioexp;
189 202
190 /* Enable port 0 and port 1 outputs on IO expander */ 203 /* Enable port 0 and port 1 outputs on IO expander */
191 cfg = 0x00; 204 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
192 rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
193 if (rc) 205 if (rc)
194 goto fail1; 206 goto fail_ioexp;
195 cfg = 0xff & ~(1 << P1_SPARE_LBN); 207 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
196 rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 208 0xff & ~(1 << P1_SPARE_LBN));
197 if (rc) 209 if (rc)
198 goto fail2; 210 goto fail_on;
199 211
200 /* Turn all power off then wait 1 sec. This ensures PHY is reset */ 212 /* Turn all power off then wait 1 sec. This ensures PHY is reset */
201 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | 213 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
202 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | 214 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
203 (0 << P0_EN_1V0X_LBN)); 215 (0 << P0_EN_1V0X_LBN));
204 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 216 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
205 if (rc) 217 if (rc)
206 goto fail3; 218 goto fail_on;
207 219
208 schedule_timeout_uninterruptible(HZ); 220 schedule_timeout_uninterruptible(HZ);
209 count = 0; 221 count = 0;
@@ -215,26 +227,26 @@ int sfe4001_poweron(struct efx_nic *efx)
215 if (sfe4001_phy_flash_cfg) 227 if (sfe4001_phy_flash_cfg)
216 out |= 1 << P0_EN_3V3X_LBN; 228 out |= 1 << P0_EN_3V3X_LBN;
217 229
218 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 230 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
219 if (rc) 231 if (rc)
220 goto fail3; 232 goto fail_on;
221 msleep(10); 233 msleep(10);
222 234
223 /* Turn on 1V power rail */ 235 /* Turn on 1V power rail */
224 out &= ~(1 << P0_EN_1V0X_LBN); 236 out &= ~(1 << P0_EN_1V0X_LBN);
225 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 237 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
226 if (rc) 238 if (rc)
227 goto fail3; 239 goto fail_on;
228 240
229 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count); 241 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
230 242
231 schedule_timeout_uninterruptible(HZ); 243 schedule_timeout_uninterruptible(HZ);
232 244
233 /* Check DSP is powered */ 245 /* Check DSP is powered */
234 rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1); 246 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
235 if (rc) 247 if (rc < 0)
236 goto fail3; 248 goto fail_on;
237 if (in & (1 << P1_AFE_PWD_LBN)) 249 if (rc & (1 << P1_AFE_PWD_LBN))
238 goto done; 250 goto done;
239 251
240 /* DSP doesn't look powered in flash config mode */ 252 /* DSP doesn't look powered in flash config mode */
@@ -244,23 +256,17 @@ int sfe4001_poweron(struct efx_nic *efx)
244 256
245 EFX_INFO(efx, "timed out waiting for power\n"); 257 EFX_INFO(efx, "timed out waiting for power\n");
246 rc = -ETIMEDOUT; 258 rc = -ETIMEDOUT;
247 goto fail3; 259 goto fail_on;
248 260
249done: 261done:
250 EFX_INFO(efx, "PHY is powered on\n"); 262 EFX_INFO(efx, "PHY is powered on\n");
251 return 0; 263 return 0;
252 264
253fail3: 265fail_on:
254 /* Turn off all power rails */ 266 sfe4001_poweroff(efx);
255 out = 0xff; 267fail_ioexp:
256 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 268 i2c_unregister_device(ioexp_client);
257 /* Disable port 1 outputs on IO expander */ 269fail_hwmon:
258 out = 0xff; 270 i2c_unregister_device(hwmon_client);
259 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
260fail2:
261 /* Disable port 0 outputs on IO expander */
262 out = 0xff;
263 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
264fail1:
265 return rc; 271 return rc;
266} 272}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
new file mode 100644
index 000000000000..f64d987140a9
--- /dev/null
+++ b/drivers/net/sh_eth.c
@@ -0,0 +1,1174 @@
1/*
2 * SuperH Ethernet device driver
3 *
4 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 */
22
23#include <linux/version.h>
24#include <linux/init.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/delay.h>
28#include <linux/platform_device.h>
29#include <linux/mdio-bitbang.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32#include <linux/cache.h>
33#include <linux/io.h>
34
35#include "sh_eth.h"
36
37/*
38 * Program the hardware MAC address from dev->dev_addr.
39 */
40static void update_mac_address(struct net_device *ndev)
41{
42 u32 ioaddr = ndev->base_addr;
43
44 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
45 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
46 ioaddr + MAHR);
47 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
48 ioaddr + MALR);
49}
50
51/*
52 * Get MAC address from SuperH MAC address register
53 *
54 * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
55 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
56 * When you want use this device, you must set MAC address in bootloader.
57 *
58 */
59static void read_mac_address(struct net_device *ndev)
60{
61 u32 ioaddr = ndev->base_addr;
62
63 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
64 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
65 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
66 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
67 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
68 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
69}
70
71struct bb_info {
72 struct mdiobb_ctrl ctrl;
73 u32 addr;
74 u32 mmd_msk;/* MMD */
75 u32 mdo_msk;
76 u32 mdi_msk;
77 u32 mdc_msk;
78};
79
80/* PHY bit set */
81static void bb_set(u32 addr, u32 msk)
82{
83 ctrl_outl(ctrl_inl(addr) | msk, addr);
84}
85
86/* PHY bit clear */
87static void bb_clr(u32 addr, u32 msk)
88{
89 ctrl_outl((ctrl_inl(addr) & ~msk), addr);
90}
91
92/* PHY bit read */
93static int bb_read(u32 addr, u32 msk)
94{
95 return (ctrl_inl(addr) & msk) != 0;
96}
97
98/* Data I/O pin control */
99static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
100{
101 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
102 if (bit)
103 bb_set(bitbang->addr, bitbang->mmd_msk);
104 else
105 bb_clr(bitbang->addr, bitbang->mmd_msk);
106}
107
108/* Set bit data*/
109static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
110{
111 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
112
113 if (bit)
114 bb_set(bitbang->addr, bitbang->mdo_msk);
115 else
116 bb_clr(bitbang->addr, bitbang->mdo_msk);
117}
118
119/* Get bit data*/
120static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
121{
122 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
123 return bb_read(bitbang->addr, bitbang->mdi_msk);
124}
125
126/* MDC pin control */
127static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
128{
129 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
130
131 if (bit)
132 bb_set(bitbang->addr, bitbang->mdc_msk);
133 else
134 bb_clr(bitbang->addr, bitbang->mdc_msk);
135}
136
137/* mdio bus control struct */
138static struct mdiobb_ops bb_ops = {
139 .owner = THIS_MODULE,
140 .set_mdc = sh_mdc_ctrl,
141 .set_mdio_dir = sh_mmd_ctrl,
142 .set_mdio_data = sh_set_mdio,
143 .get_mdio_data = sh_get_mdio,
144};
145
146static void sh_eth_reset(struct net_device *ndev)
147{
148 u32 ioaddr = ndev->base_addr;
149
150 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
151 mdelay(3);
152 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
153}
154
155/* free skb and descriptor buffer */
156static void sh_eth_ring_free(struct net_device *ndev)
157{
158 struct sh_eth_private *mdp = netdev_priv(ndev);
159 int i;
160
161 /* Free Rx skb ringbuffer */
162 if (mdp->rx_skbuff) {
163 for (i = 0; i < RX_RING_SIZE; i++) {
164 if (mdp->rx_skbuff[i])
165 dev_kfree_skb(mdp->rx_skbuff[i]);
166 }
167 }
168 kfree(mdp->rx_skbuff);
169
170 /* Free Tx skb ringbuffer */
171 if (mdp->tx_skbuff) {
172 for (i = 0; i < TX_RING_SIZE; i++) {
173 if (mdp->tx_skbuff[i])
174 dev_kfree_skb(mdp->tx_skbuff[i]);
175 }
176 }
177 kfree(mdp->tx_skbuff);
178}
179
/* format skb and descriptor buffer
 *
 * (Re)initialize both descriptor rings: allocate an skb for every Rx
 * descriptor, hand the Rx descriptors to the DMAC (RD_RACT), and mark all
 * Tx descriptors empty.  Ring counters are reset to zero.
 */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	/* start both rings from slot 0 with nothing outstanding */
	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;	/* short ring; dirty_rx below records the shortfall */
		skb->dev = ndev; /* Mark as being used by this device. */
		skb_reserve(skb, RX_OFFSET);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		/* buffer address handed to the DMAC must be 4-byte aligned */
		rxdesc->addr = (u32)skb->data & ~0x3UL;
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
	}

	/* negative-wrapped count of Rx slots still lacking an skb */
	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	/* NOTE(review): if the very first dev_alloc_skb() failed, rxdesc is
	 * still NULL here and this dereference would oops — confirm callers
	 * only run this with memory available for at least one skb. */
	rxdesc->status |= cpu_to_le32(RC_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->buffer_length = 0;
	}

	/* last Tx descriptor wraps back to the start of the ring */
	txdesc->status |= cpu_to_le32(TD_TDLE);
}
233
234/* Get skb and descriptor buffer */
235static int sh_eth_ring_init(struct net_device *ndev)
236{
237 struct sh_eth_private *mdp = netdev_priv(ndev);
238 int rx_ringsize, tx_ringsize, ret = 0;
239
240 /*
241 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
242 * card needs room to do 8 byte alignment, +2 so we can reserve
243 * the first 2 bytes, and +16 gets room for the status word from the
244 * card.
245 */
246 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
247 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
248
249 /* Allocate RX and TX skb rings */
250 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
251 GFP_KERNEL);
252 if (!mdp->rx_skbuff) {
253 printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
254 ret = -ENOMEM;
255 return ret;
256 }
257
258 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
259 GFP_KERNEL);
260 if (!mdp->tx_skbuff) {
261 printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
262 ret = -ENOMEM;
263 goto skb_ring_free;
264 }
265
266 /* Allocate all Rx descriptors. */
267 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
268 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
269 GFP_KERNEL);
270
271 if (!mdp->rx_ring) {
272 printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
273 ndev->name, rx_ringsize);
274 ret = -ENOMEM;
275 goto desc_ring_free;
276 }
277
278 mdp->dirty_rx = 0;
279
280 /* Allocate all Tx descriptors. */
281 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
282 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
283 GFP_KERNEL);
284 if (!mdp->tx_ring) {
285 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
286 ndev->name, tx_ringsize);
287 ret = -ENOMEM;
288 goto desc_ring_free;
289 }
290 return ret;
291
292desc_ring_free:
293 /* free DMA buffer */
294 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
295
296skb_ring_free:
297 /* Free Rx and Tx skb ring buffer */
298 sh_eth_ring_free(ndev);
299
300 return ret;
301}
302
/* Bring the controller to an operational state: soft reset, program the
 * E-DMAC and E-MAC registers, build the descriptor rings and start Rx.
 * Always returns 0 (kept as int for symmetry with the other init steps). */
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR);	/* SH7712-DMA-RX-PAD2 */

	/* all sh_eth int mask */
	ctrl_outl(0, ioaddr + EESIPR);

	/* FIFO size set */
	ctrl_outl(0, ioaddr + EDMR);	/* Endian change */

	ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
	ctrl_outl(0, ioaddr + TFTR);	/* no Tx FIFO threshold */

	ctrl_outl(RMCR_RST, ioaddr + RMCR);

	/* per-descriptor interrupt suppression bits, cached for later */
	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

	ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
	ctrl_outl(0, ioaddr + TRIMD);

	/* Descriptor format */
	sh_eth_ring_format(ndev);

	/* tell the DMAC where the rings live */
	ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR);
	ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR);

	/* ack anything pending, then unmask the interrupts we use */
	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
	ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	ctrl_outl(val, ioaddr + ECMR);
	/* NOTE(review): ECSIPR_MPDIP (an ECSIPR bit) is written into ECSR
	 * here — confirm against the datasheet whether ECSR_MPD was meant. */
	ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD |
		  ECSIPR_MPDIP, ioaddr + ECSR);
	ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP |
		  ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
	ctrl_outl(APR_AP, ioaddr + APR);
	ctrl_outl(MPR_MP, ioaddr + MPR);
	ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
	ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif
	/* Setting the Rx mode will start the Rx process. */
	ctrl_outl(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}
370
371/* free Tx skb function */
372static int sh_eth_txfree(struct net_device *ndev)
373{
374 struct sh_eth_private *mdp = netdev_priv(ndev);
375 struct sh_eth_txdesc *txdesc;
376 int freeNum = 0;
377 int entry = 0;
378
379 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
380 entry = mdp->dirty_tx % TX_RING_SIZE;
381 txdesc = &mdp->tx_ring[entry];
382 if (txdesc->status & cpu_to_le32(TD_TACT))
383 break;
384 /* Free the original skb. */
385 if (mdp->tx_skbuff[entry]) {
386 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
387 mdp->tx_skbuff[entry] = NULL;
388 freeNum++;
389 }
390 txdesc->status = cpu_to_le32(TD_TFP);
391 if (entry >= TX_RING_SIZE - 1)
392 txdesc->status |= cpu_to_le32(TD_TDLE);
393
394 mdp->stats.tx_packets++;
395 mdp->stats.tx_bytes += txdesc->buffer_length;
396 }
397 return freeNum;
398}
399
400/* Packet receive function */
401static int sh_eth_rx(struct net_device *ndev)
402{
403 struct sh_eth_private *mdp = netdev_priv(ndev);
404 struct sh_eth_rxdesc *rxdesc;
405
406 int entry = mdp->cur_rx % RX_RING_SIZE;
407 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
408 struct sk_buff *skb;
409 u16 pkt_len = 0;
410 u32 desc_status;
411
412 rxdesc = &mdp->rx_ring[entry];
413 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
414 desc_status = le32_to_cpu(rxdesc->status);
415 pkt_len = rxdesc->frame_length;
416
417 if (--boguscnt < 0)
418 break;
419
420 if (!(desc_status & RDFEND))
421 mdp->stats.rx_length_errors++;
422
423 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
424 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
425 mdp->stats.rx_errors++;
426 if (desc_status & RD_RFS1)
427 mdp->stats.rx_crc_errors++;
428 if (desc_status & RD_RFS2)
429 mdp->stats.rx_frame_errors++;
430 if (desc_status & RD_RFS3)
431 mdp->stats.rx_length_errors++;
432 if (desc_status & RD_RFS4)
433 mdp->stats.rx_length_errors++;
434 if (desc_status & RD_RFS6)
435 mdp->stats.rx_missed_errors++;
436 if (desc_status & RD_RFS10)
437 mdp->stats.rx_over_errors++;
438 } else {
439 swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
440 skb = mdp->rx_skbuff[entry];
441 mdp->rx_skbuff[entry] = NULL;
442 skb_put(skb, pkt_len);
443 skb->protocol = eth_type_trans(skb, ndev);
444 netif_rx(skb);
445 ndev->last_rx = jiffies;
446 mdp->stats.rx_packets++;
447 mdp->stats.rx_bytes += pkt_len;
448 }
449 rxdesc->status |= cpu_to_le32(RD_RACT);
450 entry = (++mdp->cur_rx) % RX_RING_SIZE;
451 }
452
453 /* Refill the Rx ring buffers. */
454 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
455 entry = mdp->dirty_rx % RX_RING_SIZE;
456 rxdesc = &mdp->rx_ring[entry];
457 if (mdp->rx_skbuff[entry] == NULL) {
458 skb = dev_alloc_skb(mdp->rx_buf_sz);
459 mdp->rx_skbuff[entry] = skb;
460 if (skb == NULL)
461 break; /* Better luck next round. */
462 skb->dev = ndev;
463 skb_reserve(skb, RX_OFFSET);
464 rxdesc->addr = (u32)skb->data & ~0x3UL;
465 }
466 /* The size of the buffer is 16 byte boundary. */
467 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
468 if (entry >= RX_RING_SIZE - 1)
469 rxdesc->status |=
470 cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL);
471 else
472 rxdesc->status |=
473 cpu_to_le32(RD_RACT | RD_RFP);
474 }
475
476 /* Restart Rx engine if stopped. */
477 /* If we don't need to check status, don't. -KDU */
478 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
479
480 return 0;
481}
482
/* error control function
 *
 * Called from the interrupt handler (mdp->lock held) with the raw EESR
 * value; updates statistics and recovers the link / DMA engines where
 * possible.
 */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;

	if (intr_status & EESR_ECI) {
		/* E-MAC (FELIC) status change */
		felic_stat = ctrl_inl(ioaddr + ECSR);
		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			u32 link_stat = (ctrl_inl(ioaddr + PSR));
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				/* briefly mask ECI while re-clearing ECSR */
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/*clear int */
				ctrl_outl(ctrl_inl(ioaddr + ECSR),
					  ioaddr + ECSR);
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write buck end. unused write back interrupt */
		/* NOTE(review): TABT is only counted when TWB is also set —
		 * confirm whether a bare transmit abort should count too. */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			printk(KERN_ERR "Receive Frame Overflow\n");
		}
	}

	if (intr_status & EESR_ADE) {
		/* NOTE(review): TFE counted only when ADE and TDE are both
		 * set — looks over-nested, verify against the intent. */
		if (intr_status & EESR_TDE) {
			if (intr_status & EESR_TFE)
				mdp->stats.tx_fifo_errors++;
		}
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		/* restart the Rx engine if it has stopped */
		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
		printk(KERN_ERR "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		printk(KERN_ERR "Receive FIFO Overflow\n");
	}
	if (intr_status &
	    (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) {
		/* Tx error */
		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
		/* dmesg */
		printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
		       ndev->name, intr_status, mdp->cur_tx);
		printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
		       mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
574
/* Interrupt handler: reads and acks EESR, then dispatches Rx, Tx-completion
 * and error handling under mdp->lock.  Always reports IRQ_HANDLED. */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr, boguscnt = RX_RING_SIZE;
	u32 intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* read all pending causes and acknowledge them in one shot */
	intr_status = ctrl_inl(ioaddr + EESR);
	/* Clear interrupt */
	ctrl_outl(intr_status, ioaddr + EESR);

	if (intr_status & (EESR_FRC | EESR_RINT8 |
			   EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 |
			   EESR_RINT1))
		sh_eth_rx(ndev);
	if (intr_status & (EESR_FTC |
			   EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & EESR_ERR_CHECK)
		sh_eth_error(ndev, intr_status);

	/* NOTE(review): boguscnt is unsigned and decremented exactly once
	 * from RX_RING_SIZE, so this condition can never be true — the
	 * warning below is dead code. */
	if (--boguscnt < 0) {
		printk(KERN_WARNING
		       "%s: Too much work at interrupt, status=0x%4.4x.\n",
		       ndev->name, intr_status);
	}

	spin_unlock(&mdp->lock);

	return IRQ_HANDLED;
}
613
614static void sh_eth_timer(unsigned long data)
615{
616 struct net_device *ndev = (struct net_device *)data;
617 struct sh_eth_private *mdp = netdev_priv(ndev);
618
619 mod_timer(&mdp->timer, jiffies + (10 * HZ));
620}
621
/* PHY state control function
 *
 * phylib callback: mirrors the PHY's link/speed/duplex into mdp and
 * enables or quiesces the queue accordingly.
 */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
		}
		if (mdp->link == PHY_DOWN) {
			/* NOTE(review): ECMR_DM (full duplex) is OR'ed in
			 * unconditionally here, regardless of the duplex the
			 * PHY negotiated — confirm this is intended. */
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
				  | ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			netif_schedule(ndev);
			netif_carrier_on(ndev);
			netif_start_queue(ndev);
		}
	} else if (mdp->link) {
		/* link went down: stop traffic and forget the old settings */
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
		netif_stop_queue(ndev);
		netif_carrier_off(ndev);
	}

	if (new_state)
		phy_print_status(phydev);
}
661
662/* PHY init function */
663static int sh_eth_phy_init(struct net_device *ndev)
664{
665 struct sh_eth_private *mdp = netdev_priv(ndev);
666 char phy_id[BUS_ID_SIZE];
667 struct phy_device *phydev = NULL;
668
669 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
670 mdp->mii_bus->id , mdp->phy_id);
671
672 mdp->link = PHY_DOWN;
673 mdp->speed = 0;
674 mdp->duplex = -1;
675
676 /* Try connect to PHY */
677 phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
678 0, PHY_INTERFACE_MODE_MII);
679 if (IS_ERR(phydev)) {
680 dev_err(&ndev->dev, "phy_connect failed\n");
681 return PTR_ERR(phydev);
682 }
683 dev_info(&ndev->dev, "attached phy %i to driver %s\n",
684 phydev->addr, phydev->drv->name);
685
686 mdp->phydev = phydev;
687
688 return 0;
689}
690
691/* PHY control start function */
692static int sh_eth_phy_start(struct net_device *ndev)
693{
694 struct sh_eth_private *mdp = netdev_priv(ndev);
695 int ret;
696
697 ret = sh_eth_phy_init(ndev);
698 if (ret)
699 return ret;
700
701 /* reset phy - this also wakes it from PDOWN */
702 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
703 phy_start(mdp->phydev);
704
705 return 0;
706}
707
708/* network device open function */
709static int sh_eth_open(struct net_device *ndev)
710{
711 int ret = 0;
712 struct sh_eth_private *mdp = netdev_priv(ndev);
713
714 ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev);
715 if (ret) {
716 printk(KERN_ERR "Can not assign IRQ number to %s\n", CARDNAME);
717 return ret;
718 }
719
720 /* Descriptor set */
721 ret = sh_eth_ring_init(ndev);
722 if (ret)
723 goto out_free_irq;
724
725 /* device init */
726 ret = sh_eth_dev_init(ndev);
727 if (ret)
728 goto out_free_irq;
729
730 /* PHY control start*/
731 ret = sh_eth_phy_start(ndev);
732 if (ret)
733 goto out_free_irq;
734
735 /* Set the timer to check for link beat. */
736 init_timer(&mdp->timer);
737 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
738 setup_timer(&mdp->timer, sh_eth_timer, ndev);
739
740 return ret;
741
742out_free_irq:
743 free_irq(ndev->irq, ndev);
744 return ret;
745}
746
747/* Timeout function */
748static void sh_eth_tx_timeout(struct net_device *ndev)
749{
750 struct sh_eth_private *mdp = netdev_priv(ndev);
751 u32 ioaddr = ndev->base_addr;
752 struct sh_eth_rxdesc *rxdesc;
753 int i;
754
755 netif_stop_queue(ndev);
756
757 /* worning message out. */
758 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
759 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
760
761 /* tx_errors count up */
762 mdp->stats.tx_errors++;
763
764 /* timer off */
765 del_timer_sync(&mdp->timer);
766
767 /* Free all the skbuffs in the Rx queue. */
768 for (i = 0; i < RX_RING_SIZE; i++) {
769 rxdesc = &mdp->rx_ring[i];
770 rxdesc->status = 0;
771 rxdesc->addr = 0xBADF00D0;
772 if (mdp->rx_skbuff[i])
773 dev_kfree_skb(mdp->rx_skbuff[i]);
774 mdp->rx_skbuff[i] = NULL;
775 }
776 for (i = 0; i < TX_RING_SIZE; i++) {
777 if (mdp->tx_skbuff[i])
778 dev_kfree_skb(mdp->tx_skbuff[i]);
779 mdp->tx_skbuff[i] = NULL;
780 }
781
782 /* device init */
783 sh_eth_dev_init(ndev);
784
785 /* timer on */
786 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
787 add_timer(&mdp->timer);
788}
789
790/* Packet transmit function */
791static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
792{
793 struct sh_eth_private *mdp = netdev_priv(ndev);
794 struct sh_eth_txdesc *txdesc;
795 u32 entry;
796 int flags;
797
798 spin_lock_irqsave(&mdp->lock, flags);
799 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
800 if (!sh_eth_txfree(ndev)) {
801 netif_stop_queue(ndev);
802 spin_unlock_irqrestore(&mdp->lock, flags);
803 return 1;
804 }
805 }
806 spin_unlock_irqrestore(&mdp->lock, flags);
807
808 entry = mdp->cur_tx % TX_RING_SIZE;
809 mdp->tx_skbuff[entry] = skb;
810 txdesc = &mdp->tx_ring[entry];
811 txdesc->addr = (u32)(skb->data);
812 /* soft swap. */
813 swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
814 /* write back */
815 __flush_purge_region(skb->data, skb->len);
816 if (skb->len < ETHERSMALL)
817 txdesc->buffer_length = ETHERSMALL;
818 else
819 txdesc->buffer_length = skb->len;
820
821 if (entry >= TX_RING_SIZE - 1)
822 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
823 else
824 txdesc->status |= cpu_to_le32(TD_TACT);
825
826 mdp->cur_tx++;
827
828 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
829 ndev->trans_start = jiffies;
830
831 return 0;
832}
833
/* device close function
 *
 * Stop traffic, mask interrupts, disconnect the PHY and release the IRQ,
 * timer, skb rings and DMA descriptor rings.
 */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	ctrl_outl(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	ctrl_outl(0, ioaddr + EDTRR);
	ctrl_outl(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free Rx DMA descriptor ring */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free Tx DMA descriptor ring */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	return 0;
}
873
/* Fold the hardware's write-to-clear error counters into the software
 * statistics and return them. */
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
	/* NOTE(review): CNDCR (carrier-not-detect) is also folded into
	 * tx_carrier_errors, same as LCCR — confirm that is intended. */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */

	return &mdp->stats;
}
890
891/* ioctl to device funciotn*/
892static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
893 int cmd)
894{
895 struct sh_eth_private *mdp = netdev_priv(ndev);
896 struct phy_device *phydev = mdp->phydev;
897
898 if (!netif_running(ndev))
899 return -EINVAL;
900
901 if (!phydev)
902 return -ENODEV;
903
904 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
905}
906
907
908/* Multicast reception directions set */
909static void sh_eth_set_multicast_list(struct net_device *ndev)
910{
911 u32 ioaddr = ndev->base_addr;
912
913 if (ndev->flags & IFF_PROMISC) {
914 /* Set promiscuous. */
915 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
916 ioaddr + ECMR);
917 } else {
918 /* Normal, unicast/broadcast-only mode. */
919 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
920 ioaddr + ECMR);
921 }
922}
923
/* SuperH's TSU register init function
 *
 * Put the Terminal Switching Unit into a quiescent default state:
 * forwarding off, no CAM entries, all TSU interrupts masked.  Register
 * writes in hardware-defined order; keep the sequence as-is.
 */
static void sh_eth_tsu_init(u32 ioaddr)
{
	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
	ctrl_outl(0, ioaddr + TSU_FCM);		/* forward fifo 3k-3k */
	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
	ctrl_outl(0, ioaddr + TSU_PRISL0);
	ctrl_outl(0, ioaddr + TSU_PRISL1);
	ctrl_outl(0, ioaddr + TSU_FWSL0);
	ctrl_outl(0, ioaddr + TSU_FWSL1);
	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	ctrl_outl(0, ioaddr + TSU_TEN);		/* Disable all CAM entry */
	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}
947
/* MDIO bus release function
 *
 * Tear down what sh_mdio_init() set up: unregister the bus, drop the
 * drvdata reference, and free the bitbang-allocated bus structure.
 */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	/* NOTE(review): this frees the mii_bus; the bb_info container
	 * kzalloc'd in sh_mdio_init() is not visibly freed here — confirm
	 * free_mdio_bitbang() covers it or this leaks. */
	free_mdio_bitbang(bus);

	return 0;
}
964
/* MDIO bus init function
 *
 * Allocate the bitbang controller bound to the PIR register, wrap it in a
 * mii_bus, mark every PHY address as polled and register the bus.  The
 * bus pointer is stashed in the net_device drvdata for sh_mdio_release().
 * Returns 0 or a negative errno with partial allocations unwound.
 */
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init: pin masks within the PIR register */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII contorller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->dev = &ndev->dev;
	mdp->mii_bus->id = id;

	/* PHY IRQ: no interrupt line wired up, poll every address */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* regist mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	/* NOTE(review): the bus came from alloc_mdio_bitbang(); confirm a
	 * plain kfree() (rather than free_mdio_bitbang()) is the correct
	 * release on this error path. */
	kfree(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
1030
1031static int sh_eth_drv_probe(struct platform_device *pdev)
1032{
1033 int ret, i, devno = 0;
1034 struct resource *res;
1035 struct net_device *ndev = NULL;
1036 struct sh_eth_private *mdp;
1037
1038 /* get base addr */
1039 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1040 if (unlikely(res == NULL)) {
1041 dev_err(&pdev->dev, "invalid resource\n");
1042 ret = -EINVAL;
1043 goto out;
1044 }
1045
1046 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1047 if (!ndev) {
1048 printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
1049 ret = -ENOMEM;
1050 goto out;
1051 }
1052
1053 /* The sh Ether-specific entries in the device structure. */
1054 ndev->base_addr = res->start;
1055 devno = pdev->id;
1056 if (devno < 0)
1057 devno = 0;
1058
1059 ndev->dma = -1;
1060 ndev->irq = platform_get_irq(pdev, 0);
1061 if (ndev->irq < 0) {
1062 ret = -ENODEV;
1063 goto out_release;
1064 }
1065
1066 SET_NETDEV_DEV(ndev, &pdev->dev);
1067
1068 /* Fill in the fields of the device structure with ethernet values. */
1069 ether_setup(ndev);
1070
1071 mdp = netdev_priv(ndev);
1072 spin_lock_init(&mdp->lock);
1073
1074 /* get PHY ID */
1075 mdp->phy_id = (int)pdev->dev.platform_data;
1076
1077 /* set function */
1078 ndev->open = sh_eth_open;
1079 ndev->hard_start_xmit = sh_eth_start_xmit;
1080 ndev->stop = sh_eth_close;
1081 ndev->get_stats = sh_eth_get_stats;
1082 ndev->set_multicast_list = sh_eth_set_multicast_list;
1083 ndev->do_ioctl = sh_eth_do_ioctl;
1084 ndev->tx_timeout = sh_eth_tx_timeout;
1085 ndev->watchdog_timeo = TX_TIMEOUT;
1086
1087 mdp->post_rx = POST_RX >> (devno << 1);
1088 mdp->post_fw = POST_FW >> (devno << 1);
1089
1090 /* read and set MAC address */
1091 read_mac_address(ndev);
1092
1093 /* First device only init */
1094 if (!devno) {
1095 /* reset device */
1096 ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR);
1097 mdelay(1);
1098
1099 /* TSU init (Init only)*/
1100 sh_eth_tsu_init(SH_TSU_ADDR);
1101 }
1102
1103 /* network device register */
1104 ret = register_netdev(ndev);
1105 if (ret)
1106 goto out_release;
1107
1108 /* mdio bus init */
1109 ret = sh_mdio_init(ndev, pdev->id);
1110 if (ret)
1111 goto out_unregister;
1112
1113 /* pritnt device infomation */
1114 printk(KERN_INFO "%s: %s at 0x%x, ",
1115 ndev->name, CARDNAME, (u32) ndev->base_addr);
1116
1117 for (i = 0; i < 5; i++)
1118 printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]);
1119 printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1120
1121 platform_set_drvdata(pdev, ndev);
1122
1123 return ret;
1124
1125out_unregister:
1126 unregister_netdev(ndev);
1127
1128out_release:
1129 /* net_dev free */
1130 if (ndev)
1131 free_netdev(ndev);
1132
1133out:
1134 return ret;
1135}
1136
/* Platform remove: undo sh_eth_drv_probe() — release the MDIO bus,
 * unregister the netdev and free it. */
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	/* NOTE(review): no workqueue usage is visible in this driver —
	 * confirm this flush_scheduled_work() is actually needed. */
	flush_scheduled_work();

	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
1150
/* Platform driver glue: matched against platform devices named CARDNAME. */
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
	},
};
1158
/* Module entry point: register the platform driver with the core. */
static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}
1163
/* Module exit point: unregister the platform driver. */
static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}
1168
/* module entry/exit hooks and metadata */
module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
new file mode 100644
index 000000000000..ca2db6bb3c61
--- /dev/null
+++ b/drivers/net/sh_eth.h
@@ -0,0 +1,464 @@
1/*
2 * SuperH Ethernet device driver
3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 */
22
#ifndef __SH_ETH_H__
#define __SH_ETH_H__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* driver identity and basic sizing */
#define CARDNAME	"sh-eth"
#define TX_TIMEOUT	(5*HZ)		/* netdev watchdog timeout */

#define TX_RING_SIZE	128	/* Tx ring size */
#define RX_RING_SIZE	128	/* Rx ring size */
#define RX_OFFSET	2	/* skb offset (IP header alignment) */
#define ETHERSMALL	60	/* minimum ethernet frame payload */
#define PKT_BUF_SZ	1538	/* default Rx buffer size */

/* Chip Base Address */
#define SH_ETH0_BASE	0xA7000000
#define SH_ETH1_BASE	0xA7000400
#define SH_TSU_ADDR	0xA7000804
46
/* Chip Registers (offsets from the device base address) */
/* E-DMAC */
#define EDMR	0x0000	/* E-DMAC mode */
#define EDTRR	0x0004	/* transmit request */
#define EDRRR	0x0008	/* receive request */
#define TDLAR	0x000C	/* Tx descriptor list address */
#define RDLAR	0x0010	/* Rx descriptor list address */
#define EESR	0x0014	/* E-DMAC status */
#define EESIPR	0x0018	/* E-DMAC interrupt mask */
#define TRSCER	0x001C	/* copy-to-descriptor-status enable */
#define RMFCR	0x0020
#define TFTR	0x0024	/* Tx FIFO threshold */
#define FDR	0x0028	/* FIFO depth */
#define RMCR	0x002C	/* receive method control */
#define EDOCR	0x0030
#define FCFTR	0x0034	/* flow control FIFO thresholds */
#define RPADIR	0x0038	/* receive pad insertion */
#define TRIMD	0x003C
#define RBWAR	0x0040
#define RDFAR	0x0044
#define TBRAR	0x004C
#define TDFAR	0x0050
/* Ether Register (E-MAC / FELIC) */
#define ECMR	0x0160	/* E-MAC mode */
#define ECSR	0x0164	/* E-MAC status */
#define ECSIPR	0x0168	/* E-MAC interrupt mask */
#define PIR	0x016C	/* PHY MDIO bit-bang pins */
#define MAHR	0x0170	/* MAC address high */
#define MALR	0x0174	/* MAC address low */
#define RFLR	0x0178
#define PSR	0x017C	/* PHY status (link bit) */
/* write-to-clear statistics counters */
#define TROCR	0x0180
#define CDCR	0x0184
#define LCCR	0x0188
#define CNDCR	0x018C
#define CEFCR	0x0194
#define FRECR	0x0198
#define TSFRCR	0x019C
#define TLFRCR	0x01A0
#define RFCR	0x01A4
#define MAFCR	0x01A8
#define IPGR	0x01B4
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
#define APR	0x01B8
#define MPR	0x01BC
#define TPAUSER	0x1C4
#define BCFR	0x1CC
#endif /* CONFIG_CPU_SUBTYPE_SH7710 */

#define ARSTR	0x0800	/* software reset for the whole block */

/* TSU (Terminal Switching Unit) */
#define TSU_CTRST	0x004
#define TSU_FWEN0	0x010
#define TSU_FWEN1	0x014
#define TSU_FCM	0x018
#define TSU_BSYSL0	0x020
#define TSU_BSYSL1	0x024
#define TSU_PRISL0	0x028
#define TSU_PRISL1	0x02C
#define TSU_FWSL0	0x030
#define TSU_FWSL1	0x034
#define TSU_FWSLC	0x038
#define TSU_QTAGM0	0x040
#define TSU_QTAGM1	0x044
#define TSU_ADQT0	0x048
#define TSU_ADQT1	0x04C
#define TSU_FWSR	0x050
#define TSU_FWINMK	0x054
#define TSU_ADSBSY	0x060
#define TSU_TEN	0x064
#define TSU_POST1	0x070
#define TSU_POST2	0x074
#define TSU_POST3	0x078
#define TSU_POST4	0x07C
/* per-port frame counters */
#define TXNLCR0	0x080
#define TXALCR0	0x084
#define RXNLCR0	0x088
#define RXALCR0	0x08C
#define FWNLCR0	0x090
#define FWALCR0	0x094
#define TXNLCR1	0x0A0
#define TXALCR1	0x0A4
#define RXNLCR1	0x0A8
#define RXALCR1	0x0AC
#define FWNLCR1	0x0B0
#define FWALCR1	0x0B4

/* CAM address table: 32 entries of high/low word pairs */
#define TSU_ADRH0	0x0100
#define TSU_ADRL0	0x0104
#define TSU_ADRL31	0x01FC
138
/* Register bit definitions */

/* EDMR: E-DMAC mode register */
enum DMAC_M_BIT {
	EDMR_DL1 = 0x20,	/* descriptor length select, bit 1 */
	EDMR_DL0 = 0x10,	/* descriptor length select, bit 0 */
	EDMR_SRST = 0x01,	/* software reset */
};

/* EDTRR: E-DMAC transmit request register */
enum DMAC_T_BIT {
	EDTRR_TRNS = 0x01,	/* start/continue transmission */
};

/* EDRRR: E-DMAC receive request register */
enum EDRRR_R_BIT {
	EDRRR_R = 0x01,		/* enable reception */
};

/* TPAUSER: transmit pause frame timer */
enum TPAUSER_BIT {
	TPAUSER_TPAUSE = 0x0000ffff,	/* pause time value mask */
	TPAUSER_UNLIMITED = 0,
};

/* BCFR: receive pause frame timer */
enum BCFR_BIT {
	BCFR_RPAUSE = 0x0000ffff,	/* pause time value mask */
	BCFR_UNLIMITED = 0,
};

/* PIR: PHY MII bit-bang interface register */
enum PIR_BIT {
	PIR_MDI = 0x08,		/* MII data in */
	PIR_MDO = 0x04,		/* MII data out */
	PIR_MMD = 0x02,		/* MII data direction */
	PIR_MDC = 0x01,		/* MII clock */
};

/* PSR: PHY status register */
enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };

/* EESR: E-DMAC/E-MAC status register */
enum EESR_BIT {
	EESR_TWB = 0x40000000, EESR_TABT = 0x04000000,
	EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
	EESR_ADE = 0x00800000, EESR_ECI = 0x00400000,
	EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
	EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
	EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
	EESR_TINT4 = 0x00000800, EESR_TINT3 = 0x00000400,
	EESR_TINT2 = 0x00000200, EESR_TINT1 = 0x00000100,
	EESR_RINT8 = 0x00000080, EESR_RINT5 = 0x00000010,
	EESR_RINT4 = 0x00000008, EESR_RINT3 = 0x00000004,
	EESR_RINT2 = 0x00000002, EESR_RINT1 = 0x00000001,
};

/* All EESR conditions the driver treats as errors */
#define EESR_ERR_CHECK	(EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
		| EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)

/* EESIPR: interrupt enable mask, mirrors the EESR bit layout */
enum DMAC_IM_BIT {
	DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
	DMAC_M_RABT = 0x02000000,
	DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
	DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
	DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
	DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
	DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
	DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
	DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
	DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
	DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
	DMAC_M_RINT1 = 0x00000001,
};

/* Receive descriptor status word (RD0) bits */
enum RD_STS_BIT {
	RD_RACT = 0x80000000,	/* descriptor active (owned by hardware) */
	RC_RDEL = 0x40000000,	/* last descriptor of the ring */
	RC_RFP1 = 0x20000000,	/* frame position, upper bit */
	RC_RFP0 = 0x10000000,	/* frame position, lower bit */
	RD_RFE = 0x08000000,	/* receive frame error summary */
	RD_RFS10 = 0x00000200, RD_RFS9 = 0x00000100,
	RD_RFS8 = 0x00000080, RD_RFS7 = 0x00000040,
	RD_RFS6 = 0x00000020, RD_RFS5 = 0x00000010,
	RD_RFS4 = 0x00000008, RD_RFS3 = 0x00000004,
	RD_RFS2 = 0x00000002, RD_RFS1 = 0x00000001,
};
#define RDF1ST	RC_RFP1			/* first buffer of a frame */
#define RDFEND	RC_RFP0			/* last buffer of a frame */
#define RD_RFP	(RC_RFP1|RC_RFP0)	/* whole frame in one buffer */

/* FCFTR: FIFO flow-control threshold register */
enum FCFTR_BIT {
	FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
	FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
	FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
};
#define FIFO_F_D_RFF	(FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
#define FIFO_F_D_RFD	(FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)

/* Transmit descriptor status word (TD0) bits */
enum TD_STS_BIT {
	TD_TACT = 0x80000000,	/* descriptor active (owned by hardware) */
	TD_TDLE = 0x40000000,	/* last descriptor of the ring */
	TD_TFP1 = 0x20000000,	/* frame position, upper bit */
	TD_TFP0 = 0x10000000,	/* frame position, lower bit */
};
#define TDF1ST	TD_TFP1			/* first buffer of a frame */
#define TDFEND	TD_TFP0			/* last buffer of a frame */
#define TD_TFP	(TD_TFP1|TD_TFP0)	/* whole frame in one buffer */

/* RMCR: receive method control register */
enum RECV_RST_BIT { RMCR_RST = 0x01, };

/* ECMR: E-MAC mode register */
enum FELIC_MODE_BIT {
	ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
	ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
	ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
	ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002,
	ECMR_PRM = 0x00000001,
};

/* ECSR: E-MAC status register */
enum ECSR_STATUS_BIT {
	ECSR_BRCRX = 0x20,	/* broadcast frame received */
	ECSR_PSRTO = 0x10,	/* pause frame retry-limit reached */
	ECSR_LCHNG = 0x04,	/* link signal changed */
	ECSR_MPD = 0x02,	/* Magic Packet detected */
	ECSR_ICD = 0x01,	/* illegal carrier detected */
};

/* ECSIPR: E-MAC interrupt enable mask, mirrors ECSR */
enum ECSIPR_STATUS_MASK_BIT {
	ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04,
	ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
};

/* APR: automatic PAUSE frame register */
enum APR_BIT {
	APR_AP = 0x00000001,
};

/* MPR: manual PAUSE frame register */
enum MPR_BIT {
	MPR_MP = 0x00000001,
};

/* TRSCER: per-descriptor interrupt source copy enable */
enum DESC_I_BIT {
	DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
	DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
	DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
	DESC_I_RINT1 = 0x0001,
};
283
/* RPADIR: receive data padding insertion register */
enum RPADIR_BIT {
	RPADIR_PADS1 = 0x20000,	/* padding size select, bit 1 */
	RPADIR_PADS0 = 0x10000,	/* padding size select, bit 0 */
	RPADIR_PADR = 0x0003f,	/* padding insertion position mask */
};

/* FDR: FIFO depth register */
enum FIFO_SIZE_BIT {
	FIFO_SIZE_T = 0x00000700,	/* transmit FIFO size field */
	FIFO_SIZE_R = 0x00000007,	/* receive FIFO size field */
};

/* MII management register numbers used by this driver */
enum phy_offsets {
	PHY_CTRL = 0,	/* basic control */
	PHY_STAT = 1,	/* basic status */
	PHY_IDT1 = 2,	/* PHY identifier 1 */
	PHY_IDT2 = 3,	/* PHY identifier 2 */
	PHY_ANA = 4,	/* auto-negotiation advertisement */
	PHY_ANL = 5,	/* auto-negotiation link partner ability */
	PHY_ANE = 6,	/* auto-negotiation expansion */
	PHY_16 = 16,	/* vendor-specific (DM9161) register */
};

/* PHY_CTRL: basic mode control register bits */
enum PHY_CTRL_BIT {
	PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
	PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
	PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
};
#define DM9161_PHY_C_ANEGEN	0	/* DM9161: auto-negotiation special case */

/* PHY_STAT: basic mode status register bits */
enum PHY_STAT_BIT {
	PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
	PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
	PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
	PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
};

/* PHY_ANA: auto-negotiation advertisement bits */
enum PHY_ANA_BIT {
	PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
	PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
	PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
	PHY_A_SEL = 0x001f,
};

/* PHY_ANL: auto-negotiation link partner ability bits */
enum PHY_ANL_BIT {
	PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
	PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
	PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
	PHY_L_SEL = 0x001f,
};

/* PHY_ANE: auto-negotiation expansion bits */
enum PHY_ANE_BIT {
	PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
	PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
};

/* DM9161 vendor-specific register 16 bits */
enum PHY_16_BIT {
	PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
	PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
	PHY_16_TXselect = 0x0400,
	PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
	PHY_16_Force100LNK = 0x0080,
	PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
	PHY_16_RPDCTR_EN = 0x0010,
	PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
	PHY_16_Sleepmode = 0x0002,
	PHY_16_RemoteLoopOut = 0x0001,
};

/* TSU_POSTn: per-CAM-entry receive/forward enable nibbles */
#define POST_RX		0x08	/* receive enable */
#define POST_FW		0x04	/* forward enable */
#define POST0_RX	(POST_RX)	/* port 0 receive */
#define POST0_FW	(POST_FW)	/* port 0 forward */
#define POST1_RX	(POST_RX >> 2)	/* port 1 receive */
#define POST1_FW	(POST_FW >> 2)	/* port 1 forward */
#define POST_ALL	(POST0_RX | POST0_FW | POST1_RX | POST1_FW)

/* ARSTR: software reset register */
enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };

/* TSU_FWEN0: forward enable, port 0 */
enum TSU_FWEN0_BIT {
	TSU_FWEN0_0 = 0x00000001,
};

/* TSU_ADSBSY: address table access busy flag */
enum TSU_ADSBSY_BIT {
	TSU_ADSBSY_0 = 0x00000001,
};

/* TSU_TEN: address table entry 0 enable */
enum TSU_TEN_BIT {
	TSU_TEN_0 = 0x80000000,
};

/* TSU_FWSL0: forward select, port 0 */
enum TSU_FWSL0_BIT {
	TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
	TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
	TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
};

/* TSU_FWSLC: forward select control */
enum TSU_FWSLC_BIT {
	TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
	TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
	TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
	TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
	TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
};
392
393/*
394 * The sh ether Tx buffer descriptors.
395 * This structure should be 20 bytes.
396 */
397struct sh_eth_txdesc {
398 u32 status; /* TD0 */
399#if defined(CONFIG_CPU_LITTLE_ENDIAN)
400 u16 pad0; /* TD1 */
401 u16 buffer_length; /* TD1 */
402#else
403 u16 buffer_length; /* TD1 */
404 u16 pad0; /* TD1 */
405#endif
406 u32 addr; /* TD2 */
407 u32 pad1; /* padding data */
408};
409
410/*
411 * The sh ether Rx buffer descriptors.
412 * This structure should be 20 bytes.
413 */
414struct sh_eth_rxdesc {
415 u32 status; /* RD0 */
416#if defined(CONFIG_CPU_LITTLE_ENDIAN)
417 u16 frame_length; /* RD1 */
418 u16 buffer_length; /* RD1 */
419#else
420 u16 buffer_length; /* RD1 */
421 u16 frame_length; /* RD1 */
422#endif
423 u32 addr; /* RD2 */
424 u32 pad0; /* padding data */
425};
426
427struct sh_eth_private {
428 dma_addr_t rx_desc_dma;
429 dma_addr_t tx_desc_dma;
430 struct sh_eth_rxdesc *rx_ring;
431 struct sh_eth_txdesc *tx_ring;
432 struct sk_buff **rx_skbuff;
433 struct sk_buff **tx_skbuff;
434 struct net_device_stats stats;
435 struct timer_list timer;
436 spinlock_t lock;
437 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
438 u32 cur_tx, dirty_tx;
439 u32 rx_buf_sz; /* Based on MTU+slack. */
440 /* MII transceiver section. */
441 u32 phy_id; /* PHY ID */
442 struct mii_bus *mii_bus; /* MDIO bus control */
443 struct phy_device *phydev; /* PHY device control */
444 enum phy_state link;
445 int msg_enable;
446 int speed;
447 int duplex;
448 u32 rx_int_var, tx_int_var; /* interrupt control variables */
449 char post_rx; /* POST receive */
450 char post_fw; /* POST forward */
451 struct net_device_stats tsu_stats; /* TSU forward status */
452};
453
/*
 * Byte-swap a buffer in place, one 32-bit word at a time, so descriptor
 * data matches the hardware byte order on little-endian CPUs. On
 * big-endian builds this compiles to a no-op.
 *
 * The length is rounded UP to a whole number of words, so up to three
 * bytes past @len are also swapped — assumes callers hand in buffers
 * padded to a 4-byte multiple (NOTE(review): confirm at call sites).
 */
static void swaps(char *src, int len)
{
#ifdef __LITTLE_ENDIAN__
	u32 *word = (u32 *)src;
	size_t nwords = (len + sizeof(u32) - 1) / sizeof(u32);
	size_t i;

	for (i = 0; i < nwords; i++)
		word[i] = swab32(word[i]);
#endif
}
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index abc63b0663be..3fe01763760e 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1656,7 +1656,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1656 SIS_PCI_COMMIT(); 1656 SIS_PCI_COMMIT();
1657} 1657}
1658 1658
1659static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1659static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1660 struct net_device *dev) 1660 struct net_device *dev)
1661{ 1661{
1662 int rc; 1662 int rc;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index ec95e493ac1c..fa3a460f8e2f 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1766,7 +1766,7 @@ static int sis900_rx(struct net_device *net_dev)
1766 skb = sis_priv->rx_skbuff[entry]; 1766 skb = sis_priv->rx_skbuff[entry];
1767 net_dev->stats.rx_dropped++; 1767 net_dev->stats.rx_dropped++;
1768 goto refill_rx_ring; 1768 goto refill_rx_ring;
1769 } 1769 }
1770 1770
1771 /* This situation should never happen, but due to 1771 /* This situation should never happen, but due to
1772 some unknow bugs, it is possible that 1772 some unknow bugs, it is possible that
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 62436b3a18c6..7f1cfc48e1b2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -284,6 +284,86 @@ static void sky2_power_aux(struct sky2_hw *hw)
284 PC_VAUX_ON | PC_VCC_OFF)); 284 PC_VAUX_ON | PC_VCC_OFF));
285} 285}
286 286
287static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
288{
289 u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
290 int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
291 u32 reg;
292
293 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
294
295 switch (state) {
296 case PCI_D0:
297 break;
298
299 case PCI_D1:
300 power_control |= 1;
301 break;
302
303 case PCI_D2:
304 power_control |= 2;
305 break;
306
307 case PCI_D3hot:
308 case PCI_D3cold:
309 power_control |= 3;
310 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
311 /* additional power saving measurements */
312 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
313
314 /* set gating core clock for LTSSM in L1 state */
315 reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
316 /* auto clock gated scheme controlled by CLKREQ */
317 P_ASPM_A1_MODE_SELECT |
318 /* enable Gate Root Core Clock */
319 P_CLK_GATE_ROOT_COR_ENA;
320
321 if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
322 /* enable Clock Power Management (CLKREQ) */
323 u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
324
325 ctrl |= PCI_EXP_DEVCTL_AUX_PME;
326 sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
327 } else
328 /* force CLKREQ Enable in Our4 (A1b only) */
329 reg |= P_ASPM_FORCE_CLKREQ_ENA;
330
331 /* set Mask Register for Release/Gate Clock */
332 sky2_pci_write32(hw, PCI_DEV_REG5,
333 P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
334 P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
335 P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
336 } else
337 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
338
339 /* put CPU into reset state */
340 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
341 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
342 /* put CPU into halt state */
343 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
344
345 if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
346 reg = sky2_pci_read32(hw, PCI_DEV_REG1);
347 /* force to PCIe L1 */
348 reg |= PCI_FORCE_PEX_L1;
349 sky2_pci_write32(hw, PCI_DEV_REG1, reg);
350 }
351 break;
352
353 default:
354 dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ",
355 state);
356 return;
357 }
358
359 power_control |= PCI_PM_CTRL_PME_ENABLE;
360 /* Finally, set the new power state. */
361 sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
362
363 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
364 sky2_pci_read32(hw, B0_CTST);
365}
366
287static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 367static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
288{ 368{
289 u16 reg; 369 u16 reg;
@@ -619,28 +699,71 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
619 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 699 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
620} 700}
621 701
622static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) 702static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
703static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
704
705static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
623{ 706{
624 u32 reg1; 707 u32 reg1;
625 static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
626 static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
627 708
628 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 709 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
629 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
630 /* Turn on/off phy power saving */ 711 reg1 &= ~phy_power[port];
631 if (onoff)
632 reg1 &= ~phy_power[port];
633 else
634 reg1 |= phy_power[port];
635 712
636 if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 713 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
637 reg1 |= coma_mode[port]; 714 reg1 |= coma_mode[port];
638 715
639 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 716 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
640 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 717 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
641 sky2_pci_read32(hw, PCI_DEV_REG1); 718 sky2_pci_read32(hw, PCI_DEV_REG1);
719}
720
721static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
722{
723 u32 reg1;
724 u16 ctrl;
725
726 /* release GPHY Control reset */
727 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
642 728
643 udelay(100); 729 /* release GMAC reset */
730 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
731
732 if (hw->flags & SKY2_HW_NEWER_PHY) {
733 /* select page 2 to access MAC control register */
734 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
735
736 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
737 /* allow GMII Power Down */
738 ctrl &= ~PHY_M_MAC_GMIF_PUP;
739 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
740
741 /* set page register back to 0 */
742 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
743 }
744
745 /* setup General Purpose Control Register */
746 gma_write16(hw, port, GM_GP_CTRL,
747 GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
748
749 if (hw->chip_id != CHIP_ID_YUKON_EC) {
750 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
751 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
752
753 /* enable Power Down */
754 ctrl |= PHY_M_PC_POW_D_ENA;
755 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
756 }
757
758 /* set IEEE compatible Power Down Mode (dev. #4.99) */
759 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
760 }
761
762 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
763 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
764 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
765 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
766 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
644} 767}
645 768
646/* Force a renegotiation */ 769/* Force a renegotiation */
@@ -675,8 +798,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
675 798
676 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); 799 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
677 sky2->flow_mode = FC_NONE; 800 sky2->flow_mode = FC_NONE;
678 sky2_phy_power(hw, port, 1); 801
679 sky2_phy_reinit(sky2); 802 spin_lock_bh(&sky2->phy_lock);
803 sky2_phy_power_up(hw, port);
804 sky2_phy_init(hw, port);
805 spin_unlock_bh(&sky2->phy_lock);
680 806
681 sky2->flow_mode = save_mode; 807 sky2->flow_mode = save_mode;
682 sky2->advertising = ctrl; 808 sky2->advertising = ctrl;
@@ -781,6 +907,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
781 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 907 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
782 908
783 spin_lock_bh(&sky2->phy_lock); 909 spin_lock_bh(&sky2->phy_lock);
910 sky2_phy_power_up(hw, port);
784 sky2_phy_init(hw, port); 911 sky2_phy_init(hw, port);
785 spin_unlock_bh(&sky2->phy_lock); 912 spin_unlock_bh(&sky2->phy_lock);
786 913
@@ -1385,8 +1512,6 @@ static int sky2_up(struct net_device *dev)
1385 if (!sky2->rx_ring) 1512 if (!sky2->rx_ring)
1386 goto err_out; 1513 goto err_out;
1387 1514
1388 sky2_phy_power(hw, port, 1);
1389
1390 sky2_mac_init(hw, port); 1515 sky2_mac_init(hw, port);
1391 1516
1392 /* Register is number of 4K blocks on internal RAM buffer. */ 1517 /* Register is number of 4K blocks on internal RAM buffer. */
@@ -1767,7 +1892,7 @@ static int sky2_down(struct net_device *dev)
1767 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1892 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1768 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1893 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1769 1894
1770 sky2_phy_power(hw, port, 0); 1895 sky2_phy_power_down(hw, port);
1771 1896
1772 netif_carrier_off(dev); 1897 netif_carrier_off(dev);
1773 1898
@@ -2741,6 +2866,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2741 hw->flags = SKY2_HW_GIGABIT 2866 hw->flags = SKY2_HW_GIGABIT
2742 | SKY2_HW_NEWER_PHY 2867 | SKY2_HW_NEWER_PHY
2743 | SKY2_HW_ADV_POWER_CTL; 2868 | SKY2_HW_ADV_POWER_CTL;
2869
2870 /* check for Rev. A1 dev 4200 */
2871 if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
2872 hw->flags |= SKY2_HW_CLK_POWER;
2744 break; 2873 break;
2745 2874
2746 case CHIP_ID_YUKON_EX: 2875 case CHIP_ID_YUKON_EX:
@@ -2791,6 +2920,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2791 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2920 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2792 hw->flags |= SKY2_HW_FIBRE_PHY; 2921 hw->flags |= SKY2_HW_FIBRE_PHY;
2793 2922
2923 hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
2924 if (hw->pm_cap == 0) {
2925 dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
2926 return -EIO;
2927 }
2794 2928
2795 hw->ports = 1; 2929 hw->ports = 1;
2796 t8 = sky2_read8(hw, B2_Y2_HW_RES); 2930 t8 = sky2_read8(hw, B2_Y2_HW_RES);
@@ -3378,7 +3512,7 @@ static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3378 3512
3379 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 3513 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3380 } else 3514 } else
3381 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 3515 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
3382 PHY_M_LED_MO_DUP(mode) | 3516 PHY_M_LED_MO_DUP(mode) |
3383 PHY_M_LED_MO_10(mode) | 3517 PHY_M_LED_MO_10(mode) |
3384 PHY_M_LED_MO_100(mode) | 3518 PHY_M_LED_MO_100(mode) |
@@ -4362,7 +4496,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4362 4496
4363 pci_save_state(pdev); 4497 pci_save_state(pdev);
4364 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4498 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4365 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4499 sky2_power_state(hw, pci_choose_state(pdev, state));
4366 4500
4367 return 0; 4501 return 0;
4368} 4502}
@@ -4375,9 +4509,7 @@ static int sky2_resume(struct pci_dev *pdev)
4375 if (!hw) 4509 if (!hw)
4376 return 0; 4510 return 0;
4377 4511
4378 err = pci_set_power_state(pdev, PCI_D0); 4512 sky2_power_state(hw, PCI_D0);
4379 if (err)
4380 goto out;
4381 4513
4382 err = pci_restore_state(pdev); 4514 err = pci_restore_state(pdev);
4383 if (err) 4515 if (err)
@@ -4447,8 +4579,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
4447 pci_enable_wake(pdev, PCI_D3cold, wol); 4579 pci_enable_wake(pdev, PCI_D3cold, wol);
4448 4580
4449 pci_disable_device(pdev); 4581 pci_disable_device(pdev);
4450 pci_set_power_state(pdev, PCI_D3hot); 4582 sky2_power_state(hw, PCI_D3hot);
4451
4452} 4583}
4453 4584
4454static struct pci_driver sky2_driver = { 4585static struct pci_driver sky2_driver = {
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index c0a5eea20007..1fa82bf029d9 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -28,6 +28,11 @@ enum pci_dev_reg_1 {
28 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 28 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
29 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ 29 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
30 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ 30 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
31
32 PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */
33 PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
34 PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
35 PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
31}; 36};
32 37
33enum pci_dev_reg_2 { 38enum pci_dev_reg_2 {
@@ -45,7 +50,11 @@ enum pci_dev_reg_2 {
45 50
46/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ 51/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
47enum pci_dev_reg_4 { 52enum pci_dev_reg_4 {
48 /* (Link Training & Status State Machine) */ 53 /* (Link Training & Status State Machine) */
54 P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */
55#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK)
56 P_PEX_LTSSM_L1_STAT = 0x34,
57 P_PEX_LTSSM_DET_STAT = 0x01,
49 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ 58 P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
50 /* (Active State Power Management) */ 59 /* (Active State Power Management) */
51 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ 60 P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
@@ -454,6 +463,9 @@ enum yukon_ex_rev {
454 CHIP_REV_YU_EX_A0 = 1, 463 CHIP_REV_YU_EX_A0 = 1,
455 CHIP_REV_YU_EX_B0 = 2, 464 CHIP_REV_YU_EX_B0 = 2,
456}; 465};
466enum yukon_supr_rev {
467 CHIP_REV_YU_SU_A0 = 0,
468};
457 469
458 470
459/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 471/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
@@ -1143,6 +1155,12 @@ enum {
1143 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ 1155 PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1144}; 1156};
1145 1157
1158/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
1159enum {
1160 PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */
1161 PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */
1162};
1163
1146/* for 10/100 Fast Ethernet PHY (88E3082 only) */ 1164/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1147enum { 1165enum {
1148 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ 1166 PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
@@ -1411,6 +1429,7 @@ enum {
1411/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ 1429/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
1412enum { 1430enum {
1413 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */ 1431 PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
1432 PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */
1414 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */ 1433 PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
1415 PHY_M_MAC_MD_COPPER = 5,/* Copper only */ 1434 PHY_M_MAC_MD_COPPER = 5,/* Copper only */
1416 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */ 1435 PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
@@ -2052,7 +2071,9 @@ struct sky2_hw {
2052#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2071#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2053#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2072#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2054#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2073#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2074#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
2055 2075
2076 int pm_cap;
2056 u8 chip_id; 2077 u8 chip_id;
2057 u8 chip_rev; 2078 u8 chip_rev;
2058 u8 pmd_type; 2079 u8 pmd_type;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index e2ee91a6ae7e..c5871624f972 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -106,55 +106,6 @@ MODULE_ALIAS("platform:smc911x");
106 */ 106 */
107#define POWER_DOWN 1 107#define POWER_DOWN 1
108 108
109
110/* store this information for the driver.. */
111struct smc911x_local {
112 /*
113 * If I have to wait until the DMA is finished and ready to reload a
114 * packet, I will store the skbuff here. Then, the DMA will send it
115 * out and free it.
116 */
117 struct sk_buff *pending_tx_skb;
118
119 /* version/revision of the SMC911x chip */
120 u16 version;
121 u16 revision;
122
123 /* FIFO sizes */
124 int tx_fifo_kb;
125 int tx_fifo_size;
126 int rx_fifo_size;
127 int afc_cfg;
128
129 /* Contains the current active receive/phy mode */
130 int ctl_rfduplx;
131 int ctl_rspeed;
132
133 u32 msg_enable;
134 u32 phy_type;
135 struct mii_if_info mii;
136
137 /* work queue */
138 struct work_struct phy_configure;
139
140 int tx_throttle;
141 spinlock_t lock;
142
143 struct net_device *netdev;
144
145#ifdef SMC_USE_DMA
146 /* DMA needs the physical address of the chip */
147 u_long physaddr;
148 int rxdma;
149 int txdma;
150 int rxdma_active;
151 int txdma_active;
152 struct sk_buff *current_rx_skb;
153 struct sk_buff *current_tx_skb;
154 struct device *dev;
155#endif
156};
157
158#if SMC_DEBUG > 0 109#if SMC_DEBUG > 0
159#define DBG(n, args...) \ 110#define DBG(n, args...) \
160 do { \ 111 do { \
@@ -202,24 +153,24 @@ static void PRINT_PKT(u_char *buf, int length)
202 153
203 154
204/* this enables an interrupt in the interrupt mask register */ 155/* this enables an interrupt in the interrupt mask register */
205#define SMC_ENABLE_INT(x) do { \ 156#define SMC_ENABLE_INT(lp, x) do { \
206 unsigned int __mask; \ 157 unsigned int __mask; \
207 unsigned long __flags; \ 158 unsigned long __flags; \
208 spin_lock_irqsave(&lp->lock, __flags); \ 159 spin_lock_irqsave(&lp->lock, __flags); \
209 __mask = SMC_GET_INT_EN(); \ 160 __mask = SMC_GET_INT_EN((lp)); \
210 __mask |= (x); \ 161 __mask |= (x); \
211 SMC_SET_INT_EN(__mask); \ 162 SMC_SET_INT_EN((lp), __mask); \
212 spin_unlock_irqrestore(&lp->lock, __flags); \ 163 spin_unlock_irqrestore(&lp->lock, __flags); \
213} while (0) 164} while (0)
214 165
215/* this disables an interrupt from the interrupt mask register */ 166/* this disables an interrupt from the interrupt mask register */
216#define SMC_DISABLE_INT(x) do { \ 167#define SMC_DISABLE_INT(lp, x) do { \
217 unsigned int __mask; \ 168 unsigned int __mask; \
218 unsigned long __flags; \ 169 unsigned long __flags; \
219 spin_lock_irqsave(&lp->lock, __flags); \ 170 spin_lock_irqsave(&lp->lock, __flags); \
220 __mask = SMC_GET_INT_EN(); \ 171 __mask = SMC_GET_INT_EN((lp)); \
221 __mask &= ~(x); \ 172 __mask &= ~(x); \
222 SMC_SET_INT_EN(__mask); \ 173 SMC_SET_INT_EN((lp), __mask); \
223 spin_unlock_irqrestore(&lp->lock, __flags); \ 174 spin_unlock_irqrestore(&lp->lock, __flags); \
224} while (0) 175} while (0)
225 176
@@ -228,7 +179,6 @@ static void PRINT_PKT(u_char *buf, int length)
228 */ 179 */
229static void smc911x_reset(struct net_device *dev) 180static void smc911x_reset(struct net_device *dev)
230{ 181{
231 unsigned long ioaddr = dev->base_addr;
232 struct smc911x_local *lp = netdev_priv(dev); 182 struct smc911x_local *lp = netdev_priv(dev);
233 unsigned int reg, timeout=0, resets=1; 183 unsigned int reg, timeout=0, resets=1;
234 unsigned long flags; 184 unsigned long flags;
@@ -236,13 +186,13 @@ static void smc911x_reset(struct net_device *dev)
236 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
237 187
238 /* Take out of PM setting first */ 188 /* Take out of PM setting first */
239 if ((SMC_GET_PMT_CTRL() & PMT_CTRL_READY_) == 0) { 189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
240 /* Write to the bytetest will take out of powerdown */ 190 /* Write to the bytetest will take out of powerdown */
241 SMC_SET_BYTE_TEST(0); 191 SMC_SET_BYTE_TEST(lp, 0);
242 timeout=10; 192 timeout=10;
243 do { 193 do {
244 udelay(10); 194 udelay(10);
245 reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_; 195 reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
246 } while (--timeout && !reg); 196 } while (--timeout && !reg);
247 if (timeout == 0) { 197 if (timeout == 0) {
248 PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); 198 PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
@@ -252,15 +202,15 @@ static void smc911x_reset(struct net_device *dev)
252 202
253 /* Disable all interrupts */ 203 /* Disable all interrupts */
254 spin_lock_irqsave(&lp->lock, flags); 204 spin_lock_irqsave(&lp->lock, flags);
255 SMC_SET_INT_EN(0); 205 SMC_SET_INT_EN(lp, 0);
256 spin_unlock_irqrestore(&lp->lock, flags); 206 spin_unlock_irqrestore(&lp->lock, flags);
257 207
258 while (resets--) { 208 while (resets--) {
259 SMC_SET_HW_CFG(HW_CFG_SRST_); 209 SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
260 timeout=10; 210 timeout=10;
261 do { 211 do {
262 udelay(10); 212 udelay(10);
263 reg = SMC_GET_HW_CFG(); 213 reg = SMC_GET_HW_CFG(lp);
264 /* If chip indicates reset timeout then try again */ 214 /* If chip indicates reset timeout then try again */
265 if (reg & HW_CFG_SRST_TO_) { 215 if (reg & HW_CFG_SRST_TO_) {
266 PRINTK("%s: chip reset timeout, retrying...\n", dev->name); 216 PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
@@ -276,7 +226,7 @@ static void smc911x_reset(struct net_device *dev)
276 226
277 /* make sure EEPROM has finished loading before setting GPIO_CFG */ 227 /* make sure EEPROM has finished loading before setting GPIO_CFG */
278 timeout=1000; 228 timeout=1000;
279 while ( timeout-- && (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_)) { 229 while ( timeout-- && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) {
280 udelay(10); 230 udelay(10);
281 } 231 }
282 if (timeout == 0){ 232 if (timeout == 0){
@@ -285,24 +235,24 @@ static void smc911x_reset(struct net_device *dev)
285 } 235 }
286 236
287 /* Initialize interrupts */ 237 /* Initialize interrupts */
288 SMC_SET_INT_EN(0); 238 SMC_SET_INT_EN(lp, 0);
289 SMC_ACK_INT(-1); 239 SMC_ACK_INT(lp, -1);
290 240
291 /* Reset the FIFO level and flow control settings */ 241 /* Reset the FIFO level and flow control settings */
292 SMC_SET_HW_CFG((lp->tx_fifo_kb & 0xF) << 16); 242 SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
293//TODO: Figure out what appropriate pause time is 243//TODO: Figure out what appropriate pause time is
294 SMC_SET_FLOW(FLOW_FCPT_ | FLOW_FCEN_); 244 SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
295 SMC_SET_AFC_CFG(lp->afc_cfg); 245 SMC_SET_AFC_CFG(lp, lp->afc_cfg);
296 246
297 247
298 /* Set to LED outputs */ 248 /* Set to LED outputs */
299 SMC_SET_GPIO_CFG(0x70070000); 249 SMC_SET_GPIO_CFG(lp, 0x70070000);
300 250
301 /* 251 /*
302 * Deassert IRQ for 1*10us for edge type interrupts 252 * Deassert IRQ for 1*10us for edge type interrupts
303 * and drive IRQ pin push-pull 253 * and drive IRQ pin push-pull
304 */ 254 */
305 SMC_SET_IRQ_CFG( (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_ ); 255 SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_);
306 256
307 /* clear anything saved */ 257 /* clear anything saved */
308 if (lp->pending_tx_skb != NULL) { 258 if (lp->pending_tx_skb != NULL) {
@@ -318,46 +268,45 @@ static void smc911x_reset(struct net_device *dev)
318 */ 268 */
319static void smc911x_enable(struct net_device *dev) 269static void smc911x_enable(struct net_device *dev)
320{ 270{
321 unsigned long ioaddr = dev->base_addr;
322 struct smc911x_local *lp = netdev_priv(dev); 271 struct smc911x_local *lp = netdev_priv(dev);
323 unsigned mask, cfg, cr; 272 unsigned mask, cfg, cr;
324 unsigned long flags; 273 unsigned long flags;
325 274
326 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
327 276
328 SMC_SET_MAC_ADDR(dev->dev_addr); 277 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
329 278
330 /* Enable TX */ 279 /* Enable TX */
331 cfg = SMC_GET_HW_CFG(); 280 cfg = SMC_GET_HW_CFG(lp);
332 cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; 281 cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
333 cfg |= HW_CFG_SF_; 282 cfg |= HW_CFG_SF_;
334 SMC_SET_HW_CFG(cfg); 283 SMC_SET_HW_CFG(lp, cfg);
335 SMC_SET_FIFO_TDA(0xFF); 284 SMC_SET_FIFO_TDA(lp, 0xFF);
336 /* Update TX stats on every 64 packets received or every 1 sec */ 285 /* Update TX stats on every 64 packets received or every 1 sec */
337 SMC_SET_FIFO_TSL(64); 286 SMC_SET_FIFO_TSL(lp, 64);
338 SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); 287 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
339 288
340 spin_lock_irqsave(&lp->lock, flags); 289 spin_lock_irqsave(&lp->lock, flags);
341 SMC_GET_MAC_CR(cr); 290 SMC_GET_MAC_CR(lp, cr);
342 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; 291 cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
343 SMC_SET_MAC_CR(cr); 292 SMC_SET_MAC_CR(lp, cr);
344 SMC_SET_TX_CFG(TX_CFG_TX_ON_); 293 SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
345 spin_unlock_irqrestore(&lp->lock, flags); 294 spin_unlock_irqrestore(&lp->lock, flags);
346 295
347 /* Add 2 byte padding to start of packets */ 296 /* Add 2 byte padding to start of packets */
348 SMC_SET_RX_CFG((2<<8) & RX_CFG_RXDOFF_); 297 SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
349 298
350 /* Turn on receiver and enable RX */ 299 /* Turn on receiver and enable RX */
351 if (cr & MAC_CR_RXEN_) 300 if (cr & MAC_CR_RXEN_)
352 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); 301 DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
353 302
354 spin_lock_irqsave(&lp->lock, flags); 303 spin_lock_irqsave(&lp->lock, flags);
355 SMC_SET_MAC_CR( cr | MAC_CR_RXEN_ ); 304 SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
356 spin_unlock_irqrestore(&lp->lock, flags); 305 spin_unlock_irqrestore(&lp->lock, flags);
357 306
358 /* Interrupt on every received packet */ 307 /* Interrupt on every received packet */
359 SMC_SET_FIFO_RSA(0x01); 308 SMC_SET_FIFO_RSA(lp, 0x01);
360 SMC_SET_FIFO_RSL(0x00); 309 SMC_SET_FIFO_RSL(lp, 0x00);
361 310
362 /* now, enable interrupts */ 311 /* now, enable interrupts */
363 mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | 312 mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
@@ -368,7 +317,7 @@ static void smc911x_enable(struct net_device *dev)
368 else { 317 else {
369 mask|=INT_EN_RDFO_EN_; 318 mask|=INT_EN_RDFO_EN_;
370 } 319 }
371 SMC_ENABLE_INT(mask); 320 SMC_ENABLE_INT(lp, mask);
372} 321}
373 322
374/* 323/*
@@ -376,7 +325,6 @@ static void smc911x_enable(struct net_device *dev)
376 */ 325 */
377static void smc911x_shutdown(struct net_device *dev) 326static void smc911x_shutdown(struct net_device *dev)
378{ 327{
379 unsigned long ioaddr = dev->base_addr;
380 struct smc911x_local *lp = netdev_priv(dev); 328 struct smc911x_local *lp = netdev_priv(dev);
381 unsigned cr; 329 unsigned cr;
382 unsigned long flags; 330 unsigned long flags;
@@ -384,35 +332,35 @@ static void smc911x_shutdown(struct net_device *dev)
384 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); 332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__);
385 333
386 /* Disable IRQ's */ 334 /* Disable IRQ's */
387 SMC_SET_INT_EN(0); 335 SMC_SET_INT_EN(lp, 0);
388 336
389 /* Turn of Rx and TX */ 337 /* Turn of Rx and TX */
390 spin_lock_irqsave(&lp->lock, flags); 338 spin_lock_irqsave(&lp->lock, flags);
391 SMC_GET_MAC_CR(cr); 339 SMC_GET_MAC_CR(lp, cr);
392 cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); 340 cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
393 SMC_SET_MAC_CR(cr); 341 SMC_SET_MAC_CR(lp, cr);
394 SMC_SET_TX_CFG(TX_CFG_STOP_TX_); 342 SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
395 spin_unlock_irqrestore(&lp->lock, flags); 343 spin_unlock_irqrestore(&lp->lock, flags);
396} 344}
397 345
398static inline void smc911x_drop_pkt(struct net_device *dev) 346static inline void smc911x_drop_pkt(struct net_device *dev)
399{ 347{
400 unsigned long ioaddr = dev->base_addr; 348 struct smc911x_local *lp = netdev_priv(dev);
401 unsigned int fifo_count, timeout, reg; 349 unsigned int fifo_count, timeout, reg;
402 350
403 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); 351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__);
404 fifo_count = SMC_GET_RX_FIFO_INF() & 0xFFFF; 352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
405 if (fifo_count <= 4) { 353 if (fifo_count <= 4) {
406 /* Manually dump the packet data */ 354 /* Manually dump the packet data */
407 while (fifo_count--) 355 while (fifo_count--)
408 SMC_GET_RX_FIFO(); 356 SMC_GET_RX_FIFO(lp);
409 } else { 357 } else {
410 /* Fast forward through the bad packet */ 358 /* Fast forward through the bad packet */
411 SMC_SET_RX_DP_CTRL(RX_DP_CTRL_FFWD_BUSY_); 359 SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
412 timeout=50; 360 timeout=50;
413 do { 361 do {
414 udelay(10); 362 udelay(10);
415 reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_; 363 reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
416 } while (--timeout && reg); 364 } while (--timeout && reg);
417 if (timeout == 0) { 365 if (timeout == 0) {
418 PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); 366 PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
@@ -428,14 +376,14 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
428 */ 376 */
429static inline void smc911x_rcv(struct net_device *dev) 377static inline void smc911x_rcv(struct net_device *dev)
430{ 378{
431 unsigned long ioaddr = dev->base_addr; 379 struct smc911x_local *lp = netdev_priv(dev);
432 unsigned int pkt_len, status; 380 unsigned int pkt_len, status;
433 struct sk_buff *skb; 381 struct sk_buff *skb;
434 unsigned char *data; 382 unsigned char *data;
435 383
436 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
437 dev->name, __FUNCTION__); 385 dev->name, __FUNCTION__);
438 status = SMC_GET_RX_STS_FIFO(); 386 status = SMC_GET_RX_STS_FIFO(lp);
439 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
440 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
441 pkt_len = (status & RX_STS_PKT_LEN_) >> 16; 389 pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
@@ -472,24 +420,23 @@ static inline void smc911x_rcv(struct net_device *dev)
472 skb_put(skb,pkt_len-4); 420 skb_put(skb,pkt_len-4);
473#ifdef SMC_USE_DMA 421#ifdef SMC_USE_DMA
474 { 422 {
475 struct smc911x_local *lp = netdev_priv(dev);
476 unsigned int fifo; 423 unsigned int fifo;
477 /* Lower the FIFO threshold if possible */ 424 /* Lower the FIFO threshold if possible */
478 fifo = SMC_GET_FIFO_INT(); 425 fifo = SMC_GET_FIFO_INT(lp);
479 if (fifo & 0xFF) fifo--; 426 if (fifo & 0xFF) fifo--;
480 DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n", 427 DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
481 dev->name, fifo & 0xff); 428 dev->name, fifo & 0xff);
482 SMC_SET_FIFO_INT(fifo); 429 SMC_SET_FIFO_INT(lp, fifo);
483 /* Setup RX DMA */ 430 /* Setup RX DMA */
484 SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); 431 SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
485 lp->rxdma_active = 1; 432 lp->rxdma_active = 1;
486 lp->current_rx_skb = skb; 433 lp->current_rx_skb = skb;
487 SMC_PULL_DATA(data, (pkt_len+2+15) & ~15); 434 SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
488 /* Packet processing deferred to DMA RX interrupt */ 435 /* Packet processing deferred to DMA RX interrupt */
489 } 436 }
490#else 437#else
491 SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); 438 SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
492 SMC_PULL_DATA(data, pkt_len+2+3); 439 SMC_PULL_DATA(lp, data, pkt_len+2+3);
493 440
494 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name); 441 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
495 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); 442 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
@@ -508,7 +455,6 @@ static inline void smc911x_rcv(struct net_device *dev)
508static void smc911x_hardware_send_pkt(struct net_device *dev) 455static void smc911x_hardware_send_pkt(struct net_device *dev)
509{ 456{
510 struct smc911x_local *lp = netdev_priv(dev); 457 struct smc911x_local *lp = netdev_priv(dev);
511 unsigned long ioaddr = dev->base_addr;
512 struct sk_buff *skb; 458 struct sk_buff *skb;
513 unsigned int cmdA, cmdB, len; 459 unsigned int cmdA, cmdB, len;
514 unsigned char *buf; 460 unsigned char *buf;
@@ -541,8 +487,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
541 487
542 DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", 488 DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
543 dev->name, len, len, buf, cmdA, cmdB); 489 dev->name, len, len, buf, cmdA, cmdB);
544 SMC_SET_TX_FIFO(cmdA); 490 SMC_SET_TX_FIFO(lp, cmdA);
545 SMC_SET_TX_FIFO(cmdB); 491 SMC_SET_TX_FIFO(lp, cmdB);
546 492
547 DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name); 493 DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
548 PRINT_PKT(buf, len <= 64 ? len : 64); 494 PRINT_PKT(buf, len <= 64 ? len : 64);
@@ -550,10 +496,10 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
550 /* Send pkt via PIO or DMA */ 496 /* Send pkt via PIO or DMA */
551#ifdef SMC_USE_DMA 497#ifdef SMC_USE_DMA
552 lp->current_tx_skb = skb; 498 lp->current_tx_skb = skb;
553 SMC_PUSH_DATA(buf, len); 499 SMC_PUSH_DATA(lp, buf, len);
554 /* DMA complete IRQ will free buffer and set jiffies */ 500 /* DMA complete IRQ will free buffer and set jiffies */
555#else 501#else
556 SMC_PUSH_DATA(buf, len); 502 SMC_PUSH_DATA(lp, buf, len);
557 dev->trans_start = jiffies; 503 dev->trans_start = jiffies;
558 dev_kfree_skb(skb); 504 dev_kfree_skb(skb);
559#endif 505#endif
@@ -562,7 +508,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
562 netif_wake_queue(dev); 508 netif_wake_queue(dev);
563 } 509 }
564 spin_unlock_irqrestore(&lp->lock, flags); 510 spin_unlock_irqrestore(&lp->lock, flags);
565 SMC_ENABLE_INT(INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); 511 SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
566} 512}
567 513
568/* 514/*
@@ -574,7 +520,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
574static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 520static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
575{ 521{
576 struct smc911x_local *lp = netdev_priv(dev); 522 struct smc911x_local *lp = netdev_priv(dev);
577 unsigned long ioaddr = dev->base_addr;
578 unsigned int free; 523 unsigned int free;
579 unsigned long flags; 524 unsigned long flags;
580 525
@@ -583,7 +528,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
583 528
584 BUG_ON(lp->pending_tx_skb != NULL); 529 BUG_ON(lp->pending_tx_skb != NULL);
585 530
586 free = SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TDFREE_; 531 free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
587 DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free); 532 DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
588 533
589 /* Turn off the flow when running out of space in FIFO */ 534 /* Turn off the flow when running out of space in FIFO */
@@ -592,7 +537,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 dev->name, free); 537 dev->name, free);
593 spin_lock_irqsave(&lp->lock, flags); 538 spin_lock_irqsave(&lp->lock, flags);
594 /* Reenable when at least 1 packet of size MTU present */ 539 /* Reenable when at least 1 packet of size MTU present */
595 SMC_SET_FIFO_TDA((SMC911X_TX_FIFO_LOW_THRESHOLD)/64); 540 SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
596 lp->tx_throttle = 1; 541 lp->tx_throttle = 1;
597 netif_stop_queue(dev); 542 netif_stop_queue(dev);
598 spin_unlock_irqrestore(&lp->lock, flags); 543 spin_unlock_irqrestore(&lp->lock, flags);
@@ -647,7 +592,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
647 */ 592 */
648static void smc911x_tx(struct net_device *dev) 593static void smc911x_tx(struct net_device *dev)
649{ 594{
650 unsigned long ioaddr = dev->base_addr;
651 struct smc911x_local *lp = netdev_priv(dev); 595 struct smc911x_local *lp = netdev_priv(dev);
652 unsigned int tx_status; 596 unsigned int tx_status;
653 597
@@ -655,11 +599,11 @@ static void smc911x_tx(struct net_device *dev)
655 dev->name, __FUNCTION__); 599 dev->name, __FUNCTION__);
656 600
657 /* Collect the TX status */ 601 /* Collect the TX status */
658 while (((SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16) != 0) { 602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
659 DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n", 603 DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
660 dev->name, 604 dev->name,
661 (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); 605 (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
662 tx_status = SMC_GET_TX_STS_FIFO(); 606 tx_status = SMC_GET_TX_STS_FIFO(lp);
663 dev->stats.tx_packets++; 607 dev->stats.tx_packets++;
664 dev->stats.tx_bytes+=tx_status>>16; 608 dev->stats.tx_bytes+=tx_status>>16;
665 DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", 609 DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
@@ -697,10 +641,10 @@ static void smc911x_tx(struct net_device *dev)
697 641
698static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) 642static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
699{ 643{
700 unsigned long ioaddr = dev->base_addr; 644 struct smc911x_local *lp = netdev_priv(dev);
701 unsigned int phydata; 645 unsigned int phydata;
702 646
703 SMC_GET_MII(phyreg, phyaddr, phydata); 647 SMC_GET_MII(lp, phyreg, phyaddr, phydata);
704 648
705 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", 649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
706 __FUNCTION__, phyaddr, phyreg, phydata); 650 __FUNCTION__, phyaddr, phyreg, phydata);
@@ -714,12 +658,12 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
714static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, 658static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
715 int phydata) 659 int phydata)
716{ 660{
717 unsigned long ioaddr = dev->base_addr; 661 struct smc911x_local *lp = netdev_priv(dev);
718 662
719 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
720 __FUNCTION__, phyaddr, phyreg, phydata); 664 __FUNCTION__, phyaddr, phyreg, phydata);
721 665
722 SMC_SET_MII(phyreg, phyaddr, phydata); 666 SMC_SET_MII(lp, phyreg, phyaddr, phydata);
723} 667}
724 668
725/* 669/*
@@ -728,7 +672,6 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
728 */ 672 */
729static void smc911x_phy_detect(struct net_device *dev) 673static void smc911x_phy_detect(struct net_device *dev)
730{ 674{
731 unsigned long ioaddr = dev->base_addr;
732 struct smc911x_local *lp = netdev_priv(dev); 675 struct smc911x_local *lp = netdev_priv(dev);
733 int phyaddr; 676 int phyaddr;
734 unsigned int cfg, id1, id2; 677 unsigned int cfg, id1, id2;
@@ -744,30 +687,30 @@ static void smc911x_phy_detect(struct net_device *dev)
744 switch(lp->version) { 687 switch(lp->version) {
745 case 0x115: 688 case 0x115:
746 case 0x117: 689 case 0x117:
747 cfg = SMC_GET_HW_CFG(); 690 cfg = SMC_GET_HW_CFG(lp);
748 if (cfg & HW_CFG_EXT_PHY_DET_) { 691 if (cfg & HW_CFG_EXT_PHY_DET_) {
749 cfg &= ~HW_CFG_PHY_CLK_SEL_; 692 cfg &= ~HW_CFG_PHY_CLK_SEL_;
750 cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; 693 cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
751 SMC_SET_HW_CFG(cfg); 694 SMC_SET_HW_CFG(lp, cfg);
752 udelay(10); /* Wait for clocks to stop */ 695 udelay(10); /* Wait for clocks to stop */
753 696
754 cfg |= HW_CFG_EXT_PHY_EN_; 697 cfg |= HW_CFG_EXT_PHY_EN_;
755 SMC_SET_HW_CFG(cfg); 698 SMC_SET_HW_CFG(lp, cfg);
756 udelay(10); /* Wait for clocks to stop */ 699 udelay(10); /* Wait for clocks to stop */
757 700
758 cfg &= ~HW_CFG_PHY_CLK_SEL_; 701 cfg &= ~HW_CFG_PHY_CLK_SEL_;
759 cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; 702 cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
760 SMC_SET_HW_CFG(cfg); 703 SMC_SET_HW_CFG(lp, cfg);
761 udelay(10); /* Wait for clocks to stop */ 704 udelay(10); /* Wait for clocks to stop */
762 705
763 cfg |= HW_CFG_SMI_SEL_; 706 cfg |= HW_CFG_SMI_SEL_;
764 SMC_SET_HW_CFG(cfg); 707 SMC_SET_HW_CFG(lp, cfg);
765 708
766 for (phyaddr = 1; phyaddr < 32; ++phyaddr) { 709 for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
767 710
768 /* Read the PHY identifiers */ 711 /* Read the PHY identifiers */
769 SMC_GET_PHY_ID1(phyaddr & 31, id1); 712 SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
770 SMC_GET_PHY_ID2(phyaddr & 31, id2); 713 SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
771 714
772 /* Make sure it is a valid identifier */ 715 /* Make sure it is a valid identifier */
773 if (id1 != 0x0000 && id1 != 0xffff && 716 if (id1 != 0x0000 && id1 != 0xffff &&
@@ -782,8 +725,8 @@ static void smc911x_phy_detect(struct net_device *dev)
782 } 725 }
783 default: 726 default:
784 /* Internal media only */ 727 /* Internal media only */
785 SMC_GET_PHY_ID1(1, id1); 728 SMC_GET_PHY_ID1(lp, 1, id1);
786 SMC_GET_PHY_ID2(1, id2); 729 SMC_GET_PHY_ID2(lp, 1, id2);
787 /* Save the PHY's address */ 730 /* Save the PHY's address */
788 lp->mii.phy_id = 1; 731 lp->mii.phy_id = 1;
789 lp->phy_type = id1 << 16 | id2; 732 lp->phy_type = id1 << 16 | id2;
@@ -800,16 +743,15 @@ static void smc911x_phy_detect(struct net_device *dev)
800static int smc911x_phy_fixed(struct net_device *dev) 743static int smc911x_phy_fixed(struct net_device *dev)
801{ 744{
802 struct smc911x_local *lp = netdev_priv(dev); 745 struct smc911x_local *lp = netdev_priv(dev);
803 unsigned long ioaddr = dev->base_addr;
804 int phyaddr = lp->mii.phy_id; 746 int phyaddr = lp->mii.phy_id;
805 int bmcr; 747 int bmcr;
806 748
807 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
808 750
809 /* Enter Link Disable state */ 751 /* Enter Link Disable state */
810 SMC_GET_PHY_BMCR(phyaddr, bmcr); 752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
811 bmcr |= BMCR_PDOWN; 753 bmcr |= BMCR_PDOWN;
812 SMC_SET_PHY_BMCR(phyaddr, bmcr); 754 SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
813 755
814 /* 756 /*
815 * Set our fixed capabilities 757 * Set our fixed capabilities
@@ -823,11 +765,11 @@ static int smc911x_phy_fixed(struct net_device *dev)
823 bmcr |= BMCR_SPEED100; 765 bmcr |= BMCR_SPEED100;
824 766
825 /* Write our capabilities to the phy control register */ 767 /* Write our capabilities to the phy control register */
826 SMC_SET_PHY_BMCR(phyaddr, bmcr); 768 SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
827 769
828 /* Re-Configure the Receive/Phy Control register */ 770 /* Re-Configure the Receive/Phy Control register */
829 bmcr &= ~BMCR_PDOWN; 771 bmcr &= ~BMCR_PDOWN;
830 SMC_SET_PHY_BMCR(phyaddr, bmcr); 772 SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
831 773
832 return 1; 774 return 1;
833} 775}
@@ -847,7 +789,6 @@ static int smc911x_phy_fixed(struct net_device *dev)
847static int smc911x_phy_reset(struct net_device *dev, int phy) 789static int smc911x_phy_reset(struct net_device *dev, int phy)
848{ 790{
849 struct smc911x_local *lp = netdev_priv(dev); 791 struct smc911x_local *lp = netdev_priv(dev);
850 unsigned long ioaddr = dev->base_addr;
851 int timeout; 792 int timeout;
852 unsigned long flags; 793 unsigned long flags;
853 unsigned int reg; 794 unsigned int reg;
@@ -855,15 +796,15 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
855 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
856 797
857 spin_lock_irqsave(&lp->lock, flags); 798 spin_lock_irqsave(&lp->lock, flags);
858 reg = SMC_GET_PMT_CTRL(); 799 reg = SMC_GET_PMT_CTRL(lp);
859 reg &= ~0xfffff030; 800 reg &= ~0xfffff030;
860 reg |= PMT_CTRL_PHY_RST_; 801 reg |= PMT_CTRL_PHY_RST_;
861 SMC_SET_PMT_CTRL(reg); 802 SMC_SET_PMT_CTRL(lp, reg);
862 spin_unlock_irqrestore(&lp->lock, flags); 803 spin_unlock_irqrestore(&lp->lock, flags);
863 for (timeout = 2; timeout; timeout--) { 804 for (timeout = 2; timeout; timeout--) {
864 msleep(50); 805 msleep(50);
865 spin_lock_irqsave(&lp->lock, flags); 806 spin_lock_irqsave(&lp->lock, flags);
866 reg = SMC_GET_PMT_CTRL(); 807 reg = SMC_GET_PMT_CTRL(lp);
867 spin_unlock_irqrestore(&lp->lock, flags); 808 spin_unlock_irqrestore(&lp->lock, flags);
868 if (!(reg & PMT_CTRL_PHY_RST_)) { 809 if (!(reg & PMT_CTRL_PHY_RST_)) {
869 /* extra delay required because the phy may 810 /* extra delay required because the phy may
@@ -888,13 +829,13 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
888 */ 829 */
889static void smc911x_phy_powerdown(struct net_device *dev, int phy) 830static void smc911x_phy_powerdown(struct net_device *dev, int phy)
890{ 831{
891 unsigned long ioaddr = dev->base_addr; 832 struct smc911x_local *lp = netdev_priv(dev);
892 unsigned int bmcr; 833 unsigned int bmcr;
893 834
894 /* Enter Link Disable state */ 835 /* Enter Link Disable state */
895 SMC_GET_PHY_BMCR(phy, bmcr); 836 SMC_GET_PHY_BMCR(lp, phy, bmcr);
896 bmcr |= BMCR_PDOWN; 837 bmcr |= BMCR_PDOWN;
897 SMC_SET_PHY_BMCR(phy, bmcr); 838 SMC_SET_PHY_BMCR(lp, phy, bmcr);
898} 839}
899 840
900/* 841/*
@@ -908,7 +849,6 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
908static void smc911x_phy_check_media(struct net_device *dev, int init) 849static void smc911x_phy_check_media(struct net_device *dev, int init)
909{ 850{
910 struct smc911x_local *lp = netdev_priv(dev); 851 struct smc911x_local *lp = netdev_priv(dev);
911 unsigned long ioaddr = dev->base_addr;
912 int phyaddr = lp->mii.phy_id; 852 int phyaddr = lp->mii.phy_id;
913 unsigned int bmcr, cr; 853 unsigned int bmcr, cr;
914 854
@@ -916,8 +856,8 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
916 856
917 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { 857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
918 /* duplex state has changed */ 858 /* duplex state has changed */
919 SMC_GET_PHY_BMCR(phyaddr, bmcr); 859 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
920 SMC_GET_MAC_CR(cr); 860 SMC_GET_MAC_CR(lp, cr);
921 if (lp->mii.full_duplex) { 861 if (lp->mii.full_duplex) {
922 DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name); 862 DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
923 bmcr |= BMCR_FULLDPLX; 863 bmcr |= BMCR_FULLDPLX;
@@ -927,8 +867,8 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
927 bmcr &= ~BMCR_FULLDPLX; 867 bmcr &= ~BMCR_FULLDPLX;
928 cr &= ~MAC_CR_RCVOWN_; 868 cr &= ~MAC_CR_RCVOWN_;
929 } 869 }
930 SMC_SET_PHY_BMCR(phyaddr, bmcr); 870 SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
931 SMC_SET_MAC_CR(cr); 871 SMC_SET_MAC_CR(lp, cr);
932 } 872 }
933} 873}
934 874
@@ -946,7 +886,6 @@ static void smc911x_phy_configure(struct work_struct *work)
946 struct smc911x_local *lp = container_of(work, struct smc911x_local, 886 struct smc911x_local *lp = container_of(work, struct smc911x_local,
947 phy_configure); 887 phy_configure);
948 struct net_device *dev = lp->netdev; 888 struct net_device *dev = lp->netdev;
949 unsigned long ioaddr = dev->base_addr;
950 int phyaddr = lp->mii.phy_id; 889 int phyaddr = lp->mii.phy_id;
951 int my_phy_caps; /* My PHY capabilities */ 890 int my_phy_caps; /* My PHY capabilities */
952 int my_ad_caps; /* My Advertised capabilities */ 891 int my_ad_caps; /* My Advertised capabilities */
@@ -971,7 +910,7 @@ static void smc911x_phy_configure(struct work_struct *work)
971 * Enable PHY Interrupts (for register 18) 910 * Enable PHY Interrupts (for register 18)
972 * Interrupts listed here are enabled 911 * Interrupts listed here are enabled
973 */ 912 */
974 SMC_SET_PHY_INT_MASK(phyaddr, PHY_INT_MASK_ENERGY_ON_ | 913 SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
975 PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | 914 PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
976 PHY_INT_MASK_LINK_DOWN_); 915 PHY_INT_MASK_LINK_DOWN_);
977 916
@@ -982,7 +921,7 @@ static void smc911x_phy_configure(struct work_struct *work)
982 } 921 }
983 922
984 /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ 923 /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
985 SMC_GET_PHY_BMSR(phyaddr, my_phy_caps); 924 SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
986 if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { 925 if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
987 printk(KERN_INFO "Auto negotiation NOT supported\n"); 926 printk(KERN_INFO "Auto negotiation NOT supported\n");
988 smc911x_phy_fixed(dev); 927 smc911x_phy_fixed(dev);
@@ -1011,7 +950,7 @@ static void smc911x_phy_configure(struct work_struct *work)
1011 my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); 950 my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
1012 951
1013 /* Update our Auto-Neg Advertisement Register */ 952 /* Update our Auto-Neg Advertisement Register */
1014 SMC_SET_PHY_MII_ADV(phyaddr, my_ad_caps); 953 SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
1015 lp->mii.advertising = my_ad_caps; 954 lp->mii.advertising = my_ad_caps;
1016 955
1017 /* 956 /*
@@ -1020,13 +959,13 @@ static void smc911x_phy_configure(struct work_struct *work)
1020 * the link does not come up. 959 * the link does not come up.
1021 */ 960 */
1022 udelay(10); 961 udelay(10);
1023 SMC_GET_PHY_MII_ADV(phyaddr, status); 962 SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
1024 963
1025 DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps); 964 DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
1026 DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps); 965 DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
1027 966
1028 /* Restart auto-negotiation process in order to advertise my caps */ 967 /* Restart auto-negotiation process in order to advertise my caps */
1029 SMC_SET_PHY_BMCR(phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); 968 SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
1030 969
1031 smc911x_phy_check_media(dev, 1); 970 smc911x_phy_check_media(dev, 1);
1032 971
@@ -1043,7 +982,6 @@ smc911x_phy_configure_exit:
1043static void smc911x_phy_interrupt(struct net_device *dev) 982static void smc911x_phy_interrupt(struct net_device *dev)
1044{ 983{
1045 struct smc911x_local *lp = netdev_priv(dev); 984 struct smc911x_local *lp = netdev_priv(dev);
1046 unsigned long ioaddr = dev->base_addr;
1047 int phyaddr = lp->mii.phy_id; 985 int phyaddr = lp->mii.phy_id;
1048 int status; 986 int status;
1049 987
@@ -1054,11 +992,11 @@ static void smc911x_phy_interrupt(struct net_device *dev)
1054 992
1055 smc911x_phy_check_media(dev, 0); 993 smc911x_phy_check_media(dev, 0);
1056 /* read to clear status bits */ 994 /* read to clear status bits */
1057 SMC_GET_PHY_INT_SRC(phyaddr,status); 995 SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
1058 DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n", 996 DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
1059 dev->name, status & 0xffff); 997 dev->name, status & 0xffff);
1060 DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n", 998 DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
1061 dev->name, SMC_GET_AFC_CFG()); 999 dev->name, SMC_GET_AFC_CFG(lp));
1062} 1000}
1063 1001
1064/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ 1002/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
@@ -1070,7 +1008,6 @@ static void smc911x_phy_interrupt(struct net_device *dev)
1070static irqreturn_t smc911x_interrupt(int irq, void *dev_id) 1008static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1071{ 1009{
1072 struct net_device *dev = dev_id; 1010 struct net_device *dev = dev_id;
1073 unsigned long ioaddr = dev->base_addr;
1074 struct smc911x_local *lp = netdev_priv(dev); 1011 struct smc911x_local *lp = netdev_priv(dev);
1075 unsigned int status, mask, timeout; 1012 unsigned int status, mask, timeout;
1076 unsigned int rx_overrun=0, cr, pkts; 1013 unsigned int rx_overrun=0, cr, pkts;
@@ -1081,21 +1018,21 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1081 spin_lock_irqsave(&lp->lock, flags); 1018 spin_lock_irqsave(&lp->lock, flags);
1082 1019
1083 /* Spurious interrupt check */ 1020 /* Spurious interrupt check */
1084 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != 1021 if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
1085 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { 1022 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
1086 spin_unlock_irqrestore(&lp->lock, flags); 1023 spin_unlock_irqrestore(&lp->lock, flags);
1087 return IRQ_NONE; 1024 return IRQ_NONE;
1088 } 1025 }
1089 1026
1090 mask = SMC_GET_INT_EN(); 1027 mask = SMC_GET_INT_EN(lp);
1091 SMC_SET_INT_EN(0); 1028 SMC_SET_INT_EN(lp, 0);
1092 1029
1093 /* set a timeout value, so I don't stay here forever */ 1030 /* set a timeout value, so I don't stay here forever */
1094 timeout = 8; 1031 timeout = 8;
1095 1032
1096 1033
1097 do { 1034 do {
1098 status = SMC_GET_INT(); 1035 status = SMC_GET_INT(lp);
1099 1036
1100 DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", 1037 DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
1101 dev->name, status, mask, status & ~mask); 1038 dev->name, status, mask, status & ~mask);
@@ -1106,53 +1043,53 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1106 1043
1107 /* Handle SW interrupt condition */ 1044 /* Handle SW interrupt condition */
1108 if (status & INT_STS_SW_INT_) { 1045 if (status & INT_STS_SW_INT_) {
1109 SMC_ACK_INT(INT_STS_SW_INT_); 1046 SMC_ACK_INT(lp, INT_STS_SW_INT_);
1110 mask &= ~INT_EN_SW_INT_EN_; 1047 mask &= ~INT_EN_SW_INT_EN_;
1111 } 1048 }
1112 /* Handle various error conditions */ 1049 /* Handle various error conditions */
1113 if (status & INT_STS_RXE_) { 1050 if (status & INT_STS_RXE_) {
1114 SMC_ACK_INT(INT_STS_RXE_); 1051 SMC_ACK_INT(lp, INT_STS_RXE_);
1115 dev->stats.rx_errors++; 1052 dev->stats.rx_errors++;
1116 } 1053 }
1117 if (status & INT_STS_RXDFH_INT_) { 1054 if (status & INT_STS_RXDFH_INT_) {
1118 SMC_ACK_INT(INT_STS_RXDFH_INT_); 1055 SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
1119 dev->stats.rx_dropped+=SMC_GET_RX_DROP(); 1056 dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
1120 } 1057 }
1121 /* Undocumented interrupt-what is the right thing to do here? */ 1058 /* Undocumented interrupt-what is the right thing to do here? */
1122 if (status & INT_STS_RXDF_INT_) { 1059 if (status & INT_STS_RXDF_INT_) {
1123 SMC_ACK_INT(INT_STS_RXDF_INT_); 1060 SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
1124 } 1061 }
1125 1062
1126 /* Rx Data FIFO exceeds set level */ 1063 /* Rx Data FIFO exceeds set level */
1127 if (status & INT_STS_RDFL_) { 1064 if (status & INT_STS_RDFL_) {
1128 if (IS_REV_A(lp->revision)) { 1065 if (IS_REV_A(lp->revision)) {
1129 rx_overrun=1; 1066 rx_overrun=1;
1130 SMC_GET_MAC_CR(cr); 1067 SMC_GET_MAC_CR(lp, cr);
1131 cr &= ~MAC_CR_RXEN_; 1068 cr &= ~MAC_CR_RXEN_;
1132 SMC_SET_MAC_CR(cr); 1069 SMC_SET_MAC_CR(lp, cr);
1133 DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); 1070 DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
1134 dev->stats.rx_errors++; 1071 dev->stats.rx_errors++;
1135 dev->stats.rx_fifo_errors++; 1072 dev->stats.rx_fifo_errors++;
1136 } 1073 }
1137 SMC_ACK_INT(INT_STS_RDFL_); 1074 SMC_ACK_INT(lp, INT_STS_RDFL_);
1138 } 1075 }
1139 if (status & INT_STS_RDFO_) { 1076 if (status & INT_STS_RDFO_) {
1140 if (!IS_REV_A(lp->revision)) { 1077 if (!IS_REV_A(lp->revision)) {
1141 SMC_GET_MAC_CR(cr); 1078 SMC_GET_MAC_CR(lp, cr);
1142 cr &= ~MAC_CR_RXEN_; 1079 cr &= ~MAC_CR_RXEN_;
1143 SMC_SET_MAC_CR(cr); 1080 SMC_SET_MAC_CR(lp, cr);
1144 rx_overrun=1; 1081 rx_overrun=1;
1145 DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); 1082 DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
1146 dev->stats.rx_errors++; 1083 dev->stats.rx_errors++;
1147 dev->stats.rx_fifo_errors++; 1084 dev->stats.rx_fifo_errors++;
1148 } 1085 }
1149 SMC_ACK_INT(INT_STS_RDFO_); 1086 SMC_ACK_INT(lp, INT_STS_RDFO_);
1150 } 1087 }
1151 /* Handle receive condition */ 1088 /* Handle receive condition */
1152 if ((status & INT_STS_RSFL_) || rx_overrun) { 1089 if ((status & INT_STS_RSFL_) || rx_overrun) {
1153 unsigned int fifo; 1090 unsigned int fifo;
1154 DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name); 1091 DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
1155 fifo = SMC_GET_RX_FIFO_INF(); 1092 fifo = SMC_GET_RX_FIFO_INF(lp);
1156 pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; 1093 pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
1157 DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n", 1094 DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
1158 dev->name, pkts, fifo & 0xFFFF ); 1095 dev->name, pkts, fifo & 0xFFFF );
@@ -1163,61 +1100,61 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1163 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, 1100 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
1164 "%s: RX DMA active\n", dev->name); 1101 "%s: RX DMA active\n", dev->name);
1165 /* The DMA is already running so up the IRQ threshold */ 1102 /* The DMA is already running so up the IRQ threshold */
1166 fifo = SMC_GET_FIFO_INT() & ~0xFF; 1103 fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
1167 fifo |= pkts & 0xFF; 1104 fifo |= pkts & 0xFF;
1168 DBG(SMC_DEBUG_RX, 1105 DBG(SMC_DEBUG_RX,
1169 "%s: Setting RX stat FIFO threshold to %d\n", 1106 "%s: Setting RX stat FIFO threshold to %d\n",
1170 dev->name, fifo & 0xff); 1107 dev->name, fifo & 0xff);
1171 SMC_SET_FIFO_INT(fifo); 1108 SMC_SET_FIFO_INT(lp, fifo);
1172 } else 1109 } else
1173#endif 1110#endif
1174 smc911x_rcv(dev); 1111 smc911x_rcv(dev);
1175 } 1112 }
1176 SMC_ACK_INT(INT_STS_RSFL_); 1113 SMC_ACK_INT(lp, INT_STS_RSFL_);
1177 } 1114 }
1178 /* Handle transmit FIFO available */ 1115 /* Handle transmit FIFO available */
1179 if (status & INT_STS_TDFA_) { 1116 if (status & INT_STS_TDFA_) {
1180 DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name); 1117 DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
1181 SMC_SET_FIFO_TDA(0xFF); 1118 SMC_SET_FIFO_TDA(lp, 0xFF);
1182 lp->tx_throttle = 0; 1119 lp->tx_throttle = 0;
1183#ifdef SMC_USE_DMA 1120#ifdef SMC_USE_DMA
1184 if (!lp->txdma_active) 1121 if (!lp->txdma_active)
1185#endif 1122#endif
1186 netif_wake_queue(dev); 1123 netif_wake_queue(dev);
1187 SMC_ACK_INT(INT_STS_TDFA_); 1124 SMC_ACK_INT(lp, INT_STS_TDFA_);
1188 } 1125 }
1189 /* Handle transmit done condition */ 1126 /* Handle transmit done condition */
1190#if 1 1127#if 1
1191 if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { 1128 if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
1192 DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, 1129 DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
1193 "%s: Tx stat FIFO limit (%d) /GPT irq\n", 1130 "%s: Tx stat FIFO limit (%d) /GPT irq\n",
1194 dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16); 1131 dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
1195 smc911x_tx(dev); 1132 smc911x_tx(dev);
1196 SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); 1133 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
1197 SMC_ACK_INT(INT_STS_TSFL_); 1134 SMC_ACK_INT(lp, INT_STS_TSFL_);
1198 SMC_ACK_INT(INT_STS_TSFL_ | INT_STS_GPT_INT_); 1135 SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
1199 } 1136 }
1200#else 1137#else
1201 if (status & INT_STS_TSFL_) { 1138 if (status & INT_STS_TSFL_) {
1202 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, ); 1139 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
1203 smc911x_tx(dev); 1140 smc911x_tx(dev);
1204 SMC_ACK_INT(INT_STS_TSFL_); 1141 SMC_ACK_INT(lp, INT_STS_TSFL_);
1205 } 1142 }
1206 1143
1207 if (status & INT_STS_GPT_INT_) { 1144 if (status & INT_STS_GPT_INT_) {
1208 DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", 1145 DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
1209 dev->name, 1146 dev->name,
1210 SMC_GET_IRQ_CFG(), 1147 SMC_GET_IRQ_CFG(lp),
1211 SMC_GET_FIFO_INT(), 1148 SMC_GET_FIFO_INT(lp),
1212 SMC_GET_RX_CFG()); 1149 SMC_GET_RX_CFG(lp));
1213 DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x " 1150 DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
1214 "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", 1151 "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
1215 dev->name, 1152 dev->name,
1216 (SMC_GET_RX_FIFO_INF() & 0x00ff0000) >> 16, 1153 (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
1217 SMC_GET_RX_FIFO_INF() & 0xffff, 1154 SMC_GET_RX_FIFO_INF(lp) & 0xffff,
1218 SMC_GET_RX_STS_FIFO_PEEK()); 1155 SMC_GET_RX_STS_FIFO_PEEK(lp));
1219 SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000); 1156 SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
1220 SMC_ACK_INT(INT_STS_GPT_INT_); 1157 SMC_ACK_INT(lp, INT_STS_GPT_INT_);
1221 } 1158 }
1222#endif 1159#endif
1223 1160
@@ -1225,12 +1162,12 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1225 if (status & INT_STS_PHY_INT_) { 1162 if (status & INT_STS_PHY_INT_) {
1226 DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name); 1163 DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
1227 smc911x_phy_interrupt(dev); 1164 smc911x_phy_interrupt(dev);
1228 SMC_ACK_INT(INT_STS_PHY_INT_); 1165 SMC_ACK_INT(lp, INT_STS_PHY_INT_);
1229 } 1166 }
1230 } while (--timeout); 1167 } while (--timeout);
1231 1168
1232 /* restore mask state */ 1169 /* restore mask state */
1233 SMC_SET_INT_EN(mask); 1170 SMC_SET_INT_EN(lp, mask);
1234 1171
1235 DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n", 1172 DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
1236 dev->name, 8-timeout); 1173 dev->name, 8-timeout);
@@ -1332,22 +1269,21 @@ static void smc911x_poll_controller(struct net_device *dev)
1332static void smc911x_timeout(struct net_device *dev) 1269static void smc911x_timeout(struct net_device *dev)
1333{ 1270{
1334 struct smc911x_local *lp = netdev_priv(dev); 1271 struct smc911x_local *lp = netdev_priv(dev);
1335 unsigned long ioaddr = dev->base_addr;
1336 int status, mask; 1272 int status, mask;
1337 unsigned long flags; 1273 unsigned long flags;
1338 1274
1339 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
1340 1276
1341 spin_lock_irqsave(&lp->lock, flags); 1277 spin_lock_irqsave(&lp->lock, flags);
1342 status = SMC_GET_INT(); 1278 status = SMC_GET_INT(lp);
1343 mask = SMC_GET_INT_EN(); 1279 mask = SMC_GET_INT_EN(lp);
1344 spin_unlock_irqrestore(&lp->lock, flags); 1280 spin_unlock_irqrestore(&lp->lock, flags);
1345 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n", 1281 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
1346 dev->name, status, mask); 1282 dev->name, status, mask);
1347 1283
1348 /* Dump the current TX FIFO contents and restart */ 1284 /* Dump the current TX FIFO contents and restart */
1349 mask = SMC_GET_TX_CFG(); 1285 mask = SMC_GET_TX_CFG(lp);
1350 SMC_SET_TX_CFG(mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_); 1286 SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
1351 /* 1287 /*
1352 * Reconfiguring the PHY doesn't seem like a bad idea here, but 1288 * Reconfiguring the PHY doesn't seem like a bad idea here, but
1353 * smc911x_phy_configure() calls msleep() which calls schedule_timeout() 1289 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
@@ -1370,7 +1306,6 @@ static void smc911x_timeout(struct net_device *dev)
1370static void smc911x_set_multicast_list(struct net_device *dev) 1306static void smc911x_set_multicast_list(struct net_device *dev)
1371{ 1307{
1372 struct smc911x_local *lp = netdev_priv(dev); 1308 struct smc911x_local *lp = netdev_priv(dev);
1373 unsigned long ioaddr = dev->base_addr;
1374 unsigned int multicast_table[2]; 1309 unsigned int multicast_table[2];
1375 unsigned int mcr, update_multicast = 0; 1310 unsigned int mcr, update_multicast = 0;
1376 unsigned long flags; 1311 unsigned long flags;
@@ -1378,7 +1313,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1378 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1313 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
1379 1314
1380 spin_lock_irqsave(&lp->lock, flags); 1315 spin_lock_irqsave(&lp->lock, flags);
1381 SMC_GET_MAC_CR(mcr); 1316 SMC_GET_MAC_CR(lp, mcr);
1382 spin_unlock_irqrestore(&lp->lock, flags); 1317 spin_unlock_irqrestore(&lp->lock, flags);
1383 1318
1384 if (dev->flags & IFF_PROMISC) { 1319 if (dev->flags & IFF_PROMISC) {
@@ -1455,13 +1390,13 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1455 } 1390 }
1456 1391
1457 spin_lock_irqsave(&lp->lock, flags); 1392 spin_lock_irqsave(&lp->lock, flags);
1458 SMC_SET_MAC_CR(mcr); 1393 SMC_SET_MAC_CR(lp, mcr);
1459 if (update_multicast) { 1394 if (update_multicast) {
1460 DBG(SMC_DEBUG_MISC, 1395 DBG(SMC_DEBUG_MISC,
1461 "%s: update mcast hash table 0x%08x 0x%08x\n", 1396 "%s: update mcast hash table 0x%08x 0x%08x\n",
1462 dev->name, multicast_table[0], multicast_table[1]); 1397 dev->name, multicast_table[0], multicast_table[1]);
1463 SMC_SET_HASHL(multicast_table[0]); 1398 SMC_SET_HASHL(lp, multicast_table[0]);
1464 SMC_SET_HASHH(multicast_table[1]); 1399 SMC_SET_HASHH(lp, multicast_table[1]);
1465 } 1400 }
1466 spin_unlock_irqrestore(&lp->lock, flags); 1401 spin_unlock_irqrestore(&lp->lock, flags);
1467} 1402}
@@ -1545,7 +1480,6 @@ static int
1545smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 1480smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1546{ 1481{
1547 struct smc911x_local *lp = netdev_priv(dev); 1482 struct smc911x_local *lp = netdev_priv(dev);
1548 unsigned long ioaddr = dev->base_addr;
1549 int ret, status; 1483 int ret, status;
1550 unsigned long flags; 1484 unsigned long flags;
1551 1485
@@ -1573,7 +1507,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1573 else 1507 else
1574 cmd->transceiver = XCVR_EXTERNAL; 1508 cmd->transceiver = XCVR_EXTERNAL;
1575 cmd->port = 0; 1509 cmd->port = 0;
1576 SMC_GET_PHY_SPECIAL(lp->mii.phy_id, status); 1510 SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
1577 cmd->duplex = 1511 cmd->duplex =
1578 (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? 1512 (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
1579 DUPLEX_FULL : DUPLEX_HALF; 1513 DUPLEX_FULL : DUPLEX_HALF;
@@ -1654,7 +1588,6 @@ static int smc911x_ethtool_getregslen(struct net_device *dev)
1654static void smc911x_ethtool_getregs(struct net_device *dev, 1588static void smc911x_ethtool_getregs(struct net_device *dev,
1655 struct ethtool_regs* regs, void *buf) 1589 struct ethtool_regs* regs, void *buf)
1656{ 1590{
1657 unsigned long ioaddr = dev->base_addr;
1658 struct smc911x_local *lp = netdev_priv(dev); 1591 struct smc911x_local *lp = netdev_priv(dev);
1659 unsigned long flags; 1592 unsigned long flags;
1660 u32 reg,i,j=0; 1593 u32 reg,i,j=0;
@@ -1662,17 +1595,17 @@ static void smc911x_ethtool_getregs(struct net_device *dev,
1662 1595
1663 regs->version = lp->version; 1596 regs->version = lp->version;
1664 for(i=ID_REV;i<=E2P_CMD;i+=4) { 1597 for(i=ID_REV;i<=E2P_CMD;i+=4) {
1665 data[j++] = SMC_inl(ioaddr,i); 1598 data[j++] = SMC_inl(lp, i);
1666 } 1599 }
1667 for(i=MAC_CR;i<=WUCSR;i++) { 1600 for(i=MAC_CR;i<=WUCSR;i++) {
1668 spin_lock_irqsave(&lp->lock, flags); 1601 spin_lock_irqsave(&lp->lock, flags);
1669 SMC_GET_MAC_CSR(i, reg); 1602 SMC_GET_MAC_CSR(lp, i, reg);
1670 spin_unlock_irqrestore(&lp->lock, flags); 1603 spin_unlock_irqrestore(&lp->lock, flags);
1671 data[j++] = reg; 1604 data[j++] = reg;
1672 } 1605 }
1673 for(i=0;i<=31;i++) { 1606 for(i=0;i<=31;i++) {
1674 spin_lock_irqsave(&lp->lock, flags); 1607 spin_lock_irqsave(&lp->lock, flags);
1675 SMC_GET_MII(i, lp->mii.phy_id, reg); 1608 SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
1676 spin_unlock_irqrestore(&lp->lock, flags); 1609 spin_unlock_irqrestore(&lp->lock, flags);
1677 data[j++] = reg & 0xFFFF; 1610 data[j++] = reg & 0xFFFF;
1678 } 1611 }
@@ -1680,11 +1613,11 @@ static void smc911x_ethtool_getregs(struct net_device *dev,
1680 1613
1681static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev) 1614static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1682{ 1615{
1683 unsigned long ioaddr = dev->base_addr; 1616 struct smc911x_local *lp = netdev_priv(dev);
1684 unsigned int timeout; 1617 unsigned int timeout;
1685 int e2p_cmd; 1618 int e2p_cmd;
1686 1619
1687 e2p_cmd = SMC_GET_E2P_CMD(); 1620 e2p_cmd = SMC_GET_E2P_CMD(lp);
1688 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { 1621 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
1689 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { 1622 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
1690 PRINTK("%s: %s timeout waiting for EEPROM to respond\n", 1623 PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
@@ -1692,7 +1625,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1692 return -EFAULT; 1625 return -EFAULT;
1693 } 1626 }
1694 mdelay(1); 1627 mdelay(1);
1695 e2p_cmd = SMC_GET_E2P_CMD(); 1628 e2p_cmd = SMC_GET_E2P_CMD(lp);
1696 } 1629 }
1697 if (timeout == 0) { 1630 if (timeout == 0) {
1698 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", 1631 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
@@ -1705,12 +1638,12 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1705static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev, 1638static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
1706 int cmd, int addr) 1639 int cmd, int addr)
1707{ 1640{
1708 unsigned long ioaddr = dev->base_addr; 1641 struct smc911x_local *lp = netdev_priv(dev);
1709 int ret; 1642 int ret;
1710 1643
1711 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) 1644 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
1712 return ret; 1645 return ret;
1713 SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | 1646 SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
1714 ((cmd) & (0x7<<28)) | 1647 ((cmd) & (0x7<<28)) |
1715 ((addr) & 0xFF)); 1648 ((addr) & 0xFF));
1716 return 0; 1649 return 0;
@@ -1719,24 +1652,24 @@ static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
1719static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev, 1652static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
1720 u8 *data) 1653 u8 *data)
1721{ 1654{
1722 unsigned long ioaddr = dev->base_addr; 1655 struct smc911x_local *lp = netdev_priv(dev);
1723 int ret; 1656 int ret;
1724 1657
1725 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) 1658 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
1726 return ret; 1659 return ret;
1727 *data = SMC_GET_E2P_DATA(); 1660 *data = SMC_GET_E2P_DATA(lp);
1728 return 0; 1661 return 0;
1729} 1662}
1730 1663
1731static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev, 1664static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
1732 u8 data) 1665 u8 data)
1733{ 1666{
1734 unsigned long ioaddr = dev->base_addr; 1667 struct smc911x_local *lp = netdev_priv(dev);
1735 int ret; 1668 int ret;
1736 1669
1737 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) 1670 if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
1738 return ret; 1671 return ret;
1739 SMC_SET_E2P_DATA(data); 1672 SMC_SET_E2P_DATA(lp, data);
1740 return 0; 1673 return 0;
1741} 1674}
1742 1675
@@ -1803,8 +1736,9 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
1803 * This routine has a simple purpose -- make the SMC chip generate an 1736 * This routine has a simple purpose -- make the SMC chip generate an
1804 * interrupt, so an auto-detect routine can detect it, and find the IRQ, 1737 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1805 */ 1738 */
1806static int __init smc911x_findirq(unsigned long ioaddr) 1739static int __init smc911x_findirq(struct net_device *dev)
1807{ 1740{
1741 struct smc911x_local *lp = netdev_priv(dev);
1808 int timeout = 20; 1742 int timeout = 20;
1809 unsigned long cookie; 1743 unsigned long cookie;
1810 1744
@@ -1816,7 +1750,7 @@ static int __init smc911x_findirq(unsigned long ioaddr)
1816 * Force a SW interrupt 1750 * Force a SW interrupt
1817 */ 1751 */
1818 1752
1819 SMC_SET_INT_EN(INT_EN_SW_INT_EN_); 1753 SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
1820 1754
1821 /* 1755 /*
1822 * Wait until positive that the interrupt has been generated 1756 * Wait until positive that the interrupt has been generated
@@ -1824,7 +1758,7 @@ static int __init smc911x_findirq(unsigned long ioaddr)
1824 do { 1758 do {
1825 int int_status; 1759 int int_status;
1826 udelay(10); 1760 udelay(10);
1827 int_status = SMC_GET_INT_EN(); 1761 int_status = SMC_GET_INT_EN(lp);
1828 if (int_status & INT_EN_SW_INT_EN_) 1762 if (int_status & INT_EN_SW_INT_EN_)
1829 break; /* got the interrupt */ 1763 break; /* got the interrupt */
1830 } while (--timeout); 1764 } while (--timeout);
@@ -1837,7 +1771,7 @@ static int __init smc911x_findirq(unsigned long ioaddr)
1837 */ 1771 */
1838 1772
1839 /* and disable all interrupts again */ 1773 /* and disable all interrupts again */
1840 SMC_SET_INT_EN(0); 1774 SMC_SET_INT_EN(lp, 0);
1841 1775
1842 /* and return what I found */ 1776 /* and return what I found */
1843 return probe_irq_off(cookie); 1777 return probe_irq_off(cookie);
@@ -1866,17 +1800,18 @@ static int __init smc911x_findirq(unsigned long ioaddr)
1866 * o actually GRAB the irq. 1800 * o actually GRAB the irq.
1867 * o GRAB the region 1801 * o GRAB the region
1868 */ 1802 */
1869static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) 1803static int __init smc911x_probe(struct net_device *dev)
1870{ 1804{
1871 struct smc911x_local *lp = netdev_priv(dev); 1805 struct smc911x_local *lp = netdev_priv(dev);
1872 int i, retval; 1806 int i, retval;
1873 unsigned int val, chip_id, revision; 1807 unsigned int val, chip_id, revision;
1874 const char *version_string; 1808 const char *version_string;
1809 unsigned long irq_flags;
1875 1810
1876 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1811 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
1877 1812
1878 /* First, see if the endian word is recognized */ 1813 /* First, see if the endian word is recognized */
1879 val = SMC_GET_BYTE_TEST(); 1814 val = SMC_GET_BYTE_TEST(lp);
1880 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); 1815 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
1881 if (val != 0x87654321) { 1816 if (val != 0x87654321) {
1882 printk(KERN_ERR "Invalid chip endian 0x08%x\n",val); 1817 printk(KERN_ERR "Invalid chip endian 0x08%x\n",val);
@@ -1889,7 +1824,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
1889 * recognize. These might need to be added to later, 1824 * recognize. These might need to be added to later,
1890 * as future revisions could be added. 1825 * as future revisions could be added.
1891 */ 1826 */
1892 chip_id = SMC_GET_PN(); 1827 chip_id = SMC_GET_PN(lp);
1893 DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id); 1828 DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
1894 for(i=0;chip_ids[i].id != 0; i++) { 1829 for(i=0;chip_ids[i].id != 0; i++) {
1895 if (chip_ids[i].id == chip_id) break; 1830 if (chip_ids[i].id == chip_id) break;
@@ -1901,7 +1836,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
1901 } 1836 }
1902 version_string = chip_ids[i].name; 1837 version_string = chip_ids[i].name;
1903 1838
1904 revision = SMC_GET_REV(); 1839 revision = SMC_GET_REV(lp);
1905 DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision); 1840 DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
1906 1841
1907 /* At this point I'll assume that the chip is an SMC911x. */ 1842 /* At this point I'll assume that the chip is an SMC911x. */
@@ -1915,7 +1850,6 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
1915 } 1850 }
1916 1851
1917 /* fill in some of the fields */ 1852 /* fill in some of the fields */
1918 dev->base_addr = ioaddr;
1919 lp->version = chip_ids[i].id; 1853 lp->version = chip_ids[i].id;
1920 lp->revision = revision; 1854 lp->revision = revision;
1921 lp->tx_fifo_kb = tx_fifo_kb; 1855 lp->tx_fifo_kb = tx_fifo_kb;
@@ -1974,7 +1908,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
1974 spin_lock_init(&lp->lock); 1908 spin_lock_init(&lp->lock);
1975 1909
1976 /* Get the MAC address */ 1910 /* Get the MAC address */
1977 SMC_GET_MAC_ADDR(dev->dev_addr); 1911 SMC_GET_MAC_ADDR(lp, dev->dev_addr);
1978 1912
1979 /* now, reset the chip, and put it into a known state */ 1913 /* now, reset the chip, and put it into a known state */
1980 smc911x_reset(dev); 1914 smc911x_reset(dev);
@@ -1991,7 +1925,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
1991 1925
1992 trials = 3; 1926 trials = 3;
1993 while (trials--) { 1927 while (trials--) {
1994 dev->irq = smc911x_findirq(ioaddr); 1928 dev->irq = smc911x_findirq(dev);
1995 if (dev->irq) 1929 if (dev->irq)
1996 break; 1930 break;
1997 /* kick the card and try again */ 1931 /* kick the card and try again */
@@ -2039,9 +1973,15 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
2039 lp->ctl_rfduplx = 1; 1973 lp->ctl_rfduplx = 1;
2040 lp->ctl_rspeed = 100; 1974 lp->ctl_rspeed = 100;
2041 1975
1976#ifdef SMC_DYNAMIC_BUS_CONFIG
1977 irq_flags = lp->cfg.irq_flags;
1978#else
1979 irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
1980#endif
1981
2042 /* Grab the IRQ */ 1982 /* Grab the IRQ */
2043 retval = request_irq(dev->irq, &smc911x_interrupt, 1983 retval = request_irq(dev->irq, &smc911x_interrupt,
2044 IRQF_SHARED | SMC_IRQ_SENSE, dev->name, dev); 1984 irq_flags, dev->name, dev);
2045 if (retval) 1985 if (retval)
2046 goto err_out; 1986 goto err_out;
2047 1987
@@ -2111,6 +2051,7 @@ err_out:
2111 */ 2051 */
2112static int smc911x_drv_probe(struct platform_device *pdev) 2052static int smc911x_drv_probe(struct platform_device *pdev)
2113{ 2053{
2054 struct smc91x_platdata *pd = pdev->dev.platform_data;
2114 struct net_device *ndev; 2055 struct net_device *ndev;
2115 struct resource *res; 2056 struct resource *res;
2116 struct smc911x_local *lp; 2057 struct smc911x_local *lp;
@@ -2144,6 +2085,13 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2144 ndev->irq = platform_get_irq(pdev, 0); 2085 ndev->irq = platform_get_irq(pdev, 0);
2145 lp = netdev_priv(ndev); 2086 lp = netdev_priv(ndev);
2146 lp->netdev = ndev; 2087 lp->netdev = ndev;
2088#ifdef SMC_DYNAMIC_BUS_CONFIG
2089 if (!pd) {
2090 ret = -EINVAL;
2091 goto release_both;
2092 }
2093 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2094#endif
2147 2095
2148 addr = ioremap(res->start, SMC911X_IO_EXTENT); 2096 addr = ioremap(res->start, SMC911X_IO_EXTENT);
2149 if (!addr) { 2097 if (!addr) {
@@ -2152,7 +2100,9 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2152 } 2100 }
2153 2101
2154 platform_set_drvdata(pdev, ndev); 2102 platform_set_drvdata(pdev, ndev);
2155 ret = smc911x_probe(ndev, (unsigned long)addr); 2103 lp->base = addr;
2104 ndev->base_addr = res->start;
2105 ret = smc911x_probe(ndev);
2156 if (ret != 0) { 2106 if (ret != 0) {
2157 platform_set_drvdata(pdev, NULL); 2107 platform_set_drvdata(pdev, NULL);
2158 iounmap(addr); 2108 iounmap(addr);
@@ -2176,6 +2126,7 @@ out:
2176static int smc911x_drv_remove(struct platform_device *pdev) 2126static int smc911x_drv_remove(struct platform_device *pdev)
2177{ 2127{
2178 struct net_device *ndev = platform_get_drvdata(pdev); 2128 struct net_device *ndev = platform_get_drvdata(pdev);
2129 struct smc911x_local *lp = netdev_priv(ndev);
2179 struct resource *res; 2130 struct resource *res;
2180 2131
2181 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2132 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
@@ -2187,7 +2138,6 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2187 2138
2188#ifdef SMC_USE_DMA 2139#ifdef SMC_USE_DMA
2189 { 2140 {
2190 struct smc911x_local *lp = netdev_priv(ndev);
2191 if (lp->rxdma != -1) { 2141 if (lp->rxdma != -1) {
2192 SMC_DMA_FREE(dev, lp->rxdma); 2142 SMC_DMA_FREE(dev, lp->rxdma);
2193 } 2143 }
@@ -2196,7 +2146,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2196 } 2146 }
2197 } 2147 }
2198#endif 2148#endif
2199 iounmap((void *)ndev->base_addr); 2149 iounmap(lp->base);
2200 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2150 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2201 release_mem_region(res->start, SMC911X_IO_EXTENT); 2151 release_mem_region(res->start, SMC911X_IO_EXTENT);
2202 2152
@@ -2207,7 +2157,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2207static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) 2157static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
2208{ 2158{
2209 struct net_device *ndev = platform_get_drvdata(dev); 2159 struct net_device *ndev = platform_get_drvdata(dev);
2210 unsigned long ioaddr = ndev->base_addr; 2160 struct smc911x_local *lp = netdev_priv(ndev);
2211 2161
2212 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2162 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
2213 if (ndev) { 2163 if (ndev) {
@@ -2216,7 +2166,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
2216 smc911x_shutdown(ndev); 2166 smc911x_shutdown(ndev);
2217#if POWER_DOWN 2167#if POWER_DOWN
2218 /* Set D2 - Energy detect only setting */ 2168 /* Set D2 - Energy detect only setting */
2219 SMC_SET_PMT_CTRL(2<<12); 2169 SMC_SET_PMT_CTRL(lp, 2<<12);
2220#endif 2170#endif
2221 } 2171 }
2222 } 2172 }
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 7defa63b9c74..76c17c28fab4 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -29,6 +29,7 @@
29#ifndef _SMC911X_H_ 29#ifndef _SMC911X_H_
30#define _SMC911X_H_ 30#define _SMC911X_H_
31 31
32#include <linux/smc911x.h>
32/* 33/*
33 * Use the DMA feature on PXA chips 34 * Use the DMA feature on PXA chips
34 */ 35 */
@@ -38,42 +39,160 @@
38 #define SMC_USE_32BIT 1 39 #define SMC_USE_32BIT 1
39 #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING 40 #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING
40#elif defined(CONFIG_SH_MAGIC_PANEL_R2) 41#elif defined(CONFIG_SH_MAGIC_PANEL_R2)
41 #define SMC_USE_SH_DMA 0
42 #define SMC_USE_16BIT 0 42 #define SMC_USE_16BIT 0
43 #define SMC_USE_32BIT 1 43 #define SMC_USE_32BIT 1
44 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW 44 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
45#else
46/*
47 * Default configuration
48 */
49
50#define SMC_DYNAMIC_BUS_CONFIG
45#endif 51#endif
46 52
53/* store this information for the driver.. */
54struct smc911x_local {
55 /*
56 * If I have to wait until the DMA is finished and ready to reload a
57 * packet, I will store the skbuff here. Then, the DMA will send it
58 * out and free it.
59 */
60 struct sk_buff *pending_tx_skb;
61
62 /* version/revision of the SMC911x chip */
63 u16 version;
64 u16 revision;
65
66 /* FIFO sizes */
67 int tx_fifo_kb;
68 int tx_fifo_size;
69 int rx_fifo_size;
70 int afc_cfg;
71
72 /* Contains the current active receive/phy mode */
73 int ctl_rfduplx;
74 int ctl_rspeed;
75
76 u32 msg_enable;
77 u32 phy_type;
78 struct mii_if_info mii;
79
80 /* work queue */
81 struct work_struct phy_configure;
82
83 int tx_throttle;
84 spinlock_t lock;
85
86 struct net_device *netdev;
87
88#ifdef SMC_USE_DMA
89 /* DMA needs the physical address of the chip */
90 u_long physaddr;
91 int rxdma;
92 int txdma;
93 int rxdma_active;
94 int txdma_active;
95 struct sk_buff *current_rx_skb;
96 struct sk_buff *current_tx_skb;
97 struct device *dev;
98#endif
99 void __iomem *base;
100#ifdef SMC_DYNAMIC_BUS_CONFIG
101 struct smc911x_platdata cfg;
102#endif
103};
47 104
48/* 105/*
49 * Define the bus width specific IO macros 106 * Define the bus width specific IO macros
50 */ 107 */
51 108
109#ifdef SMC_DYNAMIC_BUS_CONFIG
110static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg)
111{
112 void __iomem *ioaddr = lp->base + reg;
113
114 if (lp->cfg.flags & SMC911X_USE_32BIT)
115 return readl(ioaddr);
116
117 if (lp->cfg.flags & SMC911X_USE_16BIT)
118 return readw(ioaddr) | (readw(ioaddr + 2) << 16);
119
120 BUG();
121}
122
123static inline void SMC_outl(unsigned int value, struct smc911x_local *lp,
124 int reg)
125{
126 void __iomem *ioaddr = lp->base + reg;
127
128 if (lp->cfg.flags & SMC911X_USE_32BIT) {
129 writel(value, ioaddr);
130 return;
131 }
132
133 if (lp->cfg.flags & SMC911X_USE_16BIT) {
134 writew(value & 0xffff, ioaddr);
135 writew(value >> 16, ioaddr + 2);
136 return;
137 }
138
139 BUG();
140}
141
142static inline void SMC_insl(struct smc911x_local *lp, int reg,
143 void *addr, unsigned int count)
144{
145 void __iomem *ioaddr = lp->base + reg;
146
147 if (lp->cfg.flags & SMC911X_USE_32BIT) {
148 readsl(ioaddr, addr, count);
149 return;
150 }
151
152 if (lp->cfg.flags & SMC911X_USE_16BIT) {
153 readsw(ioaddr, addr, count * 2);
154 return;
155 }
156
157 BUG();
158}
159
160static inline void SMC_outsl(struct smc911x_local *lp, int reg,
161 void *addr, unsigned int count)
162{
163 void __iomem *ioaddr = lp->base + reg;
164
165 if (lp->cfg.flags & SMC911X_USE_32BIT) {
166 writesl(ioaddr, addr, count);
167 return;
168 }
169
170 if (lp->cfg.flags & SMC911X_USE_16BIT) {
171 writesw(ioaddr, addr, count * 2);
172 return;
173 }
174
175 BUG();
176}
177#else
52#if SMC_USE_16BIT 178#if SMC_USE_16BIT
53#define SMC_inb(a, r) readb((a) + (r)) 179#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16))
54#define SMC_inw(a, r) readw((a) + (r)) 180#define SMC_outl(v, lp, r) \
55#define SMC_inl(a, r) ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16))
56#define SMC_outb(v, a, r) writeb(v, (a) + (r))
57#define SMC_outw(v, a, r) writew(v, (a) + (r))
58#define SMC_outl(v, a, r) \
59 do{ \ 181 do{ \
60 writel(v & 0xFFFF, (a) + (r)); \ 182 writew(v & 0xFFFF, (lp)->base + (r)); \
61 writel(v >> 16, (a) + (r) + 2); \ 183 writew(v >> 16, (lp)->base + (r) + 2); \
62 } while (0) 184 } while (0)
63#define SMC_insl(a, r, p, l) readsw((short*)((a) + (r)), p, l*2) 185#define SMC_insl(lp, r, p, l) readsw((short*)((lp)->base + (r)), p, l*2)
64#define SMC_outsl(a, r, p, l) writesw((short*)((a) + (r)), p, l*2) 186#define SMC_outsl(lp, r, p, l) writesw((short*)((lp)->base + (r)), p, l*2)
65 187
66#elif SMC_USE_32BIT 188#elif SMC_USE_32BIT
67#define SMC_inb(a, r) readb((a) + (r)) 189#define SMC_inl(lp, r) readl((lp)->base + (r))
68#define SMC_inw(a, r) readw((a) + (r)) 190#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
69#define SMC_inl(a, r) readl((a) + (r)) 191#define SMC_insl(lp, r, p, l) readsl((int*)((lp)->base + (r)), p, l)
70#define SMC_outb(v, a, r) writeb(v, (a) + (r)) 192#define SMC_outsl(lp, r, p, l) writesl((int*)((lp)->base + (r)), p, l)
71#define SMC_outl(v, a, r) writel(v, (a) + (r))
72#define SMC_insl(a, r, p, l) readsl((int*)((a) + (r)), p, l)
73#define SMC_outsl(a, r, p, l) writesl((int*)((a) + (r)), p, l)
74 193
75#endif /* SMC_USE_16BIT */ 194#endif /* SMC_USE_16BIT */
76 195#endif /* SMC_DYNAMIC_BUS_CONFIG */
77 196
78 197
79#ifdef SMC_USE_PXA_DMA 198#ifdef SMC_USE_PXA_DMA
@@ -110,22 +229,22 @@ static int rx_dmalen, tx_dmalen;
110 229
111#ifdef SMC_insl 230#ifdef SMC_insl
112#undef SMC_insl 231#undef SMC_insl
113#define SMC_insl(a, r, p, l) \ 232#define SMC_insl(lp, r, p, l) \
114 smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l) 233 smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
115 234
116static inline void 235static inline void
117smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr, 236smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
118 int reg, int dma, u_char *buf, int len) 237 int reg, int dma, u_char *buf, int len)
119{ 238{
120 /* 64 bit alignment is required for memory to memory DMA */ 239 /* 64 bit alignment is required for memory to memory DMA */
121 if ((long)buf & 4) { 240 if ((long)buf & 4) {
122 *((u32 *)buf) = SMC_inl(ioaddr, reg); 241 *((u32 *)buf) = SMC_inl(lp, reg);
123 buf += 4; 242 buf += 4;
124 len--; 243 len--;
125 } 244 }
126 245
127 len *= 4; 246 len *= 4;
128 rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); 247 rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
129 rx_dmalen = len; 248 rx_dmalen = len;
130 DCSR(dma) = DCSR_NODESC; 249 DCSR(dma) = DCSR_NODESC;
131 DTADR(dma) = rx_dmabuf; 250 DTADR(dma) = rx_dmabuf;
@@ -136,52 +255,24 @@ smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr,
136} 255}
137#endif 256#endif
138 257
139#ifdef SMC_insw
140#undef SMC_insw
141#define SMC_insw(a, r, p, l) \
142 smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
143
144static inline void
145smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr,
146 int reg, int dma, u_char *buf, int len)
147{
148 /* 64 bit alignment is required for memory to memory DMA */
149 while ((long)buf & 6) {
150 *((u16 *)buf) = SMC_inw(ioaddr, reg);
151 buf += 2;
152 len--;
153 }
154
155 len *= 2;
156 rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
157 rx_dmalen = len;
158 DCSR(dma) = DCSR_NODESC;
159 DTADR(dma) = rx_dmabuf;
160 DSADR(dma) = physaddr + reg;
161 DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
162 DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
163 DCSR(dma) = DCSR_NODESC | DCSR_RUN;
164}
165#endif
166
167#ifdef SMC_outsl 258#ifdef SMC_outsl
168#undef SMC_outsl 259#undef SMC_outsl
169#define SMC_outsl(a, r, p, l) \ 260#define SMC_outsl(lp, r, p, l) \
170 smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l) 261 smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
171 262
172static inline void 263static inline void
173smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr, 264smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
174 int reg, int dma, u_char *buf, int len) 265 int reg, int dma, u_char *buf, int len)
175{ 266{
176 /* 64 bit alignment is required for memory to memory DMA */ 267 /* 64 bit alignment is required for memory to memory DMA */
177 if ((long)buf & 4) { 268 if ((long)buf & 4) {
178 SMC_outl(*((u32 *)buf), ioaddr, reg); 269 SMC_outl(*((u32 *)buf), lp, reg);
179 buf += 4; 270 buf += 4;
180 len--; 271 len--;
181 } 272 }
182 273
183 len *= 4; 274 len *= 4;
184 tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE); 275 tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
185 tx_dmalen = len; 276 tx_dmalen = len;
186 DCSR(dma) = DCSR_NODESC; 277 DCSR(dma) = DCSR_NODESC;
187 DSADR(dma) = tx_dmabuf; 278 DSADR(dma) = tx_dmabuf;
@@ -191,35 +282,6 @@ smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr,
191 DCSR(dma) = DCSR_NODESC | DCSR_RUN; 282 DCSR(dma) = DCSR_NODESC | DCSR_RUN;
192} 283}
193#endif 284#endif
194
195#ifdef SMC_outsw
196#undef SMC_outsw
197#define SMC_outsw(a, r, p, l) \
198 smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
199
200static inline void
201smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr,
202 int reg, int dma, u_char *buf, int len)
203{
204 /* 64 bit alignment is required for memory to memory DMA */
205 while ((long)buf & 6) {
206 SMC_outw(*((u16 *)buf), ioaddr, reg);
207 buf += 2;
208 len--;
209 }
210
211 len *= 2;
212 tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
213 tx_dmalen = len;
214 DCSR(dma) = DCSR_NODESC;
215 DSADR(dma) = tx_dmabuf;
216 DTADR(dma) = physaddr + reg;
217 DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
218 DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
219 DCSR(dma) = DCSR_NODESC | DCSR_RUN;
220}
221#endif
222
223#endif /* SMC_USE_PXA_DMA */ 285#endif /* SMC_USE_PXA_DMA */
224 286
225 287
@@ -629,213 +691,213 @@ static const struct chip_id chip_ids[] = {
629 * capabilities. Please use those and not the in/out primitives. 691 * capabilities. Please use those and not the in/out primitives.
630 */ 692 */
631/* FIFO read/write macros */ 693/* FIFO read/write macros */
632#define SMC_PUSH_DATA(p, l) SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 ) 694#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 )
633#define SMC_PULL_DATA(p, l) SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 ) 695#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 )
634#define SMC_SET_TX_FIFO(x) SMC_outl( x, ioaddr, TX_DATA_FIFO ) 696#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO )
635#define SMC_GET_RX_FIFO() SMC_inl( ioaddr, RX_DATA_FIFO ) 697#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO )
636 698
637 699
638/* I/O mapped register read/write macros */ 700/* I/O mapped register read/write macros */
639#define SMC_GET_TX_STS_FIFO() SMC_inl( ioaddr, TX_STATUS_FIFO ) 701#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO )
640#define SMC_GET_RX_STS_FIFO() SMC_inl( ioaddr, RX_STATUS_FIFO ) 702#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO )
641#define SMC_GET_RX_STS_FIFO_PEEK() SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK ) 703#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK )
642#define SMC_GET_PN() (SMC_inl( ioaddr, ID_REV ) >> 16) 704#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16)
643#define SMC_GET_REV() (SMC_inl( ioaddr, ID_REV ) & 0xFFFF) 705#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF)
644#define SMC_GET_IRQ_CFG() SMC_inl( ioaddr, INT_CFG ) 706#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG )
645#define SMC_SET_IRQ_CFG(x) SMC_outl( x, ioaddr, INT_CFG ) 707#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG )
646#define SMC_GET_INT() SMC_inl( ioaddr, INT_STS ) 708#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS )
647#define SMC_ACK_INT(x) SMC_outl( x, ioaddr, INT_STS ) 709#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS )
648#define SMC_GET_INT_EN() SMC_inl( ioaddr, INT_EN ) 710#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN )
649#define SMC_SET_INT_EN(x) SMC_outl( x, ioaddr, INT_EN ) 711#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN )
650#define SMC_GET_BYTE_TEST() SMC_inl( ioaddr, BYTE_TEST ) 712#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST )
651#define SMC_SET_BYTE_TEST(x) SMC_outl( x, ioaddr, BYTE_TEST ) 713#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST )
652#define SMC_GET_FIFO_INT() SMC_inl( ioaddr, FIFO_INT ) 714#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT )
653#define SMC_SET_FIFO_INT(x) SMC_outl( x, ioaddr, FIFO_INT ) 715#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT )
654#define SMC_SET_FIFO_TDA(x) \ 716#define SMC_SET_FIFO_TDA(lp, x) \
655 do { \ 717 do { \
656 unsigned long __flags; \ 718 unsigned long __flags; \
657 int __mask; \ 719 int __mask; \
658 local_irq_save(__flags); \ 720 local_irq_save(__flags); \
659 __mask = SMC_GET_FIFO_INT() & ~(0xFF<<24); \ 721 __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \
660 SMC_SET_FIFO_INT( __mask | (x)<<24 ); \ 722 SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \
661 local_irq_restore(__flags); \ 723 local_irq_restore(__flags); \
662 } while (0) 724 } while (0)
663#define SMC_SET_FIFO_TSL(x) \ 725#define SMC_SET_FIFO_TSL(lp, x) \
664 do { \ 726 do { \
665 unsigned long __flags; \ 727 unsigned long __flags; \
666 int __mask; \ 728 int __mask; \
667 local_irq_save(__flags); \ 729 local_irq_save(__flags); \
668 __mask = SMC_GET_FIFO_INT() & ~(0xFF<<16); \ 730 __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \
669 SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16)); \ 731 SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \
670 local_irq_restore(__flags); \ 732 local_irq_restore(__flags); \
671 } while (0) 733 } while (0)
672#define SMC_SET_FIFO_RSA(x) \ 734#define SMC_SET_FIFO_RSA(lp, x) \
673 do { \ 735 do { \
674 unsigned long __flags; \ 736 unsigned long __flags; \
675 int __mask; \ 737 int __mask; \
676 local_irq_save(__flags); \ 738 local_irq_save(__flags); \
677 __mask = SMC_GET_FIFO_INT() & ~(0xFF<<8); \ 739 __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \
678 SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8)); \ 740 SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \
679 local_irq_restore(__flags); \ 741 local_irq_restore(__flags); \
680 } while (0) 742 } while (0)
681#define SMC_SET_FIFO_RSL(x) \ 743#define SMC_SET_FIFO_RSL(lp, x) \
682 do { \ 744 do { \
683 unsigned long __flags; \ 745 unsigned long __flags; \
684 int __mask; \ 746 int __mask; \
685 local_irq_save(__flags); \ 747 local_irq_save(__flags); \
686 __mask = SMC_GET_FIFO_INT() & ~0xFF; \ 748 __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \
687 SMC_SET_FIFO_INT( __mask | ((x) & 0xFF)); \ 749 SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \
688 local_irq_restore(__flags); \ 750 local_irq_restore(__flags); \
689 } while (0) 751 } while (0)
690#define SMC_GET_RX_CFG() SMC_inl( ioaddr, RX_CFG ) 752#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG )
691#define SMC_SET_RX_CFG(x) SMC_outl( x, ioaddr, RX_CFG ) 753#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG )
692#define SMC_GET_TX_CFG() SMC_inl( ioaddr, TX_CFG ) 754#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG )
693#define SMC_SET_TX_CFG(x) SMC_outl( x, ioaddr, TX_CFG ) 755#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG )
694#define SMC_GET_HW_CFG() SMC_inl( ioaddr, HW_CFG ) 756#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG )
695#define SMC_SET_HW_CFG(x) SMC_outl( x, ioaddr, HW_CFG ) 757#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG )
696#define SMC_GET_RX_DP_CTRL() SMC_inl( ioaddr, RX_DP_CTRL ) 758#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL )
697#define SMC_SET_RX_DP_CTRL(x) SMC_outl( x, ioaddr, RX_DP_CTRL ) 759#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, lp, RX_DP_CTRL )
698#define SMC_GET_PMT_CTRL() SMC_inl( ioaddr, PMT_CTRL ) 760#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL )
699#define SMC_SET_PMT_CTRL(x) SMC_outl( x, ioaddr, PMT_CTRL ) 761#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL )
700#define SMC_GET_GPIO_CFG() SMC_inl( ioaddr, GPIO_CFG ) 762#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG )
701#define SMC_SET_GPIO_CFG(x) SMC_outl( x, ioaddr, GPIO_CFG ) 763#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG )
702#define SMC_GET_RX_FIFO_INF() SMC_inl( ioaddr, RX_FIFO_INF ) 764#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF )
703#define SMC_SET_RX_FIFO_INF(x) SMC_outl( x, ioaddr, RX_FIFO_INF ) 765#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF )
704#define SMC_GET_TX_FIFO_INF() SMC_inl( ioaddr, TX_FIFO_INF ) 766#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF )
705#define SMC_SET_TX_FIFO_INF(x) SMC_outl( x, ioaddr, TX_FIFO_INF ) 767#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF )
706#define SMC_GET_GPT_CFG() SMC_inl( ioaddr, GPT_CFG ) 768#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG )
707#define SMC_SET_GPT_CFG(x) SMC_outl( x, ioaddr, GPT_CFG ) 769#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG )
708#define SMC_GET_RX_DROP() SMC_inl( ioaddr, RX_DROP ) 770#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP )
709#define SMC_SET_RX_DROP(x) SMC_outl( x, ioaddr, RX_DROP ) 771#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP )
710#define SMC_GET_MAC_CMD() SMC_inl( ioaddr, MAC_CSR_CMD ) 772#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD )
711#define SMC_SET_MAC_CMD(x) SMC_outl( x, ioaddr, MAC_CSR_CMD ) 773#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD )
712#define SMC_GET_MAC_DATA() SMC_inl( ioaddr, MAC_CSR_DATA ) 774#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA )
713#define SMC_SET_MAC_DATA(x) SMC_outl( x, ioaddr, MAC_CSR_DATA ) 775#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA )
714#define SMC_GET_AFC_CFG() SMC_inl( ioaddr, AFC_CFG ) 776#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG )
715#define SMC_SET_AFC_CFG(x) SMC_outl( x, ioaddr, AFC_CFG ) 777#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG )
716#define SMC_GET_E2P_CMD() SMC_inl( ioaddr, E2P_CMD ) 778#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD )
717#define SMC_SET_E2P_CMD(x) SMC_outl( x, ioaddr, E2P_CMD ) 779#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD )
718#define SMC_GET_E2P_DATA() SMC_inl( ioaddr, E2P_DATA ) 780#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA )
719#define SMC_SET_E2P_DATA(x) SMC_outl( x, ioaddr, E2P_DATA ) 781#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA )
720 782
721/* MAC register read/write macros */ 783/* MAC register read/write macros */
722#define SMC_GET_MAC_CSR(a,v) \ 784#define SMC_GET_MAC_CSR(lp,a,v) \
723 do { \ 785 do { \
724 while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 786 while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
725 SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | \ 787 SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \
726 MAC_CSR_CMD_R_NOT_W_ | (a) ); \ 788 MAC_CSR_CMD_R_NOT_W_ | (a) ); \
727 while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 789 while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
728 v = SMC_GET_MAC_DATA(); \ 790 v = SMC_GET_MAC_DATA((lp)); \
729 } while (0) 791 } while (0)
730#define SMC_SET_MAC_CSR(a,v) \ 792#define SMC_SET_MAC_CSR(lp,a,v) \
731 do { \ 793 do { \
732 while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 794 while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
733 SMC_SET_MAC_DATA(v); \ 795 SMC_SET_MAC_DATA((lp), v); \
734 SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ 796 SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
735 while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 797 while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
736 } while (0) 798 } while (0)
737#define SMC_GET_MAC_CR(x) SMC_GET_MAC_CSR( MAC_CR, x ) 799#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x )
738#define SMC_SET_MAC_CR(x) SMC_SET_MAC_CSR( MAC_CR, x ) 800#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x )
739#define SMC_GET_ADDRH(x) SMC_GET_MAC_CSR( ADDRH, x ) 801#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x )
740#define SMC_SET_ADDRH(x) SMC_SET_MAC_CSR( ADDRH, x ) 802#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x )
741#define SMC_GET_ADDRL(x) SMC_GET_MAC_CSR( ADDRL, x ) 803#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x )
742#define SMC_SET_ADDRL(x) SMC_SET_MAC_CSR( ADDRL, x ) 804#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x )
743#define SMC_GET_HASHH(x) SMC_GET_MAC_CSR( HASHH, x ) 805#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x )
744#define SMC_SET_HASHH(x) SMC_SET_MAC_CSR( HASHH, x ) 806#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x )
745#define SMC_GET_HASHL(x) SMC_GET_MAC_CSR( HASHL, x ) 807#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x )
746#define SMC_SET_HASHL(x) SMC_SET_MAC_CSR( HASHL, x ) 808#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x )
747#define SMC_GET_MII_ACC(x) SMC_GET_MAC_CSR( MII_ACC, x ) 809#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x )
748#define SMC_SET_MII_ACC(x) SMC_SET_MAC_CSR( MII_ACC, x ) 810#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x )
749#define SMC_GET_MII_DATA(x) SMC_GET_MAC_CSR( MII_DATA, x ) 811#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x )
750#define SMC_SET_MII_DATA(x) SMC_SET_MAC_CSR( MII_DATA, x ) 812#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x )
751#define SMC_GET_FLOW(x) SMC_GET_MAC_CSR( FLOW, x ) 813#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x )
752#define SMC_SET_FLOW(x) SMC_SET_MAC_CSR( FLOW, x ) 814#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x )
753#define SMC_GET_VLAN1(x) SMC_GET_MAC_CSR( VLAN1, x ) 815#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x )
754#define SMC_SET_VLAN1(x) SMC_SET_MAC_CSR( VLAN1, x ) 816#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x )
755#define SMC_GET_VLAN2(x) SMC_GET_MAC_CSR( VLAN2, x ) 817#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x )
756#define SMC_SET_VLAN2(x) SMC_SET_MAC_CSR( VLAN2, x ) 818#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x )
757#define SMC_SET_WUFF(x) SMC_SET_MAC_CSR( WUFF, x ) 819#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x )
758#define SMC_GET_WUCSR(x) SMC_GET_MAC_CSR( WUCSR, x ) 820#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x )
759#define SMC_SET_WUCSR(x) SMC_SET_MAC_CSR( WUCSR, x ) 821#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x )
760 822
761/* PHY register read/write macros */ 823/* PHY register read/write macros */
762#define SMC_GET_MII(a,phy,v) \ 824#define SMC_GET_MII(lp,a,phy,v) \
763 do { \ 825 do { \
764 u32 __v; \ 826 u32 __v; \
765 do { \ 827 do { \
766 SMC_GET_MII_ACC(__v); \ 828 SMC_GET_MII_ACC((lp), __v); \
767 } while ( __v & MII_ACC_MII_BUSY_ ); \ 829 } while ( __v & MII_ACC_MII_BUSY_ ); \
768 SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \ 830 SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
769 MII_ACC_MII_BUSY_); \ 831 MII_ACC_MII_BUSY_); \
770 do { \ 832 do { \
771 SMC_GET_MII_ACC(__v); \ 833 SMC_GET_MII_ACC( (lp), __v); \
772 } while ( __v & MII_ACC_MII_BUSY_ ); \ 834 } while ( __v & MII_ACC_MII_BUSY_ ); \
773 SMC_GET_MII_DATA(v); \ 835 SMC_GET_MII_DATA((lp), v); \
774 } while (0) 836 } while (0)
775#define SMC_SET_MII(a,phy,v) \ 837#define SMC_SET_MII(lp,a,phy,v) \
776 do { \ 838 do { \
777 u32 __v; \ 839 u32 __v; \
778 do { \ 840 do { \
779 SMC_GET_MII_ACC(__v); \ 841 SMC_GET_MII_ACC((lp), __v); \
780 } while ( __v & MII_ACC_MII_BUSY_ ); \ 842 } while ( __v & MII_ACC_MII_BUSY_ ); \
781 SMC_SET_MII_DATA(v); \ 843 SMC_SET_MII_DATA((lp), v); \
782 SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \ 844 SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
783 MII_ACC_MII_BUSY_ | \ 845 MII_ACC_MII_BUSY_ | \
784 MII_ACC_MII_WRITE_ ); \ 846 MII_ACC_MII_WRITE_ ); \
785 do { \ 847 do { \
786 SMC_GET_MII_ACC(__v); \ 848 SMC_GET_MII_ACC((lp), __v); \
787 } while ( __v & MII_ACC_MII_BUSY_ ); \ 849 } while ( __v & MII_ACC_MII_BUSY_ ); \
788 } while (0) 850 } while (0)
789#define SMC_GET_PHY_BMCR(phy,x) SMC_GET_MII( MII_BMCR, phy, x ) 851#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x )
790#define SMC_SET_PHY_BMCR(phy,x) SMC_SET_MII( MII_BMCR, phy, x ) 852#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x )
791#define SMC_GET_PHY_BMSR(phy,x) SMC_GET_MII( MII_BMSR, phy, x ) 853#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x )
792#define SMC_GET_PHY_ID1(phy,x) SMC_GET_MII( MII_PHYSID1, phy, x ) 854#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x )
793#define SMC_GET_PHY_ID2(phy,x) SMC_GET_MII( MII_PHYSID2, phy, x ) 855#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x )
794#define SMC_GET_PHY_MII_ADV(phy,x) SMC_GET_MII( MII_ADVERTISE, phy, x ) 856#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x )
795#define SMC_SET_PHY_MII_ADV(phy,x) SMC_SET_MII( MII_ADVERTISE, phy, x ) 857#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x )
796#define SMC_GET_PHY_MII_LPA(phy,x) SMC_GET_MII( MII_LPA, phy, x ) 858#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x )
797#define SMC_SET_PHY_MII_LPA(phy,x) SMC_SET_MII( MII_LPA, phy, x ) 859#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x )
798#define SMC_GET_PHY_CTRL_STS(phy,x) SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x ) 860#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
799#define SMC_SET_PHY_CTRL_STS(phy,x) SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x ) 861#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
800#define SMC_GET_PHY_INT_SRC(phy,x) SMC_GET_MII( PHY_INT_SRC, phy, x ) 862#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x )
801#define SMC_SET_PHY_INT_SRC(phy,x) SMC_SET_MII( PHY_INT_SRC, phy, x ) 863#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x )
802#define SMC_GET_PHY_INT_MASK(phy,x) SMC_GET_MII( PHY_INT_MASK, phy, x ) 864#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x )
803#define SMC_SET_PHY_INT_MASK(phy,x) SMC_SET_MII( PHY_INT_MASK, phy, x ) 865#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x )
804#define SMC_GET_PHY_SPECIAL(phy,x) SMC_GET_MII( PHY_SPECIAL, phy, x ) 866#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x )
805 867
806 868
807 869
808/* Misc read/write macros */ 870/* Misc read/write macros */
809 871
810#ifndef SMC_GET_MAC_ADDR 872#ifndef SMC_GET_MAC_ADDR
811#define SMC_GET_MAC_ADDR(addr) \ 873#define SMC_GET_MAC_ADDR(lp, addr) \
812 do { \ 874 do { \
813 unsigned int __v; \ 875 unsigned int __v; \
814 \ 876 \
815 SMC_GET_MAC_CSR(ADDRL, __v); \ 877 SMC_GET_MAC_CSR((lp), ADDRL, __v); \
816 addr[0] = __v; addr[1] = __v >> 8; \ 878 addr[0] = __v; addr[1] = __v >> 8; \
817 addr[2] = __v >> 16; addr[3] = __v >> 24; \ 879 addr[2] = __v >> 16; addr[3] = __v >> 24; \
818 SMC_GET_MAC_CSR(ADDRH, __v); \ 880 SMC_GET_MAC_CSR((lp), ADDRH, __v); \
819 addr[4] = __v; addr[5] = __v >> 8; \ 881 addr[4] = __v; addr[5] = __v >> 8; \
820 } while (0) 882 } while (0)
821#endif 883#endif
822 884
823#define SMC_SET_MAC_ADDR(addr) \ 885#define SMC_SET_MAC_ADDR(lp, addr) \
824 do { \ 886 do { \
825 SMC_SET_MAC_CSR(ADDRL, \ 887 SMC_SET_MAC_CSR((lp), ADDRL, \
826 addr[0] | \ 888 addr[0] | \
827 (addr[1] << 8) | \ 889 (addr[1] << 8) | \
828 (addr[2] << 16) | \ 890 (addr[2] << 16) | \
829 (addr[3] << 24)); \ 891 (addr[3] << 24)); \
830 SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\ 892 SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\
831 } while (0) 893 } while (0)
832 894
833 895
834#define SMC_WRITE_EEPROM_CMD(cmd, addr) \ 896#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \
835 do { \ 897 do { \
836 while (SMC_GET_E2P_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 898 while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
837 SMC_SET_MAC_CMD(MAC_CSR_CMD_R_NOT_W_ | a ); \ 899 SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \
838 while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \ 900 while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
839 } while (0) 901 } while (0)
840 902
841#endif /* _SMC911X_H_ */ 903#endif /* _SMC911X_H_ */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 477671606273..00aa0b108cb9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1704,7 +1704,7 @@ spider_net_poll_controller(struct net_device *netdev)
1704 * 1704 *
1705 * spider_net_enable_interrupt enables several interrupts 1705 * spider_net_enable_interrupt enables several interrupts
1706 */ 1706 */
1707static void 1707static void
1708spider_net_enable_interrupts(struct spider_net_card *card) 1708spider_net_enable_interrupts(struct spider_net_card *card)
1709{ 1709{
1710 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 1710 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
@@ -1721,7 +1721,7 @@ spider_net_enable_interrupts(struct spider_net_card *card)
1721 * 1721 *
1722 * spider_net_disable_interrupts disables all the interrupts 1722 * spider_net_disable_interrupts disables all the interrupts
1723 */ 1723 */
1724static void 1724static void
1725spider_net_disable_interrupts(struct spider_net_card *card) 1725spider_net_disable_interrupts(struct spider_net_card *card)
1726{ 1726{
1727 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 1727 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 26ade68aeabf..4e994f87469e 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -915,15 +915,11 @@ static void build_fake_packet(struct lance_private *lp)
915 lp->tx_new = TX_NEXT(entry); 915 lp->tx_new = TX_NEXT(entry);
916} 916}
917 917
918struct net_device *last_dev;
919
920static int lance_open(struct net_device *dev) 918static int lance_open(struct net_device *dev)
921{ 919{
922 struct lance_private *lp = netdev_priv(dev); 920 struct lance_private *lp = netdev_priv(dev);
923 int status = 0; 921 int status = 0;
924 922
925 last_dev = dev;
926
927 STOP_LANCE(lp); 923 STOP_LANCE(lp);
928 924
929 if (request_irq(dev->irq, &lance_interrupt, IRQF_SHARED, 925 if (request_irq(dev->irq, &lance_interrupt, IRQF_SHARED,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index cc4bde852542..633c128a6228 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,8 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34#include <linux/mii.h> 34#include <linux/mii.h>
35#include <linux/phy.h>
36#include <linux/brcmphy.h>
35#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
36#include <linux/ip.h> 38#include <linux/ip.h>
37#include <linux/tcp.h> 39#include <linux/tcp.h>
@@ -64,8 +66,8 @@
64 66
65#define DRV_MODULE_NAME "tg3" 67#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": " 68#define PFX DRV_MODULE_NAME ": "
67#define DRV_MODULE_VERSION "3.92.1" 69#define DRV_MODULE_VERSION "3.93"
68#define DRV_MODULE_RELDATE "June 9, 2008" 70#define DRV_MODULE_RELDATE "May 22, 2008"
69 71
70#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -203,6 +205,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, 205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 209 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 210 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -804,6 +807,569 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
804 return ret; 807 return ret;
805} 808}
806 809
810static int tg3_bmcr_reset(struct tg3 *tp)
811{
812 u32 phy_control;
813 int limit, err;
814
815 /* OK, reset it, and poll the BMCR_RESET bit until it
816 * clears or we time out.
817 */
818 phy_control = BMCR_RESET;
819 err = tg3_writephy(tp, MII_BMCR, phy_control);
820 if (err != 0)
821 return -EBUSY;
822
823 limit = 5000;
824 while (limit--) {
825 err = tg3_readphy(tp, MII_BMCR, &phy_control);
826 if (err != 0)
827 return -EBUSY;
828
829 if ((phy_control & BMCR_RESET) == 0) {
830 udelay(40);
831 break;
832 }
833 udelay(10);
834 }
835 if (limit <= 0)
836 return -EBUSY;
837
838 return 0;
839}
840
841static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
842{
843 struct tg3 *tp = (struct tg3 *)bp->priv;
844 u32 val;
845
846 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
847 return -EAGAIN;
848
849 if (tg3_readphy(tp, reg, &val))
850 return -EIO;
851
852 return val;
853}
854
855static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
856{
857 struct tg3 *tp = (struct tg3 *)bp->priv;
858
859 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860 return -EAGAIN;
861
862 if (tg3_writephy(tp, reg, val))
863 return -EIO;
864
865 return 0;
866}
867
868static int tg3_mdio_reset(struct mii_bus *bp)
869{
870 return 0;
871}
872
873static void tg3_mdio_config(struct tg3 *tp)
874{
875 u32 val;
876
877 if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
878 PHY_INTERFACE_MODE_RGMII)
879 return;
880
881 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
882 MAC_PHYCFG1_RGMII_SND_STAT_EN);
883 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
884 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
885 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
886 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
887 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
888 }
889 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
890
891 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
892 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
893 val |= MAC_PHYCFG2_INBAND_ENABLE;
894 tw32(MAC_PHYCFG2, val);
895
896 val = tr32(MAC_EXT_RGMII_MODE);
897 val &= ~(MAC_RGMII_MODE_RX_INT_B |
898 MAC_RGMII_MODE_RX_QUALITY |
899 MAC_RGMII_MODE_RX_ACTIVITY |
900 MAC_RGMII_MODE_RX_ENG_DET |
901 MAC_RGMII_MODE_TX_ENABLE |
902 MAC_RGMII_MODE_TX_LOWPWR |
903 MAC_RGMII_MODE_TX_RESET);
904 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
905 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
906 val |= MAC_RGMII_MODE_RX_INT_B |
907 MAC_RGMII_MODE_RX_QUALITY |
908 MAC_RGMII_MODE_RX_ACTIVITY |
909 MAC_RGMII_MODE_RX_ENG_DET;
910 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
911 val |= MAC_RGMII_MODE_TX_ENABLE |
912 MAC_RGMII_MODE_TX_LOWPWR |
913 MAC_RGMII_MODE_TX_RESET;
914 }
915 tw32(MAC_EXT_RGMII_MODE, val);
916}
917
918static void tg3_mdio_start(struct tg3 *tp)
919{
920 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
921 mutex_lock(&tp->mdio_bus.mdio_lock);
922 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
923 mutex_unlock(&tp->mdio_bus.mdio_lock);
924 }
925
926 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
927 tw32_f(MAC_MI_MODE, tp->mi_mode);
928 udelay(80);
929
930 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
931 tg3_mdio_config(tp);
932}
933
934static void tg3_mdio_stop(struct tg3 *tp)
935{
936 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
937 mutex_lock(&tp->mdio_bus.mdio_lock);
938 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
939 mutex_unlock(&tp->mdio_bus.mdio_lock);
940 }
941}
942
/* Register the tg3 MDIO bus with phylib and configure the attached PHY.
 *
 * Always (re)starts MDIO access via tg3_mdio_start().  The registration
 * step runs only once: it is skipped when phylib is not in use or the
 * bus is already registered.  Returns 0 on success, or the error code
 * from mdiobus_register() on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;
	struct mii_bus *mdio_bus = &tp->mdio_bus;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	memset(mdio_bus, 0, sizeof(*mdio_bus));

	mdio_bus->name = "tg3 mdio bus";
	/* Bus id is built from the PCI bus number and devfn of the NIC. */
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	mdio_bus->priv = tp;
	mdio_bus->dev = &tp->pdev->dev;
	mdio_bus->read = &tg3_mdio_read;
	mdio_bus->write = &tg3_mdio_write;
	mdio_bus->reset = &tg3_mdio_reset;
	/* Probe only the single PHY at PHY_ADDR. */
	mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	mdio_bus->irq = &tp->mdio_irq[0];

	/* No dedicated PHY interrupt line; poll for link changes. */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
		       tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Set the interface mode (and RGMII in-band signaling flags) for
	 * the PHY models we know; other IDs keep the phylib defaults.
	 */
	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}
1010
1011static void tg3_mdio_fini(struct tg3 *tp)
1012{
1013 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1014 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1015 mdiobus_unregister(&tp->mdio_bus);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 }
1018}
1019
/* Poll until the firmware acknowledges the last driver event, i.e.
 * until GRC_RX_CPU_DRIVER_EVENT clears, or the poll budget runs out.
 *
 * tp->lock is held.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	/* Wait for up to 2.5 seconds (250000 iterations * 10us).
	 * NOTE(review): the original comment said "2.5 milliseconds",
	 * which does not match the loop bounds below.
	 */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}
1032
/* Report the current link state (BMCR/BMSR, advertisements, 1000BASE-T
 * registers, PHY address) to the management firmware through the NIC
 * SRAM command mailbox, then signal the firmware via GRC_RX_CPU_EVENT.
 *
 * tp->lock is held.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	/* Only 5780-class chips running ASF management firmware care. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the firmware has consumed any previous event. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Firmware-defined command length.  NOTE(review): 14 even though
	 * four 32-bit data words are written below — confirm against the
	 * firmware interface spec.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half; a failed
	 * PHY read leaves that half zero.
	 */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement (high) and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status, copper PHYs only. */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register, or 0 if it cannot be read. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	/* Ring the doorbell: tell the firmware a driver event is pending. */
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
1082
1083static void tg3_link_report(struct tg3 *tp)
1084{
1085 if (!netif_carrier_ok(tp->dev)) {
1086 if (netif_msg_link(tp))
1087 printk(KERN_INFO PFX "%s: Link is down.\n",
1088 tp->dev->name);
1089 tg3_ump_link_report(tp);
1090 } else if (netif_msg_link(tp)) {
1091 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1092 tp->dev->name,
1093 (tp->link_config.active_speed == SPEED_1000 ?
1094 1000 :
1095 (tp->link_config.active_speed == SPEED_100 ?
1096 100 : 10)),
1097 (tp->link_config.active_duplex == DUPLEX_FULL ?
1098 "full" : "half"));
1099
1100 printk(KERN_INFO PFX
1101 "%s: Flow control is %s for TX and %s for RX.\n",
1102 tp->dev->name,
1103 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1104 "on" : "off",
1105 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1106 "on" : "off");
1107 tg3_ump_link_report(tp);
1108 }
1109}
1110
1111static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1112{
1113 u16 miireg;
1114
1115 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1116 miireg = ADVERTISE_PAUSE_CAP;
1117 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1118 miireg = ADVERTISE_PAUSE_ASYM;
1119 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1120 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1121 else
1122 miireg = 0;
1123
1124 return miireg;
1125}
1126
1127static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1128{
1129 u16 miireg;
1130
1131 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1132 miireg = ADVERTISE_1000XPAUSE;
1133 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1134 miireg = ADVERTISE_1000XPSE_ASYM;
1135 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1136 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1137 else
1138 miireg = 0;
1139
1140 return miireg;
1141}
1142
1143static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1144{
1145 u8 cap = 0;
1146
1147 if (lcladv & ADVERTISE_PAUSE_CAP) {
1148 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1149 if (rmtadv & LPA_PAUSE_CAP)
1150 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1151 else if (rmtadv & LPA_PAUSE_ASYM)
1152 cap = TG3_FLOW_CTRL_RX;
1153 } else {
1154 if (rmtadv & LPA_PAUSE_CAP)
1155 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1156 }
1157 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1158 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1159 cap = TG3_FLOW_CTRL_TX;
1160 }
1161
1162 return cap;
1163}
1164
1165static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1166{
1167 u8 cap = 0;
1168
1169 if (lcladv & ADVERTISE_1000XPAUSE) {
1170 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1171 if (rmtadv & LPA_1000XPAUSE)
1172 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1173 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1174 cap = TG3_FLOW_CTRL_RX;
1175 } else {
1176 if (rmtadv & LPA_1000XPAUSE)
1177 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1178 }
1179 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1180 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1181 cap = TG3_FLOW_CTRL_TX;
1182 }
1183
1184 return cap;
1185}
1186
1187static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1188{
1189 u8 autoneg;
1190 u8 flowctrl = 0;
1191 u32 old_rx_mode = tp->rx_mode;
1192 u32 old_tx_mode = tp->tx_mode;
1193
1194 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1195 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1196 else
1197 autoneg = tp->link_config.autoneg;
1198
1199 if (autoneg == AUTONEG_ENABLE &&
1200 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1201 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1202 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1203 else
1204 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1205 } else
1206 flowctrl = tp->link_config.flowctrl;
1207
1208 tp->link_config.active_flowctrl = flowctrl;
1209
1210 if (flowctrl & TG3_FLOW_CTRL_RX)
1211 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1212 else
1213 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1214
1215 if (old_rx_mode != tp->rx_mode)
1216 tw32_f(MAC_RX_MODE, tp->rx_mode);
1217
1218 if (flowctrl & TG3_FLOW_CTRL_TX)
1219 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1220 else
1221 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1222
1223 if (old_tx_mode != tp->tx_mode)
1224 tw32_f(MAC_TX_MODE, tp->tx_mode);
1225}
1226
/* phylib adjust_link callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, TX timing) and emit a link
 * message when a user-visible parameter changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	/* Start from the current MAC mode with the port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: derive the pause configuration from
			 * our advertisement and the partner's pause bits
			 * reported by phylib.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Touch the MAC_MODE register only on an actual change. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 1000/half uses a longer slot time (0xff) than all other
	 * speed/duplex combinations (32).
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Log only when link state, speed, duplex or flow control moved. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* Reported after dropping tp->lock; tg3_link_report() also
	 * notifies the management firmware.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
1298
/* Connect the MAC to its PHY through phylib.
 *
 * Idempotent: returns 0 immediately when already connected.  On success
 * sets TG3_FLG3_PHY_CONNECTED and trims the PHY's supported/advertised
 * feature set to what the MAC can handle.  Returns the phy_connect()
 * error code on failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus.phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY; tg3_adjust_link() will be called on
	 * every link state change.
	 */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}
1334
1335static void tg3_phy_start(struct tg3 *tp)
1336{
1337 struct phy_device *phydev;
1338
1339 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1340 return;
1341
1342 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1343
1344 if (tp->link_config.phy_is_low_power) {
1345 tp->link_config.phy_is_low_power = 0;
1346 phydev->speed = tp->link_config.orig_speed;
1347 phydev->duplex = tp->link_config.orig_duplex;
1348 phydev->autoneg = tp->link_config.orig_autoneg;
1349 phydev->advertising = tp->link_config.orig_advertising;
1350 }
1351
1352 phy_start(phydev);
1353
1354 phy_start_aneg(phydev);
1355}
1356
1357static void tg3_phy_stop(struct tg3 *tp)
1358{
1359 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1360 return;
1361
1362 phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1363}
1364
1365static void tg3_phy_fini(struct tg3 *tp)
1366{
1367 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1368 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1369 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1370 }
1371}
1372
807static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) 1373static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
808{ 1374{
809 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); 1375 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
@@ -861,37 +1427,6 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
861 (val | (1 << 15) | (1 << 4))); 1427 (val | (1 << 15) | (1 << 4)));
862} 1428}
863 1429
864static int tg3_bmcr_reset(struct tg3 *tp)
865{
866 u32 phy_control;
867 int limit, err;
868
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
871 */
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 limit = 5000;
878 while (limit--) {
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880 if (err != 0)
881 return -EBUSY;
882
883 if ((phy_control & BMCR_RESET) == 0) {
884 udelay(40);
885 break;
886 }
887 udelay(10);
888 }
889 if (limit <= 0)
890 return -EBUSY;
891
892 return 0;
893}
894
895static void tg3_phy_apply_otp(struct tg3 *tp) 1430static void tg3_phy_apply_otp(struct tg3 *tp)
896{ 1431{
897 u32 otp, phy; 1432 u32 otp, phy;
@@ -1115,8 +1650,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1115 return err; 1650 return err;
1116} 1651}
1117 1652
1118static void tg3_link_report(struct tg3 *);
1119
1120/* This will reset the tigon3 PHY if there is no valid 1653/* This will reset the tigon3 PHY if there is no valid
1121 * link unless the FORCE argument is non-zero. 1654 * link unless the FORCE argument is non-zero.
1122 */ 1655 */
@@ -1421,7 +1954,7 @@ static void tg3_power_down_phy(struct tg3 *tp)
1421 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 1954 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1422 udelay(40); 1955 udelay(40);
1423 return; 1956 return;
1424 } else { 1957 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1425 tg3_writephy(tp, MII_TG3_EXT_CTRL, 1958 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1426 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 1959 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1427 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); 1960 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
@@ -1495,7 +2028,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1495 "requested.\n", 2028 "requested.\n",
1496 tp->dev->name, state); 2029 tp->dev->name, state);
1497 return -EINVAL; 2030 return -EINVAL;
1498 }; 2031 }
1499 2032
1500 power_control |= PCI_PM_CTRL_PME_ENABLE; 2033 power_control |= PCI_PM_CTRL_PME_ENABLE;
1501 2034
@@ -1503,18 +2036,55 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1503 tw32(TG3PCI_MISC_HOST_CTRL, 2036 tw32(TG3PCI_MISC_HOST_CTRL,
1504 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1505 2038
1506 if (tp->link_config.phy_is_low_power == 0) { 2039 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1507 tp->link_config.phy_is_low_power = 1; 2040 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
1508 tp->link_config.orig_speed = tp->link_config.speed; 2041 !tp->link_config.phy_is_low_power) {
1509 tp->link_config.orig_duplex = tp->link_config.duplex; 2042 struct phy_device *phydev;
1510 tp->link_config.orig_autoneg = tp->link_config.autoneg; 2043 u32 advertising;
1511 } 2044
2045 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2046
2047 tp->link_config.phy_is_low_power = 1;
2048
2049 tp->link_config.orig_speed = phydev->speed;
2050 tp->link_config.orig_duplex = phydev->duplex;
2051 tp->link_config.orig_autoneg = phydev->autoneg;
2052 tp->link_config.orig_advertising = phydev->advertising;
2053
2054 advertising = ADVERTISED_TP |
2055 ADVERTISED_Pause |
2056 ADVERTISED_Autoneg |
2057 ADVERTISED_10baseT_Half;
2058
2059 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2060 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2061 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2062 advertising |=
2063 ADVERTISED_100baseT_Half |
2064 ADVERTISED_100baseT_Full |
2065 ADVERTISED_10baseT_Full;
2066 else
2067 advertising |= ADVERTISED_10baseT_Full;
2068 }
1512 2069
1513 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 2070 phydev->advertising = advertising;
1514 tp->link_config.speed = SPEED_10; 2071
1515 tp->link_config.duplex = DUPLEX_HALF; 2072 phy_start_aneg(phydev);
1516 tp->link_config.autoneg = AUTONEG_ENABLE; 2073 }
1517 tg3_setup_phy(tp, 0); 2074 } else {
2075 if (tp->link_config.phy_is_low_power == 0) {
2076 tp->link_config.phy_is_low_power = 1;
2077 tp->link_config.orig_speed = tp->link_config.speed;
2078 tp->link_config.orig_duplex = tp->link_config.duplex;
2079 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2080 }
2081
2082 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2083 tp->link_config.speed = SPEED_10;
2084 tp->link_config.duplex = DUPLEX_HALF;
2085 tp->link_config.autoneg = AUTONEG_ENABLE;
2086 tg3_setup_phy(tp, 0);
2087 }
1518 } 2088 }
1519 2089
1520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -1545,8 +2115,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1545 u32 mac_mode; 2115 u32 mac_mode;
1546 2116
1547 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 2117 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1548 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2118 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1549 udelay(40); 2119 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2120 udelay(40);
2121 }
1550 2122
1551 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 2123 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1552 mac_mode = MAC_MODE_PORT_MODE_GMII; 2124 mac_mode = MAC_MODE_PORT_MODE_GMII;
@@ -1671,212 +2243,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1671 return 0; 2243 return 0;
1672} 2244}
1673 2245
1674/* tp->lock is held. */
1675static void tg3_wait_for_event_ack(struct tg3 *tp)
1676{
1677 int i;
1678
1679 /* Wait for up to 2.5 milliseconds */
1680 for (i = 0; i < 250000; i++) {
1681 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1682 break;
1683 udelay(10);
1684 }
1685}
1686
1687/* tp->lock is held. */
1688static void tg3_ump_link_report(struct tg3 *tp)
1689{
1690 u32 reg;
1691 u32 val;
1692
1693 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1694 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1695 return;
1696
1697 tg3_wait_for_event_ack(tp);
1698
1699 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1700
1701 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1702
1703 val = 0;
1704 if (!tg3_readphy(tp, MII_BMCR, &reg))
1705 val = reg << 16;
1706 if (!tg3_readphy(tp, MII_BMSR, &reg))
1707 val |= (reg & 0xffff);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1709
1710 val = 0;
1711 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1712 val = reg << 16;
1713 if (!tg3_readphy(tp, MII_LPA, &reg))
1714 val |= (reg & 0xffff);
1715 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1716
1717 val = 0;
1718 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1719 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1720 val = reg << 16;
1721 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1722 val |= (reg & 0xffff);
1723 }
1724 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1725
1726 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1727 val = reg << 16;
1728 else
1729 val = 0;
1730 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1731
1732 val = tr32(GRC_RX_CPU_EVENT);
1733 val |= GRC_RX_CPU_DRIVER_EVENT;
1734 tw32_f(GRC_RX_CPU_EVENT, val);
1735}
1736
1737static void tg3_link_report(struct tg3 *tp)
1738{
1739 if (!netif_carrier_ok(tp->dev)) {
1740 if (netif_msg_link(tp))
1741 printk(KERN_INFO PFX "%s: Link is down.\n",
1742 tp->dev->name);
1743 tg3_ump_link_report(tp);
1744 } else if (netif_msg_link(tp)) {
1745 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1746 tp->dev->name,
1747 (tp->link_config.active_speed == SPEED_1000 ?
1748 1000 :
1749 (tp->link_config.active_speed == SPEED_100 ?
1750 100 : 10)),
1751 (tp->link_config.active_duplex == DUPLEX_FULL ?
1752 "full" : "half"));
1753
1754 printk(KERN_INFO PFX
1755 "%s: Flow control is %s for TX and %s for RX.\n",
1756 tp->dev->name,
1757 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1758 "on" : "off",
1759 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1760 "on" : "off");
1761 tg3_ump_link_report(tp);
1762 }
1763}
1764
1765static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1766{
1767 u16 miireg;
1768
1769 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1770 miireg = ADVERTISE_PAUSE_CAP;
1771 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1772 miireg = ADVERTISE_PAUSE_ASYM;
1773 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1774 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1775 else
1776 miireg = 0;
1777
1778 return miireg;
1779}
1780
1781static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1782{
1783 u16 miireg;
1784
1785 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1786 miireg = ADVERTISE_1000XPAUSE;
1787 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1788 miireg = ADVERTISE_1000XPSE_ASYM;
1789 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1790 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1791 else
1792 miireg = 0;
1793
1794 return miireg;
1795}
1796
1797static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1798{
1799 u8 cap = 0;
1800
1801 if (lcladv & ADVERTISE_PAUSE_CAP) {
1802 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1803 if (rmtadv & LPA_PAUSE_CAP)
1804 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1805 else if (rmtadv & LPA_PAUSE_ASYM)
1806 cap = TG3_FLOW_CTRL_RX;
1807 } else {
1808 if (rmtadv & LPA_PAUSE_CAP)
1809 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1810 }
1811 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1812 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1813 cap = TG3_FLOW_CTRL_TX;
1814 }
1815
1816 return cap;
1817}
1818
1819static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1820{
1821 u8 cap = 0;
1822
1823 if (lcladv & ADVERTISE_1000XPAUSE) {
1824 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1825 if (rmtadv & LPA_1000XPAUSE)
1826 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1827 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1828 cap = TG3_FLOW_CTRL_RX;
1829 } else {
1830 if (rmtadv & LPA_1000XPAUSE)
1831 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1832 }
1833 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1834 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1835 cap = TG3_FLOW_CTRL_TX;
1836 }
1837
1838 return cap;
1839}
1840
1841static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1842{
1843 u8 new_tg3_flags = 0;
1844 u32 old_rx_mode = tp->rx_mode;
1845 u32 old_tx_mode = tp->tx_mode;
1846
1847 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1848 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1849 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1850 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1851 remote_adv);
1852 else
1853 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1854 remote_adv);
1855 } else {
1856 new_tg3_flags = tp->link_config.flowctrl;
1857 }
1858
1859 tp->link_config.active_flowctrl = new_tg3_flags;
1860
1861 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1862 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1863 else
1864 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1865
1866 if (old_rx_mode != tp->rx_mode) {
1867 tw32_f(MAC_RX_MODE, tp->rx_mode);
1868 }
1869
1870 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1871 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1872 else
1873 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1874
1875 if (old_tx_mode != tp->tx_mode) {
1876 tw32_f(MAC_TX_MODE, tp->tx_mode);
1877 }
1878}
1879
1880static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 2246static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1881{ 2247{
1882 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 2248 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
@@ -1921,7 +2287,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
1921 *speed = SPEED_INVALID; 2287 *speed = SPEED_INVALID;
1922 *duplex = DUPLEX_INVALID; 2288 *duplex = DUPLEX_INVALID;
1923 break; 2289 break;
1924 }; 2290 }
1925} 2291}
1926 2292
1927static void tg3_phy_copper_begin(struct tg3 *tp) 2293static void tg3_phy_copper_begin(struct tg3 *tp)
@@ -2033,7 +2399,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2033 case SPEED_1000: 2399 case SPEED_1000:
2034 bmcr |= TG3_BMCR_SPEED1000; 2400 bmcr |= TG3_BMCR_SPEED1000;
2035 break; 2401 break;
2036 }; 2402 }
2037 2403
2038 if (tp->link_config.duplex == DUPLEX_FULL) 2404 if (tp->link_config.duplex == DUPLEX_FULL)
2039 bmcr |= BMCR_FULLDPLX; 2405 bmcr |= BMCR_FULLDPLX;
@@ -2731,7 +3097,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2731 default: 3097 default:
2732 ret = ANEG_FAILED; 3098 ret = ANEG_FAILED;
2733 break; 3099 break;
2734 }; 3100 }
2735 3101
2736 return ret; 3102 return ret;
2737} 3103}
@@ -3572,7 +3938,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3572 3938
3573 default: 3939 default:
3574 return -EINVAL; 3940 return -EINVAL;
3575 }; 3941 }
3576 3942
3577 /* Do not overwrite any of the map or rp information 3943 /* Do not overwrite any of the map or rp information
3578 * until we are sure we can commit to a new buffer. 3944 * until we are sure we can commit to a new buffer.
@@ -3632,7 +3998,7 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3632 3998
3633 default: 3999 default:
3634 return; 4000 return;
3635 }; 4001 }
3636 4002
3637 dest_map->skb = src_map->skb; 4003 dest_map->skb = src_map->skb;
3638 pci_unmap_addr_set(dest_map, mapping, 4004 pci_unmap_addr_set(dest_map, mapping,
@@ -3842,7 +4208,15 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3842 sblk->status = SD_STATUS_UPDATED | 4208 sblk->status = SD_STATUS_UPDATED |
3843 (sblk->status & ~SD_STATUS_LINK_CHG); 4209 (sblk->status & ~SD_STATUS_LINK_CHG);
3844 spin_lock(&tp->lock); 4210 spin_lock(&tp->lock);
3845 tg3_setup_phy(tp, 0); 4211 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4212 tw32_f(MAC_STATUS,
4213 (MAC_STATUS_SYNC_CHANGED |
4214 MAC_STATUS_CFG_CHANGED |
4215 MAC_STATUS_MI_COMPLETION |
4216 MAC_STATUS_LNKSTATE_CHANGED));
4217 udelay(40);
4218 } else
4219 tg3_setup_phy(tp, 0);
3846 spin_unlock(&tp->lock); 4220 spin_unlock(&tp->lock);
3847 } 4221 }
3848 } 4222 }
@@ -4130,6 +4504,7 @@ static void tg3_poll_controller(struct net_device *dev)
4130static void tg3_reset_task(struct work_struct *work) 4504static void tg3_reset_task(struct work_struct *work)
4131{ 4505{
4132 struct tg3 *tp = container_of(work, struct tg3, reset_task); 4506 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4507 int err;
4133 unsigned int restart_timer; 4508 unsigned int restart_timer;
4134 4509
4135 tg3_full_lock(tp, 0); 4510 tg3_full_lock(tp, 0);
@@ -4141,6 +4516,8 @@ static void tg3_reset_task(struct work_struct *work)
4141 4516
4142 tg3_full_unlock(tp); 4517 tg3_full_unlock(tp);
4143 4518
4519 tg3_phy_stop(tp);
4520
4144 tg3_netif_stop(tp); 4521 tg3_netif_stop(tp);
4145 4522
4146 tg3_full_lock(tp, 1); 4523 tg3_full_lock(tp, 1);
@@ -4156,7 +4533,8 @@ static void tg3_reset_task(struct work_struct *work)
4156 } 4533 }
4157 4534
4158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 4535 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4159 if (tg3_init_hw(tp, 1)) 4536 err = tg3_init_hw(tp, 1);
4537 if (err)
4160 goto out; 4538 goto out;
4161 4539
4162 tg3_netif_start(tp); 4540 tg3_netif_start(tp);
@@ -4166,6 +4544,9 @@ static void tg3_reset_task(struct work_struct *work)
4166 4544
4167out: 4545out:
4168 tg3_full_unlock(tp); 4546 tg3_full_unlock(tp);
4547
4548 if (!err)
4549 tg3_phy_start(tp);
4169} 4550}
4170 4551
4171static void tg3_dump_short_state(struct tg3 *tp) 4552static void tg3_dump_short_state(struct tg3 *tp)
@@ -4669,6 +5050,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4669 return 0; 5050 return 0;
4670 } 5051 }
4671 5052
5053 tg3_phy_stop(tp);
5054
4672 tg3_netif_stop(tp); 5055 tg3_netif_stop(tp);
4673 5056
4674 tg3_full_lock(tp, 1); 5057 tg3_full_lock(tp, 1);
@@ -4684,6 +5067,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4684 5067
4685 tg3_full_unlock(tp); 5068 tg3_full_unlock(tp);
4686 5069
5070 if (!err)
5071 tg3_phy_start(tp);
5072
4687 return err; 5073 return err;
4688} 5074}
4689 5075
@@ -4975,7 +5361,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
4975 5361
4976 default: 5362 default:
4977 break; 5363 break;
4978 }; 5364 }
4979 } 5365 }
4980 5366
4981 val = tr32(ofs); 5367 val = tr32(ofs);
@@ -5217,7 +5603,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5217 5603
5218 default: 5604 default:
5219 break; 5605 break;
5220 }; 5606 }
5221 } 5607 }
5222 5608
5223 if (kind == RESET_KIND_INIT || 5609 if (kind == RESET_KIND_INIT ||
@@ -5242,7 +5628,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5242 5628
5243 default: 5629 default:
5244 break; 5630 break;
5245 }; 5631 }
5246 } 5632 }
5247 5633
5248 if (kind == RESET_KIND_SHUTDOWN) 5634 if (kind == RESET_KIND_SHUTDOWN)
@@ -5271,7 +5657,7 @@ static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5271 5657
5272 default: 5658 default:
5273 break; 5659 break;
5274 }; 5660 }
5275 } 5661 }
5276} 5662}
5277 5663
@@ -5393,6 +5779,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5393 5779
5394 tg3_nvram_lock(tp); 5780 tg3_nvram_lock(tp);
5395 5781
5782 tg3_mdio_stop(tp);
5783
5396 /* No matching tg3_nvram_unlock() after this because 5784 /* No matching tg3_nvram_unlock() after this because
5397 * chip reset below will undo the nvram lock. 5785 * chip reset below will undo the nvram lock.
5398 */ 5786 */
@@ -5408,7 +5796,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 5796 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 5797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 5798 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 5799 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5412 tw32(GRC_FASTBOOT_PC, 0); 5801 tw32(GRC_FASTBOOT_PC, 0);
5413 5802
5414 /* 5803 /*
@@ -5544,6 +5933,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5544 tw32_f(MAC_MODE, 0); 5933 tw32_f(MAC_MODE, 0);
5545 udelay(40); 5934 udelay(40);
5546 5935
5936 tg3_mdio_start(tp);
5937
5547 err = tg3_poll_fw(tp); 5938 err = tg3_poll_fw(tp);
5548 if (err) 5939 if (err)
5549 return err; 5940 return err;
@@ -6623,7 +7014,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6623 tg3_abort_hw(tp, 1); 7014 tg3_abort_hw(tp, 1);
6624 } 7015 }
6625 7016
6626 if (reset_phy) 7017 if (reset_phy &&
7018 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6627 tg3_phy_reset(tp); 7019 tg3_phy_reset(tp);
6628 7020
6629 err = tg3_chip_reset(tp); 7021 err = tg3_chip_reset(tp);
@@ -6699,7 +7091,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6699 return err; 7091 return err;
6700 7092
6701 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7093 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6702 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 7094 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7095 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6703 /* This value is determined during the probe time DMA 7096 /* This value is determined during the probe time DMA
6704 * engine test, tg3_test_dma. 7097 * engine test, tg3_test_dma.
6705 */ 7098 */
@@ -6938,7 +7331,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6938 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7331 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6939 RDMAC_MODE_LNGREAD_ENAB); 7332 RDMAC_MODE_LNGREAD_ENAB);
6940 7333
6941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) 7334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
6942 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 7336 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6943 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 7337 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6944 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 7338 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
@@ -7106,8 +7500,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7106 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || 7500 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7107 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) || 7501 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7108 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) || 7502 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7109 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)) 7503 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7110 val |= (1 << 29); 7504 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7505 val |= WDMAC_MODE_STATUS_TAG_FIX;
7111 7506
7112 tw32_f(WDMAC_MODE, val); 7507 tw32_f(WDMAC_MODE, val);
7113 udelay(40); 7508 udelay(40);
@@ -7168,23 +7563,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7168 7563
7169 tp->rx_mode = RX_MODE_ENABLE; 7564 tp->rx_mode = RX_MODE_ENABLE;
7170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 7565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 7566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7172 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 7569 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7173 7570
7174 tw32_f(MAC_RX_MODE, tp->rx_mode); 7571 tw32_f(MAC_RX_MODE, tp->rx_mode);
7175 udelay(10); 7572 udelay(10);
7176 7573
7177 if (tp->link_config.phy_is_low_power) {
7178 tp->link_config.phy_is_low_power = 0;
7179 tp->link_config.speed = tp->link_config.orig_speed;
7180 tp->link_config.duplex = tp->link_config.orig_duplex;
7181 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7182 }
7183
7184 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7185 tw32_f(MAC_MI_MODE, tp->mi_mode);
7186 udelay(80);
7187
7188 tw32(MAC_LED_CTRL, tp->led_ctrl); 7574 tw32(MAC_LED_CTRL, tp->led_ctrl);
7189 7575
7190 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 7576 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
@@ -7231,19 +7617,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7231 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 7617 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7232 } 7618 }
7233 7619
7234 err = tg3_setup_phy(tp, 0); 7620 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7235 if (err) 7621 if (tp->link_config.phy_is_low_power) {
7236 return err; 7622 tp->link_config.phy_is_low_power = 0;
7623 tp->link_config.speed = tp->link_config.orig_speed;
7624 tp->link_config.duplex = tp->link_config.orig_duplex;
7625 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7626 }
7237 7627
7238 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 7628 err = tg3_setup_phy(tp, 0);
7239 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { 7629 if (err)
7240 u32 tmp; 7630 return err;
7241 7631
7242 /* Clear CRC stats. */ 7632 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7243 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { 7633 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7244 tg3_writephy(tp, MII_TG3_TEST1, 7634 u32 tmp;
7245 tmp | MII_TG3_TEST1_CRC_EN); 7635
7246 tg3_readphy(tp, 0x14, &tmp); 7636 /* Clear CRC stats. */
7637 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7638 tg3_writephy(tp, MII_TG3_TEST1,
7639 tmp | MII_TG3_TEST1_CRC_EN);
7640 tg3_readphy(tp, 0x14, &tmp);
7641 }
7247 } 7642 }
7248 } 7643 }
7249 7644
@@ -7296,7 +7691,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7296 7691
7297 default: 7692 default:
7298 break; 7693 break;
7299 }; 7694 }
7300 7695
7301 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 7696 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7302 /* Write our heartbeat update interval to APE. */ 7697 /* Write our heartbeat update interval to APE. */
@@ -7758,6 +8153,8 @@ static int tg3_open(struct net_device *dev)
7758 } 8153 }
7759 } 8154 }
7760 8155
8156 tg3_phy_start(tp);
8157
7761 tg3_full_lock(tp, 0); 8158 tg3_full_lock(tp, 0);
7762 8159
7763 add_timer(&tp->timer); 8160 add_timer(&tp->timer);
@@ -8559,7 +8956,13 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
8559 8956
8560static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 8957static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8561{ 8958{
8562 struct tg3 *tp = netdev_priv(dev); 8959 struct tg3 *tp = netdev_priv(dev);
8960
8961 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8962 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8963 return -EAGAIN;
8964 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8965 }
8563 8966
8564 cmd->supported = (SUPPORTED_Autoneg); 8967 cmd->supported = (SUPPORTED_Autoneg);
8565 8968
@@ -8596,6 +8999,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8596{ 8999{
8597 struct tg3 *tp = netdev_priv(dev); 9000 struct tg3 *tp = netdev_priv(dev);
8598 9001
9002 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9003 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9004 return -EAGAIN;
9005 return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
9006 }
9007
8599 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 9008 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8600 /* These are the only valid advertisement bits allowed. */ 9009 /* These are the only valid advertisement bits allowed. */
8601 if (cmd->autoneg == AUTONEG_ENABLE && 9010 if (cmd->autoneg == AUTONEG_ENABLE &&
@@ -8628,7 +9037,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8628 tp->link_config.advertising = 0; 9037 tp->link_config.advertising = 0;
8629 tp->link_config.speed = cmd->speed; 9038 tp->link_config.speed = cmd->speed;
8630 tp->link_config.duplex = cmd->duplex; 9039 tp->link_config.duplex = cmd->duplex;
8631 } 9040 }
8632 9041
8633 tp->link_config.orig_speed = tp->link_config.speed; 9042 tp->link_config.orig_speed = tp->link_config.speed;
8634 tp->link_config.orig_duplex = tp->link_config.duplex; 9043 tp->link_config.orig_duplex = tp->link_config.duplex;
@@ -8711,7 +9120,10 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8711 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { 9120 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8712 if (value) { 9121 if (value) {
8713 dev->features |= NETIF_F_TSO6; 9122 dev->features |= NETIF_F_TSO6;
8714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9124 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9125 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8715 dev->features |= NETIF_F_TSO_ECN; 9127 dev->features |= NETIF_F_TSO_ECN;
8716 } else 9128 } else
8717 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9129 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -8722,7 +9134,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8722static int tg3_nway_reset(struct net_device *dev) 9134static int tg3_nway_reset(struct net_device *dev)
8723{ 9135{
8724 struct tg3 *tp = netdev_priv(dev); 9136 struct tg3 *tp = netdev_priv(dev);
8725 u32 bmcr;
8726 int r; 9137 int r;
8727 9138
8728 if (!netif_running(dev)) 9139 if (!netif_running(dev))
@@ -8731,17 +9142,25 @@ static int tg3_nway_reset(struct net_device *dev)
8731 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 9142 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8732 return -EINVAL; 9143 return -EINVAL;
8733 9144
8734 spin_lock_bh(&tp->lock); 9145 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8735 r = -EINVAL; 9146 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8736 tg3_readphy(tp, MII_BMCR, &bmcr); 9147 return -EAGAIN;
8737 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 9148 r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
8738 ((bmcr & BMCR_ANENABLE) || 9149 } else {
8739 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { 9150 u32 bmcr;
8740 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | 9151
8741 BMCR_ANENABLE); 9152 spin_lock_bh(&tp->lock);
8742 r = 0; 9153 r = -EINVAL;
9154 tg3_readphy(tp, MII_BMCR, &bmcr);
9155 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9156 ((bmcr & BMCR_ANENABLE) ||
9157 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9158 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9159 BMCR_ANENABLE);
9160 r = 0;
9161 }
9162 spin_unlock_bh(&tp->lock);
8743 } 9163 }
8744 spin_unlock_bh(&tp->lock);
8745 9164
8746 return r; 9165 return r;
8747} 9166}
@@ -8783,6 +9202,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8783 return -EINVAL; 9202 return -EINVAL;
8784 9203
8785 if (netif_running(dev)) { 9204 if (netif_running(dev)) {
9205 tg3_phy_stop(tp);
8786 tg3_netif_stop(tp); 9206 tg3_netif_stop(tp);
8787 irq_sync = 1; 9207 irq_sync = 1;
8788 } 9208 }
@@ -8806,6 +9226,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
8806 9226
8807 tg3_full_unlock(tp); 9227 tg3_full_unlock(tp);
8808 9228
9229 if (irq_sync && !err)
9230 tg3_phy_start(tp);
9231
8809 return err; 9232 return err;
8810} 9233}
8811 9234
@@ -8829,36 +9252,92 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8829static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 9252static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8830{ 9253{
8831 struct tg3 *tp = netdev_priv(dev); 9254 struct tg3 *tp = netdev_priv(dev);
8832 int irq_sync = 0, err = 0; 9255 int err = 0;
8833 9256
8834 if (netif_running(dev)) { 9257 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8835 tg3_netif_stop(tp); 9258 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8836 irq_sync = 1; 9259 return -EAGAIN;
8837 }
8838 9260
8839 tg3_full_lock(tp, irq_sync); 9261 if (epause->autoneg) {
9262 u32 newadv;
9263 struct phy_device *phydev;
8840 9264
8841 if (epause->autoneg) 9265 phydev = tp->mdio_bus.phy_map[PHY_ADDR];
8842 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8843 else
8844 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8845 if (epause->rx_pause)
8846 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8847 else
8848 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8849 if (epause->tx_pause)
8850 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8851 else
8852 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8853 9266
8854 if (netif_running(dev)) { 9267 if (epause->rx_pause) {
8855 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9268 if (epause->tx_pause)
8856 err = tg3_restart_hw(tp, 1); 9269 newadv = ADVERTISED_Pause;
8857 if (!err) 9270 else
8858 tg3_netif_start(tp); 9271 newadv = ADVERTISED_Pause |
8859 } 9272 ADVERTISED_Asym_Pause;
9273 } else if (epause->tx_pause) {
9274 newadv = ADVERTISED_Asym_Pause;
9275 } else
9276 newadv = 0;
9277
9278 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9279 u32 oldadv = phydev->advertising &
9280 (ADVERTISED_Pause |
9281 ADVERTISED_Asym_Pause);
9282 if (oldadv != newadv) {
9283 phydev->advertising &=
9284 ~(ADVERTISED_Pause |
9285 ADVERTISED_Asym_Pause);
9286 phydev->advertising |= newadv;
9287 err = phy_start_aneg(phydev);
9288 }
9289 } else {
9290 tp->link_config.advertising &=
9291 ~(ADVERTISED_Pause |
9292 ADVERTISED_Asym_Pause);
9293 tp->link_config.advertising |= newadv;
9294 }
9295 } else {
9296 if (epause->rx_pause)
9297 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9298 else
9299 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8860 9300
8861 tg3_full_unlock(tp); 9301 if (epause->tx_pause)
9302 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9303 else
9304 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9305
9306 if (netif_running(dev))
9307 tg3_setup_flow_control(tp, 0, 0);
9308 }
9309 } else {
9310 int irq_sync = 0;
9311
9312 if (netif_running(dev)) {
9313 tg3_netif_stop(tp);
9314 irq_sync = 1;
9315 }
9316
9317 tg3_full_lock(tp, irq_sync);
9318
9319 if (epause->autoneg)
9320 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9321 else
9322 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9323 if (epause->rx_pause)
9324 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9325 else
9326 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9327 if (epause->tx_pause)
9328 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9329 else
9330 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9331
9332 if (netif_running(dev)) {
9333 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9334 err = tg3_restart_hw(tp, 1);
9335 if (!err)
9336 tg3_netif_start(tp);
9337 }
9338
9339 tg3_full_unlock(tp);
9340 }
8862 9341
8863 return err; 9342 return err;
8864} 9343}
@@ -8902,7 +9381,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 9381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 9382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 9383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8906 ethtool_op_set_tx_ipv6_csum(dev, data); 9386 ethtool_op_set_tx_ipv6_csum(dev, data);
8907 else 9387 else
8908 ethtool_op_set_tx_csum(dev, data); 9388 ethtool_op_set_tx_csum(dev, data);
@@ -9423,7 +9903,8 @@ static int tg3_test_memory(struct tg3 *tp)
9423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 9903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 9904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 9905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 9906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9427 mem_tbl = mem_tbl_5755; 9908 mem_tbl = mem_tbl_5755;
9428 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 9909 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9429 mem_tbl = mem_tbl_5906; 9910 mem_tbl = mem_tbl_5906;
@@ -9630,7 +10111,8 @@ static int tg3_test_loopback(struct tg3 *tp)
9630 return TG3_LOOPBACK_FAILED; 10111 return TG3_LOOPBACK_FAILED;
9631 10112
9632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 10114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10115 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9634 int i; 10116 int i;
9635 u32 status; 10117 u32 status;
9636 10118
@@ -9658,14 +10140,16 @@ static int tg3_test_loopback(struct tg3 *tp)
9658 err |= TG3_MAC_LOOPBACK_FAILED; 10140 err |= TG3_MAC_LOOPBACK_FAILED;
9659 10141
9660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 10143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
9662 tw32(TG3_CPMU_CTRL, cpmuctrl); 10145 tw32(TG3_CPMU_CTRL, cpmuctrl);
9663 10146
9664 /* Release the mutex */ 10147 /* Release the mutex */
9665 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); 10148 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9666 } 10149 }
9667 10150
9668 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 10151 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10152 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9669 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 10153 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9670 err |= TG3_PHY_LOOPBACK_FAILED; 10154 err |= TG3_PHY_LOOPBACK_FAILED;
9671 } 10155 }
@@ -9692,9 +10176,10 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9692 data[1] = 1; 10176 data[1] = 1;
9693 } 10177 }
9694 if (etest->flags & ETH_TEST_FL_OFFLINE) { 10178 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9695 int err, irq_sync = 0; 10179 int err, err2 = 0, irq_sync = 0;
9696 10180
9697 if (netif_running(dev)) { 10181 if (netif_running(dev)) {
10182 tg3_phy_stop(tp);
9698 tg3_netif_stop(tp); 10183 tg3_netif_stop(tp);
9699 irq_sync = 1; 10184 irq_sync = 1;
9700 } 10185 }
@@ -9735,11 +10220,15 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9735 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10220 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9736 if (netif_running(dev)) { 10221 if (netif_running(dev)) {
9737 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 10222 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9738 if (!tg3_restart_hw(tp, 1)) 10223 err2 = tg3_restart_hw(tp, 1);
10224 if (!err2)
9739 tg3_netif_start(tp); 10225 tg3_netif_start(tp);
9740 } 10226 }
9741 10227
9742 tg3_full_unlock(tp); 10228 tg3_full_unlock(tp);
10229
10230 if (irq_sync && !err2)
10231 tg3_phy_start(tp);
9743 } 10232 }
9744 if (tp->link_config.phy_is_low_power) 10233 if (tp->link_config.phy_is_low_power)
9745 tg3_set_power_state(tp, PCI_D3hot); 10234 tg3_set_power_state(tp, PCI_D3hot);
@@ -9752,6 +10241,12 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9752 struct tg3 *tp = netdev_priv(dev); 10241 struct tg3 *tp = netdev_priv(dev);
9753 int err; 10242 int err;
9754 10243
10244 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10245 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10246 return -EAGAIN;
10247 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10248 }
10249
9755 switch(cmd) { 10250 switch(cmd) {
9756 case SIOCGMIIPHY: 10251 case SIOCGMIIPHY:
9757 data->phy_id = PHY_ADDR; 10252 data->phy_id = PHY_ADDR;
@@ -10294,7 +10789,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
10294 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 10789 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10295 tg3_get_5755_nvram_info(tp); 10790 tg3_get_5755_nvram_info(tp);
10296 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 10791 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) 10792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10298 tg3_get_5787_nvram_info(tp); 10794 tg3_get_5787_nvram_info(tp);
10299 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 10795 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10300 tg3_get_5761_nvram_info(tp); 10796 tg3_get_5761_nvram_info(tp);
@@ -10625,6 +11121,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10625 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && 11121 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10626 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) && 11122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10627 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) && 11123 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11124 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
10628 (tp->nvram_jedecnum == JEDEC_ST) && 11125 (tp->nvram_jedecnum == JEDEC_ST) &&
10629 (nvram_cmd & NVRAM_CMD_FIRST)) { 11126 (nvram_cmd & NVRAM_CMD_FIRST)) {
10630 11127
@@ -10807,7 +11304,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10807 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 11304 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10808 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 11305 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10809 u32 nic_cfg, led_cfg; 11306 u32 nic_cfg, led_cfg;
10810 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id; 11307 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
10811 int eeprom_phy_serdes = 0; 11308 int eeprom_phy_serdes = 0;
10812 11309
10813 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 11310 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -10821,6 +11318,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10821 (ver > 0) && (ver < 0x100)) 11318 (ver > 0) && (ver < 0x100))
10822 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); 11319 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10823 11320
11321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11322 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11323
10824 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == 11324 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10825 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) 11325 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10826 eeprom_phy_serdes = 1; 11326 eeprom_phy_serdes = 1;
@@ -10893,7 +11393,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10893 LED_CTRL_MODE_PHY_2); 11393 LED_CTRL_MODE_PHY_2);
10894 break; 11394 break;
10895 11395
10896 }; 11396 }
10897 11397
10898 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 11398 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && 11399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
@@ -10945,6 +11445,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10945 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) 11445 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10946 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 11446 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10947 } 11447 }
11448
11449 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11450 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11451 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11452 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11453 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11454 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
10948 } 11455 }
10949} 11456}
10950 11457
@@ -11003,6 +11510,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
11003 u32 hw_phy_id, hw_phy_id_masked; 11510 u32 hw_phy_id, hw_phy_id_masked;
11004 int err; 11511 int err;
11005 11512
11513 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11514 return tg3_phy_init(tp);
11515
11006 /* Reading the PHY ID register can conflict with ASF 11516 /* Reading the PHY ID register can conflict with ASF
11007 * firwmare access to the PHY hardware. 11517 * firwmare access to the PHY hardware.
11008 */ 11518 */
@@ -11525,6 +12035,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || 12039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11529 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12040 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11530 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 12041 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -11546,6 +12057,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11547 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11550 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 12062 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11551 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12063 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
@@ -11558,14 +12070,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11558 } 12070 }
11559 } 12071 }
11560 12072
11561 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 12073 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11562 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && 12074 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11563 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11564 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11565 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11566 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11567 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11568 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11569 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 12075 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11570 12076
11571 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); 12077 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
@@ -11754,7 +12260,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11754 } 12260 }
11755 12261
11756 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { 12263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
11758 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 12265 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11759 12266
11760 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || 12267 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
@@ -11847,7 +12354,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11847 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 12354 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11848 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 12355 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11849 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 12356 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11850 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) 12357 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12358 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
11851 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 12359 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11852 } 12360 }
11853 12361
@@ -11858,8 +12366,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11858 tp->phy_otp = TG3_OTP_DEFAULT; 12366 tp->phy_otp = TG3_OTP_DEFAULT;
11859 } 12367 }
11860 12368
11861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12369 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11863 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 12370 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11864 else 12371 else
11865 tp->mi_mode = MAC_MI_MODE_BASE; 12372 tp->mi_mode = MAC_MI_MODE_BASE;
@@ -11869,9 +12376,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11869 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 12376 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11870 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 12377 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11871 12378
11872 /* Initialize MAC MI mode, polling disabled. */ 12379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11873 tw32_f(MAC_MI_MODE, tp->mi_mode); 12380 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
11874 udelay(80); 12381
12382 err = tg3_mdio_init(tp);
12383 if (err)
12384 return err;
11875 12385
11876 /* Initialize data/descriptor byte/word swapping. */ 12386 /* Initialize data/descriptor byte/word swapping. */
11877 val = tr32(GRC_MODE); 12387 val = tr32(GRC_MODE);
@@ -11952,6 +12462,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11952 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", 12462 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11953 pci_name(tp->pdev), err); 12463 pci_name(tp->pdev), err);
11954 /* ... but do not return immediately ... */ 12464 /* ... but do not return immediately ... */
12465 tg3_mdio_fini(tp);
11955 } 12466 }
11956 12467
11957 tg3_read_partno(tp); 12468 tg3_read_partno(tp);
@@ -11999,6 +12510,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12003 tp->dev->hard_start_xmit = tg3_start_xmit; 12515 tp->dev->hard_start_xmit = tg3_start_xmit;
12004 else 12516 else
@@ -12201,7 +12713,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12201 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 12713 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12202 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 12714 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12203 break; 12715 break;
12204 }; 12716 }
12205 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 12717 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12206 switch (cacheline_size) { 12718 switch (cacheline_size) {
12207 case 16: 12719 case 16:
@@ -12218,7 +12730,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12218 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 12730 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12219 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 12731 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12220 break; 12732 break;
12221 }; 12733 }
12222 } else { 12734 } else {
12223 switch (cacheline_size) { 12735 switch (cacheline_size) {
12224 case 16: 12736 case 16:
@@ -12262,7 +12774,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12262 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 12774 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12263 DMA_RWCTRL_WRITE_BNDRY_1024); 12775 DMA_RWCTRL_WRITE_BNDRY_1024);
12264 break; 12776 break;
12265 }; 12777 }
12266 } 12778 }
12267 12779
12268out: 12780out:
@@ -12622,7 +13134,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
12622 case PHY_ID_BCM8002: return "8002/serdes"; 13134 case PHY_ID_BCM8002: return "8002/serdes";
12623 case 0: return "serdes"; 13135 case 0: return "serdes";
12624 default: return "unknown"; 13136 default: return "unknown";
12625 }; 13137 }
12626} 13138}
12627 13139
12628static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 13140static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
@@ -12923,7 +13435,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12923 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && 13435 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12924 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) 13436 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12925 dev->features |= NETIF_F_TSO6; 13437 dev->features |= NETIF_F_TSO6;
12926 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13439 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13440 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12927 dev->features |= NETIF_F_TSO_ECN; 13442 dev->features |= NETIF_F_TSO_ECN;
12928 } 13443 }
12929 13444
@@ -12989,7 +13504,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 13507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12993 dev->features |= NETIF_F_IPV6_CSUM; 13509 dev->features |= NETIF_F_IPV6_CSUM;
12994 13510
12995 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 13511 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
@@ -13071,6 +13587,12 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
13071 struct tg3 *tp = netdev_priv(dev); 13587 struct tg3 *tp = netdev_priv(dev);
13072 13588
13073 flush_scheduled_work(); 13589 flush_scheduled_work();
13590
13591 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13592 tg3_phy_fini(tp);
13593 tg3_mdio_fini(tp);
13594 }
13595
13074 unregister_netdev(dev); 13596 unregister_netdev(dev);
13075 if (tp->aperegs) { 13597 if (tp->aperegs) {
13076 iounmap(tp->aperegs); 13598 iounmap(tp->aperegs);
@@ -13103,6 +13625,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13103 return 0; 13625 return 0;
13104 13626
13105 flush_scheduled_work(); 13627 flush_scheduled_work();
13628 tg3_phy_stop(tp);
13106 tg3_netif_stop(tp); 13629 tg3_netif_stop(tp);
13107 13630
13108 del_timer_sync(&tp->timer); 13631 del_timer_sync(&tp->timer);
@@ -13120,10 +13643,13 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13120 13643
13121 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 13644 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13122 if (err) { 13645 if (err) {
13646 int err2;
13647
13123 tg3_full_lock(tp, 0); 13648 tg3_full_lock(tp, 0);
13124 13649
13125 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 13650 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13126 if (tg3_restart_hw(tp, 1)) 13651 err2 = tg3_restart_hw(tp, 1);
13652 if (err2)
13127 goto out; 13653 goto out;
13128 13654
13129 tp->timer.expires = jiffies + tp->timer_offset; 13655 tp->timer.expires = jiffies + tp->timer_offset;
@@ -13134,6 +13660,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13134 13660
13135out: 13661out:
13136 tg3_full_unlock(tp); 13662 tg3_full_unlock(tp);
13663
13664 if (!err2)
13665 tg3_phy_start(tp);
13137 } 13666 }
13138 13667
13139 return err; 13668 return err;
@@ -13171,6 +13700,9 @@ static int tg3_resume(struct pci_dev *pdev)
13171out: 13700out:
13172 tg3_full_unlock(tp); 13701 tg3_full_unlock(tp);
13173 13702
13703 if (!err)
13704 tg3_phy_start(tp);
13705
13174 return err; 13706 return err;
13175} 13707}
13176 13708
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0404f93baa29..df07842172b7 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -128,6 +128,7 @@
128#define ASIC_REV_USE_PROD_ID_REG 0x0f 128#define ASIC_REV_USE_PROD_ID_REG 0x0f
129#define ASIC_REV_5784 0x5784 129#define ASIC_REV_5784 0x5784
130#define ASIC_REV_5761 0x5761 130#define ASIC_REV_5761 0x5761
131#define ASIC_REV_5785 0x5785
131#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 132#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
132#define CHIPREV_5700_AX 0x70 133#define CHIPREV_5700_AX 0x70
133#define CHIPREV_5700_BX 0x71 134#define CHIPREV_5700_BX 0x71
@@ -528,7 +529,23 @@
528#define MAC_SERDES_CFG 0x00000590 529#define MAC_SERDES_CFG 0x00000590
529#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 530#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
530#define MAC_SERDES_STAT 0x00000594 531#define MAC_SERDES_STAT 0x00000594
531/* 0x598 --> 0x5b0 unused */ 532/* 0x598 --> 0x5a0 unused */
533#define MAC_PHYCFG1 0x000005a0
534#define MAC_PHYCFG1_RGMII_INT 0x00000001
535#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000
536#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000
537#define MAC_PHYCFG1_TXC_DRV 0x20000000
538#define MAC_PHYCFG2 0x000005a4
539#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
540#define MAC_EXT_RGMII_MODE 0x000005a8
541#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
542#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
543#define MAC_RGMII_MODE_TX_RESET 0x00000004
544#define MAC_RGMII_MODE_RX_INT_B 0x00000100
545#define MAC_RGMII_MODE_RX_QUALITY 0x00000200
546#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400
547#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800
548/* 0x5ac --> 0x5b0 unused */
532#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ 549#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
533#define SERDES_RX_SIG_DETECT 0x00000400 550#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 551#define SG_DIG_CTRL 0x000005b0
@@ -1109,6 +1126,7 @@
1109#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1126#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1110#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1127#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1111#define WDMAC_MODE_RX_ACCEL 0x00000400 1128#define WDMAC_MODE_RX_ACCEL 0x00000400
1129#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1112#define WDMAC_STATUS 0x00004c04 1130#define WDMAC_STATUS 0x00004c04
1113#define WDMAC_STATUS_TGTABORT 0x00000004 1131#define WDMAC_STATUS_TGTABORT 0x00000004
1114#define WDMAC_STATUS_MSTABORT 0x00000008 1132#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1713,6 +1731,12 @@
1713#define NIC_SRAM_DATA_CFG_3 0x00000d3c 1731#define NIC_SRAM_DATA_CFG_3 0x00000d3c
1714#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 1732#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
1715 1733
1734#define NIC_SRAM_DATA_CFG_4 0x00000d60
1735#define NIC_SRAM_GMII_MODE 0x00000002
1736#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004
1737#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
1738#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
1739
1716#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 1740#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
1717 1741
1718#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 1742#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2204,6 +2228,7 @@ struct tg3_link_config {
2204 u16 orig_speed; 2228 u16 orig_speed;
2205 u8 orig_duplex; 2229 u8 orig_duplex;
2206 u8 orig_autoneg; 2230 u8 orig_autoneg;
2231 u32 orig_advertising;
2207}; 2232};
2208 2233
2209struct tg3_bufmgr_config { 2234struct tg3_bufmgr_config {
@@ -2479,6 +2504,13 @@ struct tg3 {
2479#define TG3_FLG3_ENABLE_APE 0x00000002 2504#define TG3_FLG3_ENABLE_APE 0x00000002
2480#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004 2505#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
2481#define TG3_FLG3_5701_DMA_BUG 0x00000008 2506#define TG3_FLG3_5701_DMA_BUG 0x00000008
2507#define TG3_FLG3_USE_PHYLIB 0x00000010
2508#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2509#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2510#define TG3_FLG3_PHY_CONNECTED 0x00000080
2511#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2512#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2513#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2482 2514
2483 struct timer_list timer; 2515 struct timer_list timer;
2484 u16 timer_counter; 2516 u16 timer_counter;
@@ -2519,6 +2551,9 @@ struct tg3 {
2519 int msi_cap; 2551 int msi_cap;
2520 int pcix_cap; 2552 int pcix_cap;
2521 2553
2554 struct mii_bus mdio_bus;
2555 int mdio_irq[PHY_MAX_ADDR];
2556
2522 /* PHY info */ 2557 /* PHY info */
2523 u32 phy_id; 2558 u32 phy_id;
2524#define PHY_ID_MASK 0xfffffff0 2559#define PHY_ID_MASK 0xfffffff0
@@ -2546,6 +2581,9 @@ struct tg3 {
2546#define PHY_REV_BCM5401_B2 0x3 2581#define PHY_REV_BCM5401_B2 0x3
2547#define PHY_REV_BCM5401_C0 0x6 2582#define PHY_REV_BCM5401_C0 0x6
2548#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2583#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2584#define TG3_PHY_ID_BCM50610 0x143bd60
2585#define TG3_PHY_ID_BCMAC131 0x143bc70
2586
2549 2587
2550 u32 led_ctrl; 2588 u32 led_ctrl;
2551 u32 phy_otp; 2589 u32 phy_otp;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 0166407d7061..85246ed7cb9c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -13,8 +13,6 @@
13 * This software may be used and distributed according to the terms 13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference. 14 * of the GNU General Public License, incorporated herein by reference.
15 * 15 *
16 ** This file is best viewed/edited with columns>=132.
17 *
18 ** Useful (if not required) reading: 16 ** Useful (if not required) reading:
19 * 17 *
20 * Texas Instruments, ThunderLAN Programmer's Guide, 18 * Texas Instruments, ThunderLAN Programmer's Guide,
@@ -218,9 +216,7 @@ static int bbuf;
218module_param(bbuf, int, 0); 216module_param(bbuf, int, 0);
219MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)"); 217MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
220 218
221static u8 *TLanPadBuffer; 219static const char TLanSignature[] = "TLAN";
222static dma_addr_t TLanPadBufferDMA;
223static char TLanSignature[] = "TLAN";
224static const char tlan_banner[] = "ThunderLAN driver v1.15\n"; 220static const char tlan_banner[] = "ThunderLAN driver v1.15\n";
225static int tlan_have_pci; 221static int tlan_have_pci;
226static int tlan_have_eisa; 222static int tlan_have_eisa;
@@ -238,9 +234,11 @@ static struct board {
238 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 234 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
239 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 235 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
240 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 236 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 237 { "Compaq NetFlex-3/P",
238 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
242 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 239 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
243 { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 240 { "Compaq Netelligent Integrated 10/100 TX UTP",
241 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 242 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 243 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 244 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
@@ -248,8 +246,9 @@ static struct board {
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 246 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 247 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 248 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 249 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 250 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
251 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
253 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 252 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
254}; 253};
255 254
@@ -294,12 +293,12 @@ static int TLan_Close( struct net_device *);
294static struct net_device_stats *TLan_GetStats( struct net_device *); 293static struct net_device_stats *TLan_GetStats( struct net_device *);
295static void TLan_SetMulticastList( struct net_device *); 294static void TLan_SetMulticastList( struct net_device *);
296static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 295static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
297static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 296static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
297 int irq, int rev, const struct pci_device_id *ent);
298static void TLan_tx_timeout( struct net_device *dev); 298static void TLan_tx_timeout( struct net_device *dev);
299static void TLan_tx_timeout_work(struct work_struct *work); 299static void TLan_tx_timeout_work(struct work_struct *work);
300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
301 301
302static u32 TLan_HandleInvalid( struct net_device *, u16 );
303static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 302static u32 TLan_HandleTxEOF( struct net_device *, u16 );
304static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 303static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
305static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 304static u32 TLan_HandleRxEOF( struct net_device *, u16 );
@@ -348,29 +347,27 @@ static void TLan_EeReceiveByte( u16, u8 *, int );
348static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 347static int TLan_EeReadByte( struct net_device *, u8, u8 * );
349 348
350 349
351static void 350static inline void
352TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 351TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
353{ 352{
354 unsigned long addr = (unsigned long)skb; 353 unsigned long addr = (unsigned long)skb;
355 tag->buffer[9].address = (u32)addr; 354 tag->buffer[9].address = addr;
356 addr >>= 31; /* >>= 32 is undefined for 32bit arch, stupid C */ 355 tag->buffer[8].address = upper_32_bits(addr);
357 addr >>= 1;
358 tag->buffer[8].address = (u32)addr;
359} 356}
360 357
361static struct sk_buff * 358static inline struct sk_buff *
362TLan_GetSKB( struct tlan_list_tag *tag) 359TLan_GetSKB( const struct tlan_list_tag *tag)
363{ 360{
364 unsigned long addr = tag->buffer[8].address; 361 unsigned long addr;
365 addr <<= 31; 362
366 addr <<= 1; 363 addr = tag->buffer[8].address;
367 addr |= tag->buffer[9].address; 364 addr |= (tag->buffer[9].address << 16) << 16;
368 return (struct sk_buff *) addr; 365 return (struct sk_buff *) addr;
369} 366}
370 367
371 368
372static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 369static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
373 TLan_HandleInvalid, 370 NULL,
374 TLan_HandleTxEOF, 371 TLan_HandleTxEOF,
375 TLan_HandleStatOverflow, 372 TLan_HandleStatOverflow,
376 TLan_HandleRxEOF, 373 TLan_HandleRxEOF,
@@ -444,7 +441,9 @@ static void __devexit tlan_remove_one( struct pci_dev *pdev)
444 unregister_netdev( dev ); 441 unregister_netdev( dev );
445 442
446 if ( priv->dmaStorage ) { 443 if ( priv->dmaStorage ) {
447 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 444 pci_free_consistent(priv->pciDev,
445 priv->dmaSize, priv->dmaStorage,
446 priv->dmaStorageDMA );
448 } 447 }
449 448
450#ifdef CONFIG_PCI 449#ifdef CONFIG_PCI
@@ -469,16 +468,6 @@ static int __init tlan_probe(void)
469 468
470 printk(KERN_INFO "%s", tlan_banner); 469 printk(KERN_INFO "%s", tlan_banner);
471 470
472 TLanPadBuffer = (u8 *) pci_alloc_consistent(NULL, TLAN_MIN_FRAME_SIZE, &TLanPadBufferDMA);
473
474 if (TLanPadBuffer == NULL) {
475 printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
476 rc = -ENOMEM;
477 goto err_out;
478 }
479
480 memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
481
482 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 471 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
483 472
484 /* Use new style PCI probing. Now the kernel will 473 /* Use new style PCI probing. Now the kernel will
@@ -506,8 +495,6 @@ static int __init tlan_probe(void)
506err_out_pci_unreg: 495err_out_pci_unreg:
507 pci_unregister_driver(&tlan_driver); 496 pci_unregister_driver(&tlan_driver);
508err_out_pci_free: 497err_out_pci_free:
509 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
510err_out:
511 return rc; 498 return rc;
512} 499}
513 500
@@ -539,7 +526,8 @@ static int __devinit tlan_init_one( struct pci_dev *pdev,
539 **************************************************************/ 526 **************************************************************/
540 527
541static int __devinit TLan_probe1(struct pci_dev *pdev, 528static int __devinit TLan_probe1(struct pci_dev *pdev,
542 long ioaddr, int irq, int rev, const struct pci_device_id *ent ) 529 long ioaddr, int irq, int rev,
530 const struct pci_device_id *ent )
543{ 531{
544 532
545 struct net_device *dev; 533 struct net_device *dev;
@@ -625,8 +613,10 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
625 /* Kernel parameters */ 613 /* Kernel parameters */
626 if (dev->mem_start) { 614 if (dev->mem_start) {
627 priv->aui = dev->mem_start & 0x01; 615 priv->aui = dev->mem_start & 0x01;
628 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 : (dev->mem_start & 0x06) >> 1; 616 priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
629 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3; 617 : (dev->mem_start & 0x06) >> 1;
618 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
619 : (dev->mem_start & 0x18) >> 3;
630 620
631 if (priv->speed == 0x1) { 621 if (priv->speed == 0x1) {
632 priv->speed = TLAN_SPEED_10; 622 priv->speed = TLAN_SPEED_10;
@@ -706,7 +696,8 @@ static void TLan_Eisa_Cleanup(void)
706 dev = TLan_Eisa_Devices; 696 dev = TLan_Eisa_Devices;
707 priv = netdev_priv(dev); 697 priv = netdev_priv(dev);
708 if (priv->dmaStorage) { 698 if (priv->dmaStorage) {
709 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 699 pci_free_consistent(priv->pciDev, priv->dmaSize,
700 priv->dmaStorage, priv->dmaStorageDMA );
710 } 701 }
711 release_region( dev->base_addr, 0x10); 702 release_region( dev->base_addr, 0x10);
712 unregister_netdev( dev ); 703 unregister_netdev( dev );
@@ -724,8 +715,6 @@ static void __exit tlan_exit(void)
724 if (tlan_have_eisa) 715 if (tlan_have_eisa)
725 TLan_Eisa_Cleanup(); 716 TLan_Eisa_Cleanup();
726 717
727 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
728
729} 718}
730 719
731 720
@@ -763,8 +752,10 @@ static void __init TLan_EisaProbe (void)
763 /* Loop through all slots of the EISA bus */ 752 /* Loop through all slots of the EISA bus */
764 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 753 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
765 754
766 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 755 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
767 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 756 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
768 759
769 760
770 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 761 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
@@ -874,7 +865,8 @@ static int TLan_Init( struct net_device *dev )
874 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 865 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
875 * ( sizeof(TLanList) ); 866 * ( sizeof(TLanList) );
876 } 867 }
877 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, dma_size, &priv->dmaStorageDMA); 868 priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
869 dma_size, &priv->dmaStorageDMA);
878 priv->dmaSize = dma_size; 870 priv->dmaSize = dma_size;
879 871
880 if ( priv->dmaStorage == NULL ) { 872 if ( priv->dmaStorage == NULL ) {
@@ -883,16 +875,19 @@ static int TLan_Init( struct net_device *dev )
883 return -ENOMEM; 875 return -ENOMEM;
884 } 876 }
885 memset( priv->dmaStorage, 0, dma_size ); 877 memset( priv->dmaStorage, 0, dma_size );
886 priv->rxList = (TLanList *) 878 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
887 ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 ); 879 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
888 priv->rxListDMA = ( ( ( (u32) priv->dmaStorageDMA ) + 7 ) & 0xFFFFFFF8 );
889 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 880 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
890 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 881 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
882
891 if ( bbuf ) { 883 if ( bbuf ) {
892 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS ); 884 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
893 priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS; 885 priv->rxBufferDMA =priv->txListDMA
894 priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 886 + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
895 priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 887 priv->txBuffer = priv->rxBuffer
888 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
889 priv->txBufferDMA = priv->rxBufferDMA
890 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
896 } 891 }
897 892
898 err = 0; 893 err = 0;
@@ -952,10 +947,12 @@ static int TLan_Open( struct net_device *dev )
952 int err; 947 int err;
953 948
954 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 949 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
955 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, TLanSignature, dev ); 950 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
951 dev->name, dev );
956 952
957 if ( err ) { 953 if ( err ) {
958 printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq ); 954 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
955 dev->name, dev->irq );
959 return err; 956 return err;
960 } 957 }
961 958
@@ -969,7 +966,8 @@ static int TLan_Open( struct net_device *dev )
969 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 966 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
970 TLan_ResetAdapter( dev ); 967 TLan_ResetAdapter( dev );
971 968
972 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev ); 969 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
970 dev->name, priv->tlanRev );
973 971
974 return 0; 972 return 0;
975 973
@@ -1007,14 +1005,16 @@ static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1007 1005
1008 1006
1009 case SIOCGMIIREG: /* Read MII PHY register. */ 1007 case SIOCGMIIREG: /* Read MII PHY register. */
1010 TLan_MiiReadReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, &data->val_out); 1008 TLan_MiiReadReg(dev, data->phy_id & 0x1f,
1009 data->reg_num & 0x1f, &data->val_out);
1011 return 0; 1010 return 0;
1012 1011
1013 1012
1014 case SIOCSMIIREG: /* Write MII PHY register. */ 1013 case SIOCSMIIREG: /* Write MII PHY register. */
1015 if (!capable(CAP_NET_ADMIN)) 1014 if (!capable(CAP_NET_ADMIN))
1016 return -EPERM; 1015 return -EPERM;
1017 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); 1016 TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
1017 data->reg_num & 0x1f, data->val_in);
1018 return 0; 1018 return 0;
1019 default: 1019 default:
1020 return -EOPNOTSUPP; 1020 return -EOPNOTSUPP;
@@ -1096,20 +1096,25 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1096 TLanList *tail_list; 1096 TLanList *tail_list;
1097 dma_addr_t tail_list_phys; 1097 dma_addr_t tail_list_phys;
1098 u8 *tail_buffer; 1098 u8 *tail_buffer;
1099 int pad;
1100 unsigned long flags; 1099 unsigned long flags;
1101 1100
1102 if ( ! priv->phyOnline ) { 1101 if ( ! priv->phyOnline ) {
1103 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", dev->name ); 1102 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1103 dev->name );
1104 dev_kfree_skb_any(skb); 1104 dev_kfree_skb_any(skb);
1105 return 0; 1105 return 0;
1106 } 1106 }
1107 1107
1108 if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
1109 return 0;
1110
1108 tail_list = priv->txList + priv->txTail; 1111 tail_list = priv->txList + priv->txTail;
1109 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1112 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
1110 1113
1111 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1114 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
1112 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail ); 1115 TLAN_DBG( TLAN_DEBUG_TX,
1116 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1117 dev->name, priv->txHead, priv->txTail );
1113 netif_stop_queue(dev); 1118 netif_stop_queue(dev);
1114 priv->txBusyCount++; 1119 priv->txBusyCount++;
1115 return 1; 1120 return 1;
@@ -1121,37 +1126,34 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1121 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1126 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
1122 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1127 skb_copy_from_linear_data(skb, tail_buffer, skb->len);
1123 } else { 1128 } else {
1124 tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); 1129 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1130 skb->data, skb->len,
1131 PCI_DMA_TODEVICE);
1125 TLan_StoreSKB(tail_list, skb); 1132 TLan_StoreSKB(tail_list, skb);
1126 } 1133 }
1127 1134
1128 pad = TLAN_MIN_FRAME_SIZE - skb->len; 1135 tail_list->frameSize = (u16) skb->len;
1129 1136 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1130 if ( pad > 0 ) { 1137 tail_list->buffer[1].count = 0;
1131 tail_list->frameSize = (u16) skb->len + pad; 1138 tail_list->buffer[1].address = 0;
1132 tail_list->buffer[0].count = (u32) skb->len;
1133 tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
1134 tail_list->buffer[1].address = TLanPadBufferDMA;
1135 } else {
1136 tail_list->frameSize = (u16) skb->len;
1137 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
1138 tail_list->buffer[1].count = 0;
1139 tail_list->buffer[1].address = 0;
1140 }
1141 1139
1142 spin_lock_irqsave(&priv->lock, flags); 1140 spin_lock_irqsave(&priv->lock, flags);
1143 tail_list->cStat = TLAN_CSTAT_READY; 1141 tail_list->cStat = TLAN_CSTAT_READY;
1144 if ( ! priv->txInProgress ) { 1142 if ( ! priv->txInProgress ) {
1145 priv->txInProgress = 1; 1143 priv->txInProgress = 1;
1146 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1144 TLAN_DBG( TLAN_DEBUG_TX,
1145 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
1147 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1146 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
1148 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1147 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
1149 } else { 1148 } else {
1150 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail ); 1149 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
1150 priv->txTail );
1151 if ( priv->txTail == 0 ) { 1151 if ( priv->txTail == 0 ) {
1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; 1152 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
1153 = tail_list_phys;
1153 } else { 1154 } else {
1154 ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; 1155 ( priv->txList + ( priv->txTail - 1 ) )->forward
1156 = tail_list_phys;
1155 } 1157 }
1156 } 1158 }
1157 spin_unlock_irqrestore(&priv->lock, flags); 1159 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1191,33 +1193,31 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1191 1193
1192static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1194static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
1193{ 1195{
1194 u32 ack; 1196 struct net_device *dev = dev_id;
1195 struct net_device *dev; 1197 TLanPrivateInfo *priv = netdev_priv(dev);
1196 u32 host_cmd;
1197 u16 host_int; 1198 u16 host_int;
1198 int type; 1199 u16 type;
1199 TLanPrivateInfo *priv;
1200
1201 dev = dev_id;
1202 priv = netdev_priv(dev);
1203 1200
1204 spin_lock(&priv->lock); 1201 spin_lock(&priv->lock);
1205 1202
1206 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1203 host_int = inw( dev->base_addr + TLAN_HOST_INT );
1207 outw( host_int, dev->base_addr + TLAN_HOST_INT );
1208
1209 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1204 type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
1205 if ( type ) {
1206 u32 ack;
1207 u32 host_cmd;
1210 1208
1211 ack = TLanIntVector[type]( dev, host_int ); 1209 outw( host_int, dev->base_addr + TLAN_HOST_INT );
1210 ack = TLanIntVector[type]( dev, host_int );
1212 1211
1213 if ( ack ) { 1212 if ( ack ) {
1214 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1213 host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
1215 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1214 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
1215 }
1216 } 1216 }
1217 1217
1218 spin_unlock(&priv->lock); 1218 spin_unlock(&priv->lock);
1219 1219
1220 return IRQ_HANDLED; 1220 return IRQ_RETVAL(type);
1221} /* TLan_HandleInterrupts */ 1221} /* TLan_HandleInterrupts */
1222 1222
1223 1223
@@ -1286,8 +1286,10 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1286 /* Should only read stats if open ? */ 1286 /* Should only read stats if open ? */
1287 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1287 TLan_ReadAndClearStats( dev, TLAN_RECORD );
1288 1288
1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount ); 1289 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1290 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount ); 1290 priv->rxEocCount );
1291 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1292 priv->txBusyCount );
1291 if ( debug & TLAN_DEBUG_GNRL ) { 1293 if ( debug & TLAN_DEBUG_GNRL ) {
1292 TLan_PrintDio( dev->base_addr ); 1294 TLan_PrintDio( dev->base_addr );
1293 TLan_PhyPrint( dev ); 1295 TLan_PhyPrint( dev );
@@ -1299,7 +1301,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1299 TLan_PrintList( priv->txList + i, "TX", i ); 1301 TLan_PrintList( priv->txList + i, "TX", i );
1300 } 1302 }
1301 1303
1302 return ( &( (TLanPrivateInfo *) netdev_priv(dev) )->stats ); 1304 return &dev->stats;
1303 1305
1304} /* TLan_GetStats */ 1306} /* TLan_GetStats */
1305 1307
@@ -1337,10 +1339,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
1337 1339
1338 if ( dev->flags & IFF_PROMISC ) { 1340 if ( dev->flags & IFF_PROMISC ) {
1339 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1341 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1340 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1342 TLan_DioWrite8( dev->base_addr,
1343 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
1341 } else { 1344 } else {
1342 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1345 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
1343 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1346 TLan_DioWrite8( dev->base_addr,
1347 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
1344 if ( dev->flags & IFF_ALLMULTI ) { 1348 if ( dev->flags & IFF_ALLMULTI ) {
1345 for ( i = 0; i < 3; i++ ) 1349 for ( i = 0; i < 3; i++ )
1346 TLan_SetMac( dev, i + 1, NULL ); 1350 TLan_SetMac( dev, i + 1, NULL );
@@ -1349,7 +1353,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
1349 } else { 1353 } else {
1350 for ( i = 0; i < dev->mc_count; i++ ) { 1354 for ( i = 0; i < dev->mc_count; i++ ) {
1351 if ( i < 3 ) { 1355 if ( i < 3 ) {
1352 TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr ); 1356 TLan_SetMac( dev, i + 1,
1357 (char *) &dmi->dmi_addr );
1353 } else { 1358 } else {
1354 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1359 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
1355 if ( offset < 32 ) 1360 if ( offset < 32 )
@@ -1383,31 +1388,6 @@ static void TLan_SetMulticastList( struct net_device *dev )
1383*****************************************************************************/ 1388*****************************************************************************/
1384 1389
1385 1390
1386 /***************************************************************
1387 * TLan_HandleInvalid
1388 *
1389 * Returns:
1390 * 0
1391 * Parms:
1392 * dev Device assigned the IRQ that was
1393 * raised.
1394 * host_int The contents of the HOST_INT
1395 * port.
1396 *
1397 * This function handles invalid interrupts. This should
1398 * never happen unless some other adapter is trying to use
1399 * the IRQ line assigned to the device.
1400 *
1401 **************************************************************/
1402
1403static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
1404{
1405 /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
1406 return 0;
1407
1408} /* TLan_HandleInvalid */
1409
1410
1411 1391
1412 1392
1413 /*************************************************************** 1393 /***************************************************************
@@ -1441,14 +1421,16 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1441 u32 ack = 0; 1421 u32 ack = 0;
1442 u16 tmpCStat; 1422 u16 tmpCStat;
1443 1423
1444 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1424 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1425 priv->txHead, priv->txTail );
1445 head_list = priv->txList + priv->txHead; 1426 head_list = priv->txList + priv->txHead;
1446 1427
1447 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1428 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1448 ack++; 1429 ack++;
1449 if ( ! bbuf ) { 1430 if ( ! bbuf ) {
1450 struct sk_buff *skb = TLan_GetSKB(head_list); 1431 struct sk_buff *skb = TLan_GetSKB(head_list);
1451 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 1432 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1433 skb->len, PCI_DMA_TODEVICE);
1452 dev_kfree_skb_any(skb); 1434 dev_kfree_skb_any(skb);
1453 head_list->buffer[8].address = 0; 1435 head_list->buffer[8].address = 0;
1454 head_list->buffer[9].address = 0; 1436 head_list->buffer[9].address = 0;
@@ -1457,7 +1439,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1457 if ( tmpCStat & TLAN_CSTAT_EOC ) 1439 if ( tmpCStat & TLAN_CSTAT_EOC )
1458 eoc = 1; 1440 eoc = 1;
1459 1441
1460 priv->stats.tx_bytes += head_list->frameSize; 1442 dev->stats.tx_bytes += head_list->frameSize;
1461 1443
1462 head_list->cStat = TLAN_CSTAT_UNUSED; 1444 head_list->cStat = TLAN_CSTAT_UNUSED;
1463 netif_start_queue(dev); 1445 netif_start_queue(dev);
@@ -1469,7 +1451,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1469 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1451 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
1470 1452
1471 if ( eoc ) { 1453 if ( eoc ) {
1472 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1454 TLAN_DBG( TLAN_DEBUG_TX,
1455 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
1456 priv->txHead, priv->txTail );
1473 head_list = priv->txList + priv->txHead; 1457 head_list = priv->txList + priv->txHead;
1474 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1458 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1475 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1459 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1481,7 +1465,8 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1481 } 1465 }
1482 1466
1483 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1467 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1484 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1468 TLan_DioWrite8( dev->base_addr,
1469 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1485 if ( priv->timer.function == NULL ) { 1470 if ( priv->timer.function == NULL ) {
1486 priv->timer.function = &TLan_Timer; 1471 priv->timer.function = &TLan_Timer;
1487 priv->timer.data = (unsigned long) dev; 1472 priv->timer.data = (unsigned long) dev;
@@ -1563,66 +1548,65 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1563 TLanList *head_list; 1548 TLanList *head_list;
1564 struct sk_buff *skb; 1549 struct sk_buff *skb;
1565 TLanList *tail_list; 1550 TLanList *tail_list;
1566 void *t;
1567 u32 frameSize;
1568 u16 tmpCStat; 1551 u16 tmpCStat;
1569 dma_addr_t head_list_phys; 1552 dma_addr_t head_list_phys;
1570 1553
1571 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1554 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
1555 priv->rxHead, priv->rxTail );
1572 head_list = priv->rxList + priv->rxHead; 1556 head_list = priv->rxList + priv->rxHead;
1573 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1557 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1574 1558
1575 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1559 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1576 frameSize = head_list->frameSize; 1560 dma_addr_t frameDma = head_list->buffer[0].address;
1561 u32 frameSize = head_list->frameSize;
1577 ack++; 1562 ack++;
1578 if (tmpCStat & TLAN_CSTAT_EOC) 1563 if (tmpCStat & TLAN_CSTAT_EOC)
1579 eoc = 1; 1564 eoc = 1;
1580 1565
1581 if (bbuf) { 1566 if (bbuf) {
1582 skb = dev_alloc_skb(frameSize + 7); 1567 skb = netdev_alloc_skb(dev, frameSize + 7);
1583 if (skb == NULL) 1568 if ( !skb )
1584 printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n"); 1569 goto drop_and_reuse;
1585 else { 1570
1586 head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1571 head_buffer = priv->rxBuffer
1587 skb_reserve(skb, 2); 1572 + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
1588 t = (void *) skb_put(skb, frameSize); 1573 skb_reserve(skb, 2);
1589 1574 pci_dma_sync_single_for_cpu(priv->pciDev,
1590 priv->stats.rx_bytes += head_list->frameSize; 1575 frameDma, frameSize,
1591 1576 PCI_DMA_FROMDEVICE);
1592 memcpy( t, head_buffer, frameSize ); 1577 skb_copy_from_linear_data(skb, head_buffer, frameSize);
1593 skb->protocol = eth_type_trans( skb, dev ); 1578 skb_put(skb, frameSize);
1594 netif_rx( skb ); 1579 dev->stats.rx_bytes += frameSize;
1595 } 1580
1581 skb->protocol = eth_type_trans( skb, dev );
1582 netif_rx( skb );
1596 } else { 1583 } else {
1597 struct sk_buff *new_skb; 1584 struct sk_buff *new_skb;
1598 1585
1599 /* 1586 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
1600 * I changed the algorithm here. What we now do 1587 if ( !new_skb )
1601 * is allocate the new frame. If this fails we 1588 goto drop_and_reuse;
1602 * simply recycle the frame.
1603 */
1604 1589
1605 new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 1590 skb = TLan_GetSKB(head_list);
1591 pci_unmap_single(priv->pciDev, frameDma,
1592 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1593 skb_put( skb, frameSize );
1606 1594
1607 if ( new_skb != NULL ) { 1595 dev->stats.rx_bytes += frameSize;
1608 skb = TLan_GetSKB(head_list);
1609 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1610 skb_trim( skb, frameSize );
1611 1596
1612 priv->stats.rx_bytes += frameSize; 1597 skb->protocol = eth_type_trans( skb, dev );
1598 netif_rx( skb );
1613 1599
1614 skb->protocol = eth_type_trans( skb, dev ); 1600 skb_reserve( new_skb, NET_IP_ALIGN );
1615 netif_rx( skb ); 1601 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1602 new_skb->data,
1603 TLAN_MAX_FRAME_SIZE,
1604 PCI_DMA_FROMDEVICE);
1616 1605
1617 skb_reserve( new_skb, 2 ); 1606 TLan_StoreSKB(head_list, new_skb);
1618 t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
1619 head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1620 head_list->buffer[8].address = (u32) t;
1621 TLan_StoreSKB(head_list, new_skb);
1622 } else
1623 printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
1624 }
1625 1607
1608 }
1609drop_and_reuse:
1626 head_list->forward = 0; 1610 head_list->forward = 0;
1627 head_list->cStat = 0; 1611 head_list->cStat = 0;
1628 tail_list = priv->rxList + priv->rxTail; 1612 tail_list = priv->rxList + priv->rxTail;
@@ -1638,10 +1622,10 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1638 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1622 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
1639 1623
1640 1624
1641
1642
1643 if ( eoc ) { 1625 if ( eoc ) {
1644 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1626 TLAN_DBG( TLAN_DEBUG_RX,
1627 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
1628 priv->rxHead, priv->rxTail );
1645 head_list = priv->rxList + priv->rxHead; 1629 head_list = priv->rxList + priv->rxHead;
1646 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1630 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1647 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1631 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
@@ -1650,7 +1634,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1650 } 1634 }
1651 1635
1652 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1636 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
1653 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1637 TLan_DioWrite8( dev->base_addr,
1638 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1654 if ( priv->timer.function == NULL ) { 1639 if ( priv->timer.function == NULL ) {
1655 priv->timer.function = &TLan_Timer; 1640 priv->timer.function = &TLan_Timer;
1656 priv->timer.data = (unsigned long) dev; 1641 priv->timer.data = (unsigned long) dev;
@@ -1728,7 +1713,9 @@ static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
1728 1713
1729 host_int = 0; 1714 host_int = 0;
1730 if ( priv->tlanRev < 0x30 ) { 1715 if ( priv->tlanRev < 0x30 ) {
1731 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail ); 1716 TLAN_DBG( TLAN_DEBUG_TX,
1717 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1718 priv->txHead, priv->txTail );
1732 head_list = priv->txList + priv->txHead; 1719 head_list = priv->txList + priv->txHead;
1733 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1720 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
1734 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1721 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
@@ -1796,15 +1783,18 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1796 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1783 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
1797 if ( net_sts ) { 1784 if ( net_sts ) {
1798 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1785 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
1799 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", dev->name, (unsigned) net_sts ); 1786 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1787 dev->name, (unsigned) net_sts );
1800 } 1788 }
1801 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1789 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
1802 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1790 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
1803 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1791 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
1804 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1792 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
1793 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1805 tlphy_ctl |= TLAN_TC_SWAPOL; 1794 tlphy_ctl |= TLAN_TC_SWAPOL;
1806 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1795 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1807 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1796 } else if ( ( tlphy_sts & TLAN_TS_POLOK )
1797 && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
1808 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1798 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1809 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1799 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
1810 } 1800 }
@@ -1849,7 +1839,9 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1849 u32 ack = 1; 1839 u32 ack = 1;
1850 1840
1851 if ( priv->tlanRev < 0x30 ) { 1841 if ( priv->tlanRev < 0x30 ) {
1852 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail ); 1842 TLAN_DBG( TLAN_DEBUG_RX,
1843 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
1844 priv->rxHead, priv->rxTail );
1853 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1845 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
1854 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1846 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
1855 ack |= TLAN_HC_GO | TLAN_HC_RT; 1847 ack |= TLAN_HC_GO | TLAN_HC_RT;
@@ -1940,10 +1932,12 @@ static void TLan_Timer( unsigned long data )
1940 if ( priv->timer.function == NULL ) { 1932 if ( priv->timer.function == NULL ) {
1941 elapsed = jiffies - priv->timerSetAt; 1933 elapsed = jiffies - priv->timerSetAt;
1942 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1934 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
1943 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 1935 TLan_DioWrite8( dev->base_addr,
1936 TLAN_LED_REG, TLAN_LED_LINK );
1944 } else { 1937 } else {
1945 priv->timer.function = &TLan_Timer; 1938 priv->timer.function = &TLan_Timer;
1946 priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY; 1939 priv->timer.expires = priv->timerSetAt
1940 + TLAN_TIMER_ACT_DELAY;
1947 spin_unlock_irqrestore(&priv->lock, flags); 1941 spin_unlock_irqrestore(&priv->lock, flags);
1948 add_timer( &priv->timer ); 1942 add_timer( &priv->timer );
1949 break; 1943 break;
@@ -1998,7 +1992,8 @@ static void TLan_ResetLists( struct net_device *dev )
1998 list = priv->txList + i; 1992 list = priv->txList + i;
1999 list->cStat = TLAN_CSTAT_UNUSED; 1993 list->cStat = TLAN_CSTAT_UNUSED;
2000 if ( bbuf ) { 1994 if ( bbuf ) {
2001 list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 1995 list->buffer[0].address = priv->txBufferDMA
1996 + ( i * TLAN_MAX_FRAME_SIZE );
2002 } else { 1997 } else {
2003 list->buffer[0].address = 0; 1998 list->buffer[0].address = 0;
2004 } 1999 }
@@ -2017,28 +2012,32 @@ static void TLan_ResetLists( struct net_device *dev )
2017 list->frameSize = TLAN_MAX_FRAME_SIZE; 2012 list->frameSize = TLAN_MAX_FRAME_SIZE;
2018 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 2013 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
2019 if ( bbuf ) { 2014 if ( bbuf ) {
2020 list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 2015 list->buffer[0].address = priv->rxBufferDMA
2016 + ( i * TLAN_MAX_FRAME_SIZE );
2021 } else { 2017 } else {
2022 skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); 2018 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
2023 if ( skb == NULL ) { 2019 if ( !skb ) {
2024 printk( "TLAN: Couldn't allocate memory for received data.\n" ); 2020 pr_err("TLAN: out of memory for received data.\n" );
2025 /* If this ever happened it would be a problem */ 2021 break;
2026 } else {
2027 skb->dev = dev;
2028 skb_reserve( skb, 2 );
2029 t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
2030 } 2022 }
2031 list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2023
2032 list->buffer[8].address = (u32) t; 2024 skb_reserve( skb, NET_IP_ALIGN );
2025 list->buffer[0].address = pci_map_single(priv->pciDev, t,
2026 TLAN_MAX_FRAME_SIZE,
2027 PCI_DMA_FROMDEVICE);
2033 TLan_StoreSKB(list, skb); 2028 TLan_StoreSKB(list, skb);
2034 } 2029 }
2035 list->buffer[1].count = 0; 2030 list->buffer[1].count = 0;
2036 list->buffer[1].address = 0; 2031 list->buffer[1].address = 0;
2037 if ( i < TLAN_NUM_RX_LISTS - 1 ) 2032 list->forward = list_phys + sizeof(TLanList);
2038 list->forward = list_phys + sizeof(TLanList); 2033 }
2039 else 2034
2040 list->forward = 0; 2035 /* in case ran out of memory early, clear bits */
2036 while (i < TLAN_NUM_RX_LISTS) {
2037 TLan_StoreSKB(priv->rxList + i, NULL);
2038 ++i;
2041 } 2039 }
2040 list->forward = 0;
2042 2041
2043} /* TLan_ResetLists */ 2042} /* TLan_ResetLists */
2044 2043
@@ -2055,7 +2054,9 @@ static void TLan_FreeLists( struct net_device *dev )
2055 list = priv->txList + i; 2054 list = priv->txList + i;
2056 skb = TLan_GetSKB(list); 2055 skb = TLan_GetSKB(list);
2057 if ( skb ) { 2056 if ( skb ) {
2058 pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 2057 pci_unmap_single(priv->pciDev,
2058 list->buffer[0].address, skb->len,
2059 PCI_DMA_TODEVICE);
2059 dev_kfree_skb_any( skb ); 2060 dev_kfree_skb_any( skb );
2060 list->buffer[8].address = 0; 2061 list->buffer[8].address = 0;
2061 list->buffer[9].address = 0; 2062 list->buffer[9].address = 0;
@@ -2066,7 +2067,10 @@ static void TLan_FreeLists( struct net_device *dev )
2066 list = priv->rxList + i; 2067 list = priv->rxList + i;
2067 skb = TLan_GetSKB(list); 2068 skb = TLan_GetSKB(list);
2068 if ( skb ) { 2069 if ( skb ) {
2069 pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2070 pci_unmap_single(priv->pciDev,
2071 list->buffer[0].address,
2072 TLAN_MAX_FRAME_SIZE,
2073 PCI_DMA_FROMDEVICE);
2070 dev_kfree_skb_any( skb ); 2074 dev_kfree_skb_any( skb );
2071 list->buffer[8].address = 0; 2075 list->buffer[8].address = 0;
2072 list->buffer[9].address = 0; 2076 list->buffer[9].address = 0;
@@ -2097,7 +2101,8 @@ static void TLan_PrintDio( u16 io_base )
2097 u32 data0, data1; 2101 u32 data0, data1;
2098 int i; 2102 int i;
2099 2103
2100 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base ); 2104 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
2105 io_base );
2101 printk( "TLAN: Off. +0 +4\n" ); 2106 printk( "TLAN: Off. +0 +4\n" );
2102 for ( i = 0; i < 0x4C; i+= 8 ) { 2107 for ( i = 0; i < 0x4C; i+= 8 ) {
2103 data0 = TLan_DioRead32( io_base, i ); 2108 data0 = TLan_DioRead32( io_base, i );
@@ -2131,13 +2136,14 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
2131{ 2136{
2132 int i; 2137 int i;
2133 2138
2134 printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list ); 2139 printk( "TLAN: %s List %d at %p\n", type, num, list );
2135 printk( "TLAN: Forward = 0x%08x\n", list->forward ); 2140 printk( "TLAN: Forward = 0x%08x\n", list->forward );
2136 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); 2141 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
2137 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2142 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
2138 /* for ( i = 0; i < 10; i++ ) { */ 2143 /* for ( i = 0; i < 10; i++ ) { */
2139 for ( i = 0; i < 2; i++ ) { 2144 for ( i = 0; i < 2; i++ ) {
2140 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address ); 2145 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2146 i, list->buffer[i].count, list->buffer[i].address );
2141 } 2147 }
2142 2148
2143} /* TLan_PrintList */ 2149} /* TLan_PrintList */
@@ -2165,7 +2171,6 @@ static void TLan_PrintList( TLanList *list, char *type, int num)
2165 2171
2166static void TLan_ReadAndClearStats( struct net_device *dev, int record ) 2172static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2167{ 2173{
2168 TLanPrivateInfo *priv = netdev_priv(dev);
2169 u32 tx_good, tx_under; 2174 u32 tx_good, tx_under;
2170 u32 rx_good, rx_over; 2175 u32 rx_good, rx_over;
2171 u32 def_tx, crc, code; 2176 u32 def_tx, crc, code;
@@ -2202,18 +2207,18 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2202 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2207 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
2203 2208
2204 if ( record ) { 2209 if ( record ) {
2205 priv->stats.rx_packets += rx_good; 2210 dev->stats.rx_packets += rx_good;
2206 priv->stats.rx_errors += rx_over + crc + code; 2211 dev->stats.rx_errors += rx_over + crc + code;
2207 priv->stats.tx_packets += tx_good; 2212 dev->stats.tx_packets += tx_good;
2208 priv->stats.tx_errors += tx_under + loss; 2213 dev->stats.tx_errors += tx_under + loss;
2209 priv->stats.collisions += multi_col + single_col + excess_col + late_col; 2214 dev->stats.collisions += multi_col + single_col + excess_col + late_col;
2210 2215
2211 priv->stats.rx_over_errors += rx_over; 2216 dev->stats.rx_over_errors += rx_over;
2212 priv->stats.rx_crc_errors += crc; 2217 dev->stats.rx_crc_errors += crc;
2213 priv->stats.rx_frame_errors += code; 2218 dev->stats.rx_frame_errors += code;
2214 2219
2215 priv->stats.tx_aborted_errors += tx_under; 2220 dev->stats.tx_aborted_errors += tx_under;
2216 priv->stats.tx_carrier_errors += loss; 2221 dev->stats.tx_carrier_errors += loss;
2217 } 2222 }
2218 2223
2219} /* TLan_ReadAndClearStats */ 2224} /* TLan_ReadAndClearStats */
@@ -2354,14 +2359,16 @@ TLan_FinishReset( struct net_device *dev )
2354 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2359 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
2355 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 ); 2360 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
2356 2361
2357 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) { 2362 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
2363 ( priv->aui ) ) {
2358 status = MII_GS_LINK; 2364 status = MII_GS_LINK;
2359 printk( "TLAN: %s: Link forced.\n", dev->name ); 2365 printk( "TLAN: %s: Link forced.\n", dev->name );
2360 } else { 2366 } else {
2361 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2367 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
2362 udelay( 1000 ); 2368 udelay( 1000 );
2363 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2369 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
2364 if ( (status & MII_GS_LINK) && /* We only support link info on Nat.Sem. PHY's */ 2370 if ( (status & MII_GS_LINK) &&
2371 /* We only support link info on Nat.Sem. PHY's */
2365 (tlphy_id1 == NAT_SEM_ID1) && 2372 (tlphy_id1 == NAT_SEM_ID1) &&
2366 (tlphy_id2 == NAT_SEM_ID2) ) { 2373 (tlphy_id2 == NAT_SEM_ID2) ) {
2367 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); 2374 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
@@ -2370,12 +2377,12 @@ TLan_FinishReset( struct net_device *dev )
2370 printk( "TLAN: %s: Link active with ", dev->name ); 2377 printk( "TLAN: %s: Link active with ", dev->name );
2371 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2378 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
2372 printk( "forced 10%sMbps %s-Duplex\n", 2379 printk( "forced 10%sMbps %s-Duplex\n",
2373 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2380 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
2374 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2381 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
2375 } else { 2382 } else {
2376 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2383 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
2377 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2384 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
2378 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2385 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
2379 printk("TLAN: Partner capability: "); 2386 printk("TLAN: Partner capability: ");
2380 for (i = 5; i <= 10; i++) 2387 for (i = 5; i <= 10; i++)
2381 if (partner & (1<<i)) 2388 if (partner & (1<<i))
@@ -2416,7 +2423,8 @@ TLan_FinishReset( struct net_device *dev )
2416 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2423 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
2417 netif_carrier_on(dev); 2424 netif_carrier_on(dev);
2418 } else { 2425 } else {
2419 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name ); 2426 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
2427 dev->name );
2420 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); 2428 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
2421 return; 2429 return;
2422 } 2430 }
@@ -2456,10 +2464,12 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2456 2464
2457 if ( mac != NULL ) { 2465 if ( mac != NULL ) {
2458 for ( i = 0; i < 6; i++ ) 2466 for ( i = 0; i < 6; i++ )
2459 TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] ); 2467 TLan_DioWrite8( dev->base_addr,
2468 TLAN_AREG_0 + areg + i, mac[i] );
2460 } else { 2469 } else {
2461 for ( i = 0; i < 6; i++ ) 2470 for ( i = 0; i < 6; i++ )
2462 TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 ); 2471 TLan_DioWrite8( dev->base_addr,
2472 TLAN_AREG_0 + areg + i, 0 );
2463 } 2473 }
2464 2474
2465} /* TLan_SetMac */ 2475} /* TLan_SetMac */
@@ -2565,9 +2575,13 @@ static void TLan_PhyDetect( struct net_device *dev )
2565 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2575 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
2566 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2576 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
2567 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); 2577 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
2568 if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2578 if ( ( control != 0xFFFF ) ||
2569 TLAN_DBG( TLAN_DEBUG_GNRL, "PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo ); 2579 ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
2570 if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) { 2580 TLAN_DBG( TLAN_DEBUG_GNRL,
2581 "PHY found at %02x %04x %04x %04x\n",
2582 phy, control, hi, lo );
2583 if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
2584 ( phy != TLAN_PHY_MAX_ADDR ) ) {
2571 priv->phy[1] = phy; 2585 priv->phy[1] = phy;
2572 } 2586 }
2573 } 2587 }
@@ -2595,7 +2609,9 @@ static void TLan_PhyPowerDown( struct net_device *dev )
2595 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2609 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2596 TLan_MiiSync( dev->base_addr ); 2610 TLan_MiiSync( dev->base_addr );
2597 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2611 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
2598 if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2612 if ( ( priv->phyNum == 0 ) &&
2613 ( priv->phy[1] != TLAN_PHY_NONE ) &&
2614 ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
2599 TLan_MiiSync( dev->base_addr ); 2615 TLan_MiiSync( dev->base_addr );
2600 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2616 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
2601 } 2617 }
@@ -2768,10 +2784,10 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2768 * more time. Perhaps we should fail after a while. 2784 * more time. Perhaps we should fail after a while.
2769 */ 2785 */
2770 if (!priv->neg_be_verbose++) { 2786 if (!priv->neg_be_verbose++) {
2771 printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n"); 2787 pr_info("TLAN: Giving autonegotiation more time.\n");
2772 printk(KERN_INFO "TLAN: Please check that your adapter has\n"); 2788 pr_info("TLAN: Please check that your adapter has\n");
2773 printk(KERN_INFO "TLAN: been properly connected to a HUB or Switch.\n"); 2789 pr_info("TLAN: been properly connected to a HUB or Switch.\n");
2774 printk(KERN_INFO "TLAN: Trying to establish link in the background...\n"); 2790 pr_info("TLAN: Trying to establish link in the background...\n");
2775 } 2791 }
2776 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2792 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
2777 return; 2793 return;
@@ -2787,7 +2803,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2787 priv->tlanFullDuplex = TRUE; 2803 priv->tlanFullDuplex = TRUE;
2788 } 2804 }
2789 2805
2790 if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) { 2806 if ( ( ! ( mode & 0x0180 ) ) &&
2807 ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
2808 ( priv->phyNum != 0 ) ) {
2791 priv->phyNum = 0; 2809 priv->phyNum = 0;
2792 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2810 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
2793 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2811 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
@@ -2796,12 +2814,14 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2796 } 2814 }
2797 2815
2798 if ( priv->phyNum == 0 ) { 2816 if ( priv->phyNum == 0 ) {
2799 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) { 2817 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
2800 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX ); 2818 ( an_adv & an_lpa & 0x0040 ) ) {
2801 printk( "TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2819 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
2820 MII_GC_AUTOENB | MII_GC_DUPLEX );
2821 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
2802 } else { 2822 } else {
2803 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2823 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
2804 printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2824 pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
2805 } 2825 }
2806 } 2826 }
2807 2827
@@ -3209,7 +3229,8 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
3209 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3229 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
3210 3230
3211 if ( ( ! err ) && stop ) { 3231 if ( ( ! err ) && stop ) {
3212 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3232 /* STOP, raise data while clock is high */
3233 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3213 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3234 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3214 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3235 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3215 } 3236 }
@@ -3272,7 +3293,8 @@ static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
3272 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3293 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
3273 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3294 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3274 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3295 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
3275 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3296 /* STOP, raise data while clock is high */
3297 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
3276 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3298 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
3277 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3299 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3278 } 3300 }
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 41ce0b665937..4b82f283e985 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -13,8 +13,6 @@
13 * This software may be used and distributed according to the terms 13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License, incorporated herein by reference. 14 * of the GNU General Public License, incorporated herein by reference.
15 * 15 *
16 ** This file is best viewed/edited with tabstop=4, colums>=132
17 *
18 * 16 *
19 * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com> 17 * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
20 * New Maintainer 18 * New Maintainer
@@ -45,7 +43,9 @@
45#define TLAN_IGNORE 0 43#define TLAN_IGNORE 0
46#define TLAN_RECORD 1 44#define TLAN_RECORD 1
47 45
48#define TLAN_DBG(lvl, format, args...) if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); 46#define TLAN_DBG(lvl, format, args...) \
47 do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
48
49#define TLAN_DEBUG_GNRL 0x0001 49#define TLAN_DEBUG_GNRL 0x0001
50#define TLAN_DEBUG_TX 0x0002 50#define TLAN_DEBUG_TX 0x0002
51#define TLAN_DEBUG_RX 0x0004 51#define TLAN_DEBUG_RX 0x0004
@@ -194,7 +194,6 @@ typedef struct tlan_private_tag {
194 u32 timerSetAt; 194 u32 timerSetAt;
195 u32 timerType; 195 u32 timerType;
196 struct timer_list timer; 196 struct timer_list timer;
197 struct net_device_stats stats;
198 struct board *adapter; 197 struct board *adapter;
199 u32 adapterRev; 198 u32 adapterRev;
200 u32 aui; 199 u32 aui;
@@ -205,7 +204,6 @@ typedef struct tlan_private_tag {
205 u32 speed; 204 u32 speed;
206 u8 tlanRev; 205 u8 tlanRev;
207 u8 tlanFullDuplex; 206 u8 tlanFullDuplex;
208 char devName[8];
209 spinlock_t lock; 207 spinlock_t lock;
210 u8 link; 208 u8 link;
211 u8 is_eisa; 209 u8 is_eisa;
@@ -517,12 +515,18 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
517 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 515 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
518 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 516 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
519 * 517 *
520 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) ); 518 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
521 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1; 519 * DA(a,30), DA(a,36), DA(a,42) );
522 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2; 520 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
523 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3; 521 * DA(a,31), DA(a,37), DA(a,43) ) << 1;
524 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4; 522 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
525 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5; 523 * DA(a,32), DA(a,38), DA(a,44) ) << 2;
524 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
525 * DA(a,33), DA(a,39), DA(a,45) ) << 3;
526 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
527 * DA(a,34), DA(a,40), DA(a,46) ) << 4;
528 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
529 * DA(a,35), DA(a,41), DA(a,47) ) << 5;
526 * 530 *
527 */ 531 */
528static inline u32 TLan_HashFunc( const u8 *a ) 532static inline u32 TLan_HashFunc( const u8 *a )
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 45208a0e69a0..7766cde0d63d 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -132,7 +132,6 @@ static void xl_dn_comp(struct net_device *dev);
132static int xl_close(struct net_device *dev); 132static int xl_close(struct net_device *dev);
133static void xl_set_rx_mode(struct net_device *dev); 133static void xl_set_rx_mode(struct net_device *dev);
134static irqreturn_t xl_interrupt(int irq, void *dev_id); 134static irqreturn_t xl_interrupt(int irq, void *dev_id);
135static struct net_device_stats * xl_get_stats(struct net_device *dev);
136static int xl_set_mac_address(struct net_device *dev, void *addr) ; 135static int xl_set_mac_address(struct net_device *dev, void *addr) ;
137static void xl_arb_cmd(struct net_device *dev); 136static void xl_arb_cmd(struct net_device *dev);
138static void xl_asb_cmd(struct net_device *dev) ; 137static void xl_asb_cmd(struct net_device *dev) ;
@@ -343,7 +342,6 @@ static int __devinit xl_probe(struct pci_dev *pdev,
343 dev->stop=&xl_close; 342 dev->stop=&xl_close;
344 dev->do_ioctl=NULL; 343 dev->do_ioctl=NULL;
345 dev->set_multicast_list=&xl_set_rx_mode; 344 dev->set_multicast_list=&xl_set_rx_mode;
346 dev->get_stats=&xl_get_stats ;
347 dev->set_mac_address=&xl_set_mac_address ; 345 dev->set_mac_address=&xl_set_mac_address ;
348 SET_NETDEV_DEV(dev, &pdev->dev); 346 SET_NETDEV_DEV(dev, &pdev->dev);
349 347
@@ -921,7 +919,7 @@ static void xl_rx(struct net_device *dev)
921 adv_rx_ring(dev) ; 919 adv_rx_ring(dev) ;
922 920
923 adv_rx_ring(dev) ; /* One more time just for luck :) */ 921 adv_rx_ring(dev) ; /* One more time just for luck :) */
924 xl_priv->xl_stats.rx_dropped++ ; 922 dev->stats.rx_dropped++ ;
925 923
926 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 924 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
927 return ; 925 return ;
@@ -957,7 +955,7 @@ static void xl_rx(struct net_device *dev)
957 if (skb==NULL) { /* Still need to fix the rx ring */ 955 if (skb==NULL) { /* Still need to fix the rx ring */
958 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ; 956 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
959 adv_rx_ring(dev) ; 957 adv_rx_ring(dev) ;
960 xl_priv->xl_stats.rx_dropped++ ; 958 dev->stats.rx_dropped++ ;
961 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 959 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
962 return ; 960 return ;
963 } 961 }
@@ -971,8 +969,8 @@ static void xl_rx(struct net_device *dev)
971 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); 969 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
972 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG; 970 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
973 adv_rx_ring(dev) ; 971 adv_rx_ring(dev) ;
974 xl_priv->xl_stats.rx_packets++ ; 972 dev->stats.rx_packets++ ;
975 xl_priv->xl_stats.rx_bytes += frame_length ; 973 dev->stats.rx_bytes += frame_length ;
976 974
977 netif_rx(skb2) ; 975 netif_rx(skb2) ;
978 } /* if multiple buffers */ 976 } /* if multiple buffers */
@@ -1182,8 +1180,8 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
1182 txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); 1180 txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
1183 txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST; 1181 txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
1184 xl_priv->tx_ring_skb[tx_head] = skb ; 1182 xl_priv->tx_ring_skb[tx_head] = skb ;
1185 xl_priv->xl_stats.tx_packets++ ; 1183 dev->stats.tx_packets++ ;
1186 xl_priv->xl_stats.tx_bytes += skb->len ; 1184 dev->stats.tx_bytes += skb->len ;
1187 1185
1188 /* 1186 /*
1189 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1 1187 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1
@@ -1463,12 +1461,6 @@ static void xl_srb_bh(struct net_device *dev)
1463 return ; 1461 return ;
1464} 1462}
1465 1463
1466static struct net_device_stats * xl_get_stats(struct net_device *dev)
1467{
1468 struct xl_private *xl_priv = netdev_priv(dev);
1469 return (struct net_device_stats *) &xl_priv->xl_stats;
1470}
1471
1472static int xl_set_mac_address (struct net_device *dev, void *addr) 1464static int xl_set_mac_address (struct net_device *dev, void *addr)
1473{ 1465{
1474 struct sockaddr *saddr = addr ; 1466 struct sockaddr *saddr = addr ;
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 74cf8e1a181b..66b1ff603234 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -273,8 +273,6 @@ struct xl_private {
273 struct wait_queue *srb_wait; 273 struct wait_queue *srb_wait;
274 volatile int asb_queued; 274 volatile int asb_queued;
275 275
276 struct net_device_stats xl_stats ;
277
278 u16 mac_buffer ; 276 u16 mac_buffer ;
279 u16 xl_lan_status ; 277 u16 xl_lan_status ;
280 u8 xl_ring_speed ; 278 u8 xl_ring_speed ;
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 6017d5267d08..febfaee44fe9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -803,7 +803,8 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
803 int rx = data->rxhead; 803 int rx = data->rxhead;
804 struct sk_buff *skb; 804 struct sk_buff *skb;
805 805
806 data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2); 806 data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
807 TSI108_RXBUF_SIZE + 2);
807 if (!skb) 808 if (!skb)
808 break; 809 break;
809 810
@@ -1352,8 +1353,9 @@ static int tsi108_open(struct net_device *dev)
1352 data->rxhead = 0; 1353 data->rxhead = 0;
1353 1354
1354 for (i = 0; i < TSI108_RXRING_LEN; i++) { 1355 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1355 struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN); 1356 struct sk_buff *skb;
1356 1357
1358 skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
1357 if (!skb) { 1359 if (!skb) {
1358 /* Bah. No memory for now, but maybe we'll get 1360 /* Bah. No memory for now, but maybe we'll get
1359 * some more later. 1361 * some more later.
@@ -1526,7 +1528,7 @@ static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1526 struct tsi108_prv_data *data = netdev_priv(dev); 1528 struct tsi108_prv_data *data = netdev_priv(dev);
1527 unsigned long flags; 1529 unsigned long flags;
1528 int rc; 1530 int rc;
1529 1531
1530 spin_lock_irqsave(&data->txlock, flags); 1532 spin_lock_irqsave(&data->txlock, flags);
1531 rc = mii_ethtool_gset(&data->mii_if, cmd); 1533 rc = mii_ethtool_gset(&data->mii_if, cmd);
1532 spin_unlock_irqrestore(&data->txlock, flags); 1534 spin_unlock_irqrestore(&data->txlock, flags);
@@ -1543,7 +1545,7 @@ static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1543 spin_lock_irqsave(&data->txlock, flags); 1545 spin_lock_irqsave(&data->txlock, flags);
1544 rc = mii_ethtool_sset(&data->mii_if, cmd); 1546 rc = mii_ethtool_sset(&data->mii_if, cmd);
1545 spin_unlock_irqrestore(&data->txlock, flags); 1547 spin_unlock_irqrestore(&data->txlock, flags);
1546 1548
1547 return rc; 1549 return rc;
1548} 1550}
1549 1551
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index f5839c4a5cbd..cfbbfee55836 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Author: Li Yang <leoli@freescale.com> 6 * Author: Li Yang <leoli@freescale.com>
7 * 7 *
8 * Limitation: 8 * Limitation:
9 * Can only get/set setttings of the first queue. 9 * Can only get/set setttings of the first queue.
10 * Need to re-open the interface manually after changing some paramters. 10 * Need to re-open the interface manually after changing some paramters.
11 * 11 *
@@ -160,7 +160,7 @@ uec_set_pauseparam(struct net_device *netdev,
160 160
161 ugeth->ug_info->receiveFlowControl = pause->rx_pause; 161 ugeth->ug_info->receiveFlowControl = pause->rx_pause;
162 ugeth->ug_info->transmitFlowControl = pause->tx_pause; 162 ugeth->ug_info->transmitFlowControl = pause->tx_pause;
163 163
164 if (ugeth->phydev->autoneg) { 164 if (ugeth->phydev->autoneg) {
165 if (netif_running(netdev)) { 165 if (netif_running(netdev)) {
166 /* FIXME: automatically restart */ 166 /* FIXME: automatically restart */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 0604f3faf043..68e198bd538b 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -154,6 +154,16 @@ config USB_NET_AX8817X
154 This driver creates an interface named "ethX", where X depends on 154 This driver creates an interface named "ethX", where X depends on
155 what other networking devices you have in use. 155 what other networking devices you have in use.
156 156
157config USB_HSO
158 tristate "Option USB High Speed Mobile Devices"
159 depends on USB && RFKILL
160 default n
161 help
162 Choose this option if you have an Option HSDPA/HSUPA card.
163 These cards support downlink speeds of 7.2Mbps or greater.
164
165 To compile this driver as a module, choose M here: the
166 module will be called hso.
157 167
158config USB_NET_CDCETHER 168config USB_NET_CDCETHER
159 tristate "CDC Ethernet support (smart devices such as cable modems)" 169 tristate "CDC Ethernet support (smart devices such as cable modems)"
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 595a539f8384..24800c157f98 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_USB_CATC) += catc.o
6obj-$(CONFIG_USB_KAWETH) += kaweth.o 6obj-$(CONFIG_USB_KAWETH) += kaweth.o
7obj-$(CONFIG_USB_PEGASUS) += pegasus.o 7obj-$(CONFIG_USB_PEGASUS) += pegasus.o
8obj-$(CONFIG_USB_RTL8150) += rtl8150.o 8obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_HSO) += hso.o
9obj-$(CONFIG_USB_NET_AX8817X) += asix.o 10obj-$(CONFIG_USB_NET_AX8817X) += asix.o
10obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 11obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
11obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 12obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
new file mode 100644
index 000000000000..031d07b105af
--- /dev/null
+++ b/drivers/net/usb/hso.c
@@ -0,0 +1,2836 @@
1/******************************************************************************
2 *
3 * Driver for Option High Speed Mobile Devices.
4 *
5 * Copyright (C) 2008 Option International
6 * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd)
7 * <ajb@spheresystems.co.uk>
8 * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
9 * Copyright (C) 2008 Novell, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
23 * USA
24 *
25 *
26 *****************************************************************************/
27
28/******************************************************************************
29 *
30 * Description of the device:
31 *
32 * Interface 0: Contains the IP network interface on the bulk end points.
33 * The multiplexed serial ports are using the interrupt and
34 * control endpoints.
35 * Interrupt contains a bitmap telling which multiplexed
36 * serialport needs servicing.
37 *
38 * Interface 1: Diagnostics port, uses bulk only, do not submit urbs until the
39 * port is opened, as this have a huge impact on the network port
40 * throughput.
41 *
42 * Interface 2: Standard modem interface - circuit switched interface, should
43 * not be used.
44 *
45 *****************************************************************************/
46
47#include <linux/sched.h>
48#include <linux/slab.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/netdevice.h>
52#include <linux/module.h>
53#include <linux/ethtool.h>
54#include <linux/usb.h>
55#include <linux/timer.h>
56#include <linux/tty.h>
57#include <linux/tty_driver.h>
58#include <linux/tty_flip.h>
59#include <linux/kmod.h>
60#include <linux/rfkill.h>
61#include <linux/ip.h>
62#include <linux/uaccess.h>
63#include <linux/usb/cdc.h>
64#include <net/arp.h>
65#include <asm/byteorder.h>
66
67
68#define DRIVER_VERSION "1.2"
69#define MOD_AUTHOR "Option Wireless"
70#define MOD_DESCRIPTION "USB High Speed Option driver"
71#define MOD_LICENSE "GPL"
72
73#define HSO_MAX_NET_DEVICES 10
74#define HSO__MAX_MTU 2048
75#define DEFAULT_MTU 1500
76#define DEFAULT_MRU 1500
77
78#define CTRL_URB_RX_SIZE 1024
79#define CTRL_URB_TX_SIZE 64
80
81#define BULK_URB_RX_SIZE 4096
82#define BULK_URB_TX_SIZE 8192
83
84#define MUX_BULK_RX_BUF_SIZE HSO__MAX_MTU
85#define MUX_BULK_TX_BUF_SIZE HSO__MAX_MTU
86#define MUX_BULK_RX_BUF_COUNT 4
87#define USB_TYPE_OPTION_VENDOR 0x20
88
89/* These definitions are used with the struct hso_net flags element */
90/* - use *_bit operations on it. (bit indices not values.) */
91#define HSO_NET_RUNNING 0
92
93#define HSO_NET_TX_TIMEOUT (HZ*10)
94
95/* Serial port defines and structs. */
96#define HSO_SERIAL_FLAG_RX_SENT 0
97
98#define HSO_SERIAL_MAGIC 0x48534f31
99
100/* Number of ttys to handle */
101#define HSO_SERIAL_TTY_MINORS 256
102
103#define MAX_RX_URBS 2
104
105#define get_serial_by_tty(x) \
106 (x ? (struct hso_serial *)x->driver_data : NULL)
107
108/*****************************************************************************/
109/* Debugging functions */
110/*****************************************************************************/
111#define D__(lvl_, fmt, arg...) \
112 do { \
113 printk(lvl_ "[%d:%s]: " fmt "\n", \
114 __LINE__, __func__, ## arg); \
115 } while (0)
116
117#define D_(lvl, args...) \
118 do { \
119 if (lvl & debug) \
120 D__(KERN_INFO, args); \
121 } while (0)
122
123#define D1(args...) D_(0x01, ##args)
124#define D2(args...) D_(0x02, ##args)
125#define D3(args...) D_(0x04, ##args)
126#define D4(args...) D_(0x08, ##args)
127#define D5(args...) D_(0x10, ##args)
128
129/*****************************************************************************/
130/* Enumerators */
131/*****************************************************************************/
132enum pkt_parse_state {
133 WAIT_IP,
134 WAIT_DATA,
135 WAIT_SYNC
136};
137
138/*****************************************************************************/
139/* Structs */
140/*****************************************************************************/
141
142struct hso_shared_int {
143 struct usb_endpoint_descriptor *intr_endp;
144 void *shared_intr_buf;
145 struct urb *shared_intr_urb;
146 struct usb_device *usb;
147 int use_count;
148 int ref_count;
149 struct mutex shared_int_lock;
150};
151
152struct hso_net {
153 struct hso_device *parent;
154 struct net_device *net;
155 struct rfkill *rfkill;
156
157 struct usb_endpoint_descriptor *in_endp;
158 struct usb_endpoint_descriptor *out_endp;
159
160 struct urb *mux_bulk_rx_urb_pool[MUX_BULK_RX_BUF_COUNT];
161 struct urb *mux_bulk_tx_urb;
162 void *mux_bulk_rx_buf_pool[MUX_BULK_RX_BUF_COUNT];
163 void *mux_bulk_tx_buf;
164
165 struct sk_buff *skb_rx_buf;
166 struct sk_buff *skb_tx_buf;
167
168 enum pkt_parse_state rx_parse_state;
169 spinlock_t net_lock;
170
171 unsigned short rx_buf_size;
172 unsigned short rx_buf_missing;
173 struct iphdr rx_ip_hdr;
174
175 unsigned long flags;
176};
177
178struct hso_serial {
179 struct hso_device *parent;
180 int magic;
181 u8 minor;
182
183 struct hso_shared_int *shared_int;
184
185 /* rx/tx urb could be either a bulk urb or a control urb depending
186 on which serial port it is used on. */
187 struct urb *rx_urb[MAX_RX_URBS];
188 u8 num_rx_urbs;
189 u8 *rx_data[MAX_RX_URBS];
190 u16 rx_data_length; /* should contain allocated length */
191
192 struct urb *tx_urb;
193 u8 *tx_data;
194 u8 *tx_buffer;
195 u16 tx_data_length; /* should contain allocated length */
196 u16 tx_data_count;
197 u16 tx_buffer_count;
198 struct usb_ctrlrequest ctrl_req_tx;
199 struct usb_ctrlrequest ctrl_req_rx;
200
201 struct usb_endpoint_descriptor *in_endp;
202 struct usb_endpoint_descriptor *out_endp;
203
204 unsigned long flags;
205 u8 rts_state;
206 u8 dtr_state;
207 unsigned tx_urb_used:1;
208
209 /* from usb_serial_port */
210 struct tty_struct *tty;
211 int open_count;
212 spinlock_t serial_lock;
213
214 int (*write_data) (struct hso_serial *serial);
215};
216
217struct hso_device {
218 union {
219 struct hso_serial *dev_serial;
220 struct hso_net *dev_net;
221 } port_data;
222
223 u32 port_spec;
224
225 u8 is_active;
226 u8 usb_gone;
227 struct work_struct async_get_intf;
228 struct work_struct async_put_intf;
229
230 struct usb_device *usb;
231 struct usb_interface *interface;
232
233 struct device *dev;
234 struct kref ref;
235 struct mutex mutex;
236};
237
238/* Type of interface */
239#define HSO_INTF_MASK 0xFF00
240#define HSO_INTF_MUX 0x0100
241#define HSO_INTF_BULK 0x0200
242
243/* Type of port */
244#define HSO_PORT_MASK 0xFF
245#define HSO_PORT_NO_PORT 0x0
246#define HSO_PORT_CONTROL 0x1
247#define HSO_PORT_APP 0x2
248#define HSO_PORT_GPS 0x3
249#define HSO_PORT_PCSC 0x4
250#define HSO_PORT_APP2 0x5
251#define HSO_PORT_GPS_CONTROL 0x6
252#define HSO_PORT_MSD 0x7
253#define HSO_PORT_VOICE 0x8
254#define HSO_PORT_DIAG2 0x9
255#define HSO_PORT_DIAG 0x10
256#define HSO_PORT_MODEM 0x11
257#define HSO_PORT_NETWORK 0x12
258
259/* Additional device info */
260#define HSO_INFO_MASK 0xFF000000
261#define HSO_INFO_CRC_BUG 0x01000000
262
263/*****************************************************************************/
264/* Prototypes */
265/*****************************************************************************/
266/* Serial driver functions */
267static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
268 unsigned int set, unsigned int clear);
269static void ctrl_callback(struct urb *urb);
270static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
271static void hso_kick_transmit(struct hso_serial *serial);
272/* Helper functions */
273static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
274 struct usb_device *usb, gfp_t gfp);
275static void log_usb_status(int status, const char *function);
276static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
277 int type, int dir);
278static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
279static void hso_free_interface(struct usb_interface *intf);
280static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags);
281static int hso_stop_serial_device(struct hso_device *hso_dev);
282static int hso_start_net_device(struct hso_device *hso_dev);
283static void hso_free_shared_int(struct hso_shared_int *shared_int);
284static int hso_stop_net_device(struct hso_device *hso_dev);
285static void hso_serial_ref_free(struct kref *ref);
286static void async_get_intf(struct work_struct *data);
287static void async_put_intf(struct work_struct *data);
288static int hso_put_activity(struct hso_device *hso_dev);
289static int hso_get_activity(struct hso_device *hso_dev);
290
291/*****************************************************************************/
292/* Helping functions */
293/*****************************************************************************/
294
295/* #define DEBUG */
296
297#define dev2net(x) (x->port_data.dev_net)
298#define dev2ser(x) (x->port_data.dev_serial)
299
300/* Debugging functions */
301#ifdef DEBUG
302static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
303 unsigned int len)
304{
305 u8 i = 0;
306
307 printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len);
308
309 for (i = 0; i < len; i++) {
310 if (!(i % 16))
311 printk("\n 0x%03x: ", i);
312 printk("%02x ", (unsigned char)buf[i]);
313 }
314 printk("\n");
315}
316
317#define DUMP(buf_, len_) \
318 dbg_dump(__LINE__, __func__, buf_, len_)
319
320#define DUMP1(buf_, len_) \
321 do { \
322 if (0x01 & debug) \
323 DUMP(buf_, len_); \
324 } while (0)
325#else
326#define DUMP(buf_, len_)
327#define DUMP1(buf_, len_)
328#endif
329
330/* module parameters */
331static int debug;
332static int tty_major;
333static int disable_net;
334
335/* driver info */
336static const char driver_name[] = "hso";
337static const char tty_filename[] = "ttyHS";
338static const char *version = __FILE__ ": " DRIVER_VERSION " " MOD_AUTHOR;
339/* the usb driver itself (registered in hso_init) */
340static struct usb_driver hso_driver;
341/* serial structures */
342static struct tty_driver *tty_drv;
343static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS];
344static struct hso_device *network_table[HSO_MAX_NET_DEVICES];
345static spinlock_t serial_table_lock;
346static struct ktermios *hso_serial_termios[HSO_SERIAL_TTY_MINORS];
347static struct ktermios *hso_serial_termios_locked[HSO_SERIAL_TTY_MINORS];
348
349static const s32 default_port_spec[] = {
350 HSO_INTF_MUX | HSO_PORT_NETWORK,
351 HSO_INTF_BULK | HSO_PORT_DIAG,
352 HSO_INTF_BULK | HSO_PORT_MODEM,
353 0
354};
355
356static const s32 icon321_port_spec[] = {
357 HSO_INTF_MUX | HSO_PORT_NETWORK,
358 HSO_INTF_BULK | HSO_PORT_DIAG2,
359 HSO_INTF_BULK | HSO_PORT_MODEM,
360 HSO_INTF_BULK | HSO_PORT_DIAG,
361 0
362};
363
364#define default_port_device(vendor, product) \
365 USB_DEVICE(vendor, product), \
366 .driver_info = (kernel_ulong_t)default_port_spec
367
368#define icon321_port_device(vendor, product) \
369 USB_DEVICE(vendor, product), \
370 .driver_info = (kernel_ulong_t)icon321_port_spec
371
372/* list of devices we support */
373static const struct usb_device_id hso_ids[] = {
374 {default_port_device(0x0af0, 0x6711)},
375 {default_port_device(0x0af0, 0x6731)},
376 {default_port_device(0x0af0, 0x6751)},
377 {default_port_device(0x0af0, 0x6771)},
378 {default_port_device(0x0af0, 0x6791)},
379 {default_port_device(0x0af0, 0x6811)},
380 {default_port_device(0x0af0, 0x6911)},
381 {default_port_device(0x0af0, 0x6951)},
382 {default_port_device(0x0af0, 0x6971)},
383 {default_port_device(0x0af0, 0x7011)},
384 {default_port_device(0x0af0, 0x7031)},
385 {default_port_device(0x0af0, 0x7051)},
386 {default_port_device(0x0af0, 0x7071)},
387 {default_port_device(0x0af0, 0x7111)},
388 {default_port_device(0x0af0, 0x7211)},
389 {default_port_device(0x0af0, 0x7251)},
390 {default_port_device(0x0af0, 0x7271)},
391 {default_port_device(0x0af0, 0x7311)},
392 {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */
393 {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */
394 {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */
395 {default_port_device(0x0af0, 0xd033)}, /* Icon-322 */
396 {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */
397 {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */
398 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */
399 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
400 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
401 {}
402};
403MODULE_DEVICE_TABLE(usb, hso_ids);
404
405/* Sysfs attribute */
406static ssize_t hso_sysfs_show_porttype(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409{
410 struct hso_device *hso_dev = dev->driver_data;
411 char *port_name;
412
413 if (!hso_dev)
414 return 0;
415
416 switch (hso_dev->port_spec & HSO_PORT_MASK) {
417 case HSO_PORT_CONTROL:
418 port_name = "Control";
419 break;
420 case HSO_PORT_APP:
421 port_name = "Application";
422 break;
423 case HSO_PORT_APP2:
424 port_name = "Application2";
425 break;
426 case HSO_PORT_GPS:
427 port_name = "GPS";
428 break;
429 case HSO_PORT_GPS_CONTROL:
430 port_name = "GPS Control";
431 break;
432 case HSO_PORT_PCSC:
433 port_name = "PCSC";
434 break;
435 case HSO_PORT_DIAG:
436 port_name = "Diagnostic";
437 break;
438 case HSO_PORT_DIAG2:
439 port_name = "Diagnostic2";
440 break;
441 case HSO_PORT_MODEM:
442 port_name = "Modem";
443 break;
444 case HSO_PORT_NETWORK:
445 port_name = "Network";
446 break;
447 default:
448 port_name = "Unknown";
449 break;
450 }
451
452 return sprintf(buf, "%s\n", port_name);
453}
454static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
455
456/* converts mux value to a port spec value */
457static u32 hso_mux_to_port(int mux)
458{
459 u32 result;
460
461 switch (mux) {
462 case 0x1:
463 result = HSO_PORT_CONTROL;
464 break;
465 case 0x2:
466 result = HSO_PORT_APP;
467 break;
468 case 0x4:
469 result = HSO_PORT_PCSC;
470 break;
471 case 0x8:
472 result = HSO_PORT_GPS;
473 break;
474 case 0x10:
475 result = HSO_PORT_APP2;
476 break;
477 default:
478 result = HSO_PORT_NO_PORT;
479 }
480 return result;
481}
482
483/* converts port spec value to a mux value */
484static u32 hso_port_to_mux(int port)
485{
486 u32 result;
487
488 switch (port & HSO_PORT_MASK) {
489 case HSO_PORT_CONTROL:
490 result = 0x0;
491 break;
492 case HSO_PORT_APP:
493 result = 0x1;
494 break;
495 case HSO_PORT_PCSC:
496 result = 0x2;
497 break;
498 case HSO_PORT_GPS:
499 result = 0x3;
500 break;
501 case HSO_PORT_APP2:
502 result = 0x4;
503 break;
504 default:
505 result = 0x0;
506 }
507 return result;
508}
509
510static struct hso_serial *get_serial_by_shared_int_and_type(
511 struct hso_shared_int *shared_int,
512 int mux)
513{
514 int i, port;
515
516 port = hso_mux_to_port(mux);
517
518 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
519 if (serial_table[i]
520 && (dev2ser(serial_table[i])->shared_int == shared_int)
521 && ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) {
522 return dev2ser(serial_table[i]);
523 }
524 }
525
526 return NULL;
527}
528
529static struct hso_serial *get_serial_by_index(unsigned index)
530{
531 struct hso_serial *serial;
532 unsigned long flags;
533
534 if (!serial_table[index])
535 return NULL;
536 spin_lock_irqsave(&serial_table_lock, flags);
537 serial = dev2ser(serial_table[index]);
538 spin_unlock_irqrestore(&serial_table_lock, flags);
539
540 return serial;
541}
542
543static int get_free_serial_index(void)
544{
545 int index;
546 unsigned long flags;
547
548 spin_lock_irqsave(&serial_table_lock, flags);
549 for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
550 if (serial_table[index] == NULL) {
551 spin_unlock_irqrestore(&serial_table_lock, flags);
552 return index;
553 }
554 }
555 spin_unlock_irqrestore(&serial_table_lock, flags);
556
557 printk(KERN_ERR "%s: no free serial devices in table\n", __func__);
558 return -1;
559}
560
561static void set_serial_by_index(unsigned index, struct hso_serial *serial)
562{
563 unsigned long flags;
564 spin_lock_irqsave(&serial_table_lock, flags);
565 if (serial)
566 serial_table[index] = serial->parent;
567 else
568 serial_table[index] = NULL;
569 spin_unlock_irqrestore(&serial_table_lock, flags);
570}
571
572/* log a meaningfull explanation of an USB status */
573static void log_usb_status(int status, const char *function)
574{
575 char *explanation;
576
577 switch (status) {
578 case -ENODEV:
579 explanation = "no device";
580 break;
581 case -ENOENT:
582 explanation = "endpoint not enabled";
583 break;
584 case -EPIPE:
585 explanation = "endpoint stalled";
586 break;
587 case -ENOSPC:
588 explanation = "not enough bandwidth";
589 break;
590 case -ESHUTDOWN:
591 explanation = "device disabled";
592 break;
593 case -EHOSTUNREACH:
594 explanation = "device suspended";
595 break;
596 case -EINVAL:
597 case -EAGAIN:
598 case -EFBIG:
599 case -EMSGSIZE:
600 explanation = "internal error";
601 break;
602 default:
603 explanation = "unknown status";
604 break;
605 }
606 D1("%s: received USB status - %s (%d)", function, explanation, status);
607}
608
609/* Network interface functions */
610
611/* called when net interface is brought up by ifconfig */
612static int hso_net_open(struct net_device *net)
613{
614 struct hso_net *odev = netdev_priv(net);
615 unsigned long flags = 0;
616
617 if (!odev) {
618 dev_err(&net->dev, "No net device !\n");
619 return -ENODEV;
620 }
621
622 odev->skb_tx_buf = NULL;
623
624 /* setup environment */
625 spin_lock_irqsave(&odev->net_lock, flags);
626 odev->rx_parse_state = WAIT_IP;
627 odev->rx_buf_size = 0;
628 odev->rx_buf_missing = sizeof(struct iphdr);
629 spin_unlock_irqrestore(&odev->net_lock, flags);
630
631 hso_start_net_device(odev->parent);
632
633 /* We are up and running. */
634 set_bit(HSO_NET_RUNNING, &odev->flags);
635
636 /* Tell the kernel we are ready to start receiving from it */
637 netif_start_queue(net);
638
639 return 0;
640}
641
642/* called when interface is brought down by ifconfig */
643static int hso_net_close(struct net_device *net)
644{
645 struct hso_net *odev = netdev_priv(net);
646
647 /* we don't need the queue anymore */
648 netif_stop_queue(net);
649 /* no longer running */
650 clear_bit(HSO_NET_RUNNING, &odev->flags);
651
652 hso_stop_net_device(odev->parent);
653
654 /* done */
655 return 0;
656}
657
658/* USB tells is xmit done, we should start the netqueue again */
659static void write_bulk_callback(struct urb *urb)
660{
661 struct hso_net *odev = urb->context;
662 int status = urb->status;
663
664 /* Sanity check */
665 if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
666 dev_err(&urb->dev->dev, "%s: device not running\n", __func__);
667 return;
668 }
669
670 /* Do we still have a valid kernel network device? */
671 if (!netif_device_present(odev->net)) {
672 dev_err(&urb->dev->dev, "%s: net device not present\n",
673 __func__);
674 return;
675 }
676
677 /* log status, but don't act on it, we don't need to resubmit anything
678 * anyhow */
679 if (status)
680 log_usb_status(status, __func__);
681
682 hso_put_activity(odev->parent);
683
684 /* Tell the network interface we are ready for another frame */
685 netif_wake_queue(odev->net);
686}
687
688/* called by kernel when we need to transmit a packet */
/* called by kernel when we need to transmit a packet
 *
 * Copies the skb into the driver's own bulk TX buffer and ships it as a
 * single bulk-OUT URB.  The queue is stopped for the duration of the
 * transfer and restarted from write_bulk_callback() (success) or below
 * (submit failure).
 *
 * NOTE(review): returns the raw usb_submit_urb() error code rather than
 * a NETDEV_TX_* value - confirm callers tolerate this.
 * NOTE(review): no check that skb->len <= MUX_BULK_TX_BUF_SIZE before
 * the memcpy; presumably bounded by the interface MTU - verify.
 */
static int hso_net_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct hso_net *odev = netdev_priv(net);
	int result;

	/* Tell the kernel, "No more frames 'til we are done with this one." */
	netif_stop_queue(net);
	/* Device is autosuspended: park the skb; it is sent on resume. */
	if (hso_get_activity(odev->parent) == -EAGAIN) {
		odev->skb_tx_buf = skb;
		return 0;
	}

	/* log if asked */
	DUMP1(skb->data, skb->len);
	/* Copy it from kernel memory to OUR memory */
	memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len);
	D1("len: %d/%d", skb->len, MUX_BULK_TX_BUF_SIZE);

	/* Fill in the URB for shipping it out. */
	usb_fill_bulk_urb(odev->mux_bulk_tx_urb,
			  odev->parent->usb,
			  usb_sndbulkpipe(odev->parent->usb,
					  odev->out_endp->
					  bEndpointAddress & 0x7F),
			  odev->mux_bulk_tx_buf, skb->len, write_bulk_callback,
			  odev);

	/* Deal with the Zero Length packet problem, I hope */
	odev->mux_bulk_tx_urb->transfer_flags |= URB_ZERO_PACKET;

	/* Send the URB on its merry way. */
	result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
	if (result) {
		dev_warn(&odev->parent->interface->dev,
			 "failed mux_bulk_tx_urb %d", result);
		net->stats.tx_errors++;
		/* Submit failed, so no completion will restart the queue. */
		netif_start_queue(net);
	} else {
		net->stats.tx_packets++;
		net->stats.tx_bytes += skb->len;
		/* And tell the kernel when the last transmit started. */
		net->trans_start = jiffies;
	}
	/* The data was copied out above, so the skb can go either way. */
	dev_kfree_skb(skb);
	/* we're done */
	return result;
}
736
737static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
738{
739 struct hso_net *odev = netdev_priv(net);
740
741 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
742 strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
743 usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
744}
745
/* ethtool operations: only driver info and link state are supported */
static struct ethtool_ops ops = {
	.get_drvinfo = hso_get_drvinfo,
	.get_link = ethtool_op_get_link
};
750
751/* called when a packet did not ack after watchdogtimeout */
752static void hso_net_tx_timeout(struct net_device *net)
753{
754 struct hso_net *odev = netdev_priv(net);
755
756 if (!odev)
757 return;
758
759 /* Tell syslog we are hosed. */
760 dev_warn(&net->dev, "Tx timed out.\n");
761
762 /* Tear the waiting frame off the list */
763 if (odev->mux_bulk_tx_urb
764 && (odev->mux_bulk_tx_urb->status == -EINPROGRESS))
765 usb_unlink_urb(odev->mux_bulk_tx_urb);
766
767 /* Update statistics */
768 net->stats.tx_errors++;
769}
770
771/* make a real packet from the received USB buffer */
/* make a real packet from the received USB buffer
 *
 * The network endpoint delivers a raw IP byte stream with no framing;
 * this state machine reassembles it into sk_buffs:
 *   WAIT_IP   - accumulate the fixed-size struct iphdr into
 *               odev->rx_ip_hdr to learn the frame length (tot_len)
 *   WAIT_DATA - copy the rest of the frame into odev->skb_rx_buf and
 *               hand the completed packet to netif_rx()
 *   WAIT_SYNC - error recovery: discard bytes until end-of-packet
 * Caller holds odev->net_lock (see read_bulk_callback()).
 *
 * @odev:   per-interface network state
 * @ip_pkt: raw bytes from the bulk-IN URB
 * @count:  number of valid bytes in @ip_pkt
 * @is_eop: non-zero if this buffer ended the USB packet (short read);
 *          used to resynchronize out of WAIT_SYNC
 */
static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
			unsigned int count, unsigned char is_eop)
{
	unsigned short temp_bytes;
	unsigned short buffer_offset = 0;
	unsigned short frame_len;
	unsigned char *tmp_rx_buf;

	/* log if needed */
	D1("Rx %d bytes", count);
	DUMP(ip_pkt, min(128, (int)count));

	while (count) {
		switch (odev->rx_parse_state) {
		case WAIT_IP:
			/* waiting for IP header. */
			/* wanted bytes - size of ip header */
			temp_bytes =
			    (count <
			     odev->rx_buf_missing) ? count : odev->
			    rx_buf_missing;

			/* Append to the partially collected header. */
			memcpy(((unsigned char *)(&odev->rx_ip_hdr)) +
			       odev->rx_buf_size, ip_pkt + buffer_offset,
			       temp_bytes);

			odev->rx_buf_size += temp_bytes;
			buffer_offset += temp_bytes;
			odev->rx_buf_missing -= temp_bytes;
			count -= temp_bytes;

			if (!odev->rx_buf_missing) {
				/* header is complete allocate an sk_buffer and
				 * continue to WAIT_DATA */
				frame_len = ntohs(odev->rx_ip_hdr.tot_len);

				/* Sanity-bound the advertised length. */
				if ((frame_len > DEFAULT_MRU) ||
				    (frame_len < sizeof(struct iphdr))) {
					dev_err(&odev->net->dev,
						"Invalid frame (%d) length\n",
						frame_len);
					odev->rx_parse_state = WAIT_SYNC;
					continue;
				}
				/* Allocate an sk_buff */
				odev->skb_rx_buf = dev_alloc_skb(frame_len);
				if (!odev->skb_rx_buf) {
					/* We got no receive buffer. */
					D1("could not allocate memory");
					odev->rx_parse_state = WAIT_SYNC;
					return;
				}
				/* Here's where it came from */
				odev->skb_rx_buf->dev = odev->net;

				/* Copy what we got so far. make room for iphdr
				 * after tail. */
				tmp_rx_buf =
				    skb_put(odev->skb_rx_buf,
					    sizeof(struct iphdr));
				memcpy(tmp_rx_buf, (char *)&(odev->rx_ip_hdr),
				       sizeof(struct iphdr));

				/* ETH_HLEN */
				odev->rx_buf_size = sizeof(struct iphdr);

				/* Filip actually use .tot_len */
				odev->rx_buf_missing =
				    frame_len - sizeof(struct iphdr);
				odev->rx_parse_state = WAIT_DATA;
			}
			break;

		case WAIT_DATA:
			temp_bytes = (count < odev->rx_buf_missing)
			    ? count : odev->rx_buf_missing;

			/* Copy the rest of the bytes that are left in the
			 * buffer into the waiting sk_buf. */
			/* Make room for temp_bytes after tail. */
			tmp_rx_buf = skb_put(odev->skb_rx_buf, temp_bytes);
			memcpy(tmp_rx_buf, ip_pkt + buffer_offset, temp_bytes);

			odev->rx_buf_missing -= temp_bytes;
			count -= temp_bytes;
			buffer_offset += temp_bytes;
			odev->rx_buf_size += temp_bytes;
			if (!odev->rx_buf_missing) {
				/* Packet is complete. Inject into stack. */
				/* We have IP packet here */
				odev->skb_rx_buf->protocol =
				    __constant_htons(ETH_P_IP);
				/* don't check it */
				odev->skb_rx_buf->ip_summed =
				    CHECKSUM_UNNECESSARY;

				skb_reset_mac_header(odev->skb_rx_buf);

				/* Ship it off to the kernel */
				netif_rx(odev->skb_rx_buf);
				/* No longer our buffer. */
				odev->skb_rx_buf = NULL;

				/* update out statistics */
				odev->net->stats.rx_packets++;

				odev->net->stats.rx_bytes += odev->rx_buf_size;

				/* Reset for the next frame's header. */
				odev->rx_buf_size = 0;
				odev->rx_buf_missing = sizeof(struct iphdr);
				odev->rx_parse_state = WAIT_IP;
			}
			break;

		case WAIT_SYNC:
			/* Drop the remainder of this buffer. */
			D1(" W_S");
			count = 0;
			break;
		default:
			/* Unknown state: skip a byte and keep trying. */
			D1(" ");
			count--;
			break;
		}
	}

	/* Recovery mechanism for WAIT_SYNC state. */
	if (is_eop) {
		if (odev->rx_parse_state == WAIT_SYNC) {
			/* End of packet: the next byte starts a new frame. */
			odev->rx_parse_state = WAIT_IP;
			odev->rx_buf_size = 0;
			odev->rx_buf_missing = sizeof(struct iphdr);
		}
	}
}
906
907/* Moving data from usb to kernel (in interrupt state) */
/* Moving data from usb to kernel (in interrupt state)
 *
 * Bulk-IN completion for the network port: feed received bytes into
 * packetizeRx() under net_lock, then resubmit the same URB so the
 * device can keep sending.  On any error status the URB is NOT
 * resubmitted.
 */
static void read_bulk_callback(struct urb *urb)
{
	struct hso_net *odev = urb->context;
	struct net_device *net;
	int result;
	int status = urb->status;

	/* is al ok? (Filip: Who's Al ?) */
	if (status) {
		log_usb_status(status, __func__);
		return;
	}

	/* Sanity check */
	if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) {
		D1("BULK IN callback but driver is not active!");
		return;
	}
	/* Keep autosuspend at bay while traffic flows. */
	usb_mark_last_busy(urb->dev);

	net = odev->net;

	if (!netif_device_present(net)) {
		/* Somebody killed our network interface... */
		return;
	}

	/* Firmware quirk workaround: buggy devices append a 4-byte
	 * 0xDEADBEEF trailer; when the transfer length modulo
	 * wMaxPacketSize is 5 or 6 and the last 4 bytes match, strip it.
	 */
	if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
		u32 rest;
		u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
		rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
		if (((rest == 5) || (rest == 6))
		    && !memcmp(((u8 *) urb->transfer_buffer) +
			       urb->actual_length - 4, crc_check, 4)) {
			urb->actual_length -= 4;
		}
	}

	/* do we even have a packet? */
	if (urb->actual_length) {
		/* Handle the IP stream, add header and push it onto network
		 * stack if the packet is complete. */
		spin_lock(&odev->net_lock);
		/* A short read (actual < requested) marks end-of-packet. */
		packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
			    (urb->transfer_buffer_length >
			     urb->actual_length) ? 1 : 0);
		spin_unlock(&odev->net_lock);
	}

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame. Reuse same as received. */
	usb_fill_bulk_urb(urb,
			  odev->parent->usb,
			  usb_rcvbulkpipe(odev->parent->usb,
					  odev->in_endp->
					  bEndpointAddress & 0x7F),
			  urb->transfer_buffer, MUX_BULK_RX_BUF_SIZE,
			  read_bulk_callback, odev);

	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result)
		dev_warn(&odev->parent->interface->dev,
			 "%s failed submit mux_bulk_rx_urb %d", __func__,
			 result);
}
975
976/* Serial driver functions */
977
/* Force the tty into the device's one supported line discipline:
 * raw 8N1 at 115200 baud, no flow control, no echo, low latency.
 * Modifies tty->termios in place; @old is unused here.
 */
static void _hso_serial_set_termios(struct tty_struct *tty,
				    struct ktermios *old)
{
	struct hso_serial *serial = get_serial_by_tty(tty);
	struct ktermios *termios;

	if ((!tty) || (!tty->termios) || (!serial)) {
		printk(KERN_ERR "%s: no tty structures", __func__);
		return;
	}

	D4("port %d", serial->minor);

	/*
	 * The default requirements for this device are:
	 */
	termios = tty->termios;
	termios->c_iflag &=
		~(IGNBRK	/* disable ignore break */
		| BRKINT	/* disable break causes interrupt */
		| PARMRK	/* disable mark parity errors */
		| ISTRIP	/* disable clear high bit of input characters */
		| INLCR		/* disable translate NL to CR */
		| IGNCR		/* disable ignore CR */
		| ICRNL		/* disable translate CR to NL */
		| IXON);	/* disable enable XON/XOFF flow control */

	/* disable postprocess output characters */
	termios->c_oflag &= ~OPOST;

	termios->c_lflag &=
		~(ECHO		/* disable echo input characters */
		| ECHONL	/* disable echo new line */
		| ICANON	/* disable erase, kill, werase, and rprnt
				   special characters */
		| ISIG		/* disable interrupt, quit, and suspend special
				   characters */
		| IEXTEN);	/* disable non-POSIX special characters */

	termios->c_cflag &=
		~(CSIZE		/* no size */
		| PARENB	/* disable parity bit */
		| CBAUD		/* clear current baud rate */
		| CBAUDEX);	/* clear current buad rate */

	termios->c_cflag |= CS8;	/* character size 8 bits */

	/* baud rate 115200 */
	tty_encode_baud_rate(serial->tty, 115200, 115200);

	/*
	 * Force low_latency on; otherwise the pushes are scheduled;
	 * this is bad as it opens up the possibility of dropping bytes
	 * on the floor. We don't want to drop bytes on the floor. :)
	 */
	serial->tty->low_latency = 1;
	return;
}
1036
1037/* open the requested serial port */
/* open the requested serial port
 *
 * Takes a reference on the parent device per open and starts the
 * underlying USB machinery on the first open (open_count 0 -> 1).
 * Serialized against close/disconnect by parent->mutex.
 */
static int hso_serial_open(struct tty_struct *tty, struct file *filp)
{
	struct hso_serial *serial = get_serial_by_index(tty->index);
	int result;

	/* sanity check */
	if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) {
		tty->driver_data = NULL;
		D1("Failed to open port");
		return -ENODEV;
	}

	mutex_lock(&serial->parent->mutex);
	/* Wake the interface from autosuspend for the duration of open. */
	result = usb_autopm_get_interface(serial->parent->interface);
	if (result < 0)
		goto err_out;

	D1("Opening %d", serial->minor);
	kref_get(&serial->parent->ref);

	/* setup */
	tty->driver_data = serial;
	serial->tty = tty;

	/* check for port allready opened, if not set the termios */
	serial->open_count++;
	if (serial->open_count == 1) {
		tty->low_latency = 1;
		serial->flags = 0;
		/* Force default termio settings */
		_hso_serial_set_termios(tty, NULL);
		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
		if (result) {
			/* Roll back the count and reference taken above. */
			hso_stop_serial_device(serial->parent);
			serial->open_count--;
			kref_put(&serial->parent->ref, hso_serial_ref_free);
		}
	} else {
		D1("Port was already open");
	}

	usb_autopm_put_interface(serial->parent->interface);

	/* done */
	/* NOTE(review): this raises RTS/DTR only when result is non-zero,
	 * i.e. on the *failure* path - looks inverted; confirm intent. */
	if (result)
		hso_serial_tiocmset(tty, NULL, TIOCM_RTS | TIOCM_DTR, 0);
err_out:
	mutex_unlock(&serial->parent->mutex);
	return result;
}
1088
1089/* close the requested serial port */
/* close the requested serial port
 *
 * Drops one open_count; on the last close detaches the tty, stops the
 * USB machinery (unless the device is already gone) and releases the
 * per-open reference taken in hso_serial_open().
 */
static void hso_serial_close(struct tty_struct *tty, struct file *filp)
{
	struct hso_serial *serial = tty->driver_data;
	u8 usb_gone;

	D1("Closing serial port");

	mutex_lock(&serial->parent->mutex);
	/* Snapshot disconnect state while holding the mutex. */
	usb_gone = serial->parent->usb_gone;

	if (!usb_gone)
		usb_autopm_get_interface(serial->parent->interface);

	/* reset the rts and dtr */
	/* do the actual close */
	serial->open_count--;
	if (serial->open_count <= 0) {
		kref_put(&serial->parent->ref, hso_serial_ref_free);
		/* Clamp in case of an unbalanced close. */
		serial->open_count = 0;
		if (serial->tty) {
			serial->tty->driver_data = NULL;
			serial->tty = NULL;
		}
		if (!usb_gone)
			hso_stop_serial_device(serial->parent);
	}
	if (!usb_gone)
		usb_autopm_put_interface(serial->parent->interface);
	mutex_unlock(&serial->parent->mutex);
}
1120
1121/* close the requested serial port */
1122static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf,
1123 int count)
1124{
1125 struct hso_serial *serial = get_serial_by_tty(tty);
1126 int space, tx_bytes;
1127 unsigned long flags;
1128
1129 /* sanity check */
1130 if (serial == NULL) {
1131 printk(KERN_ERR "%s: serial is NULL\n", __func__);
1132 return -ENODEV;
1133 }
1134
1135 spin_lock_irqsave(&serial->serial_lock, flags);
1136
1137 space = serial->tx_data_length - serial->tx_buffer_count;
1138 tx_bytes = (count < space) ? count : space;
1139
1140 if (!tx_bytes)
1141 goto out;
1142
1143 memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, tx_bytes);
1144 serial->tx_buffer_count += tx_bytes;
1145
1146out:
1147 spin_unlock_irqrestore(&serial->serial_lock, flags);
1148
1149 hso_kick_transmit(serial);
1150 /* done */
1151 return tx_bytes;
1152}
1153
1154/* how much room is there for writing */
1155static int hso_serial_write_room(struct tty_struct *tty)
1156{
1157 struct hso_serial *serial = get_serial_by_tty(tty);
1158 int room;
1159 unsigned long flags;
1160
1161 spin_lock_irqsave(&serial->serial_lock, flags);
1162 room = serial->tx_data_length - serial->tx_buffer_count;
1163 spin_unlock_irqrestore(&serial->serial_lock, flags);
1164
1165 /* return free room */
1166 return room;
1167}
1168
1169/* setup the term */
/* setup the term
 *
 * Applies the forced termios settings while the port is open; when the
 * port is closed the new settings are not applied.
 * NOTE(review): `tty->termios = old` stores the caller-supplied *old*
 * termios pointer into the tty - this looks suspicious (pointer
 * lifetime, and it discards the requested settings); confirm intent.
 */
static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	struct hso_serial *serial = get_serial_by_tty(tty);
	unsigned long flags;

	if (old)
		D5("Termios called with: cflags new[%d] - old[%d]",
		   tty->termios->c_cflag, old->c_cflag);

	/* the actual setup */
	spin_lock_irqsave(&serial->serial_lock, flags);
	if (serial->open_count)
		_hso_serial_set_termios(tty, old);
	else
		tty->termios = old;
	spin_unlock_irqrestore(&serial->serial_lock, flags);

	/* done */
	return;
}
1190
1191/* how many characters in the buffer */
1192static int hso_serial_chars_in_buffer(struct tty_struct *tty)
1193{
1194 struct hso_serial *serial = get_serial_by_tty(tty);
1195 int chars;
1196 unsigned long flags;
1197
1198 /* sanity check */
1199 if (serial == NULL)
1200 return 0;
1201
1202 spin_lock_irqsave(&serial->serial_lock, flags);
1203 chars = serial->tx_buffer_count;
1204 spin_unlock_irqrestore(&serial->serial_lock, flags);
1205
1206 return chars;
1207}
1208
1209static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file)
1210{
1211 unsigned int value;
1212 struct hso_serial *serial = get_serial_by_tty(tty);
1213 unsigned long flags;
1214
1215 /* sanity check */
1216 if (!serial) {
1217 D1("no tty structures");
1218 return -EINVAL;
1219 }
1220
1221 spin_lock_irqsave(&serial->serial_lock, flags);
1222 value = ((serial->rts_state) ? TIOCM_RTS : 0) |
1223 ((serial->dtr_state) ? TIOCM_DTR : 0);
1224 spin_unlock_irqrestore(&serial->serial_lock, flags);
1225
1226 return value;
1227}
1228
1229static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
1230 unsigned int set, unsigned int clear)
1231{
1232 int val = 0;
1233 unsigned long flags;
1234 int if_num;
1235 struct hso_serial *serial = get_serial_by_tty(tty);
1236
1237 /* sanity check */
1238 if (!serial) {
1239 D1("no tty structures");
1240 return -EINVAL;
1241 }
1242 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1243
1244 spin_lock_irqsave(&serial->serial_lock, flags);
1245 if (set & TIOCM_RTS)
1246 serial->rts_state = 1;
1247 if (set & TIOCM_DTR)
1248 serial->dtr_state = 1;
1249
1250 if (clear & TIOCM_RTS)
1251 serial->rts_state = 0;
1252 if (clear & TIOCM_DTR)
1253 serial->dtr_state = 0;
1254
1255 if (serial->dtr_state)
1256 val |= 0x01;
1257 if (serial->rts_state)
1258 val |= 0x02;
1259
1260 spin_unlock_irqrestore(&serial->serial_lock, flags);
1261
1262 return usb_control_msg(serial->parent->usb,
1263 usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
1264 0x21, val, if_num, NULL, 0,
1265 USB_CTRL_SET_TIMEOUT);
1266}
1267
1268/* starts a transmit */
1269static void hso_kick_transmit(struct hso_serial *serial)
1270{
1271 u8 *temp;
1272 unsigned long flags;
1273 int res;
1274
1275 spin_lock_irqsave(&serial->serial_lock, flags);
1276 if (!serial->tx_buffer_count)
1277 goto out;
1278
1279 if (serial->tx_urb_used)
1280 goto out;
1281
1282 /* Wakeup USB interface if necessary */
1283 if (hso_get_activity(serial->parent) == -EAGAIN)
1284 goto out;
1285
1286 /* Switch pointers around to avoid memcpy */
1287 temp = serial->tx_buffer;
1288 serial->tx_buffer = serial->tx_data;
1289 serial->tx_data = temp;
1290 serial->tx_data_count = serial->tx_buffer_count;
1291 serial->tx_buffer_count = 0;
1292
1293 /* If temp is set, it means we switched buffers */
1294 if (temp && serial->write_data) {
1295 res = serial->write_data(serial);
1296 if (res >= 0)
1297 serial->tx_urb_used = 1;
1298 }
1299out:
1300 spin_unlock_irqrestore(&serial->serial_lock, flags);
1301}
1302
1303/* make a request (for reading and writing data to muxed serial port) */
/* make a request (for reading and writing data to muxed serial port)
 *
 * Builds and submits an asynchronous control transfer on endpoint 0:
 * GET_ENCAPSULATED_RESPONSE reads up to @size bytes into
 * @ctrl_urb_data; SEND_ENCAPSULATED_COMMAND writes @size bytes from it.
 * Completion is handled by ctrl_callback().  Returns @size on
 * successful submission, or the usb_submit_urb() error.
 *
 * NOTE(review): wValue/wIndex/wLength are assigned CPU-endian values;
 * USB setup-packet fields are little-endian, so cpu_to_le16() is
 * presumably needed on big-endian hosts - confirm.
 */
static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
			      struct urb *ctrl_urb,
			      struct usb_ctrlrequest *ctrl_req,
			      u8 *ctrl_urb_data, u32 size)
{
	int result;
	int pipe;

	/* Sanity check */
	if (!serial || !ctrl_urb || !ctrl_req) {
		printk(KERN_ERR "%s: Wrong arguments\n", __func__);
		return -EINVAL;
	}

	/* initialize */
	ctrl_req->wValue = 0;
	/* Select the mux channel corresponding to this port. */
	ctrl_req->wIndex = hso_port_to_mux(port);
	ctrl_req->wLength = size;

	if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) {
		/* Reading command */
		ctrl_req->bRequestType = USB_DIR_IN |
					 USB_TYPE_OPTION_VENDOR |
					 USB_RECIP_INTERFACE;
		ctrl_req->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
		pipe = usb_rcvctrlpipe(serial->parent->usb, 0);
	} else {
		/* Writing command */
		ctrl_req->bRequestType = USB_DIR_OUT |
					 USB_TYPE_OPTION_VENDOR |
					 USB_RECIP_INTERFACE;
		ctrl_req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
		pipe = usb_sndctrlpipe(serial->parent->usb, 0);
	}
	/* syslog */
	D2("%s command (%02x) len: %d, port: %d",
	   type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? "Read" : "Write",
	   ctrl_req->bRequestType, ctrl_req->wLength, port);

	/* Load ctrl urb */
	ctrl_urb->transfer_flags = 0;
	usb_fill_control_urb(ctrl_urb,
			     serial->parent->usb,
			     pipe,
			     (u8 *) ctrl_req,
			     ctrl_urb_data, size, ctrl_callback, serial);
	/* Send it on merry way */
	result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
	if (result) {
		dev_err(&ctrl_urb->dev->dev,
			"%s failed submit ctrl_urb %d type %d", __func__,
			result, type);
		return result;
	}

	/* done */
	return size;
}
1362
1363/* called by intr_callback when read occurs */
1364static int hso_mux_serial_read(struct hso_serial *serial)
1365{
1366 if (!serial)
1367 return -EINVAL;
1368
1369 /* clean data */
1370 memset(serial->rx_data[0], 0, CTRL_URB_RX_SIZE);
1371 /* make the request */
1372
1373 if (serial->num_rx_urbs != 1) {
1374 dev_err(&serial->parent->interface->dev,
1375 "ERROR: mux'd reads with multiple buffers "
1376 "not possible\n");
1377 return 0;
1378 }
1379 return mux_device_request(serial,
1380 USB_CDC_GET_ENCAPSULATED_RESPONSE,
1381 serial->parent->port_spec & HSO_PORT_MASK,
1382 serial->rx_urb[0],
1383 &serial->ctrl_req_rx,
1384 serial->rx_data[0], serial->rx_data_length);
1385}
1386
1387/* used for muxed serial port callback (muxed serial read) */
/* used for muxed serial port callback (muxed serial read)
 *
 * The shared interrupt endpoint delivers one byte whose bits flag which
 * of the (up to 8) mux channels have data pending.  For each flagged
 * channel with a registered serial port, start a ctrl read unless one
 * is already pending (HSO_SERIAL_FLAG_RX_SENT).  The interrupt URB is
 * always resubmitted at the end.
 */
static void intr_callback(struct urb *urb)
{
	struct hso_shared_int *shared_int = urb->context;
	struct hso_serial *serial;
	unsigned char *port_req;
	int status = urb->status;
	int i;

	/* Keep autosuspend at bay while traffic flows. */
	usb_mark_last_busy(urb->dev);

	/* sanity check */
	if (!shared_int)
		return;

	/* status check */
	if (status) {
		log_usb_status(status, __func__);
		return;
	}
	D4("\n--- Got intr callback 0x%02X ---", status);

	/* what request? */
	port_req = urb->transfer_buffer;
	D4(" port_req = 0x%.2X\n", *port_req);
	/* loop over all muxed ports to find the one sending this */
	for (i = 0; i < 8; i++) {
		/* max 8 channels on MUX */
		if (*port_req & (1 << i)) {
			serial = get_serial_by_shared_int_and_type(shared_int,
								   (1 << i));
			if (serial != NULL) {
				D1("Pending read interrupt on port %d\n", i);
				if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT,
						      &serial->flags)) {
					/* Setup and send a ctrl req read on
					 * port i */
					hso_mux_serial_read(serial);
				} else {
					D1("Already pending a read on "
					   "port %d\n", i);
				}
			}
		}
	}
	/* Resubmit interrupt urb */
	hso_mux_submit_intr_urb(shared_int, urb->dev, GFP_ATOMIC);
}
1435
1436/* called for writing to muxed serial port */
1437static int hso_mux_serial_write_data(struct hso_serial *serial)
1438{
1439 if (NULL == serial)
1440 return -EINVAL;
1441
1442 return mux_device_request(serial,
1443 USB_CDC_SEND_ENCAPSULATED_COMMAND,
1444 serial->parent->port_spec & HSO_PORT_MASK,
1445 serial->tx_urb,
1446 &serial->ctrl_req_tx,
1447 serial->tx_data, serial->tx_data_count);
1448}
1449
1450/* write callback for Diag and CS port */
1451static void hso_std_serial_write_bulk_callback(struct urb *urb)
1452{
1453 struct hso_serial *serial = urb->context;
1454 int status = urb->status;
1455
1456 /* sanity check */
1457 if (!serial) {
1458 D1("serial == NULL");
1459 return;
1460 }
1461
1462 spin_lock(&serial->serial_lock);
1463 serial->tx_urb_used = 0;
1464 spin_unlock(&serial->serial_lock);
1465 if (status) {
1466 log_usb_status(status, __func__);
1467 return;
1468 }
1469 hso_put_activity(serial->parent);
1470 tty_wakeup(serial->tty);
1471 hso_kick_transmit(serial);
1472
1473 D1(" ");
1474 return;
1475}
1476
1477/* called for writing diag or CS serial port */
1478static int hso_std_serial_write_data(struct hso_serial *serial)
1479{
1480 int count = serial->tx_data_count;
1481 int result;
1482
1483 usb_fill_bulk_urb(serial->tx_urb,
1484 serial->parent->usb,
1485 usb_sndbulkpipe(serial->parent->usb,
1486 serial->out_endp->
1487 bEndpointAddress & 0x7F),
1488 serial->tx_data, serial->tx_data_count,
1489 hso_std_serial_write_bulk_callback, serial);
1490
1491 result = usb_submit_urb(serial->tx_urb, GFP_ATOMIC);
1492 if (result) {
1493 dev_warn(&serial->parent->usb->dev,
1494 "Failed to submit urb - res %d\n", result);
1495 return result;
1496 }
1497
1498 return count;
1499}
1500
1501/* callback after read or write on muxed serial port */
/* callback after read or write on muxed serial port
 *
 * One completion handler for both directions of mux_device_request():
 * reads (GET_ENCAPSULATED_RESPONSE) push data to the tty and re-issue
 * the read while data keeps arriving; writes wake the tty and kick the
 * next transmit.  Direction is recovered from the setup packet.
 */
static void ctrl_callback(struct urb *urb)
{
	struct hso_serial *serial = urb->context;
	struct usb_ctrlrequest *req;
	int status = urb->status;

	/* sanity check */
	if (!serial)
		return;

	/* The ctrl URB is free again, whatever the outcome was. */
	spin_lock(&serial->serial_lock);
	serial->tx_urb_used = 0;
	spin_unlock(&serial->serial_lock);
	if (status) {
		log_usb_status(status, __func__);
		return;
	}

	/* what request? */
	req = (struct usb_ctrlrequest *)(urb->setup_packet);
	D4("\n--- Got muxed ctrl callback 0x%02X ---", status);
	D4("Actual length of urb = %d\n", urb->actual_length);
	DUMP1(urb->transfer_buffer, urb->actual_length);

	if (req->bRequestType ==
	    (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
		/* response to a read command */
		if (serial->open_count > 0) {
			/* handle RX data the normal way */
			put_rxbuf_data(urb, serial);
		}

		/* Re issue a read as long as we receive data. */
		if (urb->actual_length != 0)
			hso_mux_serial_read(serial);
		else
			/* Stream drained; allow intr_callback to restart it. */
			clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
	} else {
		hso_put_activity(serial->parent);
		tty_wakeup(serial->tty);
		/* response to a write command */
		hso_kick_transmit(serial);
	}
}
1546
1547/* handle RX data for serial port */
1548static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
1549{
1550 struct tty_struct *tty = serial->tty;
1551
1552 /* Sanity check */
1553 if (urb == NULL || serial == NULL) {
1554 D1("serial = NULL");
1555 return;
1556 }
1557
1558 /* Push data to tty */
1559 if (tty && urb->actual_length) {
1560 D1("data to push to tty");
1561 tty_insert_flip_string(tty, urb->transfer_buffer,
1562 urb->actual_length);
1563 tty_flip_buffer_push(tty);
1564 }
1565}
1566
1567/* read callback for Diag and CS port */
/* read callback for Diag and CS port
 *
 * Bulk-IN completion for non-muxed serial ports: strip the firmware's
 * CRC-bug trailer if present, push the data to the tty, then resubmit
 * the same URB.  Unlinked URBs (-ENOENT/-ECONNRESET) are resubmitted
 * too; other errors stop the stream.
 */
static void hso_std_serial_read_bulk_callback(struct urb *urb)
{
	struct hso_serial *serial = urb->context;
	int result;
	int status = urb->status;

	/* sanity check */
	if (!serial) {
		D1("serial == NULL");
		return;
	} else if (status) {
		log_usb_status(status, __func__);
		return;
	}

	D4("\n--- Got serial_read_bulk callback %02x ---", status);
	D1("Actual length = %d\n", urb->actual_length);
	DUMP1(urb->transfer_buffer, urb->actual_length);

	/* Anyone listening? */
	if (serial->open_count == 0)
		return;

	if (status == 0) {
		/* Firmware quirk: buggy devices append a 4-byte
		 * 0xDEADBEEF trailer; strip it when the length modulo
		 * wMaxPacketSize is 5 or 6 and the last bytes match. */
		if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
			u32 rest;
			u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
			rest =
			    urb->actual_length %
			    serial->in_endp->wMaxPacketSize;
			if (((rest == 5) || (rest == 6))
			    && !memcmp(((u8 *) urb->transfer_buffer) +
				       urb->actual_length - 4, crc_check, 4)) {
				urb->actual_length -= 4;
			}
		}
		/* Valid data, handle RX data */
		put_rxbuf_data(urb, serial);
	} else if (status == -ENOENT || status == -ECONNRESET) {
		/* Unlinked - check for throttled port. */
		D2("Port %d, successfully unlinked urb", serial->minor);
	} else {
		D2("Port %d, status = %d for read urb", serial->minor, status);
		return;
	}

	usb_mark_last_busy(urb->dev);

	/* We are done with this URB, resubmit it. Prep the USB to wait for
	 * another frame */
	usb_fill_bulk_urb(urb, serial->parent->usb,
			  usb_rcvbulkpipe(serial->parent->usb,
					  serial->in_endp->
					  bEndpointAddress & 0x7F),
			  urb->transfer_buffer, serial->rx_data_length,
			  hso_std_serial_read_bulk_callback, serial);
	/* Give this to the USB subsystem so it can tell us when more data
	 * arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d",
			__func__, result);
	}
}
1632
1633/* Base driver functions */
1634
1635static void hso_log_port(struct hso_device *hso_dev)
1636{
1637 char *port_type;
1638 char port_dev[20];
1639
1640 switch (hso_dev->port_spec & HSO_PORT_MASK) {
1641 case HSO_PORT_CONTROL:
1642 port_type = "Control";
1643 break;
1644 case HSO_PORT_APP:
1645 port_type = "Application";
1646 break;
1647 case HSO_PORT_GPS:
1648 port_type = "GPS";
1649 break;
1650 case HSO_PORT_GPS_CONTROL:
1651 port_type = "GPS control";
1652 break;
1653 case HSO_PORT_APP2:
1654 port_type = "Application2";
1655 break;
1656 case HSO_PORT_PCSC:
1657 port_type = "PCSC";
1658 break;
1659 case HSO_PORT_DIAG:
1660 port_type = "Diagnostic";
1661 break;
1662 case HSO_PORT_DIAG2:
1663 port_type = "Diagnostic2";
1664 break;
1665 case HSO_PORT_MODEM:
1666 port_type = "Modem";
1667 break;
1668 case HSO_PORT_NETWORK:
1669 port_type = "Network";
1670 break;
1671 default:
1672 port_type = "Unknown";
1673 break;
1674 }
1675 if ((hso_dev->port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
1676 sprintf(port_dev, "%s", dev2net(hso_dev)->net->name);
1677 } else
1678 sprintf(port_dev, "/dev/%s%d", tty_filename,
1679 dev2ser(hso_dev)->minor);
1680
1681 dev_dbg(&hso_dev->interface->dev, "HSO: Found %s port %s\n",
1682 port_type, port_dev);
1683}
1684
1685static int hso_start_net_device(struct hso_device *hso_dev)
1686{
1687 int i, result = 0;
1688 struct hso_net *hso_net = dev2net(hso_dev);
1689
1690 if (!hso_net)
1691 return -ENODEV;
1692
1693 /* send URBs for all read buffers */
1694 for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
1695
1696 /* Prep a receive URB */
1697 usb_fill_bulk_urb(hso_net->mux_bulk_rx_urb_pool[i],
1698 hso_dev->usb,
1699 usb_rcvbulkpipe(hso_dev->usb,
1700 hso_net->in_endp->
1701 bEndpointAddress & 0x7F),
1702 hso_net->mux_bulk_rx_buf_pool[i],
1703 MUX_BULK_RX_BUF_SIZE, read_bulk_callback,
1704 hso_net);
1705
1706 /* Put it out there so the device can send us stuff */
1707 result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i],
1708 GFP_NOIO);
1709 if (result)
1710 dev_warn(&hso_dev->usb->dev,
1711 "%s failed mux_bulk_rx_urb[%d] %d\n", __func__,
1712 i, result);
1713 }
1714
1715 return result;
1716}
1717
1718static int hso_stop_net_device(struct hso_device *hso_dev)
1719{
1720 int i;
1721 struct hso_net *hso_net = dev2net(hso_dev);
1722
1723 if (!hso_net)
1724 return -ENODEV;
1725
1726 for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
1727 if (hso_net->mux_bulk_rx_urb_pool[i])
1728 usb_kill_urb(hso_net->mux_bulk_rx_urb_pool[i]);
1729
1730 }
1731 if (hso_net->mux_bulk_tx_urb)
1732 usb_kill_urb(hso_net->mux_bulk_tx_urb);
1733
1734 return 0;
1735}
1736
/* Start USB traffic for a serial port.
 *
 * Non-muxed ports: fill and submit every bulk-IN URB (allocated
 * earlier in hso_serial_common_create()).  Muxed ports: submit the
 * shared interrupt URB on first use and bump its refcount.
 *
 * NOTE(review): if one bulk submission fails, previously submitted
 * URBs in the loop are left in flight - callers appear to clean up via
 * hso_stop_serial_device(); confirm.
 */
static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags)
{
	int i, result = 0;
	struct hso_serial *serial = dev2ser(hso_dev);

	if (!serial)
		return -ENODEV;

	/* If it is not the MUX port fill in and submit a bulk urb (already
	 * allocated in hso_serial_start) */
	if (!(serial->parent->port_spec & HSO_INTF_MUX)) {
		for (i = 0; i < serial->num_rx_urbs; i++) {
			usb_fill_bulk_urb(serial->rx_urb[i],
					  serial->parent->usb,
					  usb_rcvbulkpipe(serial->parent->usb,
							  serial->in_endp->
							  bEndpointAddress &
							  0x7F),
					  serial->rx_data[i],
					  serial->rx_data_length,
					  hso_std_serial_read_bulk_callback,
					  serial);
			result = usb_submit_urb(serial->rx_urb[i], flags);
			if (result) {
				dev_warn(&serial->parent->usb->dev,
					 "Failed to submit urb - res %d\n",
					 result);
				break;
			}
		}
	} else {
		/* Shared interrupt URB: only the first user submits it. */
		mutex_lock(&serial->shared_int->shared_int_lock);
		if (!serial->shared_int->use_count) {
			result =
			    hso_mux_submit_intr_urb(serial->shared_int,
						    hso_dev->usb, flags);
		}
		serial->shared_int->use_count++;
		mutex_unlock(&serial->shared_int->shared_int_lock);
	}

	return result;
}
1780
1781static int hso_stop_serial_device(struct hso_device *hso_dev)
1782{
1783 int i;
1784 struct hso_serial *serial = dev2ser(hso_dev);
1785
1786 if (!serial)
1787 return -ENODEV;
1788
1789 for (i = 0; i < serial->num_rx_urbs; i++) {
1790 if (serial->rx_urb[i])
1791 usb_kill_urb(serial->rx_urb[i]);
1792 }
1793
1794 if (serial->tx_urb)
1795 usb_kill_urb(serial->tx_urb);
1796
1797 if (serial->shared_int) {
1798 mutex_lock(&serial->shared_int->shared_int_lock);
1799 if (serial->shared_int->use_count &&
1800 (--serial->shared_int->use_count == 0)) {
1801 struct urb *urb;
1802
1803 urb = serial->shared_int->shared_intr_urb;
1804 if (urb)
1805 usb_kill_urb(urb);
1806 }
1807 mutex_unlock(&serial->shared_int->shared_int_lock);
1808 }
1809
1810 return 0;
1811}
1812
1813static void hso_serial_common_free(struct hso_serial *serial)
1814{
1815 int i;
1816
1817 if (serial->parent->dev)
1818 device_remove_file(serial->parent->dev, &dev_attr_hsotype);
1819
1820 tty_unregister_device(tty_drv, serial->minor);
1821
1822 for (i = 0; i < serial->num_rx_urbs; i++) {
1823 /* unlink and free RX URB */
1824 usb_free_urb(serial->rx_urb[i]);
1825 /* free the RX buffer */
1826 kfree(serial->rx_data[i]);
1827 }
1828
1829 /* unlink and free TX URB */
1830 usb_free_urb(serial->tx_urb);
1831 kfree(serial->tx_data);
1832}
1833
1834static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
1835 int rx_size, int tx_size)
1836{
1837 struct device *dev;
1838 int minor;
1839 int i;
1840
1841 minor = get_free_serial_index();
1842 if (minor < 0)
1843 goto exit;
1844
1845 /* register our minor number */
1846 serial->parent->dev = tty_register_device(tty_drv, minor,
1847 &serial->parent->interface->dev);
1848 dev = serial->parent->dev;
1849 dev->driver_data = serial->parent;
1850 i = device_create_file(dev, &dev_attr_hsotype);
1851
1852 /* fill in specific data for later use */
1853 serial->minor = minor;
1854 serial->magic = HSO_SERIAL_MAGIC;
1855 spin_lock_init(&serial->serial_lock);
1856 serial->num_rx_urbs = num_urbs;
1857
1858 /* RX, allocate urb and initialize */
1859
1860 /* prepare our RX buffer */
1861 serial->rx_data_length = rx_size;
1862 for (i = 0; i < serial->num_rx_urbs; i++) {
1863 serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
1864 if (!serial->rx_urb[i]) {
1865 dev_err(dev, "Could not allocate urb?\n");
1866 goto exit;
1867 }
1868 serial->rx_urb[i]->transfer_buffer = NULL;
1869 serial->rx_urb[i]->transfer_buffer_length = 0;
1870 serial->rx_data[i] = kzalloc(serial->rx_data_length,
1871 GFP_KERNEL);
1872 if (!serial->rx_data[i]) {
1873 dev_err(dev, "%s - Out of memory\n", __func__);
1874 goto exit;
1875 }
1876 }
1877
1878 /* TX, allocate urb and initialize */
1879 serial->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1880 if (!serial->tx_urb) {
1881 dev_err(dev, "Could not allocate urb?\n");
1882 goto exit;
1883 }
1884 serial->tx_urb->transfer_buffer = NULL;
1885 serial->tx_urb->transfer_buffer_length = 0;
1886 /* prepare our TX buffer */
1887 serial->tx_data_count = 0;
1888 serial->tx_buffer_count = 0;
1889 serial->tx_data_length = tx_size;
1890 serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
1891 if (!serial->tx_data) {
1892 dev_err(dev, "%s - Out of memory", __func__);
1893 goto exit;
1894 }
1895 serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
1896 if (!serial->tx_buffer) {
1897 dev_err(dev, "%s - Out of memory", __func__);
1898 goto exit;
1899 }
1900
1901 return 0;
1902exit:
1903 hso_serial_common_free(serial);
1904 return -1;
1905}
1906
/* Frees a general hso device (the container only; port-specific state
 * must already have been released by the caller) */
static void hso_free_device(struct hso_device *hso_dev)
{
	kfree(hso_dev);
}
1912
1913/* Creates a general hso device */
1914static struct hso_device *hso_create_device(struct usb_interface *intf,
1915 int port_spec)
1916{
1917 struct hso_device *hso_dev;
1918
1919 hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC);
1920 if (!hso_dev)
1921 return NULL;
1922
1923 hso_dev->port_spec = port_spec;
1924 hso_dev->usb = interface_to_usbdev(intf);
1925 hso_dev->interface = intf;
1926 kref_init(&hso_dev->ref);
1927 mutex_init(&hso_dev->mutex);
1928
1929 INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
1930 INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
1931
1932 return hso_dev;
1933}
1934
1935/* Removes a network device in the network device table */
1936static int remove_net_device(struct hso_device *hso_dev)
1937{
1938 int i;
1939
1940 for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
1941 if (network_table[i] == hso_dev) {
1942 network_table[i] = NULL;
1943 break;
1944 }
1945 }
1946 if (i == HSO_MAX_NET_DEVICES)
1947 return -1;
1948 return 0;
1949}
1950
/* Frees our network device: releases the muxed rx/tx urb and buffer
 * pools, removes the device from the global table, unregisters and
 * frees the netdev, then frees the container.  Safe on partially
 * constructed devices (usb_free_urb and kfree accept NULL). */
static void hso_free_net_device(struct hso_device *hso_dev)
{
	int i;
	struct hso_net *hso_net = dev2net(hso_dev);

	if (!hso_net)
		return;

	/* start freeing */
	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
		usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
		kfree(hso_net->mux_bulk_rx_buf_pool[i]);
	}
	usb_free_urb(hso_net->mux_bulk_tx_urb);
	kfree(hso_net->mux_bulk_tx_buf);

	remove_net_device(hso_net->parent);

	/* NOTE(review): the netdev is unregistered whenever it exists,
	 * but hso_create_net_device has error paths that reach here
	 * before register_netdev() succeeded - confirm this cannot be
	 * reached with an unregistered netdev. */
	if (hso_net->net) {
		unregister_netdev(hso_net->net);
		free_netdev(hso_net->net);
	}

	hso_free_device(hso_dev);
}
1977
/* initialize the network interface (setup callback for alloc_netdev):
 * point-to-point link, no ARP, no hardware header */
static void hso_net_init(struct net_device *net)
{
	struct hso_net *hso_net = netdev_priv(net);

	D1("sizeof hso_net is %d", (int)sizeof(*hso_net));

	/* fill in the other fields */
	net->open = hso_net_open;
	net->stop = hso_net_close;
	net->hard_start_xmit = hso_net_start_xmit;
	net->tx_timeout = hso_net_tx_timeout;
	net->watchdog_timeo = HSO_NET_TX_TIMEOUT;
	net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	net->type = ARPHRD_NONE;
	/* -14: presumably reserves ethernet-header-sized room below
	 * DEFAULT_MTU - TODO confirm */
	net->mtu = DEFAULT_MTU - 14;
	net->tx_queue_len = 10;
	SET_ETHTOOL_OPS(net, &ops);

	/* and initialize the semaphore */
	spin_lock_init(&hso_net->net_lock);
}
2000
2001/* Adds a network device in the network device table */
2002static int add_net_device(struct hso_device *hso_dev)
2003{
2004 int i;
2005
2006 for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
2007 if (network_table[i] == NULL) {
2008 network_table[i] = hso_dev;
2009 break;
2010 }
2011 }
2012 if (i == HSO_MAX_NET_DEVICES)
2013 return -1;
2014 return 0;
2015}
2016
2017static int hso_radio_toggle(void *data, enum rfkill_state state)
2018{
2019 struct hso_device *hso_dev = data;
2020 int enabled = (state == RFKILL_STATE_ON);
2021 int rv;
2022
2023 mutex_lock(&hso_dev->mutex);
2024 if (hso_dev->usb_gone)
2025 rv = 0;
2026 else
2027 rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
2028 enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
2029 USB_CTRL_SET_TIMEOUT);
2030 mutex_unlock(&hso_dev->mutex);
2031 return rv;
2032}
2033
2034/* Creates and sets up everything for rfkill */
2035static void hso_create_rfkill(struct hso_device *hso_dev,
2036 struct usb_interface *interface)
2037{
2038 struct hso_net *hso_net = dev2net(hso_dev);
2039 struct device *dev = hso_dev->dev;
2040 char *rfkn;
2041
2042 hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
2043 RFKILL_TYPE_WLAN);
2044 if (!hso_net->rfkill) {
2045 dev_err(dev, "%s - Out of memory", __func__);
2046 return;
2047 }
2048 rfkn = kzalloc(20, GFP_KERNEL);
2049 if (!rfkn) {
2050 rfkill_free(hso_net->rfkill);
2051 dev_err(dev, "%s - Out of memory", __func__);
2052 return;
2053 }
2054 snprintf(rfkn, 20, "hso-%d",
2055 interface->altsetting->desc.bInterfaceNumber);
2056 hso_net->rfkill->name = rfkn;
2057 hso_net->rfkill->state = RFKILL_STATE_ON;
2058 hso_net->rfkill->data = hso_dev;
2059 hso_net->rfkill->toggle_radio = hso_radio_toggle;
2060 if (rfkill_register(hso_net->rfkill) < 0) {
2061 kfree(rfkn);
2062 hso_net->rfkill->name = NULL;
2063 rfkill_free(hso_net->rfkill);
2064 dev_err(dev, "%s - Failed to register rfkill", __func__);
2065 return;
2066 }
2067}
2068
/* Creates our network device: the muxed network port with its netdev,
 * endpoints, rx/tx urb pools and rfkill switch.  Returns the container
 * hso_device or NULL on failure. */
static struct hso_device *hso_create_net_device(struct usb_interface *interface)
{
	int result, i;
	struct net_device *net;
	struct hso_net *hso_net;
	struct hso_device *hso_dev;

	hso_dev = hso_create_device(interface, HSO_INTF_MUX | HSO_PORT_NETWORK);
	if (!hso_dev)
		return NULL;

	/* allocate our network device, then we can put in our private data */
	/* call hso_net_init to do the basic initialization */
	net = alloc_netdev(sizeof(struct hso_net), "hso%d", hso_net_init);
	if (!net) {
		dev_err(&interface->dev, "Unable to create ethernet device\n");
		goto exit;
	}

	hso_net = netdev_priv(net);

	/* link private data, netdev and container together */
	hso_dev->port_data.dev_net = hso_net;
	hso_net->net = net;
	hso_net->parent = hso_dev;

	hso_net->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
				      USB_DIR_IN);
	if (!hso_net->in_endp) {
		dev_err(&interface->dev, "Can't find BULK IN endpoint\n");
		goto exit;
	}
	hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
				       USB_DIR_OUT);
	if (!hso_net->out_endp) {
		dev_err(&interface->dev, "Can't find BULK OUT endpoint\n");
		goto exit;
	}
	SET_NETDEV_DEV(net, &interface->dev);

	/* registering our net device
	 * NOTE(review): registration happens before the rx/tx pools below
	 * are allocated; an open() racing this window would find no
	 * buffers - consider registering last (confirm against the open
	 * path). */
	result = register_netdev(net);
	if (result) {
		dev_err(&interface->dev, "Failed to register device\n");
		goto exit;
	}

	/* start allocating */
	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
		hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!hso_net->mux_bulk_rx_urb_pool[i]) {
			dev_err(&interface->dev, "Could not allocate rx urb\n");
			goto exit;
		}
		hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
							   GFP_KERNEL);
		if (!hso_net->mux_bulk_rx_buf_pool[i]) {
			dev_err(&interface->dev, "Could not allocate rx buf\n");
			goto exit;
		}
	}
	hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!hso_net->mux_bulk_tx_urb) {
		dev_err(&interface->dev, "Could not allocate tx urb\n");
		goto exit;
	}
	hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
	if (!hso_net->mux_bulk_tx_buf) {
		dev_err(&interface->dev, "Could not allocate tx buf\n");
		goto exit;
	}

	/* NOTE(review): add_net_device() can fail when the table is full
	 * but its return value is ignored here - confirm that is safe. */
	add_net_device(hso_dev);

	hso_log_port(hso_dev);

	hso_create_rfkill(hso_dev, interface);

	return hso_dev;
exit:
	/* NOTE(review): hso_free_net_device() calls unregister_netdev()
	 * whenever hso_net->net is set, including error paths above where
	 * register_netdev() never ran - verify. */
	hso_free_net_device(hso_dev);
	return NULL;
}
2152
/* Frees an AT channel ( goes for both mux and non-mux ) */
static void hso_free_serial_device(struct hso_device *hso_dev)
{
	struct hso_serial *serial = dev2ser(hso_dev);

	if (!serial)
		return;
	/* drop the port from the global minor table first */
	set_serial_by_index(serial->minor, NULL);

	/* tty device, sysfs attribute, urbs and buffers */
	hso_serial_common_free(serial);

	/* Muxed ports: drop our reference on the shared interrupt
	 * machinery.  Note the asymmetric locking: hso_free_shared_int()
	 * is entered with shared_int_lock held and releases it itself
	 * before freeing the mux structure. */
	if (serial->shared_int) {
		mutex_lock(&serial->shared_int->shared_int_lock);
		if (--serial->shared_int->ref_count == 0)
			hso_free_shared_int(serial->shared_int);
		else
			mutex_unlock(&serial->shared_int->shared_int_lock);
	}
	kfree(serial);
	hso_free_device(hso_dev);
}
2174
2175/* Creates a bulk AT channel */
2176static struct hso_device *hso_create_bulk_serial_device(
2177 struct usb_interface *interface, int port)
2178{
2179 struct hso_device *hso_dev;
2180 struct hso_serial *serial;
2181 int num_urbs;
2182
2183 hso_dev = hso_create_device(interface, port);
2184 if (!hso_dev)
2185 return NULL;
2186
2187 serial = kzalloc(sizeof(*serial), GFP_KERNEL);
2188 if (!serial)
2189 goto exit;
2190
2191 serial->parent = hso_dev;
2192 hso_dev->port_data.dev_serial = serial;
2193
2194 if (port & HSO_PORT_MODEM)
2195 num_urbs = 2;
2196 else
2197 num_urbs = 1;
2198
2199 if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
2200 BULK_URB_TX_SIZE))
2201 goto exit;
2202
2203 serial->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
2204 USB_DIR_IN);
2205 if (!serial->in_endp) {
2206 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2207 goto exit;
2208 }
2209
2210 if (!
2211 (serial->out_endp =
2212 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
2213 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2214 goto exit;
2215 }
2216
2217 serial->write_data = hso_std_serial_write_data;
2218
2219 /* and record this serial */
2220 set_serial_by_index(serial->minor, serial);
2221
2222 /* setup the proc dirs and files if needed */
2223 hso_log_port(hso_dev);
2224
2225 /* done, return it */
2226 return hso_dev;
2227exit:
2228 if (hso_dev && serial)
2229 hso_serial_common_free(serial);
2230 kfree(serial);
2231 hso_free_device(hso_dev);
2232 return NULL;
2233}
2234
2235/* Creates a multiplexed AT channel */
2236static
2237struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
2238 int port,
2239 struct hso_shared_int *mux)
2240{
2241 struct hso_device *hso_dev;
2242 struct hso_serial *serial;
2243 int port_spec;
2244
2245 port_spec = HSO_INTF_MUX;
2246 port_spec &= ~HSO_PORT_MASK;
2247
2248 port_spec |= hso_mux_to_port(port);
2249 if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NO_PORT)
2250 return NULL;
2251
2252 hso_dev = hso_create_device(interface, port_spec);
2253 if (!hso_dev)
2254 return NULL;
2255
2256 serial = kzalloc(sizeof(*serial), GFP_KERNEL);
2257 if (!serial)
2258 goto exit;
2259
2260 hso_dev->port_data.dev_serial = serial;
2261 serial->parent = hso_dev;
2262
2263 if (hso_serial_common_create
2264 (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
2265 goto exit;
2266
2267 serial->tx_data_length--;
2268 serial->write_data = hso_mux_serial_write_data;
2269
2270 serial->shared_int = mux;
2271 mutex_lock(&serial->shared_int->shared_int_lock);
2272 serial->shared_int->ref_count++;
2273 mutex_unlock(&serial->shared_int->shared_int_lock);
2274
2275 /* and record this serial */
2276 set_serial_by_index(serial->minor, serial);
2277
2278 /* setup the proc dirs and files if needed */
2279 hso_log_port(hso_dev);
2280
2281 /* done, return it */
2282 return hso_dev;
2283
2284exit:
2285 if (serial) {
2286 tty_unregister_device(tty_drv, serial->minor);
2287 kfree(serial);
2288 }
2289 if (hso_dev)
2290 hso_free_device(hso_dev);
2291 return NULL;
2292
2293}
2294
/* Free the interrupt machinery shared by the muxed serial ports.
 * Must be entered with mux->shared_int_lock held; the lock is released
 * here before the containing structure is freed (see the asymmetric
 * locking in hso_free_serial_device). */
static void hso_free_shared_int(struct hso_shared_int *mux)
{
	usb_free_urb(mux->shared_intr_urb);
	kfree(mux->shared_intr_buf);
	mutex_unlock(&mux->shared_int_lock);
	kfree(mux);
}
2302
2303static
2304struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
2305{
2306 struct hso_shared_int *mux = kzalloc(sizeof(*mux), GFP_KERNEL);
2307
2308 if (!mux)
2309 return NULL;
2310
2311 mux->intr_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT,
2312 USB_DIR_IN);
2313 if (!mux->intr_endp) {
2314 dev_err(&interface->dev, "Can't find INT IN endpoint\n");
2315 goto exit;
2316 }
2317
2318 mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
2319 if (!mux->shared_intr_urb) {
2320 dev_err(&interface->dev, "Could not allocate intr urb?");
2321 goto exit;
2322 }
2323 mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize,
2324 GFP_KERNEL);
2325 if (!mux->shared_intr_buf) {
2326 dev_err(&interface->dev, "Could not allocate intr buf?");
2327 goto exit;
2328 }
2329
2330 mutex_init(&mux->shared_int_lock);
2331
2332 return mux;
2333
2334exit:
2335 kfree(mux->shared_intr_buf);
2336 usb_free_urb(mux->shared_intr_urb);
2337 kfree(mux);
2338 return NULL;
2339}
2340
2341/* Gets the port spec for a certain interface */
2342static int hso_get_config_data(struct usb_interface *interface)
2343{
2344 struct usb_device *usbdev = interface_to_usbdev(interface);
2345 u8 config_data[17];
2346 u32 if_num = interface->altsetting->desc.bInterfaceNumber;
2347 s32 result;
2348
2349 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
2350 0x86, 0xC0, 0, 0, config_data, 17,
2351 USB_CTRL_SET_TIMEOUT) != 0x11) {
2352 return -EIO;
2353 }
2354
2355 switch (config_data[if_num]) {
2356 case 0x0:
2357 result = 0;
2358 break;
2359 case 0x1:
2360 result = HSO_PORT_DIAG;
2361 break;
2362 case 0x2:
2363 result = HSO_PORT_GPS;
2364 break;
2365 case 0x3:
2366 result = HSO_PORT_GPS_CONTROL;
2367 break;
2368 case 0x4:
2369 result = HSO_PORT_APP;
2370 break;
2371 case 0x5:
2372 result = HSO_PORT_APP2;
2373 break;
2374 case 0x6:
2375 result = HSO_PORT_CONTROL;
2376 break;
2377 case 0x7:
2378 result = HSO_PORT_NETWORK;
2379 break;
2380 case 0x8:
2381 result = HSO_PORT_MODEM;
2382 break;
2383 case 0x9:
2384 result = HSO_PORT_MSD;
2385 break;
2386 case 0xa:
2387 result = HSO_PORT_PCSC;
2388 break;
2389 case 0xb:
2390 result = HSO_PORT_VOICE;
2391 break;
2392 default:
2393 result = 0;
2394 }
2395
2396 if (result)
2397 result |= HSO_INTF_BULK;
2398
2399 if (config_data[16] & 0x1)
2400 result |= HSO_INFO_CRC_BUG;
2401
2402 return result;
2403}
2404
2405/* called once for each interface upon device insertion */
2406static int hso_probe(struct usb_interface *interface,
2407 const struct usb_device_id *id)
2408{
2409 int mux, i, if_num, port_spec;
2410 unsigned char port_mask;
2411 struct hso_device *hso_dev = NULL;
2412 struct hso_shared_int *shared_int;
2413 struct hso_device *tmp_dev = NULL;
2414
2415 if_num = interface->altsetting->desc.bInterfaceNumber;
2416
2417 /* Get the interface/port specification from either driver_info or from
2418 * the device itself */
2419 if (id->driver_info)
2420 port_spec = ((u32 *)(id->driver_info))[if_num];
2421 else
2422 port_spec = hso_get_config_data(interface);
2423
2424 if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
2425 dev_err(&interface->dev, "Not our interface\n");
2426 return -ENODEV;
2427 }
2428 /* Check if we need to switch to alt interfaces prior to port
2429 * configuration */
2430 if (interface->num_altsetting > 1)
2431 usb_set_interface(interface_to_usbdev(interface), if_num, 1);
2432 interface->needs_remote_wakeup = 1;
2433
2434 /* Allocate new hso device(s) */
2435 switch (port_spec & HSO_INTF_MASK) {
2436 case HSO_INTF_MUX:
2437 if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
2438 /* Create the network device */
2439 if (!disable_net) {
2440 hso_dev = hso_create_net_device(interface);
2441 if (!hso_dev)
2442 goto exit;
2443 tmp_dev = hso_dev;
2444 }
2445 }
2446
2447 if (hso_get_mux_ports(interface, &port_mask))
2448 /* TODO: de-allocate everything */
2449 goto exit;
2450
2451 shared_int = hso_create_shared_int(interface);
2452 if (!shared_int)
2453 goto exit;
2454
2455 for (i = 1, mux = 0; i < 0x100; i = i << 1, mux++) {
2456 if (port_mask & i) {
2457 hso_dev = hso_create_mux_serial_device(
2458 interface, i, shared_int);
2459 if (!hso_dev)
2460 goto exit;
2461 }
2462 }
2463
2464 if (tmp_dev)
2465 hso_dev = tmp_dev;
2466 break;
2467
2468 case HSO_INTF_BULK:
2469 /* It's a regular bulk interface */
2470 if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK)
2471 && !disable_net)
2472 hso_dev = hso_create_net_device(interface);
2473 else
2474 hso_dev =
2475 hso_create_bulk_serial_device(interface, port_spec);
2476 if (!hso_dev)
2477 goto exit;
2478 break;
2479 default:
2480 goto exit;
2481 }
2482
2483 usb_driver_claim_interface(&hso_driver, interface, hso_dev);
2484
2485 /* save our data pointer in this device */
2486 usb_set_intfdata(interface, hso_dev);
2487
2488 /* done */
2489 return 0;
2490exit:
2491 hso_free_interface(interface);
2492 return -ENODEV;
2493}
2494
2495/* device removed, cleaning up */
2496static void hso_disconnect(struct usb_interface *interface)
2497{
2498 hso_free_interface(interface);
2499
2500 /* remove reference of our private data */
2501 usb_set_intfdata(interface, NULL);
2502
2503 usb_driver_release_interface(&hso_driver, interface);
2504}
2505
/* Work item scheduled from hso_get_activity(): performs the autopm
 * "get" (interface resume) from workqueue context. */
static void async_get_intf(struct work_struct *data)
{
	struct hso_device *hso_dev =
	    container_of(data, struct hso_device, async_get_intf);
	usb_autopm_get_interface(hso_dev->interface);
}
2512
/* Work item scheduled from hso_put_activity(): performs the autopm
 * "put" (allow suspend) from workqueue context. */
static void async_put_intf(struct work_struct *data)
{
	struct hso_device *hso_dev =
	    container_of(data, struct hso_device, async_put_intf);
	usb_autopm_put_interface(hso_dev->interface);
}
2519
2520static int hso_get_activity(struct hso_device *hso_dev)
2521{
2522 if (hso_dev->usb->state == USB_STATE_SUSPENDED) {
2523 if (!hso_dev->is_active) {
2524 hso_dev->is_active = 1;
2525 schedule_work(&hso_dev->async_get_intf);
2526 }
2527 }
2528
2529 if (hso_dev->usb->state != USB_STATE_CONFIGURED)
2530 return -EAGAIN;
2531
2532 usb_mark_last_busy(hso_dev->usb);
2533
2534 return 0;
2535}
2536
2537static int hso_put_activity(struct hso_device *hso_dev)
2538{
2539 if (hso_dev->usb->state != USB_STATE_SUSPENDED) {
2540 if (hso_dev->is_active) {
2541 hso_dev->is_active = 0;
2542 schedule_work(&hso_dev->async_put_intf);
2543 return -EAGAIN;
2544 }
2545 }
2546 hso_dev->is_active = 0;
2547 return 0;
2548}
2549
/* called by kernel when we need to suspend device */
static int hso_suspend(struct usb_interface *iface, pm_message_t message)
{
	int i, result;

	/* Stop all serial ports belonging to this interface */
	for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
		if (serial_table[i] && (serial_table[i]->interface == iface)) {
			result = hso_stop_serial_device(serial_table[i]);
			if (result)
				goto out;
		}
	}

	/* Stop all network ports belonging to this interface */
	for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
		if (network_table[i] &&
		    (network_table[i]->interface == iface)) {
			result = hso_stop_net_device(network_table[i]);
			if (result)
				goto out;
		}
	}

out:
	/* NOTE(review): 'result' is computed above but never returned -
	 * suspend always reports success.  Confirm this is intentional
	 * before changing it. */
	return 0;
}
2577
2578/* called by kernel when we need to resume device */
2579static int hso_resume(struct usb_interface *iface)
2580{
2581 int i, result = 0;
2582 struct hso_net *hso_net;
2583
2584 /* Start all serial ports */
2585 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
2586 if (serial_table[i] && (serial_table[i]->interface == iface)) {
2587 if (dev2ser(serial_table[i])->open_count) {
2588 result =
2589 hso_start_serial_device(serial_table[i], GFP_NOIO);
2590 hso_kick_transmit(dev2ser(serial_table[i]));
2591 if (result)
2592 goto out;
2593 }
2594 }
2595 }
2596
2597 /* Start all network ports */
2598 for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
2599 if (network_table[i] &&
2600 (network_table[i]->interface == iface)) {
2601 hso_net = dev2net(network_table[i]);
2602 /* First transmit any lingering data, then restart the
2603 * device. */
2604 if (hso_net->skb_tx_buf) {
2605 dev_dbg(&iface->dev,
2606 "Transmitting lingering data\n");
2607 hso_net_start_xmit(hso_net->skb_tx_buf,
2608 hso_net->net);
2609 }
2610 result = hso_start_net_device(network_table[i]);
2611 if (result)
2612 goto out;
2613 }
2614 }
2615
2616out:
2617 return result;
2618}
2619
/* kref release callback for a serial hso_device: the last reference is
 * gone (see kref_put in hso_free_interface), so free the whole port. */
static void hso_serial_ref_free(struct kref *ref)
{
	struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);

	hso_free_serial_device(hso_dev);
}
2626
/* Tear down every serial and network port living on 'interface'.
 * Called on disconnect and on probe failure. */
static void hso_free_interface(struct usb_interface *interface)
{
	struct hso_serial *hso_dev;
	int i;

	/* Serial ports: hang up any open tty, mark the parent device as
	 * gone so further I/O bails out, then drop the table reference;
	 * actual freeing happens in hso_serial_ref_free() when the kref
	 * hits zero. */
	for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
		if (serial_table[i]
		    && (serial_table[i]->interface == interface)) {
			hso_dev = dev2ser(serial_table[i]);
			if (hso_dev->tty)
				tty_hangup(hso_dev->tty);
			mutex_lock(&hso_dev->parent->mutex);
			hso_dev->parent->usb_gone = 1;
			mutex_unlock(&hso_dev->parent->mutex);
			kref_put(&serial_table[i]->ref, hso_serial_ref_free);
		}
	}

	/* Network ports: stop traffic, flush pending autopm work,
	 * unregister rfkill and free the device. */
	for (i = 0; i < HSO_MAX_NET_DEVICES; i++) {
		if (network_table[i]
		    && (network_table[i]->interface == interface)) {
			struct rfkill *rfk = dev2net(network_table[i])->rfkill;
			/* hso_stop_net_device doesn't stop the net queue since
			 * traffic needs to start it again when suspended */
			netif_stop_queue(dev2net(network_table[i])->net);
			hso_stop_net_device(network_table[i]);
			cancel_work_sync(&network_table[i]->async_put_intf);
			cancel_work_sync(&network_table[i]->async_get_intf);
			if(rfk)
				rfkill_unregister(rfk);
			hso_free_net_device(network_table[i]);
		}
	}
}
2661
2662/* Helper functions */
2663
2664/* Get the endpoint ! */
2665static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
2666 int type, int dir)
2667{
2668 int i;
2669 struct usb_host_interface *iface = intf->cur_altsetting;
2670 struct usb_endpoint_descriptor *endp;
2671
2672 for (i = 0; i < iface->desc.bNumEndpoints; i++) {
2673 endp = &iface->endpoint[i].desc;
2674 if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
2675 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type))
2676 return endp;
2677 }
2678
2679 return NULL;
2680}
2681
2682/* Get the byte that describes which ports are enabled */
2683static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports)
2684{
2685 int i;
2686 struct usb_host_interface *iface = intf->cur_altsetting;
2687
2688 if (iface->extralen == 3) {
2689 *ports = iface->extra[2];
2690 return 0;
2691 }
2692
2693 for (i = 0; i < iface->desc.bNumEndpoints; i++) {
2694 if (iface->endpoint[i].extralen == 3) {
2695 *ports = iface->endpoint[i].extra[2];
2696 return 0;
2697 }
2698 }
2699
2700 return -1;
2701}
2702
2703/* interrupt urb needs to be submitted, used for serial read of muxed port */
2704static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
2705 struct usb_device *usb, gfp_t gfp)
2706{
2707 int result;
2708
2709 usb_fill_int_urb(shared_int->shared_intr_urb, usb,
2710 usb_rcvintpipe(usb,
2711 shared_int->intr_endp->bEndpointAddress & 0x7F),
2712 shared_int->shared_intr_buf,
2713 shared_int->intr_endp->wMaxPacketSize,
2714 intr_callback, shared_int,
2715 shared_int->intr_endp->bInterval);
2716
2717 result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
2718 if (result)
2719 dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__,
2720 result);
2721
2722 return result;
2723}
2724
/* operations setup of the serial interface */
/* NOTE(review): tty_set_operations() takes a const pointer; this table
 * could likely be declared const - confirm against this tree's tty
 * API before changing. */
static struct tty_operations hso_serial_ops = {
	.open = hso_serial_open,
	.close = hso_serial_close,
	.write = hso_serial_write,
	.write_room = hso_serial_write_room,
	.set_termios = hso_serial_set_termios,
	.chars_in_buffer = hso_serial_chars_in_buffer,
	.tiocmget = hso_serial_tiocmget,
	.tiocmset = hso_serial_tiocmset,
};
2736
/* USB driver glue: probe/disconnect plus the power-management
 * callbacks; USB autosuspend is supported (see hso_suspend and
 * hso_resume). */
static struct usb_driver hso_driver = {
	.name = driver_name,
	.probe = hso_probe,
	.disconnect = hso_disconnect,
	.id_table = hso_ids,
	.suspend = hso_suspend,
	.resume = hso_resume,
	.supports_autosuspend = 1,
};
2746
2747static int __init hso_init(void)
2748{
2749 int i;
2750 int result;
2751
2752 /* put it in the log */
2753 printk(KERN_INFO "hso: %s\n", version);
2754
2755 /* Initialise the serial table semaphore and table */
2756 spin_lock_init(&serial_table_lock);
2757 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
2758 serial_table[i] = NULL;
2759
2760 /* allocate our driver using the proper amount of supported minors */
2761 tty_drv = alloc_tty_driver(HSO_SERIAL_TTY_MINORS);
2762 if (!tty_drv)
2763 return -ENOMEM;
2764
2765 /* fill in all needed values */
2766 tty_drv->magic = TTY_DRIVER_MAGIC;
2767 tty_drv->owner = THIS_MODULE;
2768 tty_drv->driver_name = driver_name;
2769 tty_drv->name = tty_filename;
2770
2771 /* if major number is provided as parameter, use that one */
2772 if (tty_major)
2773 tty_drv->major = tty_major;
2774
2775 tty_drv->minor_start = 0;
2776 tty_drv->num = HSO_SERIAL_TTY_MINORS;
2777 tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
2778 tty_drv->subtype = SERIAL_TYPE_NORMAL;
2779 tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
2780 tty_drv->init_termios = tty_std_termios;
2781 tty_drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
2782 tty_drv->termios = hso_serial_termios;
2783 tty_drv->termios_locked = hso_serial_termios_locked;
2784 tty_set_operations(tty_drv, &hso_serial_ops);
2785
2786 /* register the tty driver */
2787 result = tty_register_driver(tty_drv);
2788 if (result) {
2789 printk(KERN_ERR "%s - tty_register_driver failed(%d)\n",
2790 __func__, result);
2791 return result;
2792 }
2793
2794 /* register this module as an usb driver */
2795 result = usb_register(&hso_driver);
2796 if (result) {
2797 printk(KERN_ERR "Could not register hso driver? error: %d\n",
2798 result);
2799 /* cleanup serial interface */
2800 tty_unregister_driver(tty_drv);
2801 return result;
2802 }
2803
2804 /* done */
2805 return 0;
2806}
2807
2808static void __exit hso_exit(void)
2809{
2810 printk(KERN_INFO "hso: unloaded\n");
2811
2812 tty_unregister_driver(tty_drv);
2813 /* deregister the usb driver */
2814 usb_deregister(&hso_driver);
2815}
2816
2817/* Module definitions */
2818module_init(hso_init);
2819module_exit(hso_exit);
2820
2821MODULE_AUTHOR(MOD_AUTHOR);
2822MODULE_DESCRIPTION(MOD_DESCRIPTION);
2823MODULE_LICENSE(MOD_LICENSE);
2824MODULE_INFO(Version, DRIVER_VERSION);
2825
2826/* change the debug level (eg: insmod hso.ko debug=0x04) */
2827MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
2828module_param(debug, int, S_IRUGO | S_IWUSR);
2829
2830/* set the major tty number (eg: insmod hso.ko tty_major=245) */
2831MODULE_PARM_DESC(tty_major, "Set the major tty number");
2832module_param(tty_major, int, S_IRUGO | S_IWUSR);
2833
2834/* disable network interface (eg: insmod hso.ko disable_net=1) */
2835MODULE_PARM_DESC(disable_net, "Disable the network interface");
2836module_param(disable_net, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6b8d882d197b..bcbf2fa9b94a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1495,24 +1495,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1495 * enough. This function returns a negative value if the received 1495 * enough. This function returns a negative value if the received
1496 * packet is too big or if memory is exhausted. 1496 * packet is too big or if memory is exhausted.
1497 */ 1497 */
1498static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, 1498static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1499 struct velocity_info *vptr) 1499 struct velocity_info *vptr)
1500{ 1500{
1501 int ret = -1; 1501 int ret = -1;
1502
1503 if (pkt_size < rx_copybreak) { 1502 if (pkt_size < rx_copybreak) {
1504 struct sk_buff *new_skb; 1503 struct sk_buff *new_skb;
1505 1504
1506 new_skb = dev_alloc_skb(pkt_size + 2); 1505 new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
1507 if (new_skb) { 1506 if (new_skb) {
1508 new_skb->dev = vptr->dev;
1509 new_skb->ip_summed = rx_skb[0]->ip_summed; 1507 new_skb->ip_summed = rx_skb[0]->ip_summed;
1510 1508 skb_reserve(new_skb, 2);
1511 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) 1509 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1512 skb_reserve(new_skb, 2);
1513
1514 skb_copy_from_linear_data(rx_skb[0], new_skb->data,
1515 pkt_size);
1516 *rx_skb = new_skb; 1510 *rx_skb = new_skb;
1517 ret = 0; 1511 ret = 0;
1518 } 1512 }
@@ -1533,12 +1527,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1533static inline void velocity_iph_realign(struct velocity_info *vptr, 1527static inline void velocity_iph_realign(struct velocity_info *vptr,
1534 struct sk_buff *skb, int pkt_size) 1528 struct sk_buff *skb, int pkt_size)
1535{ 1529{
1536 /* FIXME - memmove ? */
1537 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { 1530 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
1538 int i; 1531 memmove(skb->data + 2, skb->data, pkt_size);
1539
1540 for (i = pkt_size; i >= 0; i--)
1541 *(skb->data + i + 2) = *(skb->data + i);
1542 skb_reserve(skb, 2); 1532 skb_reserve(skb, 2);
1543 } 1533 }
1544} 1534}
@@ -1629,7 +1619,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1629 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1619 struct rx_desc *rd = &(vptr->rd_ring[idx]);
1630 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1620 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
1631 1621
1632 rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64); 1622 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
1633 if (rd_info->skb == NULL) 1623 if (rd_info->skb == NULL)
1634 return -ENOMEM; 1624 return -ENOMEM;
1635 1625
@@ -1638,7 +1628,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1638 * 64byte alignment. 1628 * 64byte alignment.
1639 */ 1629 */
1640 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1630 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1641 rd_info->skb->dev = vptr->dev;
1642 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1631 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1643 1632
1644 /* 1633 /*
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 5c0d2b082750..0ba55ba93958 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -306,11 +306,10 @@ static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
306 struct ieee80211_tx_queue_stats *stats) 306 struct ieee80211_tx_queue_stats *stats)
307{ 307{
308 struct adm8211_priv *priv = dev->priv; 308 struct adm8211_priv *priv = dev->priv;
309 struct ieee80211_tx_queue_stats_data *data = &stats->data[0];
310 309
311 data->len = priv->cur_tx - priv->dirty_tx; 310 stats[0].len = priv->cur_tx - priv->dirty_tx;
312 data->limit = priv->tx_ring_size - 2; 311 stats[0].limit = priv->tx_ring_size - 2;
313 data->count = priv->dirty_tx; 312 stats[0].count = priv->dirty_tx;
314 313
315 return 0; 314 return 0;
316} 315}
@@ -325,7 +324,7 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
325 for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { 324 for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
326 unsigned int entry = dirty_tx % priv->tx_ring_size; 325 unsigned int entry = dirty_tx % priv->tx_ring_size;
327 u32 status = le32_to_cpu(priv->tx_ring[entry].status); 326 u32 status = le32_to_cpu(priv->tx_ring[entry].status);
328 struct ieee80211_tx_status tx_status; 327 struct ieee80211_tx_info *txi;
329 struct adm8211_tx_ring_info *info; 328 struct adm8211_tx_ring_info *info;
330 struct sk_buff *skb; 329 struct sk_buff *skb;
331 330
@@ -335,24 +334,23 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
335 334
336 info = &priv->tx_buffers[entry]; 335 info = &priv->tx_buffers[entry];
337 skb = info->skb; 336 skb = info->skb;
337 txi = IEEE80211_SKB_CB(skb);
338 338
339 /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ 339 /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
340 340
341 pci_unmap_single(priv->pdev, info->mapping, 341 pci_unmap_single(priv->pdev, info->mapping,
342 info->skb->len, PCI_DMA_TODEVICE); 342 info->skb->len, PCI_DMA_TODEVICE);
343 343
344 memset(&tx_status, 0, sizeof(tx_status)); 344 memset(&txi->status, 0, sizeof(txi->status));
345 skb_pull(skb, sizeof(struct adm8211_tx_hdr)); 345 skb_pull(skb, sizeof(struct adm8211_tx_hdr));
346 memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); 346 memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
347 memcpy(&tx_status.control, &info->tx_control, 347 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) {
348 sizeof(tx_status.control));
349 if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) {
350 if (status & TDES0_STATUS_ES) 348 if (status & TDES0_STATUS_ES)
351 tx_status.excessive_retries = 1; 349 txi->status.excessive_retries = 1;
352 else 350 else
353 tx_status.flags |= IEEE80211_TX_STATUS_ACK; 351 txi->flags |= IEEE80211_TX_STAT_ACK;
354 } 352 }
355 ieee80211_tx_status_irqsafe(dev, skb, &tx_status); 353 ieee80211_tx_status_irqsafe(dev, skb);
356 354
357 info->skb = NULL; 355 info->skb = NULL;
358 } 356 }
@@ -446,9 +444,9 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
446 struct ieee80211_rx_status rx_status = {0}; 444 struct ieee80211_rx_status rx_status = {0};
447 445
448 if (priv->pdev->revision < ADM8211_REV_CA) 446 if (priv->pdev->revision < ADM8211_REV_CA)
449 rx_status.ssi = rssi; 447 rx_status.signal = rssi;
450 else 448 else
451 rx_status.ssi = 100 - rssi; 449 rx_status.signal = 100 - rssi;
452 450
453 rx_status.rate_idx = rate; 451 rx_status.rate_idx = rate;
454 452
@@ -1639,7 +1637,6 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
1639/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ 1637/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
1640static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, 1638static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1641 u16 plcp_signal, 1639 u16 plcp_signal,
1642 struct ieee80211_tx_control *control,
1643 size_t hdrlen) 1640 size_t hdrlen)
1644{ 1641{
1645 struct adm8211_priv *priv = dev->priv; 1642 struct adm8211_priv *priv = dev->priv;
@@ -1665,7 +1662,6 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1665 1662
1666 priv->tx_buffers[entry].skb = skb; 1663 priv->tx_buffers[entry].skb = skb;
1667 priv->tx_buffers[entry].mapping = mapping; 1664 priv->tx_buffers[entry].mapping = mapping;
1668 memcpy(&priv->tx_buffers[entry].tx_control, control, sizeof(*control));
1669 priv->tx_buffers[entry].hdrlen = hdrlen; 1665 priv->tx_buffers[entry].hdrlen = hdrlen;
1670 priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); 1666 priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
1671 1667
@@ -1686,18 +1682,18 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
1686} 1682}
1687 1683
1688/* Put adm8211_tx_hdr on skb and transmit */ 1684/* Put adm8211_tx_hdr on skb and transmit */
1689static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 1685static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1690 struct ieee80211_tx_control *control)
1691{ 1686{
1692 struct adm8211_tx_hdr *txhdr; 1687 struct adm8211_tx_hdr *txhdr;
1693 u16 fc; 1688 u16 fc;
1694 size_t payload_len, hdrlen; 1689 size_t payload_len, hdrlen;
1695 int plcp, dur, len, plcp_signal, short_preamble; 1690 int plcp, dur, len, plcp_signal, short_preamble;
1696 struct ieee80211_hdr *hdr; 1691 struct ieee80211_hdr *hdr;
1692 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1693 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
1697 1694
1698 short_preamble = !!(control->tx_rate->flags & 1695 short_preamble = !!(txrate->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE);
1699 IEEE80211_TXCTL_SHORT_PREAMBLE); 1696 plcp_signal = txrate->bitrate;
1700 plcp_signal = control->tx_rate->bitrate;
1701 1697
1702 hdr = (struct ieee80211_hdr *)skb->data; 1698 hdr = (struct ieee80211_hdr *)skb->data;
1703 fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED; 1699 fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED;
@@ -1731,15 +1727,15 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
1731 if (short_preamble) 1727 if (short_preamble)
1732 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); 1728 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
1733 1729
1734 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 1730 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
1735 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); 1731 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
1736 1732
1737 if (fc & IEEE80211_FCTL_PROTECTED) 1733 if (fc & IEEE80211_FCTL_PROTECTED)
1738 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE); 1734 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE);
1739 1735
1740 txhdr->retry_limit = control->retry_limit; 1736 txhdr->retry_limit = info->control.retry_limit;
1741 1737
1742 adm8211_tx_raw(dev, skb, plcp_signal, control, hdrlen); 1738 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1743 1739
1744 return NETDEV_TX_OK; 1740 return NETDEV_TX_OK;
1745} 1741}
@@ -1894,9 +1890,10 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1894 1890
1895 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); 1891 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1896 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ 1892 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
1893 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
1897 1894
1898 dev->channel_change_time = 1000; 1895 dev->channel_change_time = 1000;
1899 dev->max_rssi = 100; /* FIXME: find better value */ 1896 dev->max_signal = 100; /* FIXME: find better value */
1900 1897
1901 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ 1898 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
1902 1899
@@ -2015,7 +2012,7 @@ static int adm8211_resume(struct pci_dev *pdev)
2015 2012
2016 if (priv->mode != IEEE80211_IF_TYPE_INVALID) { 2013 if (priv->mode != IEEE80211_IF_TYPE_INVALID) {
2017 adm8211_start(dev); 2014 adm8211_start(dev);
2018 ieee80211_start_queues(dev); 2015 ieee80211_wake_queues(dev);
2019 } 2016 }
2020 2017
2021 return 0; 2018 return 0;
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index 8d7c564b3b04..9b190ee26e90 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -443,7 +443,6 @@ struct adm8211_rx_ring_info {
443struct adm8211_tx_ring_info { 443struct adm8211_tx_ring_info {
444 struct sk_buff *skb; 444 struct sk_buff *skb;
445 dma_addr_t mapping; 445 dma_addr_t mapping;
446 struct ieee80211_tx_control tx_control;
447 size_t hdrlen; 446 size_t hdrlen;
448}; 447};
449 448
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 32019fb878d8..1e1446bf4b48 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1148,7 +1148,6 @@ static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
1148static void airo_networks_free(struct airo_info *ai); 1148static void airo_networks_free(struct airo_info *ai);
1149 1149
1150struct airo_info { 1150struct airo_info {
1151 struct net_device_stats stats;
1152 struct net_device *dev; 1151 struct net_device *dev;
1153 struct list_head dev_list; 1152 struct list_head dev_list;
1154 /* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we 1153 /* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we
@@ -1924,7 +1923,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
1924 if (npacks >= MAXTXQ - 1) { 1923 if (npacks >= MAXTXQ - 1) {
1925 netif_stop_queue (dev); 1924 netif_stop_queue (dev);
1926 if (npacks > MAXTXQ) { 1925 if (npacks > MAXTXQ) {
1927 ai->stats.tx_fifo_errors++; 1926 dev->stats.tx_fifo_errors++;
1928 return 1; 1927 return 1;
1929 } 1928 }
1930 skb_queue_tail (&ai->txq, skb); 1929 skb_queue_tail (&ai->txq, skb);
@@ -2044,13 +2043,13 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
2044 bap_read(ai, &status, 2, BAP0); 2043 bap_read(ai, &status, 2, BAP0);
2045 } 2044 }
2046 if (le16_to_cpu(status) & 2) /* Too many retries */ 2045 if (le16_to_cpu(status) & 2) /* Too many retries */
2047 ai->stats.tx_aborted_errors++; 2046 ai->dev->stats.tx_aborted_errors++;
2048 if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */ 2047 if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
2049 ai->stats.tx_heartbeat_errors++; 2048 ai->dev->stats.tx_heartbeat_errors++;
2050 if (le16_to_cpu(status) & 8) /* Aid fail */ 2049 if (le16_to_cpu(status) & 8) /* Aid fail */
2051 { } 2050 { }
2052 if (le16_to_cpu(status) & 0x10) /* MAC disabled */ 2051 if (le16_to_cpu(status) & 0x10) /* MAC disabled */
2053 ai->stats.tx_carrier_errors++; 2052 ai->dev->stats.tx_carrier_errors++;
2054 if (le16_to_cpu(status) & 0x20) /* Association lost */ 2053 if (le16_to_cpu(status) & 0x20) /* Association lost */
2055 { } 2054 { }
2056 /* We produce a TXDROP event only for retry or lifetime 2055 /* We produce a TXDROP event only for retry or lifetime
@@ -2102,7 +2101,7 @@ static void airo_end_xmit(struct net_device *dev) {
2102 for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++); 2101 for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
2103 } else { 2102 } else {
2104 priv->fids[fid] &= 0xffff; 2103 priv->fids[fid] &= 0xffff;
2105 priv->stats.tx_window_errors++; 2104 dev->stats.tx_window_errors++;
2106 } 2105 }
2107 if (i < MAX_FIDS / 2) 2106 if (i < MAX_FIDS / 2)
2108 netif_wake_queue(dev); 2107 netif_wake_queue(dev);
@@ -2128,7 +2127,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2128 netif_stop_queue(dev); 2127 netif_stop_queue(dev);
2129 2128
2130 if (i == MAX_FIDS / 2) { 2129 if (i == MAX_FIDS / 2) {
2131 priv->stats.tx_fifo_errors++; 2130 dev->stats.tx_fifo_errors++;
2132 return 1; 2131 return 1;
2133 } 2132 }
2134 } 2133 }
@@ -2167,7 +2166,7 @@ static void airo_end_xmit11(struct net_device *dev) {
2167 for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++); 2166 for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
2168 } else { 2167 } else {
2169 priv->fids[fid] &= 0xffff; 2168 priv->fids[fid] &= 0xffff;
2170 priv->stats.tx_window_errors++; 2169 dev->stats.tx_window_errors++;
2171 } 2170 }
2172 if (i < MAX_FIDS) 2171 if (i < MAX_FIDS)
2173 netif_wake_queue(dev); 2172 netif_wake_queue(dev);
@@ -2199,7 +2198,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2199 netif_stop_queue(dev); 2198 netif_stop_queue(dev);
2200 2199
2201 if (i == MAX_FIDS) { 2200 if (i == MAX_FIDS) {
2202 priv->stats.tx_fifo_errors++; 2201 dev->stats.tx_fifo_errors++;
2203 return 1; 2202 return 1;
2204 } 2203 }
2205 } 2204 }
@@ -2219,8 +2218,9 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2219 return 0; 2218 return 0;
2220} 2219}
2221 2220
2222static void airo_read_stats(struct airo_info *ai) 2221static void airo_read_stats(struct net_device *dev)
2223{ 2222{
2223 struct airo_info *ai = dev->priv;
2224 StatsRid stats_rid; 2224 StatsRid stats_rid;
2225 __le32 *vals = stats_rid.vals; 2225 __le32 *vals = stats_rid.vals;
2226 2226
@@ -2232,23 +2232,24 @@ static void airo_read_stats(struct airo_info *ai)
2232 readStatsRid(ai, &stats_rid, RID_STATS, 0); 2232 readStatsRid(ai, &stats_rid, RID_STATS, 0);
2233 up(&ai->sem); 2233 up(&ai->sem);
2234 2234
2235 ai->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) + 2235 dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
2236 le32_to_cpu(vals[45]); 2236 le32_to_cpu(vals[45]);
2237 ai->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) + 2237 dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
2238 le32_to_cpu(vals[41]); 2238 le32_to_cpu(vals[41]);
2239 ai->stats.rx_bytes = le32_to_cpu(vals[92]); 2239 dev->stats.rx_bytes = le32_to_cpu(vals[92]);
2240 ai->stats.tx_bytes = le32_to_cpu(vals[91]); 2240 dev->stats.tx_bytes = le32_to_cpu(vals[91]);
2241 ai->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) + 2241 dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
2242 le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]); 2242 le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
2243 ai->stats.tx_errors = le32_to_cpu(vals[42]) + ai->stats.tx_fifo_errors; 2243 dev->stats.tx_errors = le32_to_cpu(vals[42]) +
2244 ai->stats.multicast = le32_to_cpu(vals[43]); 2244 dev->stats.tx_fifo_errors;
2245 ai->stats.collisions = le32_to_cpu(vals[89]); 2245 dev->stats.multicast = le32_to_cpu(vals[43]);
2246 dev->stats.collisions = le32_to_cpu(vals[89]);
2246 2247
2247 /* detailed rx_errors: */ 2248 /* detailed rx_errors: */
2248 ai->stats.rx_length_errors = le32_to_cpu(vals[3]); 2249 dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
2249 ai->stats.rx_crc_errors = le32_to_cpu(vals[4]); 2250 dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
2250 ai->stats.rx_frame_errors = le32_to_cpu(vals[2]); 2251 dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
2251 ai->stats.rx_fifo_errors = le32_to_cpu(vals[0]); 2252 dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
2252} 2253}
2253 2254
2254static struct net_device_stats *airo_get_stats(struct net_device *dev) 2255static struct net_device_stats *airo_get_stats(struct net_device *dev)
@@ -2261,10 +2262,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
2261 set_bit(JOB_STATS, &local->jobs); 2262 set_bit(JOB_STATS, &local->jobs);
2262 wake_up_interruptible(&local->thr_wait); 2263 wake_up_interruptible(&local->thr_wait);
2263 } else 2264 } else
2264 airo_read_stats(local); 2265 airo_read_stats(dev);
2265 } 2266 }
2266 2267
2267 return &local->stats; 2268 return &dev->stats;
2268} 2269}
2269 2270
2270static void airo_set_promisc(struct airo_info *ai) { 2271static void airo_set_promisc(struct airo_info *ai) {
@@ -3093,7 +3094,7 @@ static int airo_thread(void *data) {
3093 else if (test_bit(JOB_XMIT11, &ai->jobs)) 3094 else if (test_bit(JOB_XMIT11, &ai->jobs))
3094 airo_end_xmit11(dev); 3095 airo_end_xmit11(dev);
3095 else if (test_bit(JOB_STATS, &ai->jobs)) 3096 else if (test_bit(JOB_STATS, &ai->jobs))
3096 airo_read_stats(ai); 3097 airo_read_stats(dev);
3097 else if (test_bit(JOB_WSTATS, &ai->jobs)) 3098 else if (test_bit(JOB_WSTATS, &ai->jobs))
3098 airo_read_wireless_stats(ai); 3099 airo_read_wireless_stats(ai);
3099 else if (test_bit(JOB_PROMISC, &ai->jobs)) 3100 else if (test_bit(JOB_PROMISC, &ai->jobs))
@@ -3289,7 +3290,7 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
3289 3290
3290 skb = dev_alloc_skb( len + hdrlen + 2 + 2 ); 3291 skb = dev_alloc_skb( len + hdrlen + 2 + 2 );
3291 if ( !skb ) { 3292 if ( !skb ) {
3292 apriv->stats.rx_dropped++; 3293 dev->stats.rx_dropped++;
3293 goto badrx; 3294 goto badrx;
3294 } 3295 }
3295 skb_reserve(skb, 2); /* This way the IP header is aligned */ 3296 skb_reserve(skb, 2); /* This way the IP header is aligned */
@@ -3557,7 +3558,7 @@ static void mpi_receive_802_3(struct airo_info *ai)
3557 3558
3558 skb = dev_alloc_skb(len); 3559 skb = dev_alloc_skb(len);
3559 if (!skb) { 3560 if (!skb) {
3560 ai->stats.rx_dropped++; 3561 ai->dev->stats.rx_dropped++;
3561 goto badrx; 3562 goto badrx;
3562 } 3563 }
3563 buffer = skb_put(skb,len); 3564 buffer = skb_put(skb,len);
@@ -3650,7 +3651,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
3650 3651
3651 skb = dev_alloc_skb( len + hdrlen + 2 ); 3652 skb = dev_alloc_skb( len + hdrlen + 2 );
3652 if ( !skb ) { 3653 if ( !skb ) {
3653 ai->stats.rx_dropped++; 3654 ai->dev->stats.rx_dropped++;
3654 goto badrx; 3655 goto badrx;
3655 } 3656 }
3656 buffer = (u16*)skb_put (skb, len + hdrlen); 3657 buffer = (u16*)skb_put (skb, len + hdrlen);
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index dbdfc9e39d20..dec5e874a54d 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -125,7 +125,7 @@ static inline int arlan_drop_tx(struct net_device *dev)
125{ 125{
126 struct arlan_private *priv = netdev_priv(dev); 126 struct arlan_private *priv = netdev_priv(dev);
127 127
128 priv->stats.tx_errors++; 128 dev->stats.tx_errors++;
129 if (priv->Conf->tx_delay_ms) 129 if (priv->Conf->tx_delay_ms)
130 { 130 {
131 priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1; 131 priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1;
@@ -1269,7 +1269,7 @@ static void arlan_tx_done_interrupt(struct net_device *dev, int status)
1269 { 1269 {
1270 IFDEBUG(ARLAN_DEBUG_TX_CHAIN) 1270 IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
1271 printk("arlan intr: transmit OK\n"); 1271 printk("arlan intr: transmit OK\n");
1272 priv->stats.tx_packets++; 1272 dev->stats.tx_packets++;
1273 priv->bad = 0; 1273 priv->bad = 0;
1274 priv->reset = 0; 1274 priv->reset = 0;
1275 priv->retransmissions = 0; 1275 priv->retransmissions = 0;
@@ -1496,7 +1496,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1496 if (skb == NULL) 1496 if (skb == NULL)
1497 { 1497 {
1498 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); 1498 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
1499 priv->stats.rx_dropped++; 1499 dev->stats.rx_dropped++;
1500 break; 1500 break;
1501 } 1501 }
1502 skb_reserve(skb, 2); 1502 skb_reserve(skb, 2);
@@ -1536,14 +1536,14 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1536 } 1536 }
1537 netif_rx(skb); 1537 netif_rx(skb);
1538 dev->last_rx = jiffies; 1538 dev->last_rx = jiffies;
1539 priv->stats.rx_packets++; 1539 dev->stats.rx_packets++;
1540 priv->stats.rx_bytes += pkt_len; 1540 dev->stats.rx_bytes += pkt_len;
1541 } 1541 }
1542 break; 1542 break;
1543 1543
1544 default: 1544 default:
1545 printk(KERN_ERR "arlan intr: received unknown status\n"); 1545 printk(KERN_ERR "arlan intr: received unknown status\n");
1546 priv->stats.rx_crc_errors++; 1546 dev->stats.rx_crc_errors++;
1547 break; 1547 break;
1548 } 1548 }
1549 ARLAN_DEBUG_EXIT("arlan_rx_interrupt"); 1549 ARLAN_DEBUG_EXIT("arlan_rx_interrupt");
@@ -1719,23 +1719,23 @@ static struct net_device_stats *arlan_statistics(struct net_device *dev)
1719 1719
1720 /* Update the statistics from the device registers. */ 1720 /* Update the statistics from the device registers. */
1721 1721
1722 READSHM(priv->stats.collisions, arlan->numReTransmissions, u_int); 1722 READSHM(dev->stats.collisions, arlan->numReTransmissions, u_int);
1723 READSHM(priv->stats.rx_crc_errors, arlan->numCRCErrors, u_int); 1723 READSHM(dev->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
1724 READSHM(priv->stats.rx_dropped, arlan->numFramesDiscarded, u_int); 1724 READSHM(dev->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
1725 READSHM(priv->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int); 1725 READSHM(dev->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
1726 READSHM(priv->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int); 1726 READSHM(dev->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
1727 READSHM(priv->stats.rx_over_errors, arlan->numRXOverruns, u_int); 1727 READSHM(dev->stats.rx_over_errors, arlan->numRXOverruns, u_int);
1728 READSHM(priv->stats.rx_packets, arlan->numDatagramsReceived, u_int); 1728 READSHM(dev->stats.rx_packets, arlan->numDatagramsReceived, u_int);
1729 READSHM(priv->stats.tx_aborted_errors, arlan->numAbortErrors, u_int); 1729 READSHM(dev->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
1730 READSHM(priv->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int); 1730 READSHM(dev->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
1731 READSHM(priv->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int); 1731 READSHM(dev->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
1732 READSHM(priv->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int); 1732 READSHM(dev->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
1733 READSHM(priv->stats.tx_packets, arlan->numDatagramsTransmitted, u_int); 1733 READSHM(dev->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
1734 READSHM(priv->stats.tx_window_errors, arlan->numHoldOffs, u_int); 1734 READSHM(dev->stats.tx_window_errors, arlan->numHoldOffs, u_int);
1735 1735
1736 ARLAN_DEBUG_EXIT("arlan_statistics"); 1736 ARLAN_DEBUG_EXIT("arlan_statistics");
1737 1737
1738 return &priv->stats; 1738 return &dev->stats;
1739} 1739}
1740 1740
1741 1741
diff --git a/drivers/net/wireless/arlan.h b/drivers/net/wireless/arlan.h
index 3ed1df75900f..fb3ad51a1caf 100644
--- a/drivers/net/wireless/arlan.h
+++ b/drivers/net/wireless/arlan.h
@@ -330,7 +330,6 @@ struct TxParam
330#define TX_RING_SIZE 2 330#define TX_RING_SIZE 2
331/* Information that need to be kept for each board. */ 331/* Information that need to be kept for each board. */
332struct arlan_private { 332struct arlan_private {
333 struct net_device_stats stats;
334 struct arlan_shmem __iomem * card; 333 struct arlan_shmem __iomem * card;
335 struct arlan_shmem * conf; 334 struct arlan_shmem * conf;
336 335
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 635b9ac9aaa1..85045afc1ba7 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -167,8 +167,7 @@ static struct pci_driver ath5k_pci_driver = {
167/* 167/*
168 * Prototypes - MAC 802.11 stack related functions 168 * Prototypes - MAC 802.11 stack related functions
169 */ 169 */
170static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 170static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
171 struct ieee80211_tx_control *ctl);
172static int ath5k_reset(struct ieee80211_hw *hw); 171static int ath5k_reset(struct ieee80211_hw *hw);
173static int ath5k_start(struct ieee80211_hw *hw); 172static int ath5k_start(struct ieee80211_hw *hw);
174static void ath5k_stop(struct ieee80211_hw *hw); 173static void ath5k_stop(struct ieee80211_hw *hw);
@@ -196,8 +195,7 @@ static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
196static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 195static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
197static void ath5k_reset_tsf(struct ieee80211_hw *hw); 196static void ath5k_reset_tsf(struct ieee80211_hw *hw);
198static int ath5k_beacon_update(struct ieee80211_hw *hw, 197static int ath5k_beacon_update(struct ieee80211_hw *hw,
199 struct sk_buff *skb, 198 struct sk_buff *skb);
200 struct ieee80211_tx_control *ctl);
201 199
202static struct ieee80211_ops ath5k_hw_ops = { 200static struct ieee80211_ops ath5k_hw_ops = {
203 .tx = ath5k_tx, 201 .tx = ath5k_tx,
@@ -251,9 +249,7 @@ static void ath5k_desc_free(struct ath5k_softc *sc,
251static int ath5k_rxbuf_setup(struct ath5k_softc *sc, 249static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
252 struct ath5k_buf *bf); 250 struct ath5k_buf *bf);
253static int ath5k_txbuf_setup(struct ath5k_softc *sc, 251static int ath5k_txbuf_setup(struct ath5k_softc *sc,
254 struct ath5k_buf *bf, 252 struct ath5k_buf *bf);
255 struct ieee80211_tx_control *ctl);
256
257static inline void ath5k_txbuf_free(struct ath5k_softc *sc, 253static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
258 struct ath5k_buf *bf) 254 struct ath5k_buf *bf)
259{ 255{
@@ -289,8 +285,7 @@ static void ath5k_tx_processq(struct ath5k_softc *sc,
289static void ath5k_tasklet_tx(unsigned long data); 285static void ath5k_tasklet_tx(unsigned long data);
290/* Beacon handling */ 286/* Beacon handling */
291static int ath5k_beacon_setup(struct ath5k_softc *sc, 287static int ath5k_beacon_setup(struct ath5k_softc *sc,
292 struct ath5k_buf *bf, 288 struct ath5k_buf *bf);
293 struct ieee80211_tx_control *ctl);
294static void ath5k_beacon_send(struct ath5k_softc *sc); 289static void ath5k_beacon_send(struct ath5k_softc *sc);
295static void ath5k_beacon_config(struct ath5k_softc *sc); 290static void ath5k_beacon_config(struct ath5k_softc *sc);
296static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); 291static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
@@ -458,13 +453,11 @@ ath5k_pci_probe(struct pci_dev *pdev,
458 453
459 /* Initialize driver private data */ 454 /* Initialize driver private data */
460 SET_IEEE80211_DEV(hw, &pdev->dev); 455 SET_IEEE80211_DEV(hw, &pdev->dev);
461 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS; 456 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
457 IEEE80211_HW_SIGNAL_DBM |
458 IEEE80211_HW_NOISE_DBM;
462 hw->extra_tx_headroom = 2; 459 hw->extra_tx_headroom = 2;
463 hw->channel_change_time = 5000; 460 hw->channel_change_time = 5000;
464 /* these names are misleading */
465 hw->max_rssi = -110; /* signal in dBm */
466 hw->max_noise = -110; /* noise in dBm */
467 hw->max_signal = 100; /* we will provide a percentage based on rssi */
468 sc = hw->priv; 461 sc = hw->priv;
469 sc->hw = hw; 462 sc->hw = hw;
470 sc->pdev = pdev; 463 sc->pdev = pdev;
@@ -1297,36 +1290,36 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1297} 1290}
1298 1291
1299static int 1292static int
1300ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 1293ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1301 struct ieee80211_tx_control *ctl)
1302{ 1294{
1303 struct ath5k_hw *ah = sc->ah; 1295 struct ath5k_hw *ah = sc->ah;
1304 struct ath5k_txq *txq = sc->txq; 1296 struct ath5k_txq *txq = sc->txq;
1305 struct ath5k_desc *ds = bf->desc; 1297 struct ath5k_desc *ds = bf->desc;
1306 struct sk_buff *skb = bf->skb; 1298 struct sk_buff *skb = bf->skb;
1299 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1307 unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; 1300 unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
1308 int ret; 1301 int ret;
1309 1302
1310 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; 1303 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
1311 bf->ctl = *ctl; 1304
1312 /* XXX endianness */ 1305 /* XXX endianness */
1313 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 1306 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
1314 PCI_DMA_TODEVICE); 1307 PCI_DMA_TODEVICE);
1315 1308
1316 if (ctl->flags & IEEE80211_TXCTL_NO_ACK) 1309 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1317 flags |= AR5K_TXDESC_NOACK; 1310 flags |= AR5K_TXDESC_NOACK;
1318 1311
1319 pktlen = skb->len; 1312 pktlen = skb->len;
1320 1313
1321 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) { 1314 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) {
1322 keyidx = ctl->key_idx; 1315 keyidx = info->control.hw_key->hw_key_idx;
1323 pktlen += ctl->icv_len; 1316 pktlen += info->control.icv_len;
1324 } 1317 }
1325
1326 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1318 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1327 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, 1319 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
1328 (sc->power_level * 2), ctl->tx_rate->hw_value, 1320 (sc->power_level * 2),
1329 ctl->retry_limit, keyidx, 0, flags, 0, 0); 1321 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1322 info->control.retry_limit, keyidx, 0, flags, 0, 0);
1330 if (ret) 1323 if (ret)
1331 goto err_unmap; 1324 goto err_unmap;
1332 1325
@@ -1335,7 +1328,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1335 1328
1336 spin_lock_bh(&txq->lock); 1329 spin_lock_bh(&txq->lock);
1337 list_add_tail(&bf->list, &txq->q); 1330 list_add_tail(&bf->list, &txq->q);
1338 sc->tx_stats.data[txq->qnum].len++; 1331 sc->tx_stats[txq->qnum].len++;
1339 if (txq->link == NULL) /* is this first packet? */ 1332 if (txq->link == NULL) /* is this first packet? */
1340 ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); 1333 ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr);
1341 else /* no, so only link it */ 1334 else /* no, so only link it */
@@ -1566,7 +1559,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1566 ath5k_txbuf_free(sc, bf); 1559 ath5k_txbuf_free(sc, bf);
1567 1560
1568 spin_lock_bh(&sc->txbuflock); 1561 spin_lock_bh(&sc->txbuflock);
1569 sc->tx_stats.data[txq->qnum].len--; 1562 sc->tx_stats[txq->qnum].len--;
1570 list_move_tail(&bf->list, &sc->txbuf); 1563 list_move_tail(&bf->list, &sc->txbuf);
1571 sc->txbuf_len++; 1564 sc->txbuf_len++;
1572 spin_unlock_bh(&sc->txbuflock); 1565 spin_unlock_bh(&sc->txbuflock);
@@ -1601,7 +1594,7 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
1601 sc->txqs[i].link); 1594 sc->txqs[i].link);
1602 } 1595 }
1603 } 1596 }
1604 ieee80211_start_queues(sc->hw); /* XXX move to callers */ 1597 ieee80211_wake_queues(sc->hw); /* XXX move to callers */
1605 1598
1606 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1599 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1607 if (sc->txqs[i].setup) 1600 if (sc->txqs[i].setup)
@@ -1895,20 +1888,9 @@ accept:
1895 rxs.freq = sc->curchan->center_freq; 1888 rxs.freq = sc->curchan->center_freq;
1896 rxs.band = sc->curband->band; 1889 rxs.band = sc->curband->band;
1897 1890
1898 /*
1899 * signal quality:
1900 * the names here are misleading and the usage of these
1901 * values by iwconfig makes it even worse
1902 */
1903 /* noise floor in dBm, from the last noise calibration */
1904 rxs.noise = sc->ah->ah_noise_floor; 1891 rxs.noise = sc->ah->ah_noise_floor;
1905 /* signal level in dBm */ 1892 rxs.signal = rxs.noise + rs.rs_rssi;
1906 rxs.ssi = rxs.noise + rs.rs_rssi; 1893 rxs.qual = rs.rs_rssi * 100 / 64;
1907 /*
1908 * "signal" is actually displayed as Link Quality by iwconfig
1909 * we provide a percentage based on rssi (assuming max rssi 64)
1910 */
1911 rxs.signal = rs.rs_rssi * 100 / 64;
1912 1894
1913 rxs.antenna = rs.rs_antenna; 1895 rxs.antenna = rs.rs_antenna;
1914 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1896 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
@@ -1939,11 +1921,11 @@ next:
1939static void 1921static void
1940ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1922ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1941{ 1923{
1942 struct ieee80211_tx_status txs = {};
1943 struct ath5k_tx_status ts = {}; 1924 struct ath5k_tx_status ts = {};
1944 struct ath5k_buf *bf, *bf0; 1925 struct ath5k_buf *bf, *bf0;
1945 struct ath5k_desc *ds; 1926 struct ath5k_desc *ds;
1946 struct sk_buff *skb; 1927 struct sk_buff *skb;
1928 struct ieee80211_tx_info *info;
1947 int ret; 1929 int ret;
1948 1930
1949 spin_lock(&txq->lock); 1931 spin_lock(&txq->lock);
@@ -1963,28 +1945,29 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1963 } 1945 }
1964 1946
1965 skb = bf->skb; 1947 skb = bf->skb;
1948 info = IEEE80211_SKB_CB(skb);
1966 bf->skb = NULL; 1949 bf->skb = NULL;
1950
1967 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 1951 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
1968 PCI_DMA_TODEVICE); 1952 PCI_DMA_TODEVICE);
1969 1953
1970 txs.control = bf->ctl; 1954 info->status.retry_count = ts.ts_shortretry + ts.ts_longretry / 6;
1971 txs.retry_count = ts.ts_shortretry + ts.ts_longretry / 6;
1972 if (unlikely(ts.ts_status)) { 1955 if (unlikely(ts.ts_status)) {
1973 sc->ll_stats.dot11ACKFailureCount++; 1956 sc->ll_stats.dot11ACKFailureCount++;
1974 if (ts.ts_status & AR5K_TXERR_XRETRY) 1957 if (ts.ts_status & AR5K_TXERR_XRETRY)
1975 txs.excessive_retries = 1; 1958 info->status.excessive_retries = 1;
1976 else if (ts.ts_status & AR5K_TXERR_FILT) 1959 else if (ts.ts_status & AR5K_TXERR_FILT)
1977 txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED; 1960 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1978 } else { 1961 } else {
1979 txs.flags |= IEEE80211_TX_STATUS_ACK; 1962 info->flags |= IEEE80211_TX_STAT_ACK;
1980 txs.ack_signal = ts.ts_rssi; 1963 info->status.ack_signal = ts.ts_rssi;
1981 } 1964 }
1982 1965
1983 ieee80211_tx_status(sc->hw, skb, &txs); 1966 ieee80211_tx_status(sc->hw, skb);
1984 sc->tx_stats.data[txq->qnum].count++; 1967 sc->tx_stats[txq->qnum].count++;
1985 1968
1986 spin_lock(&sc->txbuflock); 1969 spin_lock(&sc->txbuflock);
1987 sc->tx_stats.data[txq->qnum].len--; 1970 sc->tx_stats[txq->qnum].len--;
1988 list_move_tail(&bf->list, &sc->txbuf); 1971 list_move_tail(&bf->list, &sc->txbuf);
1989 sc->txbuf_len++; 1972 sc->txbuf_len++;
1990 spin_unlock(&sc->txbuflock); 1973 spin_unlock(&sc->txbuflock);
@@ -2017,10 +2000,10 @@ ath5k_tasklet_tx(unsigned long data)
2017 * Setup the beacon frame for transmit. 2000 * Setup the beacon frame for transmit.
2018 */ 2001 */
2019static int 2002static int
2020ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 2003ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2021 struct ieee80211_tx_control *ctl)
2022{ 2004{
2023 struct sk_buff *skb = bf->skb; 2005 struct sk_buff *skb = bf->skb;
2006 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2024 struct ath5k_hw *ah = sc->ah; 2007 struct ath5k_hw *ah = sc->ah;
2025 struct ath5k_desc *ds; 2008 struct ath5k_desc *ds;
2026 int ret, antenna = 0; 2009 int ret, antenna = 0;
@@ -2059,7 +2042,8 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
2059 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2042 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2060 ieee80211_get_hdrlen_from_skb(skb), 2043 ieee80211_get_hdrlen_from_skb(skb),
2061 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), 2044 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
2062 ctl->tx_rate->hw_value, 1, AR5K_TXKEYIX_INVALID, 2045 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
2046 1, AR5K_TXKEYIX_INVALID,
2063 antenna, flags, 0, 0); 2047 antenna, flags, 0, 0);
2064 if (ret) 2048 if (ret)
2065 goto err_unmap; 2049 goto err_unmap;
@@ -2637,11 +2621,11 @@ ath5k_led_event(struct ath5k_softc *sc, int event)
2637\********************/ 2621\********************/
2638 2622
2639static int 2623static int
2640ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 2624ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2641 struct ieee80211_tx_control *ctl)
2642{ 2625{
2643 struct ath5k_softc *sc = hw->priv; 2626 struct ath5k_softc *sc = hw->priv;
2644 struct ath5k_buf *bf; 2627 struct ath5k_buf *bf;
2628 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2645 unsigned long flags; 2629 unsigned long flags;
2646 int hdrlen; 2630 int hdrlen;
2647 int pad; 2631 int pad;
@@ -2667,13 +2651,13 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
2667 memmove(skb->data, skb->data+pad, hdrlen); 2651 memmove(skb->data, skb->data+pad, hdrlen);
2668 } 2652 }
2669 2653
2670 sc->led_txrate = ctl->tx_rate->hw_value; 2654 sc->led_txrate = ieee80211_get_tx_rate(hw, info)->hw_value;
2671 2655
2672 spin_lock_irqsave(&sc->txbuflock, flags); 2656 spin_lock_irqsave(&sc->txbuflock, flags);
2673 if (list_empty(&sc->txbuf)) { 2657 if (list_empty(&sc->txbuf)) {
2674 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2658 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
2675 spin_unlock_irqrestore(&sc->txbuflock, flags); 2659 spin_unlock_irqrestore(&sc->txbuflock, flags);
2676 ieee80211_stop_queue(hw, ctl->queue); 2660 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
2677 return -1; 2661 return -1;
2678 } 2662 }
2679 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2663 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
@@ -2685,7 +2669,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
2685 2669
2686 bf->skb = skb; 2670 bf->skb = skb;
2687 2671
2688 if (ath5k_txbuf_setup(sc, bf, ctl)) { 2672 if (ath5k_txbuf_setup(sc, bf)) {
2689 bf->skb = NULL; 2673 bf->skb = NULL;
2690 spin_lock_irqsave(&sc->txbuflock, flags); 2674 spin_lock_irqsave(&sc->txbuflock, flags);
2691 list_add_tail(&bf->list, &sc->txbuf); 2675 list_add_tail(&bf->list, &sc->txbuf);
@@ -3063,8 +3047,7 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
3063} 3047}
3064 3048
3065static int 3049static int
3066ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 3050ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3067 struct ieee80211_tx_control *ctl)
3068{ 3051{
3069 struct ath5k_softc *sc = hw->priv; 3052 struct ath5k_softc *sc = hw->priv;
3070 int ret; 3053 int ret;
@@ -3080,7 +3063,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
3080 3063
3081 ath5k_txbuf_free(sc, sc->bbuf); 3064 ath5k_txbuf_free(sc, sc->bbuf);
3082 sc->bbuf->skb = skb; 3065 sc->bbuf->skb = skb;
3083 ret = ath5k_beacon_setup(sc, sc->bbuf, ctl); 3066 ret = ath5k_beacon_setup(sc, sc->bbuf);
3084 if (ret) 3067 if (ret)
3085 sc->bbuf->skb = NULL; 3068 sc->bbuf->skb = NULL;
3086 else 3069 else
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 3a9755893018..bb4b26d523ab 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -60,7 +60,6 @@ struct ath5k_buf {
60 dma_addr_t daddr; /* physical addr of desc */ 60 dma_addr_t daddr; /* physical addr of desc */
61 struct sk_buff *skb; /* skbuff for buf */ 61 struct sk_buff *skb; /* skbuff for buf */
62 dma_addr_t skbaddr;/* physical addr of skb data */ 62 dma_addr_t skbaddr;/* physical addr of skb data */
63 struct ieee80211_tx_control ctl;
64}; 63};
65 64
66/* 65/*
@@ -92,7 +91,8 @@ struct ath5k_softc {
92 struct pci_dev *pdev; /* for dma mapping */ 91 struct pci_dev *pdev; /* for dma mapping */
93 void __iomem *iobase; /* address of the device */ 92 void __iomem *iobase; /* address of the device */
94 struct mutex lock; /* dev-level lock */ 93 struct mutex lock; /* dev-level lock */
95 struct ieee80211_tx_queue_stats tx_stats; 94 /* FIXME: how many does it really need? */
95 struct ieee80211_tx_queue_stats tx_stats[16];
96 struct ieee80211_low_level_stats ll_stats; 96 struct ieee80211_low_level_stats ll_stats;
97 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 97 struct ieee80211_hw *hw; /* IEEE 802.11 common */
98 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 98 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 438e63ecccf1..7bb2646ae0ef 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -433,7 +433,6 @@ struct atmel_private {
433 struct net_device *dev; 433 struct net_device *dev;
434 struct device *sys_dev; 434 struct device *sys_dev;
435 struct iw_statistics wstats; 435 struct iw_statistics wstats;
436 struct net_device_stats stats; // device stats
437 spinlock_t irqlock, timerlock; // spinlocks 436 spinlock_t irqlock, timerlock; // spinlocks
438 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type; 437 enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
439 enum { 438 enum {
@@ -694,9 +693,9 @@ static void tx_done_irq(struct atmel_private *priv)
694 693
695 if (type == TX_PACKET_TYPE_DATA) { 694 if (type == TX_PACKET_TYPE_DATA) {
696 if (status == TX_STATUS_SUCCESS) 695 if (status == TX_STATUS_SUCCESS)
697 priv->stats.tx_packets++; 696 priv->dev->stats.tx_packets++;
698 else 697 else
699 priv->stats.tx_errors++; 698 priv->dev->stats.tx_errors++;
700 netif_wake_queue(priv->dev); 699 netif_wake_queue(priv->dev);
701 } 700 }
702 } 701 }
@@ -792,13 +791,13 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
792 791
793 if (priv->card && priv->present_callback && 792 if (priv->card && priv->present_callback &&
794 !(*priv->present_callback)(priv->card)) { 793 !(*priv->present_callback)(priv->card)) {
795 priv->stats.tx_errors++; 794 dev->stats.tx_errors++;
796 dev_kfree_skb(skb); 795 dev_kfree_skb(skb);
797 return 0; 796 return 0;
798 } 797 }
799 798
800 if (priv->station_state != STATION_STATE_READY) { 799 if (priv->station_state != STATION_STATE_READY) {
801 priv->stats.tx_errors++; 800 dev->stats.tx_errors++;
802 dev_kfree_skb(skb); 801 dev_kfree_skb(skb);
803 return 0; 802 return 0;
804 } 803 }
@@ -815,7 +814,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
815 initial + 18 (+30-12) */ 814 initial + 18 (+30-12) */
816 815
817 if (!(buff = find_tx_buff(priv, len + 18))) { 816 if (!(buff = find_tx_buff(priv, len + 18))) {
818 priv->stats.tx_dropped++; 817 dev->stats.tx_dropped++;
819 spin_unlock_irqrestore(&priv->irqlock, flags); 818 spin_unlock_irqrestore(&priv->irqlock, flags);
820 spin_unlock_bh(&priv->timerlock); 819 spin_unlock_bh(&priv->timerlock);
821 netif_stop_queue(dev); 820 netif_stop_queue(dev);
@@ -851,7 +850,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
851 /* low bit of first byte of destination tells us if broadcast */ 850 /* low bit of first byte of destination tells us if broadcast */
852 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA); 851 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
853 dev->trans_start = jiffies; 852 dev->trans_start = jiffies;
854 priv->stats.tx_bytes += len; 853 dev->stats.tx_bytes += len;
855 854
856 spin_unlock_irqrestore(&priv->irqlock, flags); 855 spin_unlock_irqrestore(&priv->irqlock, flags);
857 spin_unlock_bh(&priv->timerlock); 856 spin_unlock_bh(&priv->timerlock);
@@ -895,7 +894,7 @@ static void fast_rx_path(struct atmel_private *priv,
895 } 894 }
896 895
897 if (!(skb = dev_alloc_skb(msdu_size + 14))) { 896 if (!(skb = dev_alloc_skb(msdu_size + 14))) {
898 priv->stats.rx_dropped++; 897 priv->dev->stats.rx_dropped++;
899 return; 898 return;
900 } 899 }
901 900
@@ -908,7 +907,7 @@ static void fast_rx_path(struct atmel_private *priv,
908 crc = crc32_le(crc, skbp + 12, msdu_size); 907 crc = crc32_le(crc, skbp + 12, msdu_size);
909 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4); 908 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4);
910 if ((crc ^ 0xffffffff) != netcrc) { 909 if ((crc ^ 0xffffffff) != netcrc) {
911 priv->stats.rx_crc_errors++; 910 priv->dev->stats.rx_crc_errors++;
912 dev_kfree_skb(skb); 911 dev_kfree_skb(skb);
913 return; 912 return;
914 } 913 }
@@ -924,8 +923,8 @@ static void fast_rx_path(struct atmel_private *priv,
924 skb->protocol = eth_type_trans(skb, priv->dev); 923 skb->protocol = eth_type_trans(skb, priv->dev);
925 skb->ip_summed = CHECKSUM_NONE; 924 skb->ip_summed = CHECKSUM_NONE;
926 netif_rx(skb); 925 netif_rx(skb);
927 priv->stats.rx_bytes += 12 + msdu_size; 926 priv->dev->stats.rx_bytes += 12 + msdu_size;
928 priv->stats.rx_packets++; 927 priv->dev->stats.rx_packets++;
929} 928}
930 929
931/* Test to see if the packet in card memory at packet_loc has a valid CRC 930/* Test to see if the packet in card memory at packet_loc has a valid CRC
@@ -991,7 +990,7 @@ static void frag_rx_path(struct atmel_private *priv,
991 crc = crc32_le(crc, &priv->rx_buf[12], msdu_size); 990 crc = crc32_le(crc, &priv->rx_buf[12], msdu_size);
992 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 991 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
993 if ((crc ^ 0xffffffff) != netcrc) { 992 if ((crc ^ 0xffffffff) != netcrc) {
994 priv->stats.rx_crc_errors++; 993 priv->dev->stats.rx_crc_errors++;
995 memset(priv->frag_source, 0xff, 6); 994 memset(priv->frag_source, 0xff, 6);
996 } 995 }
997 } 996 }
@@ -1009,7 +1008,7 @@ static void frag_rx_path(struct atmel_private *priv,
1009 msdu_size); 1008 msdu_size);
1010 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); 1009 atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
1011 if ((crc ^ 0xffffffff) != netcrc) { 1010 if ((crc ^ 0xffffffff) != netcrc) {
1012 priv->stats.rx_crc_errors++; 1011 priv->dev->stats.rx_crc_errors++;
1013 memset(priv->frag_source, 0xff, 6); 1012 memset(priv->frag_source, 0xff, 6);
1014 more_frags = 1; /* don't send broken assembly */ 1013 more_frags = 1; /* don't send broken assembly */
1015 } 1014 }
@@ -1021,7 +1020,7 @@ static void frag_rx_path(struct atmel_private *priv,
1021 if (!more_frags) { /* last one */ 1020 if (!more_frags) { /* last one */
1022 memset(priv->frag_source, 0xff, 6); 1021 memset(priv->frag_source, 0xff, 6);
1023 if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { 1022 if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
1024 priv->stats.rx_dropped++; 1023 priv->dev->stats.rx_dropped++;
1025 } else { 1024 } else {
1026 skb_reserve(skb, 2); 1025 skb_reserve(skb, 2);
1027 memcpy(skb_put(skb, priv->frag_len + 12), 1026 memcpy(skb_put(skb, priv->frag_len + 12),
@@ -1031,8 +1030,8 @@ static void frag_rx_path(struct atmel_private *priv,
1031 skb->protocol = eth_type_trans(skb, priv->dev); 1030 skb->protocol = eth_type_trans(skb, priv->dev);
1032 skb->ip_summed = CHECKSUM_NONE; 1031 skb->ip_summed = CHECKSUM_NONE;
1033 netif_rx(skb); 1032 netif_rx(skb);
1034 priv->stats.rx_bytes += priv->frag_len + 12; 1033 priv->dev->stats.rx_bytes += priv->frag_len + 12;
1035 priv->stats.rx_packets++; 1034 priv->dev->stats.rx_packets++;
1036 } 1035 }
1037 } 1036 }
1038 } else 1037 } else
@@ -1057,7 +1056,7 @@ static void rx_done_irq(struct atmel_private *priv)
1057 if (status == 0xc1) /* determined by experiment */ 1056 if (status == 0xc1) /* determined by experiment */
1058 priv->wstats.discard.nwid++; 1057 priv->wstats.discard.nwid++;
1059 else 1058 else
1060 priv->stats.rx_errors++; 1059 priv->dev->stats.rx_errors++;
1061 goto next; 1060 goto next;
1062 } 1061 }
1063 1062
@@ -1065,7 +1064,7 @@ static void rx_done_irq(struct atmel_private *priv)
1065 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head)); 1064 rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
1066 1065
1067 if (msdu_size < 30) { 1066 if (msdu_size < 30) {
1068 priv->stats.rx_errors++; 1067 priv->dev->stats.rx_errors++;
1069 goto next; 1068 goto next;
1070 } 1069 }
1071 1070
@@ -1123,7 +1122,7 @@ static void rx_done_irq(struct atmel_private *priv)
1123 msdu_size -= 4; 1122 msdu_size -= 4;
1124 crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size); 1123 crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size);
1125 if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) { 1124 if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) {
1126 priv->stats.rx_crc_errors++; 1125 priv->dev->stats.rx_crc_errors++;
1127 goto next; 1126 goto next;
1128 } 1127 }
1129 } 1128 }
@@ -1250,12 +1249,6 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
1250 } 1249 }
1251} 1250}
1252 1251
1253static struct net_device_stats *atmel_get_stats(struct net_device *dev)
1254{
1255 struct atmel_private *priv = netdev_priv(dev);
1256 return &priv->stats;
1257}
1258
1259static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev) 1252static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
1260{ 1253{
1261 struct atmel_private *priv = netdev_priv(dev); 1254 struct atmel_private *priv = netdev_priv(dev);
@@ -1518,8 +1511,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1518 priv->crc_ok_cnt = priv->crc_ko_cnt = 0; 1511 priv->crc_ok_cnt = priv->crc_ko_cnt = 0;
1519 } else 1512 } else
1520 priv->probe_crc = 0; 1513 priv->probe_crc = 0;
1521 memset(&priv->stats, 0, sizeof(priv->stats));
1522 memset(&priv->wstats, 0, sizeof(priv->wstats));
1523 priv->last_qual = jiffies; 1514 priv->last_qual = jiffies;
1524 priv->last_beacon_timestamp = 0; 1515 priv->last_beacon_timestamp = 0;
1525 memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); 1516 memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
@@ -1568,7 +1559,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1568 dev->change_mtu = atmel_change_mtu; 1559 dev->change_mtu = atmel_change_mtu;
1569 dev->set_mac_address = atmel_set_mac_address; 1560 dev->set_mac_address = atmel_set_mac_address;
1570 dev->hard_start_xmit = start_tx; 1561 dev->hard_start_xmit = start_tx;
1571 dev->get_stats = atmel_get_stats;
1572 dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def; 1562 dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def;
1573 dev->do_ioctl = atmel_ioctl; 1563 dev->do_ioctl = atmel_ioctl;
1574 dev->irq = irq; 1564 dev->irq = irq;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index dfa4bdd5597c..239e71c3d1b1 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -410,8 +410,7 @@ enum {
410#define B43_IRQ_TIMEOUT 0x80000000 410#define B43_IRQ_TIMEOUT 0x80000000
411 411
412#define B43_IRQ_ALL 0xFFFFFFFF 412#define B43_IRQ_ALL 0xFFFFFFFF
413#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \ 413#define B43_IRQ_MASKTEMPLATE (B43_IRQ_TBTT_INDI | \
414 B43_IRQ_TBTT_INDI | \
415 B43_IRQ_ATIM_END | \ 414 B43_IRQ_ATIM_END | \
416 B43_IRQ_PMQ | \ 415 B43_IRQ_PMQ | \
417 B43_IRQ_MAC_TXERR | \ 416 B43_IRQ_MAC_TXERR | \
@@ -423,6 +422,26 @@ enum {
423 B43_IRQ_RFKILL | \ 422 B43_IRQ_RFKILL | \
424 B43_IRQ_TX_OK) 423 B43_IRQ_TX_OK)
425 424
425/* The firmware register to fetch the debug-IRQ reason from. */
426#define B43_DEBUGIRQ_REASON_REG 63
427/* Debug-IRQ reasons. */
428#define B43_DEBUGIRQ_PANIC 0 /* The firmware panic'ed */
429#define B43_DEBUGIRQ_DUMP_SHM 1 /* Dump shared SHM */
430#define B43_DEBUGIRQ_DUMP_REGS 2 /* Dump the microcode registers */
431#define B43_DEBUGIRQ_MARKER 3 /* A "marker" was thrown by the firmware. */
432#define B43_DEBUGIRQ_ACK 0xFFFF /* The host writes that to ACK the IRQ */
433
434/* The firmware register that contains the "marker" line. */
435#define B43_MARKER_ID_REG 2
436#define B43_MARKER_LINE_REG 3
437
438/* The firmware register to fetch the panic reason from. */
439#define B43_FWPANIC_REASON_REG 3
440/* Firmware panic reason codes */
441#define B43_FWPANIC_DIE 0 /* Firmware died. Don't auto-restart it. */
442#define B43_FWPANIC_RESTART 1 /* Firmware died. Schedule a controller reset. */
443
444
426/* Device specific rate values. 445/* Device specific rate values.
427 * The actual values defined here are (rate_in_mbps * 2). 446 * The actual values defined here are (rate_in_mbps * 2).
428 * Some code depends on this. Don't change it. */ 447 * Some code depends on this. Don't change it. */
@@ -734,7 +753,6 @@ struct b43_wl {
734 /* The beacon we are currently using (AP or IBSS mode). 753 /* The beacon we are currently using (AP or IBSS mode).
735 * This beacon stuff is protected by the irq_lock. */ 754 * This beacon stuff is protected by the irq_lock. */
736 struct sk_buff *current_beacon; 755 struct sk_buff *current_beacon;
737 struct ieee80211_tx_control beacon_txctl;
738 bool beacon0_uploaded; 756 bool beacon0_uploaded;
739 bool beacon1_uploaded; 757 bool beacon1_uploaded;
740 bool beacon_templates_virgin; /* Never wrote the templates? */ 758 bool beacon_templates_virgin; /* Never wrote the templates? */
@@ -768,6 +786,13 @@ struct b43_firmware {
768 u16 rev; 786 u16 rev;
769 /* Firmware patchlevel */ 787 /* Firmware patchlevel */
770 u16 patch; 788 u16 patch;
789
790 /* Set to true, if we are using an opensource firmware. */
791 bool opensource;
792 /* Set to true, if the core needs a PCM firmware, but
793 * we failed to load one. This is always false for
794 * core rev > 10, as these don't need PCM firmware. */
795 bool pcm_request_failed;
771}; 796};
772 797
773/* Device (802.11 core) initialization status. */ 798/* Device (802.11 core) initialization status. */
@@ -941,22 +966,6 @@ static inline bool __b43_warn_on_dummy(bool x) { return x; }
941# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) 966# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x)))
942#endif 967#endif
943 968
944/** Limit a value between two limits */
945#ifdef limit_value
946# undef limit_value
947#endif
948#define limit_value(value, min, max) \
949 ({ \
950 typeof(value) __value = (value); \
951 typeof(value) __min = (min); \
952 typeof(value) __max = (max); \
953 if (__value < __min) \
954 __value = __min; \
955 else if (__value > __max) \
956 __value = __max; \
957 __value; \
958 })
959
960/* Convert an integer to a Q5.2 value */ 969/* Convert an integer to a Q5.2 value */
961#define INT_TO_Q52(i) ((i) << 2) 970#define INT_TO_Q52(i) ((i) << 2)
962/* Convert a Q5.2 value to an integer (precision loss!) */ 971/* Convert a Q5.2 value to an integer (precision loss!) */
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 7fca2ebc747f..210e2789c1c3 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -270,24 +270,22 @@ static int restart_write_file(struct b43_wldev *dev,
270 return err; 270 return err;
271} 271}
272 272
273static ssize_t append_lo_table(ssize_t count, char *buf, const size_t bufsize, 273static unsigned long calc_expire_secs(unsigned long now,
274 struct b43_loctl table[B43_NR_BB][B43_NR_RF]) 274 unsigned long time,
275 unsigned long expire)
275{ 276{
276 unsigned int i, j; 277 expire = time + expire;
277 struct b43_loctl *ctl; 278
278 279 if (time_after(now, expire))
279 for (i = 0; i < B43_NR_BB; i++) { 280 return 0; /* expired */
280 for (j = 0; j < B43_NR_RF; j++) { 281 if (expire < now) {
281 ctl = &(table[i][j]); 282 /* jiffies wrapped */
282 fappend("(bbatt %2u, rfatt %2u) -> " 283 expire -= MAX_JIFFY_OFFSET;
283 "(I %+3d, Q %+3d, Used: %d, Calibrated: %d)\n", 284 now -= MAX_JIFFY_OFFSET;
284 i, j, ctl->i, ctl->q,
285 ctl->used,
286 b43_loctl_is_calibrated(ctl));
287 }
288 } 285 }
286 B43_WARN_ON(expire < now);
289 287
290 return count; 288 return (expire - now) / HZ;
291} 289}
292 290
293static ssize_t loctls_read_file(struct b43_wldev *dev, 291static ssize_t loctls_read_file(struct b43_wldev *dev,
@@ -296,27 +294,45 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
296 ssize_t count = 0; 294 ssize_t count = 0;
297 struct b43_txpower_lo_control *lo; 295 struct b43_txpower_lo_control *lo;
298 int i, err = 0; 296 int i, err = 0;
297 struct b43_lo_calib *cal;
298 unsigned long now = jiffies;
299 struct b43_phy *phy = &dev->phy;
299 300
300 if (dev->phy.type != B43_PHYTYPE_G) { 301 if (phy->type != B43_PHYTYPE_G) {
301 fappend("Device is not a G-PHY\n"); 302 fappend("Device is not a G-PHY\n");
302 err = -ENODEV; 303 err = -ENODEV;
303 goto out; 304 goto out;
304 } 305 }
305 lo = dev->phy.lo_control; 306 lo = phy->lo_control;
306 fappend("-- Local Oscillator calibration data --\n\n"); 307 fappend("-- Local Oscillator calibration data --\n\n");
307 fappend("Measured: %d, Rebuild: %d, HW-power-control: %d\n", 308 fappend("HW-power-control enabled: %d\n",
308 lo->lo_measured,
309 lo->rebuild,
310 dev->phy.hardware_power_control); 309 dev->phy.hardware_power_control);
311 fappend("TX Bias: 0x%02X, TX Magn: 0x%02X\n", 310 fappend("TX Bias: 0x%02X, TX Magn: 0x%02X (expire in %lu sec)\n",
312 lo->tx_bias, lo->tx_magn); 311 lo->tx_bias, lo->tx_magn,
313 fappend("Power Vector: 0x%08X%08X\n", 312 calc_expire_secs(now, lo->txctl_measured_time,
313 B43_LO_TXCTL_EXPIRE));
314 fappend("Power Vector: 0x%08X%08X (expires in %lu sec)\n",
314 (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), 315 (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32),
315 (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL)); 316 (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL),
316 fappend("\nControl table WITH PADMIX:\n"); 317 calc_expire_secs(now, lo->pwr_vec_read_time,
317 count = append_lo_table(count, buf, bufsize, lo->with_padmix); 318 B43_LO_PWRVEC_EXPIRE));
318 fappend("\nControl table WITHOUT PADMIX:\n"); 319
319 count = append_lo_table(count, buf, bufsize, lo->no_padmix); 320 fappend("\nCalibrated settings:\n");
321 list_for_each_entry(cal, &lo->calib_list, list) {
322 bool active;
323
324 active = (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
325 b43_compare_rfatt(&cal->rfatt, &phy->rfatt));
326 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d "
327 "(expires in %lu sec)%s\n",
328 cal->bbatt.att,
329 cal->rfatt.att, cal->rfatt.with_padmix,
330 cal->ctl.i, cal->ctl.q,
331 calc_expire_secs(now, cal->calib_time,
332 B43_LO_CALIB_EXPIRE),
333 active ? " ACTIVE" : "");
334 }
335
320 fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); 336 fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n");
321 for (i = 0; i < lo->rfatt_list.len; i++) { 337 for (i = 0; i < lo->rfatt_list.len; i++) {
322 fappend("%u(%d), ", 338 fappend("%u(%d), ",
@@ -351,7 +367,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
351 struct b43_dfs_file *dfile; 367 struct b43_dfs_file *dfile;
352 ssize_t uninitialized_var(ret); 368 ssize_t uninitialized_var(ret);
353 char *buf; 369 char *buf;
354 const size_t bufsize = 1024 * 128; 370 const size_t bufsize = 1024 * 16; /* 16 kiB buffer */
355 const size_t buforder = get_order(bufsize); 371 const size_t buforder = get_order(bufsize);
356 int err = 0; 372 int err = 0;
357 373
@@ -380,8 +396,6 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
380 err = -ENOMEM; 396 err = -ENOMEM;
381 goto out_unlock; 397 goto out_unlock;
382 } 398 }
383 /* Sparse warns about the following memset, because it has a big
384 * size value. That warning is bogus, so I will ignore it. --mb */
385 memset(buf, 0, bufsize); 399 memset(buf, 0, bufsize);
386 if (dfops->take_irqlock) { 400 if (dfops->take_irqlock) {
387 spin_lock_irq(&dev->wl->irq_lock); 401 spin_lock_irq(&dev->wl->irq_lock);
@@ -523,6 +537,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
523 add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); 537 add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0);
524 add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); 538 add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0);
525 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); 539 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0);
540 add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
526 541
527#undef add_dyn_dbg 542#undef add_dyn_dbg
528} 543}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 6eebe858db5a..c75cff4151d9 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -10,6 +10,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
10 B43_DBG_DMAVERBOSE, 10 B43_DBG_DMAVERBOSE,
11 B43_DBG_PWORK_FAST, 11 B43_DBG_PWORK_FAST,
12 B43_DBG_PWORK_STOP, 12 B43_DBG_PWORK_STOP,
13 B43_DBG_LO,
13 __B43_NR_DYNDBG, 14 __B43_NR_DYNDBG,
14}; 15};
15 16
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 6dcbb3c87e72..b4eadd908bea 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1131,10 +1131,10 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1131} 1131}
1132 1132
1133static int dma_tx_fragment(struct b43_dmaring *ring, 1133static int dma_tx_fragment(struct b43_dmaring *ring,
1134 struct sk_buff *skb, 1134 struct sk_buff *skb)
1135 struct ieee80211_tx_control *ctl)
1136{ 1135{
1137 const struct b43_dma_ops *ops = ring->ops; 1136 const struct b43_dma_ops *ops = ring->ops;
1137 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1138 u8 *header; 1138 u8 *header;
1139 int slot, old_top_slot, old_used_slots; 1139 int slot, old_top_slot, old_used_slots;
1140 int err; 1140 int err;
@@ -1158,7 +1158,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1158 header = &(ring->txhdr_cache[slot * hdrsize]); 1158 header = &(ring->txhdr_cache[slot * hdrsize]);
1159 cookie = generate_cookie(ring, slot); 1159 cookie = generate_cookie(ring, slot);
1160 err = b43_generate_txhdr(ring->dev, header, 1160 err = b43_generate_txhdr(ring->dev, header,
1161 skb->data, skb->len, ctl, cookie); 1161 skb->data, skb->len, info, cookie);
1162 if (unlikely(err)) { 1162 if (unlikely(err)) {
1163 ring->current_slot = old_top_slot; 1163 ring->current_slot = old_top_slot;
1164 ring->used_slots = old_used_slots; 1164 ring->used_slots = old_used_slots;
@@ -1180,7 +1180,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1180 desc = ops->idx2desc(ring, slot, &meta); 1180 desc = ops->idx2desc(ring, slot, &meta);
1181 memset(meta, 0, sizeof(*meta)); 1181 memset(meta, 0, sizeof(*meta));
1182 1182
1183 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1184 meta->skb = skb; 1183 meta->skb = skb;
1185 meta->is_last_fragment = 1; 1184 meta->is_last_fragment = 1;
1186 1185
@@ -1210,7 +1209,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1210 1209
1211 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); 1210 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1212 1211
1213 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1212 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1214 /* Tell the firmware about the cookie of the last 1213 /* Tell the firmware about the cookie of the last
1215 * mcast frame, so it can clear the more-data bit in it. */ 1214 * mcast frame, so it can clear the more-data bit in it. */
1216 b43_shm_write16(ring->dev, B43_SHM_SHARED, 1215 b43_shm_write16(ring->dev, B43_SHM_SHARED,
@@ -1281,16 +1280,16 @@ static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
1281 return ring; 1280 return ring;
1282} 1281}
1283 1282
1284int b43_dma_tx(struct b43_wldev *dev, 1283int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1285 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
1286{ 1284{
1287 struct b43_dmaring *ring; 1285 struct b43_dmaring *ring;
1288 struct ieee80211_hdr *hdr; 1286 struct ieee80211_hdr *hdr;
1289 int err = 0; 1287 int err = 0;
1290 unsigned long flags; 1288 unsigned long flags;
1289 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1291 1290
1292 hdr = (struct ieee80211_hdr *)skb->data; 1291 hdr = (struct ieee80211_hdr *)skb->data;
1293 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1292 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1294 /* The multicast ring will be sent after the DTIM */ 1293 /* The multicast ring will be sent after the DTIM */
1295 ring = dev->dma.tx_ring_mcast; 1294 ring = dev->dma.tx_ring_mcast;
1296 /* Set the more-data bit. Ucode will clear it on 1295 /* Set the more-data bit. Ucode will clear it on
@@ -1298,7 +1297,8 @@ int b43_dma_tx(struct b43_wldev *dev,
1298 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1297 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1299 } else { 1298 } else {
1300 /* Decide by priority where to put this frame. */ 1299 /* Decide by priority where to put this frame. */
1301 ring = select_ring_by_priority(dev, ctl->queue); 1300 ring = select_ring_by_priority(
1301 dev, skb_get_queue_mapping(skb));
1302 } 1302 }
1303 1303
1304 spin_lock_irqsave(&ring->lock, flags); 1304 spin_lock_irqsave(&ring->lock, flags);
@@ -1316,9 +1316,9 @@ int b43_dma_tx(struct b43_wldev *dev,
1316 /* Assign the queue number to the ring (if not already done before) 1316 /* Assign the queue number to the ring (if not already done before)
1317 * so TX status handling can use it. The queue to ring mapping is 1317 * so TX status handling can use it. The queue to ring mapping is
1318 * static, so we don't need to store it per frame. */ 1318 * static, so we don't need to store it per frame. */
1319 ring->queue_prio = ctl->queue; 1319 ring->queue_prio = skb_get_queue_mapping(skb);
1320 1320
1321 err = dma_tx_fragment(ring, skb, ctl); 1321 err = dma_tx_fragment(ring, skb);
1322 if (unlikely(err == -ENOKEY)) { 1322 if (unlikely(err == -ENOKEY)) {
1323 /* Drop this packet, as we don't have the encryption key 1323 /* Drop this packet, as we don't have the encryption key
1324 * anymore and must not transmit it unencrypted. */ 1324 * anymore and must not transmit it unencrypted. */
@@ -1334,7 +1334,7 @@ int b43_dma_tx(struct b43_wldev *dev,
1334 if ((free_slots(ring) < SLOTS_PER_PACKET) || 1334 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1335 should_inject_overflow(ring)) { 1335 should_inject_overflow(ring)) {
1336 /* This TX ring is full. */ 1336 /* This TX ring is full. */
1337 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 1337 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
1338 ring->stopped = 1; 1338 ring->stopped = 1;
1339 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { 1339 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1340 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); 1340 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1377,13 +1377,19 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1377 b43_txhdr_size(dev), 1); 1377 b43_txhdr_size(dev), 1);
1378 1378
1379 if (meta->is_last_fragment) { 1379 if (meta->is_last_fragment) {
1380 B43_WARN_ON(!meta->skb); 1380 struct ieee80211_tx_info *info;
1381 /* Call back to inform the ieee80211 subsystem about the 1381
1382 * status of the transmission. 1382 BUG_ON(!meta->skb);
1383 * Some fields of txstat are already filled in dma_tx(). 1383
1384 info = IEEE80211_SKB_CB(meta->skb);
1385
1386 memset(&info->status, 0, sizeof(info->status));
1387
1388 /*
1389 * Call back to inform the ieee80211 subsystem about
1390 * the status of the transmission.
1384 */ 1391 */
1385 frame_succeed = b43_fill_txstatus_report( 1392 frame_succeed = b43_fill_txstatus_report(info, status);
1386 &(meta->txstat), status);
1387#ifdef CONFIG_B43_DEBUG 1393#ifdef CONFIG_B43_DEBUG
1388 if (frame_succeed) 1394 if (frame_succeed)
1389 ring->nr_succeed_tx_packets++; 1395 ring->nr_succeed_tx_packets++;
@@ -1391,8 +1397,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1391 ring->nr_failed_tx_packets++; 1397 ring->nr_failed_tx_packets++;
1392 ring->nr_total_packet_tries += status->frame_count; 1398 ring->nr_total_packet_tries += status->frame_count;
1393#endif /* DEBUG */ 1399#endif /* DEBUG */
1394 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1400 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1395 &(meta->txstat)); 1401
1396 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1402 /* skb is freed by ieee80211_tx_status_irqsafe() */
1397 meta->skb = NULL; 1403 meta->skb = NULL;
1398 } else { 1404 } else {
@@ -1427,18 +1433,16 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
1427{ 1433{
1428 const int nr_queues = dev->wl->hw->queues; 1434 const int nr_queues = dev->wl->hw->queues;
1429 struct b43_dmaring *ring; 1435 struct b43_dmaring *ring;
1430 struct ieee80211_tx_queue_stats_data *data;
1431 unsigned long flags; 1436 unsigned long flags;
1432 int i; 1437 int i;
1433 1438
1434 for (i = 0; i < nr_queues; i++) { 1439 for (i = 0; i < nr_queues; i++) {
1435 data = &(stats->data[i]);
1436 ring = select_ring_by_priority(dev, i); 1440 ring = select_ring_by_priority(dev, i);
1437 1441
1438 spin_lock_irqsave(&ring->lock, flags); 1442 spin_lock_irqsave(&ring->lock, flags);
1439 data->len = ring->used_slots / SLOTS_PER_PACKET; 1443 stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
1440 data->limit = ring->nr_slots / SLOTS_PER_PACKET; 1444 stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
1441 data->count = ring->nr_tx_packets; 1445 stats[i].count = ring->nr_tx_packets;
1442 spin_unlock_irqrestore(&ring->lock, flags); 1446 spin_unlock_irqrestore(&ring->lock, flags);
1443 } 1447 }
1444} 1448}
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 20acf885dba5..d1eb5c0848a5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -181,7 +181,6 @@ struct b43_dmadesc_meta {
181 dma_addr_t dmaaddr; 181 dma_addr_t dmaaddr;
182 /* ieee80211 TX status. Only used once per 802.11 frag. */ 182 /* ieee80211 TX status. Only used once per 802.11 frag. */
183 bool is_last_fragment; 183 bool is_last_fragment;
184 struct ieee80211_tx_status txstat;
185}; 184};
186 185
187struct b43_dmaring; 186struct b43_dmaring;
@@ -285,7 +284,7 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
285 struct ieee80211_tx_queue_stats *stats); 284 struct ieee80211_tx_queue_stats *stats);
286 285
287int b43_dma_tx(struct b43_wldev *dev, 286int b43_dma_tx(struct b43_wldev *dev,
288 struct sk_buff *skb, struct ieee80211_tx_control *ctl); 287 struct sk_buff *skb);
289void b43_dma_handle_txstatus(struct b43_wldev *dev, 288void b43_dma_handle_txstatus(struct b43_wldev *dev,
290 const struct b43_txstatus *status); 289 const struct b43_txstatus *status);
291 290
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index d890f366a23b..9c854d6aae36 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -36,17 +36,28 @@
36#include <linux/sched.h> 36#include <linux/sched.h>
37 37
38 38
39/* Define to 1 to always calibrate all possible LO control pairs. 39static struct b43_lo_calib * b43_find_lo_calib(struct b43_txpower_lo_control *lo,
40 * This is a workaround until we fix the partial LO calibration optimization. */ 40 const struct b43_bbatt *bbatt,
41#define B43_CALIB_ALL_LOCTLS 1 41 const struct b43_rfatt *rfatt)
42{
43 struct b43_lo_calib *c;
44
45 list_for_each_entry(c, &lo->calib_list, list) {
46 if (!b43_compare_bbatt(&c->bbatt, bbatt))
47 continue;
48 if (!b43_compare_rfatt(&c->rfatt, rfatt))
49 continue;
50 return c;
51 }
42 52
53 return NULL;
54}
43 55
44/* Write the LocalOscillator Control (adjust) value-pair. */ 56/* Write the LocalOscillator Control (adjust) value-pair. */
45static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control) 57static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control)
46{ 58{
47 struct b43_phy *phy = &dev->phy; 59 struct b43_phy *phy = &dev->phy;
48 u16 value; 60 u16 value;
49 u16 reg;
50 61
51 if (B43_DEBUG) { 62 if (B43_DEBUG) {
52 if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { 63 if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) {
@@ -56,189 +67,11 @@ static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control)
56 return; 67 return;
57 } 68 }
58 } 69 }
70 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
59 71
60 value = (u8) (control->q); 72 value = (u8) (control->q);
61 value |= ((u8) (control->i)) << 8; 73 value |= ((u8) (control->i)) << 8;
62 74 b43_phy_write(dev, B43_PHY_LO_CTL, value);
63 reg = (phy->type == B43_PHYTYPE_B) ? 0x002F : B43_PHY_LO_CTL;
64 b43_phy_write(dev, reg, value);
65}
66
67static int assert_rfatt_and_bbatt(const struct b43_rfatt *rfatt,
68 const struct b43_bbatt *bbatt,
69 struct b43_wldev *dev)
70{
71 int err = 0;
72
73 /* Check the attenuation values against the LO control array sizes. */
74 if (unlikely(rfatt->att >= B43_NR_RF)) {
75 b43err(dev->wl, "rfatt(%u) >= size of LO array\n", rfatt->att);
76 err = -EINVAL;
77 }
78 if (unlikely(bbatt->att >= B43_NR_BB)) {
79 b43err(dev->wl, "bbatt(%u) >= size of LO array\n", bbatt->att);
80 err = -EINVAL;
81 }
82
83 return err;
84}
85
86#if !B43_CALIB_ALL_LOCTLS
87static
88struct b43_loctl *b43_get_lo_g_ctl_nopadmix(struct b43_wldev *dev,
89 const struct b43_rfatt *rfatt,
90 const struct b43_bbatt *bbatt)
91{
92 struct b43_phy *phy = &dev->phy;
93 struct b43_txpower_lo_control *lo = phy->lo_control;
94
95 if (assert_rfatt_and_bbatt(rfatt, bbatt, dev))
96 return &(lo->no_padmix[0][0]); /* Just prevent a crash */
97 return &(lo->no_padmix[bbatt->att][rfatt->att]);
98}
99#endif /* !B43_CALIB_ALL_LOCTLS */
100
101struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev,
102 const struct b43_rfatt *rfatt,
103 const struct b43_bbatt *bbatt)
104{
105 struct b43_phy *phy = &dev->phy;
106 struct b43_txpower_lo_control *lo = phy->lo_control;
107
108 if (assert_rfatt_and_bbatt(rfatt, bbatt, dev))
109 return &(lo->no_padmix[0][0]); /* Just prevent a crash */
110 if (rfatt->with_padmix)
111 return &(lo->with_padmix[bbatt->att][rfatt->att]);
112 return &(lo->no_padmix[bbatt->att][rfatt->att]);
113}
114
115/* Call a function for every possible LO control value-pair. */
116static void b43_call_for_each_loctl(struct b43_wldev *dev,
117 void (*func) (struct b43_wldev *,
118 struct b43_loctl *))
119{
120 struct b43_phy *phy = &dev->phy;
121 struct b43_txpower_lo_control *ctl = phy->lo_control;
122 int i, j;
123
124 for (i = 0; i < B43_NR_BB; i++) {
125 for (j = 0; j < B43_NR_RF; j++)
126 func(dev, &(ctl->with_padmix[i][j]));
127 }
128 for (i = 0; i < B43_NR_BB; i++) {
129 for (j = 0; j < B43_NR_RF; j++)
130 func(dev, &(ctl->no_padmix[i][j]));
131 }
132}
133
134static u16 lo_b_r15_loop(struct b43_wldev *dev)
135{
136 int i;
137 u16 ret = 0;
138
139 for (i = 0; i < 10; i++) {
140 b43_phy_write(dev, 0x0015, 0xAFA0);
141 udelay(1);
142 b43_phy_write(dev, 0x0015, 0xEFA0);
143 udelay(10);
144 b43_phy_write(dev, 0x0015, 0xFFA0);
145 udelay(40);
146 ret += b43_phy_read(dev, 0x002C);
147 }
148
149 return ret;
150}
151
152void b43_lo_b_measure(struct b43_wldev *dev)
153{
154 struct b43_phy *phy = &dev->phy;
155 u16 regstack[12] = { 0 };
156 u16 mls;
157 u16 fval;
158 int i, j;
159
160 regstack[0] = b43_phy_read(dev, 0x0015);
161 regstack[1] = b43_radio_read16(dev, 0x0052) & 0xFFF0;
162
163 if (phy->radio_ver == 0x2053) {
164 regstack[2] = b43_phy_read(dev, 0x000A);
165 regstack[3] = b43_phy_read(dev, 0x002A);
166 regstack[4] = b43_phy_read(dev, 0x0035);
167 regstack[5] = b43_phy_read(dev, 0x0003);
168 regstack[6] = b43_phy_read(dev, 0x0001);
169 regstack[7] = b43_phy_read(dev, 0x0030);
170
171 regstack[8] = b43_radio_read16(dev, 0x0043);
172 regstack[9] = b43_radio_read16(dev, 0x007A);
173 regstack[10] = b43_read16(dev, 0x03EC);
174 regstack[11] = b43_radio_read16(dev, 0x0052) & 0x00F0;
175
176 b43_phy_write(dev, 0x0030, 0x00FF);
177 b43_write16(dev, 0x03EC, 0x3F3F);
178 b43_phy_write(dev, 0x0035, regstack[4] & 0xFF7F);
179 b43_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0);
180 }
181 b43_phy_write(dev, 0x0015, 0xB000);
182 b43_phy_write(dev, 0x002B, 0x0004);
183
184 if (phy->radio_ver == 0x2053) {
185 b43_phy_write(dev, 0x002B, 0x0203);
186 b43_phy_write(dev, 0x002A, 0x08A3);
187 }
188
189 phy->minlowsig[0] = 0xFFFF;
190
191 for (i = 0; i < 4; i++) {
192 b43_radio_write16(dev, 0x0052, regstack[1] | i);
193 lo_b_r15_loop(dev);
194 }
195 for (i = 0; i < 10; i++) {
196 b43_radio_write16(dev, 0x0052, regstack[1] | i);
197 mls = lo_b_r15_loop(dev) / 10;
198 if (mls < phy->minlowsig[0]) {
199 phy->minlowsig[0] = mls;
200 phy->minlowsigpos[0] = i;
201 }
202 }
203 b43_radio_write16(dev, 0x0052, regstack[1] | phy->minlowsigpos[0]);
204
205 phy->minlowsig[1] = 0xFFFF;
206
207 for (i = -4; i < 5; i += 2) {
208 for (j = -4; j < 5; j += 2) {
209 if (j < 0)
210 fval = (0x0100 * i) + j + 0x0100;
211 else
212 fval = (0x0100 * i) + j;
213 b43_phy_write(dev, 0x002F, fval);
214 mls = lo_b_r15_loop(dev) / 10;
215 if (mls < phy->minlowsig[1]) {
216 phy->minlowsig[1] = mls;
217 phy->minlowsigpos[1] = fval;
218 }
219 }
220 }
221 phy->minlowsigpos[1] += 0x0101;
222
223 b43_phy_write(dev, 0x002F, phy->minlowsigpos[1]);
224 if (phy->radio_ver == 0x2053) {
225 b43_phy_write(dev, 0x000A, regstack[2]);
226 b43_phy_write(dev, 0x002A, regstack[3]);
227 b43_phy_write(dev, 0x0035, regstack[4]);
228 b43_phy_write(dev, 0x0003, regstack[5]);
229 b43_phy_write(dev, 0x0001, regstack[6]);
230 b43_phy_write(dev, 0x0030, regstack[7]);
231
232 b43_radio_write16(dev, 0x0043, regstack[8]);
233 b43_radio_write16(dev, 0x007A, regstack[9]);
234
235 b43_radio_write16(dev, 0x0052,
236 (b43_radio_read16(dev, 0x0052) & 0x000F)
237 | regstack[11]);
238
239 b43_write16(dev, 0x03EC, regstack[10]);
240 }
241 b43_phy_write(dev, 0x0015, regstack[0]);
242} 75}
243 76
244static u16 lo_measure_feedthrough(struct b43_wldev *dev, 77static u16 lo_measure_feedthrough(struct b43_wldev *dev,
@@ -366,7 +199,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
366 if (lb_gain > 10) { 199 if (lb_gain > 10) {
367 radio_pctl_reg = 0; 200 radio_pctl_reg = 0;
368 pga = abs(10 - lb_gain) / 6; 201 pga = abs(10 - lb_gain) / 6;
369 pga = limit_value(pga, 0, 15); 202 pga = clamp_val(pga, 0, 15);
370 } else { 203 } else {
371 int cmp_val; 204 int cmp_val;
372 int tmp; 205 int tmp;
@@ -438,48 +271,26 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
438 b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52) 271 b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52)
439 & 0xFFF0); /* TX bias == 0 */ 272 & 0xFFF0); /* TX bias == 0 */
440 } 273 }
274 lo->txctl_measured_time = jiffies;
441} 275}
442 276
443static void lo_read_power_vector(struct b43_wldev *dev) 277static void lo_read_power_vector(struct b43_wldev *dev)
444{ 278{
445 struct b43_phy *phy = &dev->phy; 279 struct b43_phy *phy = &dev->phy;
446 struct b43_txpower_lo_control *lo = phy->lo_control; 280 struct b43_txpower_lo_control *lo = phy->lo_control;
447 u16 i; 281 int i;
448 u64 tmp; 282 u64 tmp;
449 u64 power_vector = 0; 283 u64 power_vector = 0;
450 int rf_offset, bb_offset;
451 struct b43_loctl *loctl;
452 284
453 for (i = 0; i < 8; i += 2) { 285 for (i = 0; i < 8; i += 2) {
454 tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i); 286 tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i);
455 /* Clear the top byte. We get holes in the bitmap... */
456 tmp &= 0xFF;
457 power_vector |= (tmp << (i * 8)); 287 power_vector |= (tmp << (i * 8));
458 /* Clear the vector on the device. */ 288 /* Clear the vector on the device. */
459 b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0); 289 b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0);
460 } 290 }
461
462 if (power_vector) 291 if (power_vector)
463 lo->power_vector = power_vector; 292 lo->power_vector = power_vector;
464 power_vector = lo->power_vector; 293 lo->pwr_vec_read_time = jiffies;
465
466 for (i = 0; i < 64; i++) {
467 if (power_vector & ((u64) 1ULL << i)) {
468 /* Now figure out which b43_loctl corresponds
469 * to this bit.
470 */
471 rf_offset = i / lo->rfatt_list.len;
472 bb_offset = i % lo->rfatt_list.len; //FIXME?
473 loctl =
474 b43_get_lo_g_ctl(dev,
475 &lo->rfatt_list.list[rf_offset],
476 &lo->bbatt_list.list[bb_offset]);
477 /* And mark it as "used", as the device told us
478 * through the bitmap it is using it.
479 */
480 loctl->used = 1;
481 }
482 }
483} 294}
484 295
485/* 802.11/LO/GPHY/MeasuringGains */ 296/* 802.11/LO/GPHY/MeasuringGains */
@@ -510,7 +321,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
510 phy->lna_lod_gain = 1; 321 phy->lna_lod_gain = 1;
511 trsw_rx_gain -= 8; 322 trsw_rx_gain -= 8;
512 } 323 }
513 trsw_rx_gain = limit_value(trsw_rx_gain, 0, 0x2D); 324 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D);
514 phy->pga_gain = trsw_rx_gain / 3; 325 phy->pga_gain = trsw_rx_gain / 3;
515 if (phy->pga_gain >= 5) { 326 if (phy->pga_gain >= 5) {
516 phy->pga_gain -= 5; 327 phy->pga_gain -= 5;
@@ -609,8 +420,6 @@ static void lo_measure_setup(struct b43_wldev *dev,
609 b43_phy_write(dev, B43_PHY_CCK(0x16), 0x410); 420 b43_phy_write(dev, B43_PHY_CCK(0x16), 0x410);
610 b43_phy_write(dev, B43_PHY_CCK(0x17), 0x820); 421 b43_phy_write(dev, B43_PHY_CCK(0x17), 0x820);
611 } 422 }
612 if (!lo->rebuild && b43_has_hardware_pctl(phy))
613 lo_read_power_vector(dev);
614 if (phy->rev >= 2) { 423 if (phy->rev >= 2) {
615 sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); 424 sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER);
616 sav->phy_analogoverval = 425 sav->phy_analogoverval =
@@ -691,8 +500,12 @@ static void lo_measure_setup(struct b43_wldev *dev,
691 b43_radio_read16(dev, 0x51); /* dummy read */ 500 b43_radio_read16(dev, 0x51); /* dummy read */
692 if (phy->type == B43_PHYTYPE_G) 501 if (phy->type == B43_PHYTYPE_G)
693 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0); 502 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0);
694 if (lo->rebuild) 503
504 /* Re-measure the txctl values, if needed. */
505 if (time_before(lo->txctl_measured_time,
506 jiffies - B43_LO_TXCTL_EXPIRE))
695 lo_measure_txctl_values(dev); 507 lo_measure_txctl_values(dev);
508
696 if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) { 509 if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) {
697 b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078); 510 b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078);
698 } else { 511 } else {
@@ -707,7 +520,6 @@ static void lo_measure_restore(struct b43_wldev *dev,
707 struct lo_g_saved_values *sav) 520 struct lo_g_saved_values *sav)
708{ 521{
709 struct b43_phy *phy = &dev->phy; 522 struct b43_phy *phy = &dev->phy;
710 struct b43_txpower_lo_control *lo = phy->lo_control;
711 u16 tmp; 523 u16 tmp;
712 524
713 if (phy->rev >= 2) { 525 if (phy->rev >= 2) {
@@ -722,14 +534,6 @@ static void lo_measure_restore(struct b43_wldev *dev,
722 tmp = (phy->pga_gain | 0xEFA0); 534 tmp = (phy->pga_gain | 0xEFA0);
723 b43_phy_write(dev, B43_PHY_PGACTL, tmp); 535 b43_phy_write(dev, B43_PHY_PGACTL, tmp);
724 } 536 }
725 if (b43_has_hardware_pctl(phy)) {
726 b43_gphy_dc_lt_init(dev);
727 } else {
728 if (lo->rebuild)
729 b43_lo_g_adjust_to(dev, 3, 2, 0);
730 else
731 b43_lo_g_adjust(dev);
732 }
733 if (phy->type == B43_PHYTYPE_G) { 537 if (phy->type == B43_PHYTYPE_G) {
734 if (phy->rev >= 3) 538 if (phy->rev >= 3)
735 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0xC078); 539 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0xC078);
@@ -793,7 +597,6 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
793 struct b43_lo_g_statemachine *d) 597 struct b43_lo_g_statemachine *d)
794{ 598{
795 struct b43_phy *phy = &dev->phy; 599 struct b43_phy *phy = &dev->phy;
796 struct b43_txpower_lo_control *lo = phy->lo_control;
797 struct b43_loctl test_loctl; 600 struct b43_loctl test_loctl;
798 struct b43_loctl orig_loctl; 601 struct b43_loctl orig_loctl;
799 struct b43_loctl prev_loctl = { 602 struct b43_loctl prev_loctl = {
@@ -852,7 +655,7 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
852 found_lower = 1; 655 found_lower = 1;
853 d->lowest_feedth = feedth; 656 d->lowest_feedth = feedth;
854 if ((d->nr_measured < 2) && 657 if ((d->nr_measured < 2) &&
855 (!has_loopback_gain(phy) || lo->rebuild)) 658 !has_loopback_gain(phy))
856 break; 659 break;
857 } 660 }
858 } 661 }
@@ -874,7 +677,6 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
874 int *max_rx_gain) 677 int *max_rx_gain)
875{ 678{
876 struct b43_phy *phy = &dev->phy; 679 struct b43_phy *phy = &dev->phy;
877 struct b43_txpower_lo_control *lo = phy->lo_control;
878 struct b43_lo_g_statemachine d; 680 struct b43_lo_g_statemachine d;
879 u16 feedth; 681 u16 feedth;
880 int found_lower; 682 int found_lower;
@@ -883,18 +685,18 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
883 685
884 d.nr_measured = 0; 686 d.nr_measured = 0;
885 d.state_val_multiplier = 1; 687 d.state_val_multiplier = 1;
886 if (has_loopback_gain(phy) && !lo->rebuild) 688 if (has_loopback_gain(phy))
887 d.state_val_multiplier = 3; 689 d.state_val_multiplier = 3;
888 690
889 memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl)); 691 memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl));
890 if (has_loopback_gain(phy) && lo->rebuild) 692 if (has_loopback_gain(phy))
891 max_repeat = 4; 693 max_repeat = 4;
892 do { 694 do {
893 b43_lo_write(dev, &d.min_loctl); 695 b43_lo_write(dev, &d.min_loctl);
894 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 696 feedth = lo_measure_feedthrough(dev, phy->lna_gain,
895 phy->pga_gain, 697 phy->pga_gain,
896 phy->trsw_rx_gain); 698 phy->trsw_rx_gain);
897 if (!lo->rebuild && feedth < 0x258) { 699 if (feedth < 0x258) {
898 if (feedth >= 0x12C) 700 if (feedth >= 0x12C)
899 *max_rx_gain += 6; 701 *max_rx_gain += 6;
900 else 702 else
@@ -944,278 +746,188 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
944 } while (++repeat_cnt < max_repeat); 746 } while (++repeat_cnt < max_repeat);
945} 747}
946 748
947#if B43_CALIB_ALL_LOCTLS 749static
948static const struct b43_rfatt b43_full_rfatt_list_items[] = { 750struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
949 { .att = 0, .with_padmix = 0, }, 751 const struct b43_bbatt *bbatt,
950 { .att = 1, .with_padmix = 0, }, 752 const struct b43_rfatt *rfatt)
951 { .att = 2, .with_padmix = 0, },
952 { .att = 3, .with_padmix = 0, },
953 { .att = 4, .with_padmix = 0, },
954 { .att = 5, .with_padmix = 0, },
955 { .att = 6, .with_padmix = 0, },
956 { .att = 7, .with_padmix = 0, },
957 { .att = 8, .with_padmix = 0, },
958 { .att = 9, .with_padmix = 0, },
959 { .att = 10, .with_padmix = 0, },
960 { .att = 11, .with_padmix = 0, },
961 { .att = 12, .with_padmix = 0, },
962 { .att = 13, .with_padmix = 0, },
963 { .att = 14, .with_padmix = 0, },
964 { .att = 15, .with_padmix = 0, },
965 { .att = 0, .with_padmix = 1, },
966 { .att = 1, .with_padmix = 1, },
967 { .att = 2, .with_padmix = 1, },
968 { .att = 3, .with_padmix = 1, },
969 { .att = 4, .with_padmix = 1, },
970 { .att = 5, .with_padmix = 1, },
971 { .att = 6, .with_padmix = 1, },
972 { .att = 7, .with_padmix = 1, },
973 { .att = 8, .with_padmix = 1, },
974 { .att = 9, .with_padmix = 1, },
975 { .att = 10, .with_padmix = 1, },
976 { .att = 11, .with_padmix = 1, },
977 { .att = 12, .with_padmix = 1, },
978 { .att = 13, .with_padmix = 1, },
979 { .att = 14, .with_padmix = 1, },
980 { .att = 15, .with_padmix = 1, },
981};
982static const struct b43_rfatt_list b43_full_rfatt_list = {
983 .list = b43_full_rfatt_list_items,
984 .len = ARRAY_SIZE(b43_full_rfatt_list_items),
985};
986
987static const struct b43_bbatt b43_full_bbatt_list_items[] = {
988 { .att = 0, },
989 { .att = 1, },
990 { .att = 2, },
991 { .att = 3, },
992 { .att = 4, },
993 { .att = 5, },
994 { .att = 6, },
995 { .att = 7, },
996 { .att = 8, },
997 { .att = 9, },
998 { .att = 10, },
999 { .att = 11, },
1000};
1001static const struct b43_bbatt_list b43_full_bbatt_list = {
1002 .list = b43_full_bbatt_list_items,
1003 .len = ARRAY_SIZE(b43_full_bbatt_list_items),
1004};
1005#endif /* B43_CALIB_ALL_LOCTLS */
1006
1007static void lo_measure(struct b43_wldev *dev)
1008{ 753{
1009 struct b43_phy *phy = &dev->phy; 754 struct b43_phy *phy = &dev->phy;
1010 struct b43_txpower_lo_control *lo = phy->lo_control;
1011 struct b43_loctl loctl = { 755 struct b43_loctl loctl = {
1012 .i = 0, 756 .i = 0,
1013 .q = 0, 757 .q = 0,
1014 }; 758 };
1015 struct b43_loctl *ploctl;
1016 int max_rx_gain; 759 int max_rx_gain;
1017 int rfidx, bbidx; 760 struct b43_lo_calib *cal;
1018 const struct b43_bbatt_list *bbatt_list; 761 struct lo_g_saved_values uninitialized_var(saved_regs);
1019 const struct b43_rfatt_list *rfatt_list;
1020
1021 /* Values from the "TXCTL Register and Value Table" */ 762 /* Values from the "TXCTL Register and Value Table" */
1022 u16 txctl_reg; 763 u16 txctl_reg;
1023 u16 txctl_value; 764 u16 txctl_value;
1024 u16 pad_mix_gain; 765 u16 pad_mix_gain;
1025 766
1026 bbatt_list = &lo->bbatt_list; 767 saved_regs.old_channel = phy->channel;
1027 rfatt_list = &lo->rfatt_list; 768 b43_mac_suspend(dev);
1028#if B43_CALIB_ALL_LOCTLS 769 lo_measure_setup(dev, &saved_regs);
1029 bbatt_list = &b43_full_bbatt_list;
1030 rfatt_list = &b43_full_rfatt_list;
1031#endif
1032 770
1033 txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain); 771 txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain);
1034 772
1035 for (rfidx = 0; rfidx < rfatt_list->len; rfidx++) { 773 b43_radio_write16(dev, 0x43,
1036 774 (b43_radio_read16(dev, 0x43) & 0xFFF0)
1037 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) 775 | rfatt->att);
1038 & 0xFFF0) | 776 b43_radio_write16(dev, txctl_reg,
1039 rfatt_list->list[rfidx].att); 777 (b43_radio_read16(dev, txctl_reg) & ~txctl_value)
1040 b43_radio_write16(dev, txctl_reg, 778 | (rfatt->with_padmix) ? txctl_value : 0);
1041 (b43_radio_read16(dev, txctl_reg)
1042 & ~txctl_value)
1043 | (rfatt_list->list[rfidx].with_padmix ?
1044 txctl_value : 0));
1045
1046 for (bbidx = 0; bbidx < bbatt_list->len; bbidx++) {
1047 if (lo->rebuild) {
1048#if B43_CALIB_ALL_LOCTLS
1049 ploctl = b43_get_lo_g_ctl(dev,
1050 &rfatt_list->list[rfidx],
1051 &bbatt_list->list[bbidx]);
1052#else
1053 ploctl = b43_get_lo_g_ctl_nopadmix(dev,
1054 &rfatt_list->
1055 list[rfidx],
1056 &bbatt_list->
1057 list[bbidx]);
1058#endif
1059 } else {
1060 ploctl = b43_get_lo_g_ctl(dev,
1061 &rfatt_list->list[rfidx],
1062 &bbatt_list->list[bbidx]);
1063 if (!ploctl->used)
1064 continue;
1065 }
1066 memcpy(&loctl, ploctl, sizeof(loctl));
1067 loctl.i = 0;
1068 loctl.q = 0;
1069
1070 max_rx_gain = rfatt_list->list[rfidx].att * 2;
1071 max_rx_gain += bbatt_list->list[bbidx].att / 2;
1072 if (rfatt_list->list[rfidx].with_padmix)
1073 max_rx_gain -= pad_mix_gain;
1074 if (has_loopback_gain(phy))
1075 max_rx_gain += phy->max_lb_gain;
1076 lo_measure_gain_values(dev, max_rx_gain,
1077 has_loopback_gain(phy));
1078
1079 b43_phy_set_baseband_attenuation(dev,
1080 bbatt_list->list[bbidx].att);
1081 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
1082 if (phy->type == B43_PHYTYPE_B) {
1083 loctl.i++;
1084 loctl.q++;
1085 }
1086 b43_loctl_set_calibrated(&loctl, 1);
1087 memcpy(ploctl, &loctl, sizeof(loctl));
1088 }
1089 }
1090}
1091
1092#if B43_DEBUG
1093static void do_validate_loctl(struct b43_wldev *dev, struct b43_loctl *control)
1094{
1095 const int is_initializing = (b43_status(dev) == B43_STAT_UNINIT);
1096 int i = control->i;
1097 int q = control->q;
1098 779
1099 if (b43_loctl_is_calibrated(control)) { 780 max_rx_gain = rfatt->att * 2;
1100 if ((abs(i) > 16) || (abs(q) > 16)) 781 max_rx_gain += bbatt->att / 2;
1101 goto error; 782 if (rfatt->with_padmix)
1102 } else { 783 max_rx_gain -= pad_mix_gain;
1103 if (control->used) 784 if (has_loopback_gain(phy))
1104 goto error; 785 max_rx_gain += phy->max_lb_gain;
1105 if (dev->phy.lo_control->rebuild) { 786 lo_measure_gain_values(dev, max_rx_gain,
1106 control->i = 0; 787 has_loopback_gain(phy));
1107 control->q = 0; 788
1108 if ((i != B43_LOCTL_POISON) || 789 b43_phy_set_baseband_attenuation(dev, bbatt->att);
1109 (q != B43_LOCTL_POISON)) 790 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
1110 goto error; 791
1111 } 792 lo_measure_restore(dev, &saved_regs);
793 b43_mac_enable(dev);
794
795 if (b43_debug(dev, B43_DBG_LO)) {
796 b43dbg(dev->wl, "LO: Calibrated for BB(%u), RF(%u,%u) "
797 "=> I=%d Q=%d\n",
798 bbatt->att, rfatt->att, rfatt->with_padmix,
799 loctl.i, loctl.q);
1112 } 800 }
1113 if (is_initializing && control->used)
1114 goto error;
1115
1116 return;
1117error:
1118 b43err(dev->wl, "LO control pair validation failed "
1119 "(I: %d, Q: %d, used %u, calib: %u, initing: %d)\n",
1120 i, q, control->used,
1121 b43_loctl_is_calibrated(control),
1122 is_initializing);
1123}
1124 801
1125static void validate_all_loctls(struct b43_wldev *dev) 802 cal = kmalloc(sizeof(*cal), GFP_KERNEL);
1126{ 803 if (!cal) {
1127 b43_call_for_each_loctl(dev, do_validate_loctl); 804 b43warn(dev->wl, "LO calib: out of memory\n");
1128} 805 return NULL;
1129
1130static void do_reset_calib(struct b43_wldev *dev, struct b43_loctl *control)
1131{
1132 if (dev->phy.lo_control->rebuild ||
1133 control->used) {
1134 b43_loctl_set_calibrated(control, 0);
1135 control->i = B43_LOCTL_POISON;
1136 control->q = B43_LOCTL_POISON;
1137 } 806 }
807 memcpy(&cal->bbatt, bbatt, sizeof(*bbatt));
808 memcpy(&cal->rfatt, rfatt, sizeof(*rfatt));
809 memcpy(&cal->ctl, &loctl, sizeof(loctl));
810 cal->calib_time = jiffies;
811 INIT_LIST_HEAD(&cal->list);
812
813 return cal;
1138} 814}
1139 815
1140static void reset_all_loctl_calibration_states(struct b43_wldev *dev) 816/* Get a calibrated LO setting for the given attenuation values.
817 * Might return a NULL pointer under OOM! */
818static
819struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
820 const struct b43_bbatt *bbatt,
821 const struct b43_rfatt *rfatt)
1141{ 822{
1142 b43_call_for_each_loctl(dev, do_reset_calib); 823 struct b43_txpower_lo_control *lo = dev->phy.lo_control;
824 struct b43_lo_calib *c;
825
826 c = b43_find_lo_calib(lo, bbatt, rfatt);
827 if (c)
828 return c;
829 /* Not in the list of calibrated LO settings.
830 * Calibrate it now. */
831 c = b43_calibrate_lo_setting(dev, bbatt, rfatt);
832 if (!c)
833 return NULL;
834 list_add(&c->list, &lo->calib_list);
835
836 return c;
1143} 837}
1144 838
1145#else /* B43_DEBUG */ 839void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
1146static inline void validate_all_loctls(struct b43_wldev *dev) { }
1147static inline void reset_all_loctl_calibration_states(struct b43_wldev *dev) { }
1148#endif /* B43_DEBUG */
1149
1150void b43_lo_g_measure(struct b43_wldev *dev)
1151{ 840{
1152 struct b43_phy *phy = &dev->phy; 841 struct b43_phy *phy = &dev->phy;
1153 struct lo_g_saved_values uninitialized_var(sav); 842 struct b43_txpower_lo_control *lo = phy->lo_control;
1154 843 int i;
1155 B43_WARN_ON((phy->type != B43_PHYTYPE_B) && 844 int rf_offset, bb_offset;
1156 (phy->type != B43_PHYTYPE_G)); 845 const struct b43_rfatt *rfatt;
1157 846 const struct b43_bbatt *bbatt;
1158 sav.old_channel = phy->channel; 847 u64 power_vector;
1159 lo_measure_setup(dev, &sav); 848 bool table_changed = 0;
1160 reset_all_loctl_calibration_states(dev);
1161 lo_measure(dev);
1162 lo_measure_restore(dev, &sav);
1163
1164 validate_all_loctls(dev);
1165 849
1166 phy->lo_control->lo_measured = 1; 850 BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
1167 phy->lo_control->rebuild = 0; 851 B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
1168}
1169 852
1170#if B43_DEBUG 853 power_vector = lo->power_vector;
1171static void validate_loctl_calibration(struct b43_wldev *dev, 854 if (!update_all && !power_vector)
1172 struct b43_loctl *loctl, 855 return; /* Nothing to do. */
1173 struct b43_rfatt *rfatt, 856
1174 struct b43_bbatt *bbatt) 857 /* Suspend the MAC now to avoid continuous suspend/enable
1175{ 858 * cycles in the loop. */
1176 if (b43_loctl_is_calibrated(loctl)) 859 b43_mac_suspend(dev);
1177 return; 860
1178 if (!dev->phy.lo_control->lo_measured) { 861 for (i = 0; i < B43_DC_LT_SIZE * 2; i++) {
1179 /* On init we set the attenuation values before we 862 struct b43_lo_calib *cal;
1180 * calibrated the LO. I guess that's OK. */ 863 int idx;
1181 return; 864 u16 val;
865
866 if (!update_all && !(power_vector & (((u64)1ULL) << i)))
867 continue;
868 /* Update the table entry for this power_vector bit.
869 * The table rows are RFatt entries and columns are BBatt. */
870 bb_offset = i / lo->rfatt_list.len;
871 rf_offset = i % lo->rfatt_list.len;
872 bbatt = &(lo->bbatt_list.list[bb_offset]);
873 rfatt = &(lo->rfatt_list.list[rf_offset]);
874
875 cal = b43_calibrate_lo_setting(dev, bbatt, rfatt);
876 if (!cal) {
877 b43warn(dev->wl, "LO: Could not "
878 "calibrate DC table entry\n");
879 continue;
880 }
881 /*FIXME: Is Q really in the low nibble? */
882 val = (u8)(cal->ctl.q);
883 val |= ((u8)(cal->ctl.i)) << 4;
884 kfree(cal);
885
886 /* Get the index into the hardware DC LT. */
887 idx = i / 2;
888 /* Change the table in memory. */
889 if (i % 2) {
890 /* Change the high byte. */
891 lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00FF)
892 | ((val & 0x00FF) << 8);
893 } else {
894 /* Change the low byte. */
895 lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
896 | (val & 0x00FF);
897 }
898 table_changed = 1;
1182 } 899 }
1183 b43err(dev->wl, "Adjusting Local Oscillator to an uncalibrated " 900 if (table_changed) {
1184 "control pair: rfatt=%u,%spadmix bbatt=%u\n", 901 /* The table changed in memory. Update the hardware table. */
1185 rfatt->att, 902 for (i = 0; i < B43_DC_LT_SIZE; i++)
1186 (rfatt->with_padmix) ? "" : "no-", 903 b43_phy_write(dev, 0x3A0 + i, lo->dc_lt[i]);
1187 bbatt->att); 904 }
1188} 905 b43_mac_enable(dev);
1189#else
1190static inline void validate_loctl_calibration(struct b43_wldev *dev,
1191 struct b43_loctl *loctl,
1192 struct b43_rfatt *rfatt,
1193 struct b43_bbatt *bbatt)
1194{
1195} 906}
1196#endif
1197 907
1198static inline void fixup_rfatt_for_txcontrol(struct b43_rfatt *rf, 908/* Fixup the RF attenuation value for the case where we are
1199 u8 tx_control) 909 * using the PAD mixer. */
910static inline void b43_lo_fixup_rfatt(struct b43_rfatt *rf)
1200{ 911{
1201 if (tx_control & B43_TXCTL_TXMIX) { 912 if (!rf->with_padmix)
1202 if (rf->att < 5) 913 return;
1203 rf->att = 4; 914 if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3))
1204 } 915 rf->att = 4;
1205} 916}
1206 917
1207void b43_lo_g_adjust(struct b43_wldev *dev) 918void b43_lo_g_adjust(struct b43_wldev *dev)
1208{ 919{
1209 struct b43_phy *phy = &dev->phy; 920 struct b43_phy *phy = &dev->phy;
921 struct b43_lo_calib *cal;
1210 struct b43_rfatt rf; 922 struct b43_rfatt rf;
1211 struct b43_loctl *loctl;
1212 923
1213 memcpy(&rf, &phy->rfatt, sizeof(rf)); 924 memcpy(&rf, &phy->rfatt, sizeof(rf));
1214 fixup_rfatt_for_txcontrol(&rf, phy->tx_control); 925 b43_lo_fixup_rfatt(&rf);
1215 926
1216 loctl = b43_get_lo_g_ctl(dev, &rf, &phy->bbatt); 927 cal = b43_get_calib_lo_settings(dev, &phy->bbatt, &rf);
1217 validate_loctl_calibration(dev, loctl, &rf, &phy->bbatt); 928 if (!cal)
1218 b43_lo_write(dev, loctl); 929 return;
930 b43_lo_write(dev, &cal->ctl);
1219} 931}
1220 932
1221void b43_lo_g_adjust_to(struct b43_wldev *dev, 933void b43_lo_g_adjust_to(struct b43_wldev *dev,
@@ -1223,39 +935,102 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
1223{ 935{
1224 struct b43_rfatt rf; 936 struct b43_rfatt rf;
1225 struct b43_bbatt bb; 937 struct b43_bbatt bb;
1226 struct b43_loctl *loctl; 938 struct b43_lo_calib *cal;
1227 939
1228 memset(&rf, 0, sizeof(rf)); 940 memset(&rf, 0, sizeof(rf));
1229 memset(&bb, 0, sizeof(bb)); 941 memset(&bb, 0, sizeof(bb));
1230 rf.att = rfatt; 942 rf.att = rfatt;
1231 bb.att = bbatt; 943 bb.att = bbatt;
1232 fixup_rfatt_for_txcontrol(&rf, tx_control); 944 b43_lo_fixup_rfatt(&rf);
1233 loctl = b43_get_lo_g_ctl(dev, &rf, &bb); 945 cal = b43_get_calib_lo_settings(dev, &bb, &rf);
1234 validate_loctl_calibration(dev, loctl, &rf, &bb); 946 if (!cal)
1235 b43_lo_write(dev, loctl); 947 return;
948 b43_lo_write(dev, &cal->ctl);
1236} 949}
1237 950
1238static void do_mark_unused(struct b43_wldev *dev, struct b43_loctl *control) 951/* Periodic LO maintanance work */
952void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1239{ 953{
1240 control->used = 0; 954 struct b43_phy *phy = &dev->phy;
955 struct b43_txpower_lo_control *lo = phy->lo_control;
956 unsigned long now;
957 unsigned long expire;
958 struct b43_lo_calib *cal, *tmp;
959 bool current_item_expired = 0;
960 bool hwpctl;
961
962 if (!lo)
963 return;
964 now = jiffies;
965 hwpctl = b43_has_hardware_pctl(phy);
966
967 if (hwpctl) {
968 /* Read the power vector and update it, if needed. */
969 expire = now - B43_LO_PWRVEC_EXPIRE;
970 if (time_before(lo->pwr_vec_read_time, expire)) {
971 lo_read_power_vector(dev);
972 b43_gphy_dc_lt_init(dev, 0);
973 }
974 //FIXME Recalc the whole DC table from time to time?
975 }
976
977 if (hwpctl)
978 return;
979 /* Search for expired LO settings. Remove them.
980 * Recalibrate the current setting, if expired. */
981 expire = now - B43_LO_CALIB_EXPIRE;
982 list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) {
983 if (!time_before(cal->calib_time, expire))
984 continue;
985 /* This item expired. */
986 if (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
987 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)) {
988 B43_WARN_ON(current_item_expired);
989 current_item_expired = 1;
990 }
991 if (b43_debug(dev, B43_DBG_LO)) {
992 b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
993 "I=%d, Q=%d expired\n",
994 cal->bbatt.att, cal->rfatt.att,
995 cal->rfatt.with_padmix,
996 cal->ctl.i, cal->ctl.q);
997 }
998 list_del(&cal->list);
999 kfree(cal);
1000 }
1001 if (current_item_expired || unlikely(list_empty(&lo->calib_list))) {
1002 /* Recalibrate currently used LO setting. */
1003 if (b43_debug(dev, B43_DBG_LO))
1004 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n");
1005 cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt);
1006 if (cal) {
1007 list_add(&cal->list, &lo->calib_list);
1008 b43_lo_write(dev, &cal->ctl);
1009 } else
1010 b43warn(dev->wl, "Failed to recalibrate current LO setting\n");
1011 }
1241} 1012}
1242 1013
1243void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev) 1014void b43_lo_g_cleanup(struct b43_wldev *dev)
1244{ 1015{
1245 struct b43_phy *phy = &dev->phy; 1016 struct b43_txpower_lo_control *lo = dev->phy.lo_control;
1246 struct b43_txpower_lo_control *lo = phy->lo_control; 1017 struct b43_lo_calib *cal, *tmp;
1247 1018
1248 b43_call_for_each_loctl(dev, do_mark_unused); 1019 if (!lo)
1249 lo->rebuild = 1; 1020 return;
1021 list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) {
1022 list_del(&cal->list);
1023 kfree(cal);
1024 }
1250} 1025}
1251 1026
1252void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev) 1027/* LO Initialization */
1028void b43_lo_g_init(struct b43_wldev *dev)
1253{ 1029{
1254 struct b43_phy *phy = &dev->phy; 1030 struct b43_phy *phy = &dev->phy;
1255 struct b43_rfatt rf;
1256 1031
1257 memcpy(&rf, &phy->rfatt, sizeof(rf)); 1032 if (b43_has_hardware_pctl(phy)) {
1258 fixup_rfatt_for_txcontrol(&rf, phy->tx_control); 1033 lo_read_power_vector(dev);
1259 1034 b43_gphy_dc_lt_init(dev, 1);
1260 b43_get_lo_g_ctl(dev, &rf, &phy->bbatt)->used = 1; 1035 }
1261} 1036}
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 455615d1f8c6..1da321cabc12 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -10,82 +10,63 @@ struct b43_loctl {
10 /* Control values. */ 10 /* Control values. */
11 s8 i; 11 s8 i;
12 s8 q; 12 s8 q;
13 /* "Used by hardware" flag. */
14 bool used;
15#ifdef CONFIG_B43_DEBUG
16 /* Is this lo-control-array entry calibrated? */
17 bool calibrated;
18#endif
19}; 13};
20
21/* Debugging: Poison value for i and q values. */ 14/* Debugging: Poison value for i and q values. */
22#define B43_LOCTL_POISON 111 15#define B43_LOCTL_POISON 111
23 16
24/* loctl->calibrated debugging mechanism */ 17/* This struct holds calibrated LO settings for a set of
25#ifdef CONFIG_B43_DEBUG 18 * Baseband and RF attenuation settings. */
26static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, 19struct b43_lo_calib {
27 bool calibrated) 20 /* The set of attenuation values this set of LO
28{ 21 * control values is calibrated for. */
29 loctl->calibrated = calibrated; 22 struct b43_bbatt bbatt;
30} 23 struct b43_rfatt rfatt;
31static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) 24 /* The set of control values for the LO. */
32{ 25 struct b43_loctl ctl;
33 return loctl->calibrated; 26 /* The time when these settings were calibrated (in jiffies) */
34} 27 unsigned long calib_time;
35#else 28 /* List. */
36static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, 29 struct list_head list;
37 bool calibrated) 30};
38{ 31
39} 32/* Size of the DC Lookup Table in 16bit words. */
40static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) 33#define B43_DC_LT_SIZE 32
41{ 34
42 return 1; 35/* Local Oscillator calibration information */
43}
44#endif
45
46/* TX Power LO Control Array.
47 * Value-pairs to adjust the LocalOscillator are stored
48 * in this structure.
49 * There are two different set of values. One for "Flag is Set"
50 * and one for "Flag is Unset".
51 * By "Flag" the flag in struct b43_rfatt is meant.
52 * The Value arrays are two-dimensional. The first index
53 * is the baseband attenuation and the second index
54 * is the radio attenuation.
55 * Use b43_get_lo_g_ctl() to retrieve a value from the lists.
56 */
57struct b43_txpower_lo_control { 36struct b43_txpower_lo_control {
58#define B43_NR_BB 12 37 /* Lists of RF and BB attenuation values for this device.
59#define B43_NR_RF 16 38 * Used for building hardware power control tables. */
60 /* LO Control values, with PAD Mixer */
61 struct b43_loctl with_padmix[B43_NR_BB][B43_NR_RF];
62 /* LO Control values, without PAD Mixer */
63 struct b43_loctl no_padmix[B43_NR_BB][B43_NR_RF];
64
65 /* Flag to indicate a complete rebuild of the two tables above
66 * to the LO measuring code. */
67 bool rebuild;
68
69 /* Lists of valid RF and BB attenuation values for this device. */
70 struct b43_rfatt_list rfatt_list; 39 struct b43_rfatt_list rfatt_list;
71 struct b43_bbatt_list bbatt_list; 40 struct b43_bbatt_list bbatt_list;
72 41
42 /* The DC Lookup Table is cached in memory here.
43 * Note that this is only used for Hardware Power Control. */
44 u16 dc_lt[B43_DC_LT_SIZE];
45
46 /* List of calibrated control values (struct b43_lo_calib). */
47 struct list_head calib_list;
48 /* Last time the power vector was read (jiffies). */
49 unsigned long pwr_vec_read_time;
50 /* Last time the txctl values were measured (jiffies). */
51 unsigned long txctl_measured_time;
52
73 /* Current TX Bias value */ 53 /* Current TX Bias value */
74 u8 tx_bias; 54 u8 tx_bias;
75 /* Current TX Magnification Value (if used by the device) */ 55 /* Current TX Magnification Value (if used by the device) */
76 u8 tx_magn; 56 u8 tx_magn;
77 57
78 /* GPHY LO is measured. */
79 bool lo_measured;
80
81 /* Saved device PowerVector */ 58 /* Saved device PowerVector */
82 u64 power_vector; 59 u64 power_vector;
83}; 60};
84 61
85/* Measure the BPHY Local Oscillator. */ 62/* Calibration expire timeouts.
86void b43_lo_b_measure(struct b43_wldev *dev); 63 * Timeouts must be multiple of 15 seconds. To make sure
87/* Measure the BPHY/GPHY Local Oscillator. */ 64 * the item really expired when the 15 second timer hits, we
88void b43_lo_g_measure(struct b43_wldev *dev); 65 * subtract two additional seconds from the timeout. */
66#define B43_LO_CALIB_EXPIRE (HZ * (30 - 2))
67#define B43_LO_PWRVEC_EXPIRE (HZ * (30 - 2))
68#define B43_LO_TXCTL_EXPIRE (HZ * (180 - 4))
69
89 70
90/* Adjust the Local Oscillator to the saved attenuation 71/* Adjust the Local Oscillator to the saved attenuation
91 * and txctl values. 72 * and txctl values.
@@ -95,18 +76,10 @@ void b43_lo_g_adjust(struct b43_wldev *dev);
95void b43_lo_g_adjust_to(struct b43_wldev *dev, 76void b43_lo_g_adjust_to(struct b43_wldev *dev,
96 u16 rfatt, u16 bbatt, u16 tx_control); 77 u16 rfatt, u16 bbatt, u16 tx_control);
97 78
98/* Mark all possible b43_lo_g_ctl as "unused" */ 79void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
99void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev);
100/* Mark the b43_lo_g_ctl corresponding to the current
101 * attenuation values as used.
102 */
103void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev);
104 80
105/* Get a reference to a LO Control value pair in the 81void b43_lo_g_maintanance_work(struct b43_wldev *dev);
106 * TX Power LO Control Array. 82void b43_lo_g_cleanup(struct b43_wldev *dev);
107 */ 83void b43_lo_g_init(struct b43_wldev *dev);
108struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev,
109 const struct b43_rfatt *rfatt,
110 const struct b43_bbatt *bbatt);
111 84
112#endif /* B43_LO_H_ */ 85#endif /* B43_LO_H_ */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6c3d9ea0a9f8..1e31e0bca744 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1182,10 +1182,10 @@ static void handle_irq_noise(struct b43_wldev *dev)
1182 /* Get the noise samples. */ 1182 /* Get the noise samples. */
1183 B43_WARN_ON(dev->noisecalc.nr_samples >= 8); 1183 B43_WARN_ON(dev->noisecalc.nr_samples >= 8);
1184 i = dev->noisecalc.nr_samples; 1184 i = dev->noisecalc.nr_samples;
1185 noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1185 noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1186 noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1186 noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1187 noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1187 noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1188 noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1188 noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1189 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; 1189 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]];
1190 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; 1190 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]];
1191 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; 1191 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]];
@@ -1368,18 +1368,18 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1368 unsigned int rate; 1368 unsigned int rate;
1369 u16 ctl; 1369 u16 ctl;
1370 int antenna; 1370 int antenna;
1371 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
1371 1372
1372 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 1373 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
1373 len = min((size_t) dev->wl->current_beacon->len, 1374 len = min((size_t) dev->wl->current_beacon->len,
1374 0x200 - sizeof(struct b43_plcp_hdr6)); 1375 0x200 - sizeof(struct b43_plcp_hdr6));
1375 rate = dev->wl->beacon_txctl.tx_rate->hw_value; 1376 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
1376 1377
1377 b43_write_template_common(dev, (const u8 *)bcn, 1378 b43_write_template_common(dev, (const u8 *)bcn,
1378 len, ram_offset, shm_size_offset, rate); 1379 len, ram_offset, shm_size_offset, rate);
1379 1380
1380 /* Write the PHY TX control parameters. */ 1381 /* Write the PHY TX control parameters. */
1381 antenna = b43_antenna_from_ieee80211(dev, 1382 antenna = b43_antenna_from_ieee80211(dev, info->antenna_sel_tx);
1382 dev->wl->beacon_txctl.antenna_sel_tx);
1383 antenna = b43_antenna_to_phyctl(antenna); 1383 antenna = b43_antenna_to_phyctl(antenna);
1384 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); 1384 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL);
1385 /* We can't send beacons with short preamble. Would get PHY errors. */ 1385 /* We can't send beacons with short preamble. Would get PHY errors. */
@@ -1430,11 +1430,17 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1430 i += ie_len + 2; 1430 i += ie_len + 2;
1431 } 1431 }
1432 if (!tim_found) { 1432 if (!tim_found) {
1433 b43warn(dev->wl, "Did not find a valid TIM IE in " 1433 /*
1434 "the beacon template packet. AP or IBSS operation " 1434 * If ucode wants to modify TIM do it behind the beacon, this
1435 "may be broken.\n"); 1435 * will happen, for example, when doing mesh networking.
1436 } else 1436 */
1437 b43dbg(dev->wl, "Updated beacon template\n"); 1437 b43_shm_write16(dev, B43_SHM_SHARED,
1438 B43_SHM_SH_TIMBPOS,
1439 len + sizeof(struct b43_plcp_hdr6));
1440 b43_shm_write16(dev, B43_SHM_SHARED,
1441 B43_SHM_SH_DTIMPER, 0);
1442 }
1443 b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset);
1438} 1444}
1439 1445
1440static void b43_write_probe_resp_plcp(struct b43_wldev *dev, 1446static void b43_write_probe_resp_plcp(struct b43_wldev *dev,
@@ -1573,7 +1579,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1573 struct b43_wl *wl = dev->wl; 1579 struct b43_wl *wl = dev->wl;
1574 u32 cmd, beacon0_valid, beacon1_valid; 1580 u32 cmd, beacon0_valid, beacon1_valid;
1575 1581
1576 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1582 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP) &&
1583 !b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
1577 return; 1584 return;
1578 1585
1579 /* This is the bottom half of the asynchronous beacon update. */ 1586 /* This is the bottom half of the asynchronous beacon update. */
@@ -1640,8 +1647,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
1640 1647
1641/* Asynchronously update the packet templates in template RAM. 1648/* Asynchronously update the packet templates in template RAM.
1642 * Locking: Requires wl->irq_lock to be locked. */ 1649 * Locking: Requires wl->irq_lock to be locked. */
1643static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon, 1650static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon)
1644 const struct ieee80211_tx_control *txctl)
1645{ 1651{
1646 /* This is the top half of the ansynchronous beacon update. 1652 /* This is the top half of the ansynchronous beacon update.
1647 * The bottom half is the beacon IRQ. 1653 * The bottom half is the beacon IRQ.
@@ -1652,7 +1658,6 @@ static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon,
1652 if (wl->current_beacon) 1658 if (wl->current_beacon)
1653 dev_kfree_skb_any(wl->current_beacon); 1659 dev_kfree_skb_any(wl->current_beacon);
1654 wl->current_beacon = beacon; 1660 wl->current_beacon = beacon;
1655 memcpy(&wl->beacon_txctl, txctl, sizeof(wl->beacon_txctl));
1656 wl->beacon0_uploaded = 0; 1661 wl->beacon0_uploaded = 0;
1657 wl->beacon1_uploaded = 0; 1662 wl->beacon1_uploaded = 0;
1658 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger); 1663 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger);
@@ -1691,9 +1696,100 @@ static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
1691 b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int); 1696 b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int);
1692} 1697}
1693 1698
1699static void b43_handle_firmware_panic(struct b43_wldev *dev)
1700{
1701 u16 reason;
1702
1703 /* Read the register that contains the reason code for the panic. */
1704 reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_FWPANIC_REASON_REG);
1705 b43err(dev->wl, "Whoopsy, firmware panic! Reason: %u\n", reason);
1706
1707 switch (reason) {
1708 default:
1709 b43dbg(dev->wl, "The panic reason is unknown.\n");
1710 /* fallthrough */
1711 case B43_FWPANIC_DIE:
1712 /* Do not restart the controller or firmware.
1713 * The device is nonfunctional from now on.
1714 * Restarting would result in this panic to trigger again,
1715 * so we avoid that recursion. */
1716 break;
1717 case B43_FWPANIC_RESTART:
1718 b43_controller_restart(dev, "Microcode panic");
1719 break;
1720 }
1721}
1722
1694static void handle_irq_ucode_debug(struct b43_wldev *dev) 1723static void handle_irq_ucode_debug(struct b43_wldev *dev)
1695{ 1724{
1696 //TODO 1725 unsigned int i, cnt;
1726 u16 reason, marker_id, marker_line;
1727 __le16 *buf;
1728
1729 /* The proprietary firmware doesn't have this IRQ. */
1730 if (!dev->fw.opensource)
1731 return;
1732
1733 /* Read the register that contains the reason code for this IRQ. */
1734 reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG);
1735
1736 switch (reason) {
1737 case B43_DEBUGIRQ_PANIC:
1738 b43_handle_firmware_panic(dev);
1739 break;
1740 case B43_DEBUGIRQ_DUMP_SHM:
1741 if (!B43_DEBUG)
1742 break; /* Only with driver debugging enabled. */
1743 buf = kmalloc(4096, GFP_ATOMIC);
1744 if (!buf) {
1745 b43dbg(dev->wl, "SHM-dump: Failed to allocate memory\n");
1746 goto out;
1747 }
1748 for (i = 0; i < 4096; i += 2) {
1749 u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, i);
1750 buf[i / 2] = cpu_to_le16(tmp);
1751 }
1752 b43info(dev->wl, "Shared memory dump:\n");
1753 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1754 16, 2, buf, 4096, 1);
1755 kfree(buf);
1756 break;
1757 case B43_DEBUGIRQ_DUMP_REGS:
1758 if (!B43_DEBUG)
1759 break; /* Only with driver debugging enabled. */
1760 b43info(dev->wl, "Microcode register dump:\n");
1761 for (i = 0, cnt = 0; i < 64; i++) {
1762 u16 tmp = b43_shm_read16(dev, B43_SHM_SCRATCH, i);
1763 if (cnt == 0)
1764 printk(KERN_INFO);
1765 printk("r%02u: 0x%04X ", i, tmp);
1766 cnt++;
1767 if (cnt == 6) {
1768 printk("\n");
1769 cnt = 0;
1770 }
1771 }
1772 printk("\n");
1773 break;
1774 case B43_DEBUGIRQ_MARKER:
1775 if (!B43_DEBUG)
1776 break; /* Only with driver debugging enabled. */
1777 marker_id = b43_shm_read16(dev, B43_SHM_SCRATCH,
1778 B43_MARKER_ID_REG);
1779 marker_line = b43_shm_read16(dev, B43_SHM_SCRATCH,
1780 B43_MARKER_LINE_REG);
1781 b43info(dev->wl, "The firmware just executed the MARKER(%u) "
1782 "at line number %u\n",
1783 marker_id, marker_line);
1784 break;
1785 default:
1786 b43dbg(dev->wl, "Debug-IRQ triggered for unknown reason: %u\n",
1787 reason);
1788 }
1789out:
1790 /* Acknowledge the debug-IRQ, so the firmware can continue. */
1791 b43_shm_write16(dev, B43_SHM_SCRATCH,
1792 B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK);
1697} 1793}
1698 1794
1699/* Interrupt handler bottom-half */ 1795/* Interrupt handler bottom-half */
@@ -1880,7 +1976,8 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
1880 1976
1881static int do_request_fw(struct b43_wldev *dev, 1977static int do_request_fw(struct b43_wldev *dev,
1882 const char *name, 1978 const char *name,
1883 struct b43_firmware_file *fw) 1979 struct b43_firmware_file *fw,
1980 bool silent)
1884{ 1981{
1885 char path[sizeof(modparam_fwpostfix) + 32]; 1982 char path[sizeof(modparam_fwpostfix) + 32];
1886 const struct firmware *blob; 1983 const struct firmware *blob;
@@ -1904,9 +2001,15 @@ static int do_request_fw(struct b43_wldev *dev,
1904 "b43%s/%s.fw", 2001 "b43%s/%s.fw",
1905 modparam_fwpostfix, name); 2002 modparam_fwpostfix, name);
1906 err = request_firmware(&blob, path, dev->dev->dev); 2003 err = request_firmware(&blob, path, dev->dev->dev);
1907 if (err) { 2004 if (err == -ENOENT) {
1908 b43err(dev->wl, "Firmware file \"%s\" not found " 2005 if (!silent) {
1909 "or load failed.\n", path); 2006 b43err(dev->wl, "Firmware file \"%s\" not found\n",
2007 path);
2008 }
2009 return err;
2010 } else if (err) {
2011 b43err(dev->wl, "Firmware file \"%s\" request failed (err=%d)\n",
2012 path, err);
1910 return err; 2013 return err;
1911 } 2014 }
1912 if (blob->size < sizeof(struct b43_fw_header)) 2015 if (blob->size < sizeof(struct b43_fw_header))
@@ -1957,7 +2060,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
1957 filename = "ucode13"; 2060 filename = "ucode13";
1958 else 2061 else
1959 goto err_no_ucode; 2062 goto err_no_ucode;
1960 err = do_request_fw(dev, filename, &fw->ucode); 2063 err = do_request_fw(dev, filename, &fw->ucode, 0);
1961 if (err) 2064 if (err)
1962 goto err_load; 2065 goto err_load;
1963 2066
@@ -1968,8 +2071,13 @@ static int b43_request_firmware(struct b43_wldev *dev)
1968 filename = NULL; 2071 filename = NULL;
1969 else 2072 else
1970 goto err_no_pcm; 2073 goto err_no_pcm;
1971 err = do_request_fw(dev, filename, &fw->pcm); 2074 fw->pcm_request_failed = 0;
1972 if (err) 2075 err = do_request_fw(dev, filename, &fw->pcm, 1);
2076 if (err == -ENOENT) {
2077 /* We did not find a PCM file? Not fatal, but
2078 * core rev <= 10 must do without hwcrypto then. */
2079 fw->pcm_request_failed = 1;
2080 } else if (err)
1973 goto err_load; 2081 goto err_load;
1974 2082
1975 /* Get initvals */ 2083 /* Get initvals */
@@ -1987,7 +2095,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
1987 if ((rev >= 5) && (rev <= 10)) 2095 if ((rev >= 5) && (rev <= 10))
1988 filename = "b0g0initvals5"; 2096 filename = "b0g0initvals5";
1989 else if (rev >= 13) 2097 else if (rev >= 13)
1990 filename = "lp0initvals13"; 2098 filename = "b0g0initvals13";
1991 else 2099 else
1992 goto err_no_initvals; 2100 goto err_no_initvals;
1993 break; 2101 break;
@@ -2000,7 +2108,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2000 default: 2108 default:
2001 goto err_no_initvals; 2109 goto err_no_initvals;
2002 } 2110 }
2003 err = do_request_fw(dev, filename, &fw->initvals); 2111 err = do_request_fw(dev, filename, &fw->initvals, 0);
2004 if (err) 2112 if (err)
2005 goto err_load; 2113 goto err_load;
2006 2114
@@ -2034,7 +2142,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2034 default: 2142 default:
2035 goto err_no_initvals; 2143 goto err_no_initvals;
2036 } 2144 }
2037 err = do_request_fw(dev, filename, &fw->initvals_band); 2145 err = do_request_fw(dev, filename, &fw->initvals_band, 0);
2038 if (err) 2146 if (err)
2039 goto err_load; 2147 goto err_load;
2040 2148
@@ -2151,14 +2259,28 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2151 err = -EOPNOTSUPP; 2259 err = -EOPNOTSUPP;
2152 goto error; 2260 goto error;
2153 } 2261 }
2154 b43info(dev->wl, "Loading firmware version %u.%u "
2155 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
2156 fwrev, fwpatch,
2157 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
2158 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
2159
2160 dev->fw.rev = fwrev; 2262 dev->fw.rev = fwrev;
2161 dev->fw.patch = fwpatch; 2263 dev->fw.patch = fwpatch;
2264 dev->fw.opensource = (fwdate == 0xFFFF);
2265
2266 if (dev->fw.opensource) {
2267 /* Patchlevel info is encoded in the "time" field. */
2268 dev->fw.patch = fwtime;
2269 b43info(dev->wl, "Loading OpenSource firmware version %u.%u%s\n",
2270 dev->fw.rev, dev->fw.patch,
2271 dev->fw.pcm_request_failed ? " (Hardware crypto not supported)" : "");
2272 } else {
2273 b43info(dev->wl, "Loading firmware version %u.%u "
2274 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
2275 fwrev, fwpatch,
2276 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
2277 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
2278 if (dev->fw.pcm_request_failed) {
2279 b43warn(dev->wl, "No \"pcm5.fw\" firmware file found. "
2280 "Hardware accelerated cryptography is disabled.\n");
2281 b43_print_fw_helptext(dev->wl, 0);
2282 }
2283 }
2162 2284
2163 if (b43_is_old_txhdr_format(dev)) { 2285 if (b43_is_old_txhdr_format(dev)) {
2164 b43warn(dev->wl, "You are using an old firmware image. " 2286 b43warn(dev->wl, "You are using an old firmware image. "
@@ -2335,7 +2457,7 @@ static void b43_gpio_cleanup(struct b43_wldev *dev)
2335} 2457}
2336 2458
2337/* http://bcm-specs.sipsolutions.net/EnableMac */ 2459/* http://bcm-specs.sipsolutions.net/EnableMac */
2338static void b43_mac_enable(struct b43_wldev *dev) 2460void b43_mac_enable(struct b43_wldev *dev)
2339{ 2461{
2340 dev->mac_suspended--; 2462 dev->mac_suspended--;
2341 B43_WARN_ON(dev->mac_suspended < 0); 2463 B43_WARN_ON(dev->mac_suspended < 0);
@@ -2349,16 +2471,11 @@ static void b43_mac_enable(struct b43_wldev *dev)
2349 b43_read32(dev, B43_MMIO_MACCTL); 2471 b43_read32(dev, B43_MMIO_MACCTL);
2350 b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); 2472 b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
2351 b43_power_saving_ctl_bits(dev, 0); 2473 b43_power_saving_ctl_bits(dev, 0);
2352
2353 /* Re-enable IRQs. */
2354 spin_lock_irq(&dev->wl->irq_lock);
2355 b43_interrupt_enable(dev, dev->irq_savedstate);
2356 spin_unlock_irq(&dev->wl->irq_lock);
2357 } 2474 }
2358} 2475}
2359 2476
2360/* http://bcm-specs.sipsolutions.net/SuspendMAC */ 2477/* http://bcm-specs.sipsolutions.net/SuspendMAC */
2361static void b43_mac_suspend(struct b43_wldev *dev) 2478void b43_mac_suspend(struct b43_wldev *dev)
2362{ 2479{
2363 int i; 2480 int i;
2364 u32 tmp; 2481 u32 tmp;
@@ -2367,14 +2484,6 @@ static void b43_mac_suspend(struct b43_wldev *dev)
2367 B43_WARN_ON(dev->mac_suspended < 0); 2484 B43_WARN_ON(dev->mac_suspended < 0);
2368 2485
2369 if (dev->mac_suspended == 0) { 2486 if (dev->mac_suspended == 0) {
2370 /* Mask IRQs before suspending MAC. Otherwise
2371 * the MAC stays busy and won't suspend. */
2372 spin_lock_irq(&dev->wl->irq_lock);
2373 tmp = b43_interrupt_disable(dev, B43_IRQ_ALL);
2374 spin_unlock_irq(&dev->wl->irq_lock);
2375 b43_synchronize_irq(dev);
2376 dev->irq_savedstate = tmp;
2377
2378 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); 2487 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
2379 b43_write32(dev, B43_MMIO_MACCTL, 2488 b43_write32(dev, B43_MMIO_MACCTL,
2380 b43_read32(dev, B43_MMIO_MACCTL) 2489 b43_read32(dev, B43_MMIO_MACCTL)
@@ -2416,7 +2525,8 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2416 ctl &= ~B43_MACCTL_BEACPROMISC; 2525 ctl &= ~B43_MACCTL_BEACPROMISC;
2417 ctl |= B43_MACCTL_INFRA; 2526 ctl |= B43_MACCTL_INFRA;
2418 2527
2419 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 2528 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
2529 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
2420 ctl |= B43_MACCTL_AP; 2530 ctl |= B43_MACCTL_AP;
2421 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 2531 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS))
2422 ctl &= ~B43_MACCTL_INFRA; 2532 ctl &= ~B43_MACCTL_INFRA;
@@ -2530,6 +2640,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
2530{ 2640{
2531 b43_radio_turn_off(dev, 1); 2641 b43_radio_turn_off(dev, 1);
2532 b43_gpio_cleanup(dev); 2642 b43_gpio_cleanup(dev);
2643 b43_lo_g_cleanup(dev);
2533 /* firmware is released later */ 2644 /* firmware is released later */
2534} 2645}
2535 2646
@@ -2636,28 +2747,12 @@ err_gpio_clean:
2636 return err; 2747 return err;
2637} 2748}
2638 2749
2639static void b43_periodic_every120sec(struct b43_wldev *dev)
2640{
2641 struct b43_phy *phy = &dev->phy;
2642
2643 if (phy->type != B43_PHYTYPE_G || phy->rev < 2)
2644 return;
2645
2646 b43_mac_suspend(dev);
2647 b43_lo_g_measure(dev);
2648 b43_mac_enable(dev);
2649 if (b43_has_hardware_pctl(phy))
2650 b43_lo_g_ctl_mark_all_unused(dev);
2651}
2652
2653static void b43_periodic_every60sec(struct b43_wldev *dev) 2750static void b43_periodic_every60sec(struct b43_wldev *dev)
2654{ 2751{
2655 struct b43_phy *phy = &dev->phy; 2752 struct b43_phy *phy = &dev->phy;
2656 2753
2657 if (phy->type != B43_PHYTYPE_G) 2754 if (phy->type != B43_PHYTYPE_G)
2658 return; 2755 return;
2659 if (!b43_has_hardware_pctl(phy))
2660 b43_lo_g_ctl_mark_all_unused(dev);
2661 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) { 2756 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
2662 b43_mac_suspend(dev); 2757 b43_mac_suspend(dev);
2663 b43_calc_nrssi_slope(dev); 2758 b43_calc_nrssi_slope(dev);
@@ -2709,6 +2804,7 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
2709 } 2804 }
2710 } 2805 }
2711 b43_phy_xmitpower(dev); //FIXME: unless scanning? 2806 b43_phy_xmitpower(dev); //FIXME: unless scanning?
2807 b43_lo_g_maintanance_work(dev);
2712 //TODO for APHY (temperature?) 2808 //TODO for APHY (temperature?)
2713 2809
2714 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 2810 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
@@ -2720,8 +2816,6 @@ static void do_periodic_work(struct b43_wldev *dev)
2720 unsigned int state; 2816 unsigned int state;
2721 2817
2722 state = dev->periodic_state; 2818 state = dev->periodic_state;
2723 if (state % 8 == 0)
2724 b43_periodic_every120sec(dev);
2725 if (state % 4 == 0) 2819 if (state % 4 == 0)
2726 b43_periodic_every60sec(dev); 2820 b43_periodic_every60sec(dev);
2727 if (state % 2 == 0) 2821 if (state % 2 == 0)
@@ -2869,8 +2963,7 @@ static int b43_rng_init(struct b43_wl *wl)
2869} 2963}
2870 2964
2871static int b43_op_tx(struct ieee80211_hw *hw, 2965static int b43_op_tx(struct ieee80211_hw *hw,
2872 struct sk_buff *skb, 2966 struct sk_buff *skb)
2873 struct ieee80211_tx_control *ctl)
2874{ 2967{
2875 struct b43_wl *wl = hw_to_b43_wl(hw); 2968 struct b43_wl *wl = hw_to_b43_wl(hw);
2876 struct b43_wldev *dev = wl->current_dev; 2969 struct b43_wldev *dev = wl->current_dev;
@@ -2892,9 +2985,9 @@ static int b43_op_tx(struct ieee80211_hw *hw,
2892 err = -ENODEV; 2985 err = -ENODEV;
2893 if (likely(b43_status(dev) >= B43_STAT_STARTED)) { 2986 if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
2894 if (b43_using_pio_transfers(dev)) 2987 if (b43_using_pio_transfers(dev))
2895 err = b43_pio_tx(dev, skb, ctl); 2988 err = b43_pio_tx(dev, skb);
2896 else 2989 else
2897 err = b43_dma_tx(dev, skb, ctl); 2990 err = b43_dma_tx(dev, skb);
2898 } 2991 }
2899 2992
2900 read_unlock_irqrestore(&wl->tx_lock, flags); 2993 read_unlock_irqrestore(&wl->tx_lock, flags);
@@ -3052,8 +3145,7 @@ static void b43_qos_update_work(struct work_struct *work)
3052 mutex_unlock(&wl->mutex); 3145 mutex_unlock(&wl->mutex);
3053} 3146}
3054 3147
3055static int b43_op_conf_tx(struct ieee80211_hw *hw, 3148static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3056 int _queue,
3057 const struct ieee80211_tx_queue_params *params) 3149 const struct ieee80211_tx_queue_params *params)
3058{ 3150{
3059 struct b43_wl *wl = hw_to_b43_wl(hw); 3151 struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3301,8 +3393,9 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3301 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx); 3393 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx);
3302 b43_set_rx_antenna(dev, antenna); 3394 b43_set_rx_antenna(dev, antenna);
3303 3395
3304 /* Update templates for AP mode. */ 3396 /* Update templates for AP/mesh mode. */
3305 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 3397 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
3398 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
3306 b43_set_beacon_int(dev, conf->beacon_int); 3399 b43_set_beacon_int(dev, conf->beacon_int);
3307 3400
3308 if (!!conf->radio_enabled != phy->radio_on) { 3401 if (!!conf->radio_enabled != phy->radio_on) {
@@ -3353,6 +3446,13 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3353 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) 3446 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
3354 goto out_unlock; 3447 goto out_unlock;
3355 3448
3449 if (dev->fw.pcm_request_failed) {
3450 /* We don't have firmware for the crypto engine.
3451 * Must use software-crypto. */
3452 err = -EOPNOTSUPP;
3453 goto out_unlock;
3454 }
3455
3356 err = -EINVAL; 3456 err = -EINVAL;
3357 switch (key->alg) { 3457 switch (key->alg) {
3358 case ALG_WEP: 3458 case ALG_WEP:
@@ -3483,13 +3583,12 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3483 else 3583 else
3484 memset(wl->bssid, 0, ETH_ALEN); 3584 memset(wl->bssid, 0, ETH_ALEN);
3485 if (b43_status(dev) >= B43_STAT_INITIALIZED) { 3585 if (b43_status(dev) >= B43_STAT_INITIALIZED) {
3486 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) { 3586 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
3487 B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); 3587 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) {
3588 B43_WARN_ON(conf->type != wl->if_type);
3488 b43_set_ssid(dev, conf->ssid, conf->ssid_len); 3589 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3489 if (conf->beacon) { 3590 if (conf->beacon)
3490 b43_update_templates(wl, conf->beacon, 3591 b43_update_templates(wl, conf->beacon);
3491 conf->beacon_control);
3492 }
3493 } 3592 }
3494 b43_write_mac_bssid_templates(dev); 3593 b43_write_mac_bssid_templates(dev);
3495 } 3594 }
@@ -3554,7 +3653,6 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3554 /* Start data flow (TX/RX). */ 3653 /* Start data flow (TX/RX). */
3555 b43_mac_enable(dev); 3654 b43_mac_enable(dev);
3556 b43_interrupt_enable(dev, dev->irq_savedstate); 3655 b43_interrupt_enable(dev, dev->irq_savedstate);
3557 ieee80211_start_queues(dev->wl->hw);
3558 3656
3559 /* Start maintainance work */ 3657 /* Start maintainance work */
3560 b43_periodic_tasks_setup(dev); 3658 b43_periodic_tasks_setup(dev);
@@ -3695,8 +3793,8 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
3695 lo = phy->lo_control; 3793 lo = phy->lo_control;
3696 if (lo) { 3794 if (lo) {
3697 memset(lo, 0, sizeof(*(phy->lo_control))); 3795 memset(lo, 0, sizeof(*(phy->lo_control)));
3698 lo->rebuild = 1;
3699 lo->tx_bias = 0xFF; 3796 lo->tx_bias = 0xFF;
3797 INIT_LIST_HEAD(&lo->calib_list);
3700 } 3798 }
3701 phy->max_lb_gain = 0; 3799 phy->max_lb_gain = 0;
3702 phy->trsw_rx_gain = 0; 3800 phy->trsw_rx_gain = 0;
@@ -4027,6 +4125,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4027 /* TODO: allow WDS/AP devices to coexist */ 4125 /* TODO: allow WDS/AP devices to coexist */
4028 4126
4029 if (conf->type != IEEE80211_IF_TYPE_AP && 4127 if (conf->type != IEEE80211_IF_TYPE_AP &&
4128 conf->type != IEEE80211_IF_TYPE_MESH_POINT &&
4030 conf->type != IEEE80211_IF_TYPE_STA && 4129 conf->type != IEEE80211_IF_TYPE_STA &&
4031 conf->type != IEEE80211_IF_TYPE_WDS && 4130 conf->type != IEEE80211_IF_TYPE_WDS &&
4032 conf->type != IEEE80211_IF_TYPE_IBSS) 4131 conf->type != IEEE80211_IF_TYPE_IBSS)
@@ -4179,31 +4278,29 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
4179 struct b43_wl *wl = hw_to_b43_wl(hw); 4278 struct b43_wl *wl = hw_to_b43_wl(hw);
4180 struct sk_buff *beacon; 4279 struct sk_buff *beacon;
4181 unsigned long flags; 4280 unsigned long flags;
4182 struct ieee80211_tx_control txctl;
4183 4281
4184 /* We could modify the existing beacon and set the aid bit in 4282 /* We could modify the existing beacon and set the aid bit in
4185 * the TIM field, but that would probably require resizing and 4283 * the TIM field, but that would probably require resizing and
4186 * moving of data within the beacon template. 4284 * moving of data within the beacon template.
4187 * Simply request a new beacon and let mac80211 do the hard work. */ 4285 * Simply request a new beacon and let mac80211 do the hard work. */
4188 beacon = ieee80211_beacon_get(hw, wl->vif, &txctl); 4286 beacon = ieee80211_beacon_get(hw, wl->vif);
4189 if (unlikely(!beacon)) 4287 if (unlikely(!beacon))
4190 return -ENOMEM; 4288 return -ENOMEM;
4191 spin_lock_irqsave(&wl->irq_lock, flags); 4289 spin_lock_irqsave(&wl->irq_lock, flags);
4192 b43_update_templates(wl, beacon, &txctl); 4290 b43_update_templates(wl, beacon);
4193 spin_unlock_irqrestore(&wl->irq_lock, flags); 4291 spin_unlock_irqrestore(&wl->irq_lock, flags);
4194 4292
4195 return 0; 4293 return 0;
4196} 4294}
4197 4295
4198static int b43_op_ibss_beacon_update(struct ieee80211_hw *hw, 4296static int b43_op_ibss_beacon_update(struct ieee80211_hw *hw,
4199 struct sk_buff *beacon, 4297 struct sk_buff *beacon)
4200 struct ieee80211_tx_control *ctl)
4201{ 4298{
4202 struct b43_wl *wl = hw_to_b43_wl(hw); 4299 struct b43_wl *wl = hw_to_b43_wl(hw);
4203 unsigned long flags; 4300 unsigned long flags;
4204 4301
4205 spin_lock_irqsave(&wl->irq_lock, flags); 4302 spin_lock_irqsave(&wl->irq_lock, flags);
4206 b43_update_templates(wl, beacon, ctl); 4303 b43_update_templates(wl, beacon);
4207 spin_unlock_irqrestore(&wl->irq_lock, flags); 4304 spin_unlock_irqrestore(&wl->irq_lock, flags);
4208 4305
4209 return 0; 4306 return 0;
@@ -4530,10 +4627,10 @@ static int b43_wireless_init(struct ssb_device *dev)
4530 4627
4531 /* fill hw info */ 4628 /* fill hw info */
4532 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 4629 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
4533 IEEE80211_HW_RX_INCLUDES_FCS; 4630 IEEE80211_HW_RX_INCLUDES_FCS |
4534 hw->max_signal = 100; 4631 IEEE80211_HW_SIGNAL_DBM |
4535 hw->max_rssi = -110; 4632 IEEE80211_HW_NOISE_DBM;
4536 hw->max_noise = -110; 4633
4537 hw->queues = b43_modparam_qos ? 4 : 1; 4634 hw->queues = b43_modparam_qos ? 4 : 1;
4538 SET_IEEE80211_DEV(hw, dev->dev); 4635 SET_IEEE80211_DEV(hw, dev->dev);
4539 if (is_valid_ether_addr(sprom->et1mac)) 4636 if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 5230aeca78bf..dad23c42b422 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -114,4 +114,7 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason);
114#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ 114#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */
115void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); 115void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags);
116 116
117void b43_mac_suspend(struct b43_wldev *dev);
118void b43_mac_enable(struct b43_wldev *dev);
119
117#endif /* B43_MAIN_H_ */ 120#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/nphy.c
index 8695eb223476..644eed993bea 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/nphy.c
@@ -29,8 +29,6 @@
29#include "nphy.h" 29#include "nphy.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31 31
32#include <linux/delay.h>
33
34 32
35void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 33void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
36{//TODO 34{//TODO
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c
index de024dc03718..305d4cd6fd03 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/bitrev.h>
31 32
32#include "b43.h" 33#include "b43.h"
33#include "phy.h" 34#include "phy.h"
@@ -83,25 +84,9 @@ const u8 b43_radio_channel_codes_bg[] = {
83 72, 84, 84 72, 84,
84}; 85};
85 86
87#define bitrev4(tmp) (bitrev8(tmp) >> 4)
86static void b43_phy_initg(struct b43_wldev *dev); 88static void b43_phy_initg(struct b43_wldev *dev);
87 89
88/* Reverse the bits of a 4bit value.
89 * Example: 1101 is flipped 1011
90 */
91static u16 flip_4bit(u16 value)
92{
93 u16 flipped = 0x0000;
94
95 B43_WARN_ON(value & ~0x000F);
96
97 flipped |= (value & 0x0001) << 3;
98 flipped |= (value & 0x0002) << 1;
99 flipped |= (value & 0x0004) >> 1;
100 flipped |= (value & 0x0008) >> 3;
101
102 return flipped;
103}
104
105static void generate_rfatt_list(struct b43_wldev *dev, 90static void generate_rfatt_list(struct b43_wldev *dev,
106 struct b43_rfatt_list *list) 91 struct b43_rfatt_list *list)
107{ 92{
@@ -145,8 +130,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
145 {.att = 9,.with_padmix = 1,}, 130 {.att = 9,.with_padmix = 1,},
146 }; 131 };
147 132
148 if ((phy->type == B43_PHYTYPE_A && phy->rev < 5) || 133 if (!b43_has_hardware_pctl(phy)) {
149 (phy->type == B43_PHYTYPE_G && phy->rev < 6)) {
150 /* Software pctl */ 134 /* Software pctl */
151 list->list = rfatt_0; 135 list->list = rfatt_0;
152 list->len = ARRAY_SIZE(rfatt_0); 136 list->len = ARRAY_SIZE(rfatt_0);
@@ -158,7 +142,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
158 /* Hardware pctl */ 142 /* Hardware pctl */
159 list->list = rfatt_1; 143 list->list = rfatt_1;
160 list->len = ARRAY_SIZE(rfatt_1); 144 list->len = ARRAY_SIZE(rfatt_1);
161 list->min_val = 2; 145 list->min_val = 0;
162 list->max_val = 14; 146 list->max_val = 14;
163 return; 147 return;
164 } 148 }
@@ -346,6 +330,7 @@ void b43_set_txpower_g(struct b43_wldev *dev,
346 /* Save the values for later */ 330 /* Save the values for later */
347 phy->tx_control = tx_control; 331 phy->tx_control = tx_control;
348 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); 332 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt));
333 phy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
349 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); 334 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt));
350 335
351 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 336 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
@@ -559,11 +544,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
559 u16 tmp; 544 u16 tmp;
560 u8 rf, bb; 545 u8 rf, bb;
561 546
562 if (!lo->lo_measured) {
563 b43_phy_write(dev, 0x3FF, 0);
564 return;
565 }
566
567 for (rf = 0; rf < lo->rfatt_list.len; rf++) { 547 for (rf = 0; rf < lo->rfatt_list.len; rf++) {
568 for (bb = 0; bb < lo->bbatt_list.len; bb++) { 548 for (bb = 0; bb < lo->bbatt_list.len; bb++) {
569 if (nr_written >= 0x40) 549 if (nr_written >= 0x40)
@@ -581,42 +561,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
581 } 561 }
582} 562}
583 563
584/* GPHY_DC_Lookup_Table */
585void b43_gphy_dc_lt_init(struct b43_wldev *dev)
586{
587 struct b43_phy *phy = &dev->phy;
588 struct b43_txpower_lo_control *lo = phy->lo_control;
589 struct b43_loctl *loctl0;
590 struct b43_loctl *loctl1;
591 int i;
592 int rf_offset, bb_offset;
593 u16 tmp;
594
595 for (i = 0; i < lo->rfatt_list.len + lo->bbatt_list.len; i += 2) {
596 rf_offset = i / lo->rfatt_list.len;
597 bb_offset = i % lo->rfatt_list.len;
598
599 loctl0 = b43_get_lo_g_ctl(dev, &lo->rfatt_list.list[rf_offset],
600 &lo->bbatt_list.list[bb_offset]);
601 if (i + 1 < lo->rfatt_list.len * lo->bbatt_list.len) {
602 rf_offset = (i + 1) / lo->rfatt_list.len;
603 bb_offset = (i + 1) % lo->rfatt_list.len;
604
605 loctl1 =
606 b43_get_lo_g_ctl(dev,
607 &lo->rfatt_list.list[rf_offset],
608 &lo->bbatt_list.list[bb_offset]);
609 } else
610 loctl1 = loctl0;
611
612 tmp = ((u16) loctl0->q & 0xF);
613 tmp |= ((u16) loctl0->i & 0xF) << 4;
614 tmp |= ((u16) loctl1->q & 0xF) << 8;
615 tmp |= ((u16) loctl1->i & 0xF) << 12; //FIXME?
616 b43_phy_write(dev, 0x3A0 + (i / 2), tmp);
617 }
618}
619
620static void hardware_pctl_init_aphy(struct b43_wldev *dev) 564static void hardware_pctl_init_aphy(struct b43_wldev *dev)
621{ 565{
622 //TODO 566 //TODO
@@ -643,7 +587,7 @@ static void hardware_pctl_init_gphy(struct b43_wldev *dev)
643 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801) 587 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
644 & 0xFFBF); 588 & 0xFFBF);
645 589
646 b43_gphy_dc_lt_init(dev); 590 b43_gphy_dc_lt_init(dev, 1);
647} 591}
648 592
649/* HardwarePowerControl init for A and G PHY */ 593/* HardwarePowerControl init for A and G PHY */
@@ -931,109 +875,6 @@ static void b43_phy_inita(struct b43_wldev *dev)
931 } 875 }
932} 876}
933 877
934static void b43_phy_initb2(struct b43_wldev *dev)
935{
936 struct b43_phy *phy = &dev->phy;
937 u16 offset, val;
938
939 b43_write16(dev, 0x03EC, 0x3F22);
940 b43_phy_write(dev, 0x0020, 0x301C);
941 b43_phy_write(dev, 0x0026, 0x0000);
942 b43_phy_write(dev, 0x0030, 0x00C6);
943 b43_phy_write(dev, 0x0088, 0x3E00);
944 val = 0x3C3D;
945 for (offset = 0x0089; offset < 0x00A7; offset++) {
946 b43_phy_write(dev, offset, val);
947 val -= 0x0202;
948 }
949 b43_phy_write(dev, 0x03E4, 0x3000);
950 b43_radio_selectchannel(dev, phy->channel, 0);
951 if (phy->radio_ver != 0x2050) {
952 b43_radio_write16(dev, 0x0075, 0x0080);
953 b43_radio_write16(dev, 0x0079, 0x0081);
954 }
955 b43_radio_write16(dev, 0x0050, 0x0020);
956 b43_radio_write16(dev, 0x0050, 0x0023);
957 if (phy->radio_ver == 0x2050) {
958 b43_radio_write16(dev, 0x0050, 0x0020);
959 b43_radio_write16(dev, 0x005A, 0x0070);
960 b43_radio_write16(dev, 0x005B, 0x007B);
961 b43_radio_write16(dev, 0x005C, 0x00B0);
962 b43_radio_write16(dev, 0x007A, 0x000F);
963 b43_phy_write(dev, 0x0038, 0x0677);
964 b43_radio_init2050(dev);
965 }
966 b43_phy_write(dev, 0x0014, 0x0080);
967 b43_phy_write(dev, 0x0032, 0x00CA);
968 b43_phy_write(dev, 0x0032, 0x00CC);
969 b43_phy_write(dev, 0x0035, 0x07C2);
970 b43_lo_b_measure(dev);
971 b43_phy_write(dev, 0x0026, 0xCC00);
972 if (phy->radio_ver != 0x2050)
973 b43_phy_write(dev, 0x0026, 0xCE00);
974 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1000);
975 b43_phy_write(dev, 0x002A, 0x88A3);
976 if (phy->radio_ver != 0x2050)
977 b43_phy_write(dev, 0x002A, 0x88C2);
978 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
979 b43_phy_init_pctl(dev);
980}
981
982static void b43_phy_initb4(struct b43_wldev *dev)
983{
984 struct b43_phy *phy = &dev->phy;
985 u16 offset, val;
986
987 b43_write16(dev, 0x03EC, 0x3F22);
988 b43_phy_write(dev, 0x0020, 0x301C);
989 b43_phy_write(dev, 0x0026, 0x0000);
990 b43_phy_write(dev, 0x0030, 0x00C6);
991 b43_phy_write(dev, 0x0088, 0x3E00);
992 val = 0x3C3D;
993 for (offset = 0x0089; offset < 0x00A7; offset++) {
994 b43_phy_write(dev, offset, val);
995 val -= 0x0202;
996 }
997 b43_phy_write(dev, 0x03E4, 0x3000);
998 b43_radio_selectchannel(dev, phy->channel, 0);
999 if (phy->radio_ver != 0x2050) {
1000 b43_radio_write16(dev, 0x0075, 0x0080);
1001 b43_radio_write16(dev, 0x0079, 0x0081);
1002 }
1003 b43_radio_write16(dev, 0x0050, 0x0020);
1004 b43_radio_write16(dev, 0x0050, 0x0023);
1005 if (phy->radio_ver == 0x2050) {
1006 b43_radio_write16(dev, 0x0050, 0x0020);
1007 b43_radio_write16(dev, 0x005A, 0x0070);
1008 b43_radio_write16(dev, 0x005B, 0x007B);
1009 b43_radio_write16(dev, 0x005C, 0x00B0);
1010 b43_radio_write16(dev, 0x007A, 0x000F);
1011 b43_phy_write(dev, 0x0038, 0x0677);
1012 b43_radio_init2050(dev);
1013 }
1014 b43_phy_write(dev, 0x0014, 0x0080);
1015 b43_phy_write(dev, 0x0032, 0x00CA);
1016 if (phy->radio_ver == 0x2050)
1017 b43_phy_write(dev, 0x0032, 0x00E0);
1018 b43_phy_write(dev, 0x0035, 0x07C2);
1019
1020 b43_lo_b_measure(dev);
1021
1022 b43_phy_write(dev, 0x0026, 0xCC00);
1023 if (phy->radio_ver == 0x2050)
1024 b43_phy_write(dev, 0x0026, 0xCE00);
1025 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1100);
1026 b43_phy_write(dev, 0x002A, 0x88A3);
1027 if (phy->radio_ver == 0x2050)
1028 b43_phy_write(dev, 0x002A, 0x88C2);
1029 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
1030 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
1031 b43_calc_nrssi_slope(dev);
1032 b43_calc_nrssi_threshold(dev);
1033 }
1034 b43_phy_init_pctl(dev);
1035}
1036
1037static void b43_phy_initb5(struct b43_wldev *dev) 878static void b43_phy_initb5(struct b43_wldev *dev)
1038{ 879{
1039 struct ssb_bus *bus = dev->dev->bus; 880 struct ssb_bus *bus = dev->dev->bus;
@@ -1259,19 +1100,9 @@ static void b43_phy_initb6(struct b43_wldev *dev)
1259 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0) 1100 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
1260 | 0x0004); 1101 | 0x0004);
1261 } 1102 }
1262 if (phy->type == B43_PHYTYPE_B) { 1103 if (phy->type == B43_PHYTYPE_B)
1263 b43_write16(dev, 0x03E6, 0x8140); 1104 B43_WARN_ON(1);
1264 b43_phy_write(dev, 0x0016, 0x0410); 1105 else if (phy->type == B43_PHYTYPE_G)
1265 b43_phy_write(dev, 0x0017, 0x0820);
1266 b43_phy_write(dev, 0x0062, 0x0007);
1267 b43_radio_init2050(dev);
1268 b43_lo_g_measure(dev);
1269 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
1270 b43_calc_nrssi_slope(dev);
1271 b43_calc_nrssi_threshold(dev);
1272 }
1273 b43_phy_init_pctl(dev);
1274 } else if (phy->type == B43_PHYTYPE_G)
1275 b43_write16(dev, 0x03E6, 0x0); 1106 b43_write16(dev, 0x03E6, 0x0);
1276} 1107}
1277 1108
@@ -1534,34 +1365,31 @@ static void b43_phy_initg(struct b43_wldev *dev)
1534 else 1365 else
1535 b43_radio_write16(dev, 0x0078, phy->initval); 1366 b43_radio_write16(dev, 0x0078, phy->initval);
1536 } 1367 }
1537 if (phy->lo_control->tx_bias == 0xFF) { 1368 b43_lo_g_init(dev);
1538 b43_lo_g_measure(dev); 1369 if (has_tx_magnification(phy)) {
1370 b43_radio_write16(dev, 0x52,
1371 (b43_radio_read16(dev, 0x52) & 0xFF00)
1372 | phy->lo_control->tx_bias | phy->
1373 lo_control->tx_magn);
1539 } else { 1374 } else {
1540 if (has_tx_magnification(phy)) { 1375 b43_radio_write16(dev, 0x52,
1541 b43_radio_write16(dev, 0x52, 1376 (b43_radio_read16(dev, 0x52) & 0xFFF0)
1542 (b43_radio_read16(dev, 0x52) & 0xFF00) 1377 | phy->lo_control->tx_bias);
1543 | phy->lo_control->tx_bias | phy->
1544 lo_control->tx_magn);
1545 } else {
1546 b43_radio_write16(dev, 0x52,
1547 (b43_radio_read16(dev, 0x52) & 0xFFF0)
1548 | phy->lo_control->tx_bias);
1549 }
1550 if (phy->rev >= 6) {
1551 b43_phy_write(dev, B43_PHY_CCK(0x36),
1552 (b43_phy_read(dev, B43_PHY_CCK(0x36))
1553 & 0x0FFF) | (phy->lo_control->
1554 tx_bias << 12));
1555 }
1556 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
1557 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
1558 else
1559 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
1560 if (phy->rev < 2)
1561 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
1562 else
1563 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
1564 } 1378 }
1379 if (phy->rev >= 6) {
1380 b43_phy_write(dev, B43_PHY_CCK(0x36),
1381 (b43_phy_read(dev, B43_PHY_CCK(0x36))
1382 & 0x0FFF) | (phy->lo_control->
1383 tx_bias << 12));
1384 }
1385 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
1386 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
1387 else
1388 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
1389 if (phy->rev < 2)
1390 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
1391 else
1392 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
1565 if (phy->gmode || phy->rev >= 2) { 1393 if (phy->gmode || phy->rev >= 2) {
1566 b43_lo_g_adjust(dev); 1394 b43_lo_g_adjust(dev);
1567 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); 1395 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
@@ -1572,7 +1400,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
1572 * the value 0x7FFFFFFF here. I think that is some weird 1400 * the value 0x7FFFFFFF here. I think that is some weird
1573 * compiler optimization in the original driver. 1401 * compiler optimization in the original driver.
1574 * Essentially, what we do here is resetting all NRSSI LT 1402 * Essentially, what we do here is resetting all NRSSI LT
1575 * entries to -32 (see the limit_value() in nrssi_hw_update()) 1403 * entries to -32 (see the clamp_val() in nrssi_hw_update())
1576 */ 1404 */
1577 b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? 1405 b43_nrssi_hw_update(dev, 0xFFFF); //FIXME?
1578 b43_calc_nrssi_threshold(dev); 1406 b43_calc_nrssi_threshold(dev);
@@ -1634,13 +1462,13 @@ static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
1634 switch (phy->type) { 1462 switch (phy->type) {
1635 case B43_PHYTYPE_A: 1463 case B43_PHYTYPE_A:
1636 tmp += 0x80; 1464 tmp += 0x80;
1637 tmp = limit_value(tmp, 0x00, 0xFF); 1465 tmp = clamp_val(tmp, 0x00, 0xFF);
1638 dbm = phy->tssi2dbm[tmp]; 1466 dbm = phy->tssi2dbm[tmp];
1639 //TODO: There's a FIXME on the specs 1467 //TODO: There's a FIXME on the specs
1640 break; 1468 break;
1641 case B43_PHYTYPE_B: 1469 case B43_PHYTYPE_B:
1642 case B43_PHYTYPE_G: 1470 case B43_PHYTYPE_G:
1643 tmp = limit_value(tmp, 0x00, 0x3F); 1471 tmp = clamp_val(tmp, 0x00, 0x3F);
1644 dbm = phy->tssi2dbm[tmp]; 1472 dbm = phy->tssi2dbm[tmp];
1645 break; 1473 break;
1646 default: 1474 default:
@@ -1699,8 +1527,8 @@ void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
1699 break; 1527 break;
1700 } 1528 }
1701 1529
1702 *_rfatt = limit_value(rfatt, rf_min, rf_max); 1530 *_rfatt = clamp_val(rfatt, rf_min, rf_max);
1703 *_bbatt = limit_value(bbatt, bb_min, bb_max); 1531 *_bbatt = clamp_val(bbatt, bb_min, bb_max);
1704} 1532}
1705 1533
1706/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ 1534/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
@@ -1795,7 +1623,7 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1795 /* Get desired power (in Q5.2) */ 1623 /* Get desired power (in Q5.2) */
1796 desired_pwr = INT_TO_Q52(phy->power_level); 1624 desired_pwr = INT_TO_Q52(phy->power_level);
1797 /* And limit it. max_pwr already is Q5.2 */ 1625 /* And limit it. max_pwr already is Q5.2 */
1798 desired_pwr = limit_value(desired_pwr, 0, max_pwr); 1626 desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
1799 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 1627 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
1800 b43dbg(dev->wl, 1628 b43dbg(dev->wl,
1801 "Current TX power output: " Q52_FMT 1629 "Current TX power output: " Q52_FMT
@@ -1821,10 +1649,8 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1821 bbatt_delta -= 4 * rfatt_delta; 1649 bbatt_delta -= 4 * rfatt_delta;
1822 1650
1823 /* So do we finally need to adjust something? */ 1651 /* So do we finally need to adjust something? */
1824 if ((rfatt_delta == 0) && (bbatt_delta == 0)) { 1652 if ((rfatt_delta == 0) && (bbatt_delta == 0))
1825 b43_lo_g_ctl_mark_cur_used(dev);
1826 return; 1653 return;
1827 }
1828 1654
1829 /* Calculate the new attenuation values. */ 1655 /* Calculate the new attenuation values. */
1830 bbatt = phy->bbatt.att; 1656 bbatt = phy->bbatt.att;
@@ -1870,7 +1696,6 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1870 b43_radio_lock(dev); 1696 b43_radio_lock(dev);
1871 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, 1697 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
1872 phy->tx_control); 1698 phy->tx_control);
1873 b43_lo_g_ctl_mark_cur_used(dev);
1874 b43_radio_unlock(dev); 1699 b43_radio_unlock(dev);
1875 b43_phy_unlock(dev); 1700 b43_phy_unlock(dev);
1876 break; 1701 break;
@@ -1908,7 +1733,7 @@ static inline
1908 f = q; 1733 f = q;
1909 i++; 1734 i++;
1910 } while (delta >= 2); 1735 } while (delta >= 2);
1911 entry[index] = limit_value(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128); 1736 entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
1912 return 0; 1737 return 0;
1913} 1738}
1914 1739
@@ -2007,24 +1832,6 @@ int b43_phy_init(struct b43_wldev *dev)
2007 else 1832 else
2008 unsupported = 1; 1833 unsupported = 1;
2009 break; 1834 break;
2010 case B43_PHYTYPE_B:
2011 switch (phy->rev) {
2012 case 2:
2013 b43_phy_initb2(dev);
2014 break;
2015 case 4:
2016 b43_phy_initb4(dev);
2017 break;
2018 case 5:
2019 b43_phy_initb5(dev);
2020 break;
2021 case 6:
2022 b43_phy_initb6(dev);
2023 break;
2024 default:
2025 unsupported = 1;
2026 }
2027 break;
2028 case B43_PHYTYPE_G: 1835 case B43_PHYTYPE_G:
2029 b43_phy_initg(dev); 1836 b43_phy_initg(dev);
2030 break; 1837 break;
@@ -2452,7 +2259,7 @@ void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
2452 for (i = 0; i < 64; i++) { 2259 for (i = 0; i < 64; i++) {
2453 tmp = b43_nrssi_hw_read(dev, i); 2260 tmp = b43_nrssi_hw_read(dev, i);
2454 tmp -= val; 2261 tmp -= val;
2455 tmp = limit_value(tmp, -32, 31); 2262 tmp = clamp_val(tmp, -32, 31);
2456 b43_nrssi_hw_write(dev, i, tmp); 2263 b43_nrssi_hw_write(dev, i, tmp);
2457 } 2264 }
2458} 2265}
@@ -2469,7 +2276,7 @@ void b43_nrssi_mem_update(struct b43_wldev *dev)
2469 tmp = (i - delta) * phy->nrssislope; 2276 tmp = (i - delta) * phy->nrssislope;
2470 tmp /= 0x10000; 2277 tmp /= 0x10000;
2471 tmp += 0x3A; 2278 tmp += 0x3A;
2472 tmp = limit_value(tmp, 0, 0x3F); 2279 tmp = clamp_val(tmp, 0, 0x3F);
2473 phy->nrssi_lt[i] = tmp; 2280 phy->nrssi_lt[i] = tmp;
2474 } 2281 }
2475} 2282}
@@ -2906,7 +2713,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2906 } else 2713 } else
2907 threshold = phy->nrssi[1] - 5; 2714 threshold = phy->nrssi[1] - 5;
2908 2715
2909 threshold = limit_value(threshold, 0, 0x3E); 2716 threshold = clamp_val(threshold, 0, 0x3E);
2910 b43_phy_read(dev, 0x0020); /* dummy read */ 2717 b43_phy_read(dev, 0x0020); /* dummy read */
2911 b43_phy_write(dev, 0x0020, 2718 b43_phy_write(dev, 0x0020,
2912 (((u16) threshold) << 8) | 0x001C); 2719 (((u16) threshold) << 8) | 0x001C);
@@ -2957,7 +2764,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2957 else 2764 else
2958 a += 32; 2765 a += 32;
2959 a = a >> 6; 2766 a = a >> 6;
2960 a = limit_value(a, -31, 31); 2767 a = clamp_val(a, -31, 31);
2961 2768
2962 b = b * (phy->nrssi[1] - phy->nrssi[0]); 2769 b = b * (phy->nrssi[1] - phy->nrssi[0]);
2963 b += (phy->nrssi[0] << 6); 2770 b += (phy->nrssi[0] << 6);
@@ -2966,7 +2773,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2966 else 2773 else
2967 b += 32; 2774 b += 32;
2968 b = b >> 6; 2775 b = b >> 6;
2969 b = limit_value(b, -31, 31); 2776 b = clamp_val(b, -31, 31);
2970 2777
2971 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; 2778 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
2972 tmp_u16 |= ((u32) b & 0x0000003F); 2779 tmp_u16 |= ((u32) b & 0x0000003F);
@@ -3069,13 +2876,13 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
3069 } 2876 }
3070 radio_stacksave(0x0078); 2877 radio_stacksave(0x0078);
3071 tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); 2878 tmp = (b43_radio_read16(dev, 0x0078) & 0x001E);
3072 flipped = flip_4bit(tmp); 2879 B43_WARN_ON(tmp > 15);
2880 flipped = bitrev4(tmp);
3073 if (flipped < 10 && flipped >= 8) 2881 if (flipped < 10 && flipped >= 8)
3074 flipped = 7; 2882 flipped = 7;
3075 else if (flipped >= 10) 2883 else if (flipped >= 10)
3076 flipped -= 3; 2884 flipped -= 3;
3077 flipped = flip_4bit(flipped); 2885 flipped = (bitrev4(flipped) << 1) | 0x0020;
3078 flipped = (flipped << 1) | 0x0020;
3079 b43_radio_write16(dev, 0x0078, flipped); 2886 b43_radio_write16(dev, 0x0078, flipped);
3080 2887
3081 b43_calc_nrssi_threshold(dev); 2888 b43_calc_nrssi_threshold(dev);
@@ -3708,7 +3515,7 @@ u16 b43_radio_init2050(struct b43_wldev *dev)
3708 tmp1 >>= 9; 3515 tmp1 >>= 9;
3709 3516
3710 for (i = 0; i < 16; i++) { 3517 for (i = 0; i < 16; i++) {
3711 radio78 = ((flip_4bit(i) << 1) | 0x20); 3518 radio78 = (bitrev4(i) << 1) | 0x0020;
3712 b43_radio_write16(dev, 0x78, radio78); 3519 b43_radio_write16(dev, 0x78, radio78);
3713 udelay(10); 3520 udelay(10);
3714 for (j = 0; j < 16; j++) { 3521 for (j = 0; j < 16; j++) {
diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h
index 6d165d822175..4aab10903529 100644
--- a/drivers/net/wireless/b43/phy.h
+++ b/drivers/net/wireless/b43/phy.h
@@ -225,7 +225,6 @@ int b43_phy_init(struct b43_wldev *dev);
225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna); 225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna);
226 226
227void b43_phy_xmitpower(struct b43_wldev *dev); 227void b43_phy_xmitpower(struct b43_wldev *dev);
228void b43_gphy_dc_lt_init(struct b43_wldev *dev);
229 228
230/* Returns the boolean whether the board has HardwarePowerControl */ 229/* Returns the boolean whether the board has HardwarePowerControl */
231bool b43_has_hardware_pctl(struct b43_phy *phy); 230bool b43_has_hardware_pctl(struct b43_phy *phy);
@@ -252,6 +251,14 @@ struct b43_rfatt_list {
252 u8 max_val; 251 u8 max_val;
253}; 252};
254 253
254/* Returns true, if the values are the same. */
255static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
256 const struct b43_rfatt *b)
257{
258 return ((a->att == b->att) &&
259 (a->with_padmix == b->with_padmix));
260}
261
255/* Baseband Attenuation */ 262/* Baseband Attenuation */
256struct b43_bbatt { 263struct b43_bbatt {
257 u8 att; /* Attenuation value */ 264 u8 att; /* Attenuation value */
@@ -265,6 +272,13 @@ struct b43_bbatt_list {
265 u8 max_val; 272 u8 max_val;
266}; 273};
267 274
275/* Returns true, if the values are the same. */
276static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
277 const struct b43_bbatt *b)
278{
279 return (a->att == b->att);
280}
281
268/* tx_control bits. */ 282/* tx_control bits. */
269#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */ 283#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
270#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */ 284#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcacafb04346..8b1555d95f1c 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -446,29 +446,27 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
446} 446}
447 447
448static int pio_tx_frame(struct b43_pio_txqueue *q, 448static int pio_tx_frame(struct b43_pio_txqueue *q,
449 struct sk_buff *skb, 449 struct sk_buff *skb)
450 struct ieee80211_tx_control *ctl)
451{ 450{
452 struct b43_pio_txpacket *pack; 451 struct b43_pio_txpacket *pack;
453 struct b43_txhdr txhdr; 452 struct b43_txhdr txhdr;
454 u16 cookie; 453 u16 cookie;
455 int err; 454 int err;
456 unsigned int hdrlen; 455 unsigned int hdrlen;
456 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
457 457
458 B43_WARN_ON(list_empty(&q->packets_list)); 458 B43_WARN_ON(list_empty(&q->packets_list));
459 pack = list_entry(q->packets_list.next, 459 pack = list_entry(q->packets_list.next,
460 struct b43_pio_txpacket, list); 460 struct b43_pio_txpacket, list);
461 memset(&pack->txstat, 0, sizeof(pack->txstat));
462 memcpy(&pack->txstat.control, ctl, sizeof(*ctl));
463 461
464 cookie = generate_cookie(q, pack); 462 cookie = generate_cookie(q, pack);
465 hdrlen = b43_txhdr_size(q->dev); 463 hdrlen = b43_txhdr_size(q->dev);
466 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data, 464 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data,
467 skb->len, ctl, cookie); 465 skb->len, info, cookie);
468 if (err) 466 if (err)
469 return err; 467 return err;
470 468
471 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 469 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
472 /* Tell the firmware about the cookie of the last 470 /* Tell the firmware about the cookie of the last
473 * mcast frame, so it can clear the more-data bit in it. */ 471 * mcast frame, so it can clear the more-data bit in it. */
474 b43_shm_write16(q->dev, B43_SHM_SHARED, 472 b43_shm_write16(q->dev, B43_SHM_SHARED,
@@ -492,17 +490,18 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
492 return 0; 490 return 0;
493} 491}
494 492
495int b43_pio_tx(struct b43_wldev *dev, 493int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
496 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
497{ 494{
498 struct b43_pio_txqueue *q; 495 struct b43_pio_txqueue *q;
499 struct ieee80211_hdr *hdr; 496 struct ieee80211_hdr *hdr;
500 unsigned long flags; 497 unsigned long flags;
501 unsigned int hdrlen, total_len; 498 unsigned int hdrlen, total_len;
502 int err = 0; 499 int err = 0;
500 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
503 501
504 hdr = (struct ieee80211_hdr *)skb->data; 502 hdr = (struct ieee80211_hdr *)skb->data;
505 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 503
504 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
506 /* The multicast queue will be sent after the DTIM. */ 505 /* The multicast queue will be sent after the DTIM. */
507 q = dev->pio.tx_queue_mcast; 506 q = dev->pio.tx_queue_mcast;
508 /* Set the frame More-Data bit. Ucode will clear it 507 /* Set the frame More-Data bit. Ucode will clear it
@@ -510,7 +509,7 @@ int b43_pio_tx(struct b43_wldev *dev,
510 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 509 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
511 } else { 510 } else {
512 /* Decide by priority where to put this frame. */ 511 /* Decide by priority where to put this frame. */
513 q = select_queue_by_priority(dev, ctl->queue); 512 q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
514 } 513 }
515 514
516 spin_lock_irqsave(&q->lock, flags); 515 spin_lock_irqsave(&q->lock, flags);
@@ -533,7 +532,7 @@ int b43_pio_tx(struct b43_wldev *dev,
533 if (total_len > (q->buffer_size - q->buffer_used)) { 532 if (total_len > (q->buffer_size - q->buffer_used)) {
534 /* Not enough memory on the queue. */ 533 /* Not enough memory on the queue. */
535 err = -EBUSY; 534 err = -EBUSY;
536 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 535 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
537 q->stopped = 1; 536 q->stopped = 1;
538 goto out_unlock; 537 goto out_unlock;
539 } 538 }
@@ -541,9 +540,9 @@ int b43_pio_tx(struct b43_wldev *dev,
541 /* Assign the queue number to the ring (if not already done before) 540 /* Assign the queue number to the ring (if not already done before)
542 * so TX status handling can use it. The mac80211-queue to b43-queue 541 * so TX status handling can use it. The mac80211-queue to b43-queue
543 * mapping is static, so we don't need to store it per frame. */ 542 * mapping is static, so we don't need to store it per frame. */
544 q->queue_prio = ctl->queue; 543 q->queue_prio = skb_get_queue_mapping(skb);
545 544
546 err = pio_tx_frame(q, skb, ctl); 545 err = pio_tx_frame(q, skb);
547 if (unlikely(err == -ENOKEY)) { 546 if (unlikely(err == -ENOKEY)) {
548 /* Drop this packet, as we don't have the encryption key 547 /* Drop this packet, as we don't have the encryption key
549 * anymore and must not transmit it unencrypted. */ 548 * anymore and must not transmit it unencrypted. */
@@ -561,7 +560,7 @@ int b43_pio_tx(struct b43_wldev *dev,
561 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || 560 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
562 (q->free_packet_slots == 0)) { 561 (q->free_packet_slots == 0)) {
563 /* The queue is full. */ 562 /* The queue is full. */
564 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 563 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
565 q->stopped = 1; 564 q->stopped = 1;
566 } 565 }
567 566
@@ -578,6 +577,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
578 struct b43_pio_txqueue *q; 577 struct b43_pio_txqueue *q;
579 struct b43_pio_txpacket *pack = NULL; 578 struct b43_pio_txpacket *pack = NULL;
580 unsigned int total_len; 579 unsigned int total_len;
580 struct ieee80211_tx_info *info;
581 581
582 q = parse_cookie(dev, status->cookie, &pack); 582 q = parse_cookie(dev, status->cookie, &pack);
583 if (unlikely(!q)) 583 if (unlikely(!q))
@@ -586,15 +586,17 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
586 586
587 spin_lock(&q->lock); /* IRQs are already disabled. */ 587 spin_lock(&q->lock); /* IRQs are already disabled. */
588 588
589 b43_fill_txstatus_report(&(pack->txstat), status); 589 info = (void *)pack->skb;
590 memset(&info->status, 0, sizeof(info->status));
591
592 b43_fill_txstatus_report(info, status);
590 593
591 total_len = pack->skb->len + b43_txhdr_size(dev); 594 total_len = pack->skb->len + b43_txhdr_size(dev);
592 total_len = roundup(total_len, 4); 595 total_len = roundup(total_len, 4);
593 q->buffer_used -= total_len; 596 q->buffer_used -= total_len;
594 q->free_packet_slots += 1; 597 q->free_packet_slots += 1;
595 598
596 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb, 599 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
597 &(pack->txstat));
598 pack->skb = NULL; 600 pack->skb = NULL;
599 list_add(&pack->list, &q->packets_list); 601 list_add(&pack->list, &q->packets_list);
600 602
@@ -611,18 +613,16 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
611{ 613{
612 const int nr_queues = dev->wl->hw->queues; 614 const int nr_queues = dev->wl->hw->queues;
613 struct b43_pio_txqueue *q; 615 struct b43_pio_txqueue *q;
614 struct ieee80211_tx_queue_stats_data *data;
615 unsigned long flags; 616 unsigned long flags;
616 int i; 617 int i;
617 618
618 for (i = 0; i < nr_queues; i++) { 619 for (i = 0; i < nr_queues; i++) {
619 data = &(stats->data[i]);
620 q = select_queue_by_priority(dev, i); 620 q = select_queue_by_priority(dev, i);
621 621
622 spin_lock_irqsave(&q->lock, flags); 622 spin_lock_irqsave(&q->lock, flags);
623 data->len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots; 623 stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
624 data->limit = B43_PIO_MAX_NR_TXPACKETS; 624 stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
625 data->count = q->nr_tx_packets; 625 stats[i].count = q->nr_tx_packets;
626 spin_unlock_irqrestore(&q->lock, flags); 626 spin_unlock_irqrestore(&q->lock, flags);
627 } 627 }
628} 628}
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index e2ec676cc9e4..6c174c91ca20 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -62,8 +62,6 @@ struct b43_pio_txpacket {
62 struct b43_pio_txqueue *queue; 62 struct b43_pio_txqueue *queue;
63 /* The TX data packet. */ 63 /* The TX data packet. */
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 /* The status meta data. */
66 struct ieee80211_tx_status txstat;
67 /* Index in the (struct b43_pio_txqueue)->packets array. */ 65 /* Index in the (struct b43_pio_txqueue)->packets array. */
68 u8 index; 66 u8 index;
69 67
@@ -167,8 +165,7 @@ int b43_pio_init(struct b43_wldev *dev);
167void b43_pio_stop(struct b43_wldev *dev); 165void b43_pio_stop(struct b43_wldev *dev);
168void b43_pio_free(struct b43_wldev *dev); 166void b43_pio_free(struct b43_wldev *dev);
169 167
170int b43_pio_tx(struct b43_wldev *dev, 168int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
171 struct sk_buff *skb, struct ieee80211_tx_control *ctl);
172void b43_pio_handle_txstatus(struct b43_wldev *dev, 169void b43_pio_handle_txstatus(struct b43_wldev *dev,
173 const struct b43_txstatus *status); 170 const struct b43_txstatus *status);
174void b43_pio_get_tx_stats(struct b43_wldev *dev, 171void b43_pio_get_tx_stats(struct b43_wldev *dev,
@@ -193,8 +190,7 @@ static inline void b43_pio_stop(struct b43_wldev *dev)
193{ 190{
194} 191}
195static inline int b43_pio_tx(struct b43_wldev *dev, 192static inline int b43_pio_tx(struct b43_wldev *dev,
196 struct sk_buff *skb, 193 struct sk_buff *skb)
197 struct ieee80211_tx_control *ctl)
198{ 194{
199 return 0; 195 return 0;
200} 196}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 19aefbfb2c93..f9e1cff2aecb 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -185,14 +185,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
185 u8 *_txhdr, 185 u8 *_txhdr,
186 const unsigned char *fragment_data, 186 const unsigned char *fragment_data,
187 unsigned int fragment_len, 187 unsigned int fragment_len,
188 const struct ieee80211_tx_control *txctl, 188 const struct ieee80211_tx_info *info,
189 u16 cookie) 189 u16 cookie)
190{ 190{
191 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr; 191 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr;
192 const struct b43_phy *phy = &dev->phy; 192 const struct b43_phy *phy = &dev->phy;
193 const struct ieee80211_hdr *wlhdr = 193 const struct ieee80211_hdr *wlhdr =
194 (const struct ieee80211_hdr *)fragment_data; 194 (const struct ieee80211_hdr *)fragment_data;
195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT));
196 u16 fctl = le16_to_cpu(wlhdr->frame_control); 196 u16 fctl = le16_to_cpu(wlhdr->frame_control);
197 struct ieee80211_rate *fbrate; 197 struct ieee80211_rate *fbrate;
198 u8 rate, rate_fb; 198 u8 rate, rate_fb;
@@ -201,13 +201,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
201 u32 mac_ctl = 0; 201 u32 mac_ctl = 0;
202 u16 phy_ctl = 0; 202 u16 phy_ctl = 0;
203 u8 extra_ft = 0; 203 u8 extra_ft = 0;
204 struct ieee80211_rate *txrate;
204 205
205 memset(txhdr, 0, sizeof(*txhdr)); 206 memset(txhdr, 0, sizeof(*txhdr));
206 207
207 WARN_ON(!txctl->tx_rate); 208 txrate = ieee80211_get_tx_rate(dev->wl->hw, info);
208 rate = txctl->tx_rate ? txctl->tx_rate->hw_value : B43_CCK_RATE_1MB; 209 rate = txrate ? txrate->hw_value : B43_CCK_RATE_1MB;
209 rate_ofdm = b43_is_ofdm_rate(rate); 210 rate_ofdm = b43_is_ofdm_rate(rate);
210 fbrate = txctl->alt_retry_rate ? : txctl->tx_rate; 211 fbrate = ieee80211_get_alt_retry_rate(dev->wl->hw, info) ? : txrate;
211 rate_fb = fbrate->hw_value; 212 rate_fb = fbrate->hw_value;
212 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); 213 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb);
213 214
@@ -227,15 +228,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
227 * use the original dur_id field. */ 228 * use the original dur_id field. */
228 txhdr->dur_fb = wlhdr->duration_id; 229 txhdr->dur_fb = wlhdr->duration_id;
229 } else { 230 } else {
230 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 231 txhdr->dur_fb = ieee80211_generic_frame_duration(
231 txctl->vif, 232 dev->wl->hw, info->control.vif, fragment_len, fbrate);
232 fragment_len,
233 fbrate);
234 } 233 }
235 234
236 plcp_fragment_len = fragment_len + FCS_LEN; 235 plcp_fragment_len = fragment_len + FCS_LEN;
237 if (use_encryption) { 236 if (use_encryption) {
238 u8 key_idx = (u16) (txctl->key_idx); 237 u8 key_idx = info->control.hw_key->hw_key_idx;
239 struct b43_key *key; 238 struct b43_key *key;
240 int wlhdr_len; 239 int wlhdr_len;
241 size_t iv_len; 240 size_t iv_len;
@@ -253,7 +252,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
253 } 252 }
254 253
255 /* Hardware appends ICV. */ 254 /* Hardware appends ICV. */
256 plcp_fragment_len += txctl->icv_len; 255 plcp_fragment_len += info->control.icv_len;
257 256
258 key_idx = b43_kidx_to_fw(dev, key_idx); 257 key_idx = b43_kidx_to_fw(dev, key_idx);
259 mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) & 258 mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) &
@@ -261,7 +260,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
261 mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) & 260 mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) &
262 B43_TXH_MAC_KEYALG; 261 B43_TXH_MAC_KEYALG;
263 wlhdr_len = ieee80211_get_hdrlen(fctl); 262 wlhdr_len = ieee80211_get_hdrlen(fctl);
264 iv_len = min((size_t) txctl->iv_len, 263 iv_len = min((size_t) info->control.iv_len,
265 ARRAY_SIZE(txhdr->iv)); 264 ARRAY_SIZE(txhdr->iv));
266 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); 265 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
267 } 266 }
@@ -292,10 +291,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
292 phy_ctl |= B43_TXH_PHY_ENC_OFDM; 291 phy_ctl |= B43_TXH_PHY_ENC_OFDM;
293 else 292 else
294 phy_ctl |= B43_TXH_PHY_ENC_CCK; 293 phy_ctl |= B43_TXH_PHY_ENC_CCK;
295 if (txctl->flags & IEEE80211_TXCTL_SHORT_PREAMBLE) 294 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
296 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 295 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
297 296
298 switch (b43_ieee80211_antenna_sanitize(dev, txctl->antenna_sel_tx)) { 297 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) {
299 case 0: /* Default */ 298 case 0: /* Default */
300 phy_ctl |= B43_TXH_PHY_ANT01AUTO; 299 phy_ctl |= B43_TXH_PHY_ANT01AUTO;
301 break; 300 break;
@@ -316,34 +315,36 @@ int b43_generate_txhdr(struct b43_wldev *dev,
316 } 315 }
317 316
318 /* MAC control */ 317 /* MAC control */
319 if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) 318 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
320 mac_ctl |= B43_TXH_MAC_ACK; 319 mac_ctl |= B43_TXH_MAC_ACK;
321 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && 320 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
322 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) 321 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)))
323 mac_ctl |= B43_TXH_MAC_HWSEQ; 322 mac_ctl |= B43_TXH_MAC_HWSEQ;
324 if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) 323 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
325 mac_ctl |= B43_TXH_MAC_STMSDU; 324 mac_ctl |= B43_TXH_MAC_STMSDU;
326 if (phy->type == B43_PHYTYPE_A) 325 if (phy->type == B43_PHYTYPE_A)
327 mac_ctl |= B43_TXH_MAC_5GHZ; 326 mac_ctl |= B43_TXH_MAC_5GHZ;
328 if (txctl->flags & IEEE80211_TXCTL_LONG_RETRY_LIMIT) 327 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
329 mac_ctl |= B43_TXH_MAC_LONGFRAME; 328 mac_ctl |= B43_TXH_MAC_LONGFRAME;
330 329
331 /* Generate the RTS or CTS-to-self frame */ 330 /* Generate the RTS or CTS-to-self frame */
332 if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 331 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
333 (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 332 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
334 unsigned int len; 333 unsigned int len;
335 struct ieee80211_hdr *hdr; 334 struct ieee80211_hdr *hdr;
336 int rts_rate, rts_rate_fb; 335 int rts_rate, rts_rate_fb;
337 int rts_rate_ofdm, rts_rate_fb_ofdm; 336 int rts_rate_ofdm, rts_rate_fb_ofdm;
338 struct b43_plcp_hdr6 *plcp; 337 struct b43_plcp_hdr6 *plcp;
338 struct ieee80211_rate *rts_cts_rate;
339 339
340 WARN_ON(!txctl->rts_cts_rate); 340 rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info);
341 rts_rate = txctl->rts_cts_rate ? txctl->rts_cts_rate->hw_value : B43_CCK_RATE_1MB; 341
342 rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
342 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); 343 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
343 rts_rate_fb = b43_calc_fallback_rate(rts_rate); 344 rts_rate_fb = b43_calc_fallback_rate(rts_rate);
344 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); 345 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
345 346
346 if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 347 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
347 struct ieee80211_cts *cts; 348 struct ieee80211_cts *cts;
348 349
349 if (b43_is_old_txhdr_format(dev)) { 350 if (b43_is_old_txhdr_format(dev)) {
@@ -353,9 +354,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
353 cts = (struct ieee80211_cts *) 354 cts = (struct ieee80211_cts *)
354 (txhdr->new_format.rts_frame); 355 (txhdr->new_format.rts_frame);
355 } 356 }
356 ieee80211_ctstoself_get(dev->wl->hw, txctl->vif, 357 ieee80211_ctstoself_get(dev->wl->hw, info->control.vif,
357 fragment_data, fragment_len, 358 fragment_data, fragment_len,
358 txctl, cts); 359 info, cts);
359 mac_ctl |= B43_TXH_MAC_SENDCTS; 360 mac_ctl |= B43_TXH_MAC_SENDCTS;
360 len = sizeof(struct ieee80211_cts); 361 len = sizeof(struct ieee80211_cts);
361 } else { 362 } else {
@@ -368,9 +369,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
368 rts = (struct ieee80211_rts *) 369 rts = (struct ieee80211_rts *)
369 (txhdr->new_format.rts_frame); 370 (txhdr->new_format.rts_frame);
370 } 371 }
371 ieee80211_rts_get(dev->wl->hw, txctl->vif, 372 ieee80211_rts_get(dev->wl->hw, info->control.vif,
372 fragment_data, fragment_len, 373 fragment_data, fragment_len,
373 txctl, rts); 374 info, rts);
374 mac_ctl |= B43_TXH_MAC_SENDRTS; 375 mac_ctl |= B43_TXH_MAC_SENDRTS;
375 len = sizeof(struct ieee80211_rts); 376 len = sizeof(struct ieee80211_rts);
376 } 377 }
@@ -581,12 +582,11 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
581 // and also find out what the maximum possible value is. 582 // and also find out what the maximum possible value is.
582 // Fill status.ssi and status.signal fields. 583 // Fill status.ssi and status.signal fields.
583 } else { 584 } else {
584 status.ssi = b43_rssi_postprocess(dev, rxhdr->jssi, 585 status.signal = b43_rssi_postprocess(dev, rxhdr->jssi,
585 (phystat0 & B43_RX_PHYST0_OFDM), 586 (phystat0 & B43_RX_PHYST0_OFDM),
586 (phystat0 & B43_RX_PHYST0_GAINCTL), 587 (phystat0 & B43_RX_PHYST0_GAINCTL),
587 (phystat3 & B43_RX_PHYST3_TRSTATE)); 588 (phystat3 & B43_RX_PHYST3_TRSTATE));
588 /* the next line looks wrong, but is what mac80211 wants */ 589 status.qual = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
589 status.signal = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
590 } 590 }
591 591
592 if (phystat0 & B43_RX_PHYST0_OFDM) 592 if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -685,27 +685,27 @@ void b43_handle_txstatus(struct b43_wldev *dev,
685/* Fill out the mac80211 TXstatus report based on the b43-specific 685/* Fill out the mac80211 TXstatus report based on the b43-specific
686 * txstatus report data. This returns a boolean whether the frame was 686 * txstatus report data. This returns a boolean whether the frame was
687 * successfully transmitted. */ 687 * successfully transmitted. */
688bool b43_fill_txstatus_report(struct ieee80211_tx_status *report, 688bool b43_fill_txstatus_report(struct ieee80211_tx_info *report,
689 const struct b43_txstatus *status) 689 const struct b43_txstatus *status)
690{ 690{
691 bool frame_success = 1; 691 bool frame_success = 1;
692 692
693 if (status->acked) { 693 if (status->acked) {
694 /* The frame was ACKed. */ 694 /* The frame was ACKed. */
695 report->flags |= IEEE80211_TX_STATUS_ACK; 695 report->flags |= IEEE80211_TX_STAT_ACK;
696 } else { 696 } else {
697 /* The frame was not ACKed... */ 697 /* The frame was not ACKed... */
698 if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) { 698 if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
699 /* ...but we expected an ACK. */ 699 /* ...but we expected an ACK. */
700 frame_success = 0; 700 frame_success = 0;
701 report->excessive_retries = 1; 701 report->status.excessive_retries = 1;
702 } 702 }
703 } 703 }
704 if (status->frame_count == 0) { 704 if (status->frame_count == 0) {
705 /* The frame was not transmitted at all. */ 705 /* The frame was not transmitted at all. */
706 report->retry_count = 0; 706 report->status.retry_count = 0;
707 } else 707 } else
708 report->retry_count = status->frame_count - 1; 708 report->status.retry_count = status->frame_count - 1;
709 709
710 return frame_success; 710 return frame_success;
711} 711}
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index b05f44e0d626..0215faf47541 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -178,7 +178,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
178 u8 * txhdr, 178 u8 * txhdr,
179 const unsigned char *fragment_data, 179 const unsigned char *fragment_data,
180 unsigned int fragment_len, 180 unsigned int fragment_len,
181 const struct ieee80211_tx_control *txctl, u16 cookie); 181 const struct ieee80211_tx_info *txctl, u16 cookie);
182 182
183/* Transmit Status */ 183/* Transmit Status */
184struct b43_txstatus { 184struct b43_txstatus {
@@ -294,7 +294,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr);
294 294
295void b43_handle_txstatus(struct b43_wldev *dev, 295void b43_handle_txstatus(struct b43_wldev *dev,
296 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
297bool b43_fill_txstatus_report(struct ieee80211_tx_status *report, 297bool b43_fill_txstatus_report(struct ieee80211_tx_info *report,
298 const struct b43_txstatus *status); 298 const struct b43_txstatus *status);
299 299
300void b43_tx_suspend(struct b43_wldev *dev); 300void b43_tx_suspend(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index ded3cd31b3df..c40078e1fff9 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -823,23 +823,6 @@ void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) 823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0)
824#endif /* DEBUG */ 824#endif /* DEBUG */
825 825
826
827/** Limit a value between two limits */
828#ifdef limit_value
829# undef limit_value
830#endif
831#define limit_value(value, min, max) \
832 ({ \
833 typeof(value) __value = (value); \
834 typeof(value) __min = (min); \
835 typeof(value) __max = (max); \
836 if (__value < __min) \
837 __value = __min; \
838 else if (__value > __max) \
839 __value = __max; \
840 __value; \
841 })
842
843/* Macros for printing a value in Q5.2 format */ 826/* Macros for printing a value in Q5.2 format */
844#define Q52_FMT "%u.%u" 827#define Q52_FMT "%u.%u"
845#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4) 828#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4)
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c990f87b107a..33cc256c5baf 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1205,10 +1205,10 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
1205} 1205}
1206 1206
1207static int dma_tx_fragment(struct b43legacy_dmaring *ring, 1207static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1208 struct sk_buff *skb, 1208 struct sk_buff *skb)
1209 struct ieee80211_tx_control *ctl)
1210{ 1209{
1211 const struct b43legacy_dma_ops *ops = ring->ops; 1210 const struct b43legacy_dma_ops *ops = ring->ops;
1211 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1212 u8 *header; 1212 u8 *header;
1213 int slot, old_top_slot, old_used_slots; 1213 int slot, old_top_slot, old_used_slots;
1214 int err; 1214 int err;
@@ -1231,7 +1231,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1231 header = &(ring->txhdr_cache[slot * sizeof( 1231 header = &(ring->txhdr_cache[slot * sizeof(
1232 struct b43legacy_txhdr_fw3)]); 1232 struct b43legacy_txhdr_fw3)]);
1233 err = b43legacy_generate_txhdr(ring->dev, header, 1233 err = b43legacy_generate_txhdr(ring->dev, header,
1234 skb->data, skb->len, ctl, 1234 skb->data, skb->len, info,
1235 generate_cookie(ring, slot)); 1235 generate_cookie(ring, slot));
1236 if (unlikely(err)) { 1236 if (unlikely(err)) {
1237 ring->current_slot = old_top_slot; 1237 ring->current_slot = old_top_slot;
@@ -1255,7 +1255,6 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1255 desc = ops->idx2desc(ring, slot, &meta); 1255 desc = ops->idx2desc(ring, slot, &meta);
1256 memset(meta, 0, sizeof(*meta)); 1256 memset(meta, 0, sizeof(*meta));
1257 1257
1258 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1259 meta->skb = skb; 1258 meta->skb = skb;
1260 meta->is_last_fragment = 1; 1259 meta->is_last_fragment = 1;
1261 1260
@@ -1323,14 +1322,13 @@ int should_inject_overflow(struct b43legacy_dmaring *ring)
1323} 1322}
1324 1323
1325int b43legacy_dma_tx(struct b43legacy_wldev *dev, 1324int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1326 struct sk_buff *skb, 1325 struct sk_buff *skb)
1327 struct ieee80211_tx_control *ctl)
1328{ 1326{
1329 struct b43legacy_dmaring *ring; 1327 struct b43legacy_dmaring *ring;
1330 int err = 0; 1328 int err = 0;
1331 unsigned long flags; 1329 unsigned long flags;
1332 1330
1333 ring = priority_to_txring(dev, ctl->queue); 1331 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1334 spin_lock_irqsave(&ring->lock, flags); 1332 spin_lock_irqsave(&ring->lock, flags);
1335 B43legacy_WARN_ON(!ring->tx); 1333 B43legacy_WARN_ON(!ring->tx);
1336 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { 1334 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
@@ -1343,7 +1341,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1343 * That would be a mac80211 bug. */ 1341 * That would be a mac80211 bug. */
1344 B43legacy_BUG_ON(ring->stopped); 1342 B43legacy_BUG_ON(ring->stopped);
1345 1343
1346 err = dma_tx_fragment(ring, skb, ctl); 1344 err = dma_tx_fragment(ring, skb);
1347 if (unlikely(err == -ENOKEY)) { 1345 if (unlikely(err == -ENOKEY)) {
1348 /* Drop this packet, as we don't have the encryption key 1346 /* Drop this packet, as we don't have the encryption key
1349 * anymore and must not transmit it unencrypted. */ 1347 * anymore and must not transmit it unencrypted. */
@@ -1401,26 +1399,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1401 1); 1399 1);
1402 1400
1403 if (meta->is_last_fragment) { 1401 if (meta->is_last_fragment) {
1404 B43legacy_WARN_ON(!meta->skb); 1402 struct ieee80211_tx_info *info;
1403 BUG_ON(!meta->skb);
1404 info = IEEE80211_SKB_CB(meta->skb);
1405 /* Call back to inform the ieee80211 subsystem about the 1405 /* Call back to inform the ieee80211 subsystem about the
1406 * status of the transmission. 1406 * status of the transmission.
1407 * Some fields of txstat are already filled in dma_tx(). 1407 * Some fields of txstat are already filled in dma_tx().
1408 */ 1408 */
1409
1410 memset(&info->status, 0, sizeof(info->status));
1411
1409 if (status->acked) { 1412 if (status->acked) {
1410 meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; 1413 info->flags |= IEEE80211_TX_STAT_ACK;
1411 } else { 1414 } else {
1412 if (!(meta->txstat.control.flags 1415 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
1413 & IEEE80211_TXCTL_NO_ACK)) 1416 info->status.excessive_retries = 1;
1414 meta->txstat.excessive_retries = 1;
1415 } 1417 }
1416 if (status->frame_count == 0) { 1418 if (status->frame_count == 0) {
1417 /* The frame was not transmitted at all. */ 1419 /* The frame was not transmitted at all. */
1418 meta->txstat.retry_count = 0; 1420 info->status.retry_count = 0;
1419 } else 1421 } else
1420 meta->txstat.retry_count = status->frame_count 1422 info->status.retry_count = status->frame_count
1421 - 1; 1423 - 1;
1422 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1424 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1423 &(meta->txstat));
1424 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1425 /* skb is freed by ieee80211_tx_status_irqsafe() */
1425 meta->skb = NULL; 1426 meta->skb = NULL;
1426 } else { 1427 } else {
@@ -1455,18 +1456,16 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
1455{ 1456{
1456 const int nr_queues = dev->wl->hw->queues; 1457 const int nr_queues = dev->wl->hw->queues;
1457 struct b43legacy_dmaring *ring; 1458 struct b43legacy_dmaring *ring;
1458 struct ieee80211_tx_queue_stats_data *data;
1459 unsigned long flags; 1459 unsigned long flags;
1460 int i; 1460 int i;
1461 1461
1462 for (i = 0; i < nr_queues; i++) { 1462 for (i = 0; i < nr_queues; i++) {
1463 data = &(stats->data[i]);
1464 ring = priority_to_txring(dev, i); 1463 ring = priority_to_txring(dev, i);
1465 1464
1466 spin_lock_irqsave(&ring->lock, flags); 1465 spin_lock_irqsave(&ring->lock, flags);
1467 data->len = ring->used_slots / SLOTS_PER_PACKET; 1466 stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
1468 data->limit = ring->nr_slots / SLOTS_PER_PACKET; 1467 stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
1469 data->count = ring->nr_tx_packets; 1468 stats[i].count = ring->nr_tx_packets;
1470 spin_unlock_irqrestore(&ring->lock, flags); 1469 spin_unlock_irqrestore(&ring->lock, flags);
1471 } 1470 }
1472} 1471}
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2dd488c5be2d..2f186003c31e 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -195,7 +195,6 @@ struct b43legacy_dmadesc_meta {
195 dma_addr_t dmaaddr; 195 dma_addr_t dmaaddr;
196 /* ieee80211 TX status. Only used once per 802.11 frag. */ 196 /* ieee80211 TX status. Only used once per 802.11 frag. */
197 bool is_last_fragment; 197 bool is_last_fragment;
198 struct ieee80211_tx_status txstat;
199}; 198};
200 199
201struct b43legacy_dmaring; 200struct b43legacy_dmaring;
@@ -297,8 +296,7 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
297 struct ieee80211_tx_queue_stats *stats); 296 struct ieee80211_tx_queue_stats *stats);
298 297
299int b43legacy_dma_tx(struct b43legacy_wldev *dev, 298int b43legacy_dma_tx(struct b43legacy_wldev *dev,
300 struct sk_buff *skb, 299 struct sk_buff *skb);
301 struct ieee80211_tx_control *ctl);
302void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, 300void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
303 const struct b43legacy_txstatus *status); 301 const struct b43legacy_txstatus *status);
304 302
@@ -323,8 +321,7 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
323} 321}
324static inline 322static inline
325int b43legacy_dma_tx(struct b43legacy_wldev *dev, 323int b43legacy_dma_tx(struct b43legacy_wldev *dev,
326 struct sk_buff *skb, 324 struct sk_buff *skb)
327 struct ieee80211_tx_control *ctl)
328{ 325{
329 return 0; 326 return 0;
330} 327}
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 204077c13870..5f533b93ad5d 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -846,10 +846,10 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
846 /* Get the noise samples. */ 846 /* Get the noise samples. */
847 B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8); 847 B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8);
848 i = dev->noisecalc.nr_samples; 848 i = dev->noisecalc.nr_samples;
849 noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 849 noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
850 noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 850 noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
851 noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 851 noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
852 noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 852 noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
853 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; 853 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]];
854 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; 854 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]];
855 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; 855 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]];
@@ -2358,8 +2358,7 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
2358} 2358}
2359 2359
2360static int b43legacy_op_tx(struct ieee80211_hw *hw, 2360static int b43legacy_op_tx(struct ieee80211_hw *hw,
2361 struct sk_buff *skb, 2361 struct sk_buff *skb)
2362 struct ieee80211_tx_control *ctl)
2363{ 2362{
2364 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2363 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2365 struct b43legacy_wldev *dev = wl->current_dev; 2364 struct b43legacy_wldev *dev = wl->current_dev;
@@ -2373,18 +2372,17 @@ static int b43legacy_op_tx(struct ieee80211_hw *hw,
2373 /* DMA-TX is done without a global lock. */ 2372 /* DMA-TX is done without a global lock. */
2374 if (b43legacy_using_pio(dev)) { 2373 if (b43legacy_using_pio(dev)) {
2375 spin_lock_irqsave(&wl->irq_lock, flags); 2374 spin_lock_irqsave(&wl->irq_lock, flags);
2376 err = b43legacy_pio_tx(dev, skb, ctl); 2375 err = b43legacy_pio_tx(dev, skb);
2377 spin_unlock_irqrestore(&wl->irq_lock, flags); 2376 spin_unlock_irqrestore(&wl->irq_lock, flags);
2378 } else 2377 } else
2379 err = b43legacy_dma_tx(dev, skb, ctl); 2378 err = b43legacy_dma_tx(dev, skb);
2380out: 2379out:
2381 if (unlikely(err)) 2380 if (unlikely(err))
2382 return NETDEV_TX_BUSY; 2381 return NETDEV_TX_BUSY;
2383 return NETDEV_TX_OK; 2382 return NETDEV_TX_OK;
2384} 2383}
2385 2384
2386static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, 2385static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2387 int queue,
2388 const struct ieee80211_tx_queue_params *params) 2386 const struct ieee80211_tx_queue_params *params)
2389{ 2387{
2390 return 0; 2388 return 0;
@@ -2795,7 +2793,6 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
2795 /* Start data flow (TX/RX) */ 2793 /* Start data flow (TX/RX) */
2796 b43legacy_mac_enable(dev); 2794 b43legacy_mac_enable(dev);
2797 b43legacy_interrupt_enable(dev, dev->irq_savedstate); 2795 b43legacy_interrupt_enable(dev, dev->irq_savedstate);
2798 ieee80211_start_queues(dev->wl->hw);
2799 2796
2800 /* Start maintenance work */ 2797 /* Start maintenance work */
2801 b43legacy_periodic_tasks_setup(dev); 2798 b43legacy_periodic_tasks_setup(dev);
@@ -3404,7 +3401,7 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3404 * field, but that would probably require resizing and moving of data 3401 * field, but that would probably require resizing and moving of data
3405 * within the beacon template. Simply request a new beacon and let 3402 * within the beacon template. Simply request a new beacon and let
3406 * mac80211 do the hard work. */ 3403 * mac80211 do the hard work. */
3407 beacon = ieee80211_beacon_get(hw, wl->vif, NULL); 3404 beacon = ieee80211_beacon_get(hw, wl->vif);
3408 if (unlikely(!beacon)) 3405 if (unlikely(!beacon))
3409 return -ENOMEM; 3406 return -ENOMEM;
3410 spin_lock_irqsave(&wl->irq_lock, flags); 3407 spin_lock_irqsave(&wl->irq_lock, flags);
@@ -3415,8 +3412,7 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3415} 3412}
3416 3413
3417static int b43legacy_op_ibss_beacon_update(struct ieee80211_hw *hw, 3414static int b43legacy_op_ibss_beacon_update(struct ieee80211_hw *hw,
3418 struct sk_buff *beacon, 3415 struct sk_buff *beacon)
3419 struct ieee80211_tx_control *ctl)
3420{ 3416{
3421 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3417 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3422 unsigned long flags; 3418 unsigned long flags;
@@ -3716,10 +3712,9 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3716 3712
3717 /* fill hw info */ 3713 /* fill hw info */
3718 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 3714 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
3719 IEEE80211_HW_RX_INCLUDES_FCS; 3715 IEEE80211_HW_RX_INCLUDES_FCS |
3720 hw->max_signal = 100; 3716 IEEE80211_HW_SIGNAL_DBM |
3721 hw->max_rssi = -110; 3717 IEEE80211_HW_NOISE_DBM;
3722 hw->max_noise = -110;
3723 hw->queues = 1; /* FIXME: hardware has more queues */ 3718 hw->queues = 1; /* FIXME: hardware has more queues */
3724 SET_IEEE80211_DEV(hw, dev->dev); 3719 SET_IEEE80211_DEV(hw, dev->dev);
3725 if (is_valid_ether_addr(sprom->et1mac)) 3720 if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 8e5c09b81871..768cccb9b1ba 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -1088,7 +1088,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1088 * the value 0x7FFFFFFF here. I think that is some weird 1088 * the value 0x7FFFFFFF here. I think that is some weird
1089 * compiler optimization in the original driver. 1089 * compiler optimization in the original driver.
1090 * Essentially, what we do here is resetting all NRSSI LT 1090 * Essentially, what we do here is resetting all NRSSI LT
1091 * entries to -32 (see the limit_value() in nrssi_hw_update()) 1091 * entries to -32 (see the clamp_val() in nrssi_hw_update())
1092 */ 1092 */
1093 b43legacy_nrssi_hw_update(dev, 0xFFFF); 1093 b43legacy_nrssi_hw_update(dev, 0xFFFF);
1094 b43legacy_calc_nrssi_threshold(dev); 1094 b43legacy_calc_nrssi_threshold(dev);
@@ -1756,7 +1756,7 @@ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
1756 switch (phy->type) { 1756 switch (phy->type) {
1757 case B43legacy_PHYTYPE_B: 1757 case B43legacy_PHYTYPE_B:
1758 case B43legacy_PHYTYPE_G: 1758 case B43legacy_PHYTYPE_G:
1759 tmp = limit_value(tmp, 0x00, 0x3F); 1759 tmp = clamp_val(tmp, 0x00, 0x3F);
1760 dbm = phy->tssi2dbm[tmp]; 1760 dbm = phy->tssi2dbm[tmp];
1761 break; 1761 break;
1762 default: 1762 default:
@@ -1859,7 +1859,7 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1859 1859
1860 /* find the desired power in Q5.2 - power_level is in dBm 1860 /* find the desired power in Q5.2 - power_level is in dBm
1861 * and limit it - max_pwr is already in Q5.2 */ 1861 * and limit it - max_pwr is already in Q5.2 */
1862 desired_pwr = limit_value(phy->power_level << 2, 0, max_pwr); 1862 desired_pwr = clamp_val(phy->power_level << 2, 0, max_pwr);
1863 if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER)) 1863 if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER))
1864 b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT 1864 b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT
1865 " dBm, Desired TX power output: " Q52_FMT 1865 " dBm, Desired TX power output: " Q52_FMT
@@ -1905,7 +1905,7 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1905 radio_attenuation++; 1905 radio_attenuation++;
1906 } 1906 }
1907 } 1907 }
1908 baseband_attenuation = limit_value(baseband_attenuation, 0, 11); 1908 baseband_attenuation = clamp_val(baseband_attenuation, 0, 11);
1909 1909
1910 txpower = phy->txctl1; 1910 txpower = phy->txctl1;
1911 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { 1911 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
@@ -1933,8 +1933,8 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1933 } 1933 }
1934 /* Save the control values */ 1934 /* Save the control values */
1935 phy->txctl1 = txpower; 1935 phy->txctl1 = txpower;
1936 baseband_attenuation = limit_value(baseband_attenuation, 0, 11); 1936 baseband_attenuation = clamp_val(baseband_attenuation, 0, 11);
1937 radio_attenuation = limit_value(radio_attenuation, 0, 9); 1937 radio_attenuation = clamp_val(radio_attenuation, 0, 9);
1938 phy->rfatt = radio_attenuation; 1938 phy->rfatt = radio_attenuation;
1939 phy->bbatt = baseband_attenuation; 1939 phy->bbatt = baseband_attenuation;
1940 1940
@@ -1979,7 +1979,7 @@ s8 b43legacy_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2)
1979 f = q; 1979 f = q;
1980 i++; 1980 i++;
1981 } while (delta >= 2); 1981 } while (delta >= 2);
1982 entry[index] = limit_value(b43legacy_tssi2dbm_ad(m1 * f, 8192), 1982 entry[index] = clamp_val(b43legacy_tssi2dbm_ad(m1 * f, 8192),
1983 -127, 128); 1983 -127, 128);
1984 return 0; 1984 return 0;
1985} 1985}
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index bcdd54eb2edb..a86c7647fa2d 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -196,7 +196,7 @@ static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
196 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); 196 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
197 err = b43legacy_generate_txhdr(queue->dev, 197 err = b43legacy_generate_txhdr(queue->dev,
198 txhdr, skb->data, skb->len, 198 txhdr, skb->data, skb->len,
199 &packet->txstat.control, 199 IEEE80211_SKB_CB(skb),
200 generate_cookie(queue, packet)); 200 generate_cookie(queue, packet));
201 if (err) 201 if (err)
202 return err; 202 return err;
@@ -463,8 +463,7 @@ err_destroy0:
463} 463}
464 464
465int b43legacy_pio_tx(struct b43legacy_wldev *dev, 465int b43legacy_pio_tx(struct b43legacy_wldev *dev,
466 struct sk_buff *skb, 466 struct sk_buff *skb)
467 struct ieee80211_tx_control *ctl)
468{ 467{
469 struct b43legacy_pioqueue *queue = dev->pio.queue1; 468 struct b43legacy_pioqueue *queue = dev->pio.queue1;
470 struct b43legacy_pio_txpacket *packet; 469 struct b43legacy_pio_txpacket *packet;
@@ -476,9 +475,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
476 list); 475 list);
477 packet->skb = skb; 476 packet->skb = skb;
478 477
479 memset(&packet->txstat, 0, sizeof(packet->txstat));
480 memcpy(&packet->txstat.control, ctl, sizeof(*ctl));
481
482 list_move_tail(&packet->list, &queue->txqueue); 478 list_move_tail(&packet->list, &queue->txqueue);
483 queue->nr_txfree--; 479 queue->nr_txfree--;
484 queue->nr_tx_packets++; 480 queue->nr_tx_packets++;
@@ -494,6 +490,7 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
494{ 490{
495 struct b43legacy_pioqueue *queue; 491 struct b43legacy_pioqueue *queue;
496 struct b43legacy_pio_txpacket *packet; 492 struct b43legacy_pio_txpacket *packet;
493 struct ieee80211_tx_info *info;
497 494
498 queue = parse_cookie(dev, status->cookie, &packet); 495 queue = parse_cookie(dev, status->cookie, &packet);
499 B43legacy_WARN_ON(!queue); 496 B43legacy_WARN_ON(!queue);
@@ -505,11 +502,13 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
505 queue->tx_devq_used -= (packet->skb->len + 502 queue->tx_devq_used -= (packet->skb->len +
506 sizeof(struct b43legacy_txhdr_fw3)); 503 sizeof(struct b43legacy_txhdr_fw3));
507 504
505 info = IEEE80211_SKB_CB(packet->skb);
506 memset(&info->status, 0, sizeof(info->status));
507
508 if (status->acked) 508 if (status->acked)
509 packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; 509 info->flags |= IEEE80211_TX_STAT_ACK;
510 packet->txstat.retry_count = status->frame_count - 1; 510 info->status.retry_count = status->frame_count - 1;
511 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, 511 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
512 &(packet->txstat));
513 packet->skb = NULL; 512 packet->skb = NULL;
514 513
515 free_txpacket(packet, 1); 514 free_txpacket(packet, 1);
@@ -525,13 +524,11 @@ void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
525{ 524{
526 struct b43legacy_pio *pio = &dev->pio; 525 struct b43legacy_pio *pio = &dev->pio;
527 struct b43legacy_pioqueue *queue; 526 struct b43legacy_pioqueue *queue;
528 struct ieee80211_tx_queue_stats_data *data;
529 527
530 queue = pio->queue1; 528 queue = pio->queue1;
531 data = &(stats->data[0]); 529 stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
532 data->len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree; 530 stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
533 data->limit = B43legacy_PIO_MAXTXPACKETS; 531 stats[0].count = queue->nr_tx_packets;
534 data->count = queue->nr_tx_packets;
535} 532}
536 533
537static void pio_rx_error(struct b43legacy_pioqueue *queue, 534static void pio_rx_error(struct b43legacy_pioqueue *queue,
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 5bfed0c40030..464fec05a06d 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -41,7 +41,6 @@ struct b43legacy_xmitstatus;
41struct b43legacy_pio_txpacket { 41struct b43legacy_pio_txpacket {
42 struct b43legacy_pioqueue *queue; 42 struct b43legacy_pioqueue *queue;
43 struct sk_buff *skb; 43 struct sk_buff *skb;
44 struct ieee80211_tx_status txstat;
45 struct list_head list; 44 struct list_head list;
46}; 45};
47 46
@@ -104,8 +103,7 @@ int b43legacy_pio_init(struct b43legacy_wldev *dev);
104void b43legacy_pio_free(struct b43legacy_wldev *dev); 103void b43legacy_pio_free(struct b43legacy_wldev *dev);
105 104
106int b43legacy_pio_tx(struct b43legacy_wldev *dev, 105int b43legacy_pio_tx(struct b43legacy_wldev *dev,
107 struct sk_buff *skb, 106 struct sk_buff *skb);
108 struct ieee80211_tx_control *ctl);
109void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, 107void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
110 const struct b43legacy_txstatus *status); 108 const struct b43legacy_txstatus *status);
111void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, 109void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
@@ -132,8 +130,7 @@ void b43legacy_pio_free(struct b43legacy_wldev *dev)
132} 130}
133static inline 131static inline
134int b43legacy_pio_tx(struct b43legacy_wldev *dev, 132int b43legacy_pio_tx(struct b43legacy_wldev *dev,
135 struct sk_buff *skb, 133 struct sk_buff *skb)
136 struct ieee80211_tx_control *ctl)
137{ 134{
138 return 0; 135 return 0;
139} 136}
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 955832e8654f..2df545cfad14 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -357,7 +357,7 @@ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
357 for (i = 0; i < 64; i++) { 357 for (i = 0; i < 64; i++) {
358 tmp = b43legacy_nrssi_hw_read(dev, i); 358 tmp = b43legacy_nrssi_hw_read(dev, i);
359 tmp -= val; 359 tmp -= val;
360 tmp = limit_value(tmp, -32, 31); 360 tmp = clamp_val(tmp, -32, 31);
361 b43legacy_nrssi_hw_write(dev, i, tmp); 361 b43legacy_nrssi_hw_write(dev, i, tmp);
362 } 362 }
363} 363}
@@ -375,7 +375,7 @@ void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev)
375 tmp = (i - delta) * phy->nrssislope; 375 tmp = (i - delta) * phy->nrssislope;
376 tmp /= 0x10000; 376 tmp /= 0x10000;
377 tmp += 0x3A; 377 tmp += 0x3A;
378 tmp = limit_value(tmp, 0, 0x3F); 378 tmp = clamp_val(tmp, 0, 0x3F);
379 phy->nrssi_lt[i] = tmp; 379 phy->nrssi_lt[i] = tmp;
380 } 380 }
381} 381}
@@ -839,7 +839,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
839 } else 839 } else
840 threshold = phy->nrssi[1] - 5; 840 threshold = phy->nrssi[1] - 5;
841 841
842 threshold = limit_value(threshold, 0, 0x3E); 842 threshold = clamp_val(threshold, 0, 0x3E);
843 b43legacy_phy_read(dev, 0x0020); /* dummy read */ 843 b43legacy_phy_read(dev, 0x0020); /* dummy read */
844 b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8) 844 b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8)
845 | 0x001C); 845 | 0x001C);
@@ -892,7 +892,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
892 else 892 else
893 a += 32; 893 a += 32;
894 a = a >> 6; 894 a = a >> 6;
895 a = limit_value(a, -31, 31); 895 a = clamp_val(a, -31, 31);
896 896
897 b = b * (phy->nrssi[1] - phy->nrssi[0]); 897 b = b * (phy->nrssi[1] - phy->nrssi[0]);
898 b += (phy->nrssi[0] << 6); 898 b += (phy->nrssi[0] << 6);
@@ -901,7 +901,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
901 else 901 else
902 b += 32; 902 b += 32;
903 b = b >> 6; 903 b = b >> 6;
904 b = limit_value(b, -31, 31); 904 b = clamp_val(b, -31, 31);
905 905
906 tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000; 906 tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000;
907 tmp_u16 |= ((u32)b & 0x0000003F); 907 tmp_u16 |= ((u32)b & 0x0000003F);
@@ -1905,7 +1905,7 @@ void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower)
1905 u16 dac; 1905 u16 dac;
1906 u16 ilt; 1906 u16 ilt;
1907 1907
1908 txpower = limit_value(txpower, 0, 63); 1908 txpower = clamp_val(txpower, 0, 63);
1909 1909
1910 pamp = b43legacy_get_txgain_freq_power_amp(txpower); 1910 pamp = b43legacy_get_txgain_freq_power_amp(txpower);
1911 pamp <<= 5; 1911 pamp <<= 5;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index dcad2491a606..82dc04d59446 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -188,11 +188,11 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
188 struct b43legacy_txhdr_fw3 *txhdr, 188 struct b43legacy_txhdr_fw3 *txhdr,
189 const unsigned char *fragment_data, 189 const unsigned char *fragment_data,
190 unsigned int fragment_len, 190 unsigned int fragment_len,
191 const struct ieee80211_tx_control *txctl, 191 const struct ieee80211_tx_info *info,
192 u16 cookie) 192 u16 cookie)
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT));
196 u16 fctl; 196 u16 fctl;
197 u8 rate; 197 u8 rate;
198 struct ieee80211_rate *rate_fb; 198 struct ieee80211_rate *rate_fb;
@@ -201,15 +201,18 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
201 unsigned int plcp_fragment_len; 201 unsigned int plcp_fragment_len;
202 u32 mac_ctl = 0; 202 u32 mac_ctl = 0;
203 u16 phy_ctl = 0; 203 u16 phy_ctl = 0;
204 struct ieee80211_rate *tx_rate;
204 205
205 wlhdr = (const struct ieee80211_hdr *)fragment_data; 206 wlhdr = (const struct ieee80211_hdr *)fragment_data;
206 fctl = le16_to_cpu(wlhdr->frame_control); 207 fctl = le16_to_cpu(wlhdr->frame_control);
207 208
208 memset(txhdr, 0, sizeof(*txhdr)); 209 memset(txhdr, 0, sizeof(*txhdr));
209 210
210 rate = txctl->tx_rate->hw_value; 211 tx_rate = ieee80211_get_tx_rate(dev->wl->hw, info);
212
213 rate = tx_rate->hw_value;
211 rate_ofdm = b43legacy_is_ofdm_rate(rate); 214 rate_ofdm = b43legacy_is_ofdm_rate(rate);
212 rate_fb = txctl->alt_retry_rate ? : txctl->tx_rate; 215 rate_fb = ieee80211_get_alt_retry_rate(dev->wl->hw, info) ? : tx_rate;
213 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value); 216 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
214 217
215 txhdr->mac_frame_ctl = wlhdr->frame_control; 218 txhdr->mac_frame_ctl = wlhdr->frame_control;
@@ -225,14 +228,14 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
225 txhdr->dur_fb = wlhdr->duration_id; 228 txhdr->dur_fb = wlhdr->duration_id;
226 } else { 229 } else {
227 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 230 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
228 txctl->vif, 231 info->control.vif,
229 fragment_len, 232 fragment_len,
230 rate_fb); 233 rate_fb);
231 } 234 }
232 235
233 plcp_fragment_len = fragment_len + FCS_LEN; 236 plcp_fragment_len = fragment_len + FCS_LEN;
234 if (use_encryption) { 237 if (use_encryption) {
235 u8 key_idx = (u16)(txctl->key_idx); 238 u8 key_idx = info->control.hw_key->hw_key_idx;
236 struct b43legacy_key *key; 239 struct b43legacy_key *key;
237 int wlhdr_len; 240 int wlhdr_len;
238 size_t iv_len; 241 size_t iv_len;
@@ -242,7 +245,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
242 245
243 if (key->enabled) { 246 if (key->enabled) {
244 /* Hardware appends ICV. */ 247 /* Hardware appends ICV. */
245 plcp_fragment_len += txctl->icv_len; 248 plcp_fragment_len += info->control.icv_len;
246 249
247 key_idx = b43legacy_kidx_to_fw(dev, key_idx); 250 key_idx = b43legacy_kidx_to_fw(dev, key_idx);
248 mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) & 251 mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) &
@@ -251,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
251 B43legacy_TX4_MAC_KEYALG_SHIFT) & 254 B43legacy_TX4_MAC_KEYALG_SHIFT) &
252 B43legacy_TX4_MAC_KEYALG; 255 B43legacy_TX4_MAC_KEYALG;
253 wlhdr_len = ieee80211_get_hdrlen(fctl); 256 wlhdr_len = ieee80211_get_hdrlen(fctl);
254 iv_len = min((size_t)txctl->iv_len, 257 iv_len = min((size_t)info->control.iv_len,
255 ARRAY_SIZE(txhdr->iv)); 258 ARRAY_SIZE(txhdr->iv));
256 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); 259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
257 } else { 260 } else {
@@ -275,7 +278,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
275 phy_ctl |= B43legacy_TX4_PHY_OFDM; 278 phy_ctl |= B43legacy_TX4_PHY_OFDM;
276 if (dev->short_preamble) 279 if (dev->short_preamble)
277 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; 280 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
278 switch (txctl->antenna_sel_tx) { 281 switch (info->antenna_sel_tx) {
279 case 0: 282 case 0:
280 phy_ctl |= B43legacy_TX4_PHY_ANTLAST; 283 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
281 break; 284 break;
@@ -290,21 +293,21 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
290 } 293 }
291 294
292 /* MAC control */ 295 /* MAC control */
293 if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) 296 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
294 mac_ctl |= B43legacy_TX4_MAC_ACK; 297 mac_ctl |= B43legacy_TX4_MAC_ACK;
295 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && 298 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
296 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) 299 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)))
297 mac_ctl |= B43legacy_TX4_MAC_HWSEQ; 300 mac_ctl |= B43legacy_TX4_MAC_HWSEQ;
298 if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) 301 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
299 mac_ctl |= B43legacy_TX4_MAC_STMSDU; 302 mac_ctl |= B43legacy_TX4_MAC_STMSDU;
300 if (rate_fb_ofdm) 303 if (rate_fb_ofdm)
301 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; 304 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM;
302 if (txctl->flags & IEEE80211_TXCTL_LONG_RETRY_LIMIT) 305 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
303 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; 306 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME;
304 307
305 /* Generate the RTS or CTS-to-self frame */ 308 /* Generate the RTS or CTS-to-self frame */
306 if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 309 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
307 (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 310 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
308 unsigned int len; 311 unsigned int len;
309 struct ieee80211_hdr *hdr; 312 struct ieee80211_hdr *hdr;
310 int rts_rate; 313 int rts_rate;
@@ -312,26 +315,26 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
312 int rts_rate_ofdm; 315 int rts_rate_ofdm;
313 int rts_rate_fb_ofdm; 316 int rts_rate_fb_ofdm;
314 317
315 rts_rate = txctl->rts_cts_rate->hw_value; 318 rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
316 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); 319 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
317 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); 320 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
318 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); 321 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
319 if (rts_rate_fb_ofdm) 322 if (rts_rate_fb_ofdm)
320 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; 323 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM;
321 324
322 if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 325 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
323 ieee80211_ctstoself_get(dev->wl->hw, 326 ieee80211_ctstoself_get(dev->wl->hw,
324 txctl->vif, 327 info->control.vif,
325 fragment_data, 328 fragment_data,
326 fragment_len, txctl, 329 fragment_len, info,
327 (struct ieee80211_cts *) 330 (struct ieee80211_cts *)
328 (txhdr->rts_frame)); 331 (txhdr->rts_frame));
329 mac_ctl |= B43legacy_TX4_MAC_SENDCTS; 332 mac_ctl |= B43legacy_TX4_MAC_SENDCTS;
330 len = sizeof(struct ieee80211_cts); 333 len = sizeof(struct ieee80211_cts);
331 } else { 334 } else {
332 ieee80211_rts_get(dev->wl->hw, 335 ieee80211_rts_get(dev->wl->hw,
333 txctl->vif, 336 info->control.vif,
334 fragment_data, fragment_len, txctl, 337 fragment_data, fragment_len, info,
335 (struct ieee80211_rts *) 338 (struct ieee80211_rts *)
336 (txhdr->rts_frame)); 339 (txhdr->rts_frame));
337 mac_ctl |= B43legacy_TX4_MAC_SENDRTS; 340 mac_ctl |= B43legacy_TX4_MAC_SENDRTS;
@@ -362,12 +365,12 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
362 u8 *txhdr, 365 u8 *txhdr,
363 const unsigned char *fragment_data, 366 const unsigned char *fragment_data,
364 unsigned int fragment_len, 367 unsigned int fragment_len,
365 const struct ieee80211_tx_control *txctl, 368 const struct ieee80211_tx_info *info,
366 u16 cookie) 369 u16 cookie)
367{ 370{
368 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, 371 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
369 fragment_data, fragment_len, 372 fragment_data, fragment_len,
370 txctl, cookie); 373 info, cookie);
371} 374}
372 375
373static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev, 376static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev,
@@ -532,12 +535,12 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
532 } 535 }
533 } 536 }
534 537
535 status.ssi = b43legacy_rssi_postprocess(dev, jssi, 538 status.signal = b43legacy_rssi_postprocess(dev, jssi,
536 (phystat0 & B43legacy_RX_PHYST0_OFDM), 539 (phystat0 & B43legacy_RX_PHYST0_OFDM),
537 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 540 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
538 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 541 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
539 status.noise = dev->stats.link_noise; 542 status.noise = dev->stats.link_noise;
540 status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI; 543 status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
541 /* change to support A PHY */ 544 /* change to support A PHY */
542 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 545 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
543 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 546 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index bab47928a0c9..e56777e0feab 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -80,7 +80,7 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
80 u8 *txhdr, 80 u8 *txhdr,
81 const unsigned char *fragment_data, 81 const unsigned char *fragment_data,
82 unsigned int fragment_len, 82 unsigned int fragment_len,
83 const struct ieee80211_tx_control *txctl, 83 const struct ieee80211_tx_info *info,
84 u16 cookie); 84 u16 cookie);
85 85
86 86
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 62fb89d82318..5f3e849043f7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -14,6 +14,15 @@ config IWLWIFI_LEDS
14 bool 14 bool
15 default n 15 default n
16 16
17config IWLWIFI_RUN_TIME_CALIB
18 bool
19 depends on IWLCORE
20 default n
21 ---help---
22 This option will enable run time calibration for the iwlwifi driver.
23 These calibrations are Sensitivity and Chain Noise.
24
25
17config IWLWIFI_RFKILL 26config IWLWIFI_RFKILL
18 boolean "IWLWIFI RF kill support" 27 boolean "IWLWIFI RF kill support"
19 depends on IWLCORE 28 depends on IWLCORE
@@ -67,12 +76,14 @@ config IWL4965_SPECTRUM_MEASUREMENT
67 ---help--- 76 ---help---
68 This option will enable spectrum measurement for the iwl4965 driver. 77 This option will enable spectrum measurement for the iwl4965 driver.
69 78
70config IWL4965_SENSITIVITY 79config IWL4965_RUN_TIME_CALIB
71 bool "Enable Sensitivity Calibration in iwl4965 driver" 80 bool "Enable run time Calibration for 4965 NIC"
81 select IWLWIFI_RUN_TIME_CALIB
72 depends on IWL4965 82 depends on IWL4965
83 default y
73 ---help--- 84 ---help---
74 This option will enable sensitivity calibration for the iwl4965 85 This option will enable run time calibration for the iwl4965 driver.
75 driver. 86 These calibrations are Sensitivity and Chain Noise. If unsure, say yes
76 87
77config IWLWIFI_DEBUG 88config IWLWIFI_DEBUG
78 bool "Enable full debugging output in iwl4965 driver" 89 bool "Enable full debugging output in iwl4965 driver"
@@ -85,13 +96,13 @@ config IWLWIFI_DEBUG
85 control which debug output is sent to the kernel log by setting the 96 control which debug output is sent to the kernel log by setting the
86 value in 97 value in
87 98
88 /sys/bus/pci/drivers/${DRIVER}/debug_level 99 /sys/class/net/wlan0/device/debug_level
89 100
90 This entry will only exist if this option is enabled. 101 This entry will only exist if this option is enabled.
91 102
92 To set a value, simply echo an 8-byte hex value to the same file: 103 To set a value, simply echo an 8-byte hex value to the same file:
93 104
94 % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level 105 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
95 106
96 You can find the list of debug mask values in: 107 You can find the list of debug mask values in:
97 drivers/net/wireless/iwlwifi/iwl-4965-debug.h 108 drivers/net/wireless/iwlwifi/iwl-4965-debug.h
@@ -100,6 +111,23 @@ config IWLWIFI_DEBUG
100 as the debug information can assist others in helping you resolve 111 as the debug information can assist others in helping you resolve
101 any problems you may encounter. 112 any problems you may encounter.
102 113
114config IWL5000
115 bool "Intel Wireless WiFi 5000AGN"
116 depends on IWL4965
117 ---help---
118 This option enables support for Intel Wireless WiFi Link 5000AGN Family
119 Dependency on 4965 is temporary
120
121config IWL5000_RUN_TIME_CALIB
122 bool "Enable run time Calibration for 5000 NIC"
123 select IWLWIFI_RUN_TIME_CALIB
124 depends on IWL5000
125 default y
126 ---help---
127 This option will enable run time calibration for the iwl5000 driver.
128 These calibrations are Sensitivity and Chain Noise. If unsure, say yes
129
130
103config IWLWIFI_DEBUGFS 131config IWLWIFI_DEBUGFS
104 bool "Iwlwifi debugfs support" 132 bool "Iwlwifi debugfs support"
105 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS 133 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index ec6187b75c3b..5c73eede7193 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,13 +1,20 @@
1obj-$(CONFIG_IWLCORE) += iwlcore.o 1obj-$(CONFIG_IWLCORE) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
3iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 4iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
4iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 5iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 6iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
7iwlcore-$(CONFIG_IWLWIFI_RUN_TIME_CALIB) += iwl-calib.o
6 8
7obj-$(CONFIG_IWL3945) += iwl3945.o 9obj-$(CONFIG_IWL3945) += iwl3945.o
8iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o 10iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
9iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o 11iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
10 12
11obj-$(CONFIG_IWL4965) += iwl4965.o 13obj-$(CONFIG_IWL4965) += iwl4965.o
12iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o iwl-sta.o 14iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
15
16ifeq ($(CONFIG_IWL5000),y)
17 iwl4965-objs += iwl-5000.o
18endif
19
13 20
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index ad612a8719f4..644bd9e08052 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -126,7 +126,7 @@ enum {
126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
129 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 129 /* Bit 6 Reserved (was Narrow Channel) */
130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
131}; 131};
132 132
@@ -289,17 +289,6 @@ struct iwl3945_eeprom {
289#define PCI_REG_WUM8 0x0E8 289#define PCI_REG_WUM8 0x0E8
290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
291 291
292/* SCD (3945 Tx Frame Scheduler) */
293#define SCD_BASE (CSR_BASE + 0x2E00)
294
295#define SCD_MODE_REG (SCD_BASE + 0x000)
296#define SCD_ARASTAT_REG (SCD_BASE + 0x004)
297#define SCD_TXFACT_REG (SCD_BASE + 0x010)
298#define SCD_TXF4MF_REG (SCD_BASE + 0x014)
299#define SCD_TXF5MF_REG (SCD_BASE + 0x020)
300#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C)
301#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030)
302
303/*=== FH (data Flow Handler) ===*/ 292/*=== FH (data Flow Handler) ===*/
304#define FH_BASE (0x800) 293#define FH_BASE (0x800)
305 294
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 85c22641542d..10c64bdb314c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -29,7 +29,6 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/wireless.h> 30#include <linux/wireless.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32#include <net/ieee80211.h>
33 32
34#include <linux/netdevice.h> 33#include <linux/netdevice.h>
35#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
@@ -446,8 +445,7 @@ static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
446 */ 445 */
447static void rs_tx_status(void *priv_rate, 446static void rs_tx_status(void *priv_rate,
448 struct net_device *dev, 447 struct net_device *dev,
449 struct sk_buff *skb, 448 struct sk_buff *skb)
450 struct ieee80211_tx_status *tx_resp)
451{ 449{
452 u8 retries, current_count; 450 u8 retries, current_count;
453 int scale_rate_index, first_index, last_index; 451 int scale_rate_index, first_index, last_index;
@@ -458,14 +456,15 @@ static void rs_tx_status(void *priv_rate,
458 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 456 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
459 struct iwl3945_rs_sta *rs_sta; 457 struct iwl3945_rs_sta *rs_sta;
460 struct ieee80211_supported_band *sband; 458 struct ieee80211_supported_band *sband;
459 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461 460
462 IWL_DEBUG_RATE("enter\n"); 461 IWL_DEBUG_RATE("enter\n");
463 462
464 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 463 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
465 464
466 465
467 retries = tx_resp->retry_count; 466 retries = info->status.retry_count;
468 first_index = tx_resp->control.tx_rate->hw_value; 467 first_index = sband->bitrates[info->tx_rate_idx].hw_value;
469 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 468 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
470 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index); 469 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index);
471 return; 470 return;
@@ -526,11 +525,11 @@ static void rs_tx_status(void *priv_rate,
526 /* Update the last index window with success/failure based on ACK */ 525 /* Update the last index window with success/failure based on ACK */
527 IWL_DEBUG_RATE("Update rate %d with %s.\n", 526 IWL_DEBUG_RATE("Update rate %d with %s.\n",
528 last_index, 527 last_index,
529 (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ? 528 (info->flags & IEEE80211_TX_STAT_ACK) ?
530 "success" : "failure"); 529 "success" : "failure");
531 iwl3945_collect_tx_data(rs_sta, 530 iwl3945_collect_tx_data(rs_sta,
532 &rs_sta->win[last_index], 531 &rs_sta->win[last_index],
533 tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1); 532 info->flags & IEEE80211_TX_STAT_ACK, 1);
534 533
535 /* We updated the rate scale window -- if its been more than 534 /* We updated the rate scale window -- if its been more than
536 * flush_time since the last run, schedule the flush 535 * flush_time since the last run, schedule the flush
@@ -670,7 +669,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
670 is_multicast_ether_addr(hdr->addr1) || 669 is_multicast_ether_addr(hdr->addr1) ||
671 !sta || !sta->rate_ctrl_priv) { 670 !sta || !sta->rate_ctrl_priv) {
672 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 671 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
673 sel->rate = rate_lowest(local, sband, sta); 672 sel->rate_idx = rate_lowest_index(local, sband, sta);
674 rcu_read_unlock(); 673 rcu_read_unlock();
675 return; 674 return;
676 } 675 }
@@ -814,7 +813,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
814 813
815 IWL_DEBUG_RATE("leave: %d\n", index); 814 IWL_DEBUG_RATE("leave: %d\n", index);
816 815
817 sel->rate = &sband->bitrates[sta->txrate_idx]; 816 sel->rate_idx = sta->txrate_idx;
818} 817}
819 818
820static struct rate_control_ops rs_ops = { 819static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 62a3d8f8563e..0ba6889dfd41 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -283,8 +283,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
284 284
285 tx_info = &txq->txb[txq->q.read_ptr]; 285 tx_info = &txq->txb[txq->q.read_ptr];
286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0], 286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
287 &tx_info->status);
288 tx_info->skb[0] = NULL; 287 tx_info->skb[0] = NULL;
289 iwl3945_hw_txq_free_tfd(priv, txq); 288 iwl3945_hw_txq_free_tfd(priv, txq);
290 } 289 }
@@ -306,7 +305,7 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
306 int txq_id = SEQ_TO_QUEUE(sequence); 305 int txq_id = SEQ_TO_QUEUE(sequence);
307 int index = SEQ_TO_INDEX(sequence); 306 int index = SEQ_TO_INDEX(sequence);
308 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 307 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
309 struct ieee80211_tx_status *tx_status; 308 struct ieee80211_tx_info *info;
310 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 309 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
311 u32 status = le32_to_cpu(tx_resp->status); 310 u32 status = le32_to_cpu(tx_resp->status);
312 int rate_idx; 311 int rate_idx;
@@ -319,19 +318,22 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
319 return; 318 return;
320 } 319 }
321 320
322 tx_status = &(txq->txb[txq->q.read_ptr].status); 321 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
322 memset(&info->status, 0, sizeof(info->status));
323 323
324 tx_status->retry_count = tx_resp->failure_frame; 324 info->status.retry_count = tx_resp->failure_frame;
325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */ 325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
326 tx_status->flags = ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 326 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
327 IEEE80211_TX_STATUS_ACK : 0; 327 IEEE80211_TX_STAT_ACK : 0;
328 328
329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", 329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
330 txq_id, iwl3945_get_tx_fail_reason(status), status, 330 txq_id, iwl3945_get_tx_fail_reason(status), status,
331 tx_resp->rate, tx_resp->failure_frame); 331 tx_resp->rate, tx_resp->failure_frame);
332 332
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); 333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 tx_status->control.tx_rate = &priv->ieee_rates[rate_idx]; 334 if (info->band == IEEE80211_BAND_5GHZ)
335 rate_idx -= IWL_FIRST_OFDM_RATE;
336 info->tx_rate_idx = rate_idx;
335 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 337 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
336 iwl3945_tx_queue_reclaim(priv, txq_id, index); 338 iwl3945_tx_queue_reclaim(priv, txq_id, index);
337 339
@@ -520,7 +522,7 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
520{ 522{
521 /* First cache any information we need before we overwrite 523 /* First cache any information we need before we overwrite
522 * the information provided in the skb from the hardware */ 524 * the information provided in the skb from the hardware */
523 s8 signal = stats->ssi; 525 s8 signal = stats->signal;
524 s8 noise = 0; 526 s8 noise = 0;
525 int rate = stats->rate_idx; 527 int rate = stats->rate_idx;
526 u64 tsf = stats->mactime; 528 u64 tsf = stats->mactime;
@@ -693,7 +695,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
693 } 695 }
694 696
695 /* Convert 3945's rssi indicator to dBm */ 697 /* Convert 3945's rssi indicator to dBm */
696 rx_status.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; 698 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
697 699
698 /* Set default noise value to -127 */ 700 /* Set default noise value to -127 */
699 if (priv->last_rx_noise == 0) 701 if (priv->last_rx_noise == 0)
@@ -712,21 +714,21 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
712 * Calculate rx_status.signal (quality indicator in %) based on SNR. */ 714 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
713 if (rx_stats_noise_diff) { 715 if (rx_stats_noise_diff) {
714 snr = rx_stats_sig_avg / rx_stats_noise_diff; 716 snr = rx_stats_sig_avg / rx_stats_noise_diff;
715 rx_status.noise = rx_status.ssi - 717 rx_status.noise = rx_status.signal -
716 iwl3945_calc_db_from_ratio(snr); 718 iwl3945_calc_db_from_ratio(snr);
717 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 719 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
718 rx_status.noise); 720 rx_status.noise);
719 721
720 /* If noise info not available, calculate signal quality indicator (%) 722 /* If noise info not available, calculate signal quality indicator (%)
721 * using just the dBm signal level. */ 723 * using just the dBm signal level. */
722 } else { 724 } else {
723 rx_status.noise = priv->last_rx_noise; 725 rx_status.noise = priv->last_rx_noise;
724 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 0); 726 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
725 } 727 }
726 728
727 729
728 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 730 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
729 rx_status.ssi, rx_status.noise, rx_status.signal, 731 rx_status.signal, rx_status.noise, rx_status.qual,
730 rx_stats_sig_avg, rx_stats_noise_diff); 732 rx_stats_sig_avg, rx_stats_noise_diff);
731 733
732 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 734 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -736,8 +738,8 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
736 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 738 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
737 network_packet ? '*' : ' ', 739 network_packet ? '*' : ' ',
738 le16_to_cpu(rx_hdr->channel), 740 le16_to_cpu(rx_hdr->channel),
739 rx_status.ssi, rx_status.ssi, 741 rx_status.signal, rx_status.signal,
740 rx_status.ssi, rx_status.rate_idx); 742 rx_status.noise, rx_status.rate_idx);
741 743
742#ifdef CONFIG_IWL3945_DEBUG 744#ifdef CONFIG_IWL3945_DEBUG
743 if (iwl3945_debug_level & (IWL_DL_RX)) 745 if (iwl3945_debug_level & (IWL_DL_RX))
@@ -748,7 +750,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
748 if (network_packet) { 750 if (network_packet) {
749 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 751 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
750 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 752 priv->last_tsf = le64_to_cpu(rx_end->timestamp);
751 priv->last_rx_rssi = rx_status.ssi; 753 priv->last_rx_rssi = rx_status.signal;
752 priv->last_rx_noise = rx_status.noise; 754 priv->last_rx_noise = rx_status.noise;
753 } 755 }
754 756
@@ -958,11 +960,12 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
958*/ 960*/
959void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 961void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
960 struct iwl3945_cmd *cmd, 962 struct iwl3945_cmd *cmd,
961 struct ieee80211_tx_control *ctrl, 963 struct ieee80211_tx_info *info,
962 struct ieee80211_hdr *hdr, int sta_id, int tx_id) 964 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
963{ 965{
964 unsigned long flags; 966 unsigned long flags;
965 u16 rate_index = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1); 967 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
968 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1);
966 u16 rate_mask; 969 u16 rate_mask;
967 int rate; 970 int rate;
968 u8 rts_retry_limit; 971 u8 rts_retry_limit;
@@ -974,7 +977,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
974 tx_flags = cmd->cmd.tx.tx_flags; 977 tx_flags = cmd->cmd.tx.tx_flags;
975 978
976 /* We need to figure out how to get the sta->supp_rates while 979 /* We need to figure out how to get the sta->supp_rates while
977 * in this running context; perhaps encoding into ctrl->tx_rate? */ 980 * in this running context */
978 rate_mask = IWL_RATES_MASK; 981 rate_mask = IWL_RATES_MASK;
979 982
980 spin_lock_irqsave(&priv->sta_lock, flags); 983 spin_lock_irqsave(&priv->sta_lock, flags);
@@ -1229,7 +1232,7 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1229 iwl3945_power_init_handle(priv); 1232 iwl3945_power_init_handle(priv);
1230 1233
1231 spin_lock_irqsave(&priv->lock, flags); 1234 spin_lock_irqsave(&priv->lock, flags);
1232 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24)); 1235 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, CSR39_ANA_PLL_CFG_VAL);
1233 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1236 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1234 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1237 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1235 1238
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index c7695a215a39..a9b3edad3868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -124,7 +124,6 @@ int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i);
124 124
125/* One for each TFD */ 125/* One for each TFD */
126struct iwl3945_tx_info { 126struct iwl3945_tx_info {
127 struct ieee80211_tx_status status;
128 struct sk_buff *skb[MAX_NUM_OF_TBS]; 127 struct sk_buff *skb[MAX_NUM_OF_TBS];
129}; 128};
130 129
@@ -645,7 +644,7 @@ extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
645extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv); 644extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv);
646extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 645extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
647 struct iwl3945_cmd *cmd, 646 struct iwl3945_cmd *cmd,
648 struct ieee80211_tx_control *ctrl, 647 struct ieee80211_tx_info *info,
649 struct ieee80211_hdr *hdr, 648 struct ieee80211_hdr *hdr,
650 int sta_id, int tx_id); 649 int sta_id, int tx_id);
651extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv); 650extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv);
@@ -836,8 +835,6 @@ struct iwl3945_priv {
836 835
837 u8 mac80211_registered; 836 u8 mac80211_registered;
838 837
839 u32 notif_missed_beacons;
840
841 /* Rx'd packet timing information */ 838 /* Rx'd packet timing information */
842 u32 last_beacon_time; 839 u32 last_beacon_time;
843 u64 last_tsf; 840 u64 last_tsf;
@@ -886,6 +883,7 @@ struct iwl3945_priv {
886 struct work_struct report_work; 883 struct work_struct report_work;
887 struct work_struct request_scan; 884 struct work_struct request_scan;
888 struct work_struct beacon_update; 885 struct work_struct beacon_update;
886 struct work_struct set_monitor;
889 887
890 struct tasklet_struct irq_tasklet; 888 struct tasklet_struct irq_tasklet;
891 889
@@ -924,11 +922,6 @@ static inline int is_channel_valid(const struct iwl3945_channel_info *ch_info)
924 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 922 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
925} 923}
926 924
927static inline int is_channel_narrow(const struct iwl3945_channel_info *ch_info)
928{
929 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
930}
931
932static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info) 925static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info)
933{ 926{
934 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 927 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 1a66b508a8ea..fc118335b60f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -62,13 +62,18 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-4965-commands.h for uCode API definitions. 65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-4965.h for driver implementation definitions. 66 * Use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl_4965_hw_h__ 69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__ 70#define __iwl_4965_hw_h__
71 71
72#include "iwl-fh.h"
73
74/* EERPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
72/* 77/*
73 * uCode queue management definitions ... 78 * uCode queue management definitions ...
74 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4. 79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
@@ -93,11 +98,16 @@
93#define IWL_RSSI_OFFSET 44 98#define IWL_RSSI_OFFSET 44
94 99
95 100
96#include "iwl-4965-commands.h" 101#include "iwl-commands.h"
97 102
98#define PCI_LINK_CTRL 0x0F0 103/* PCI registers */
104#define PCI_LINK_CTRL 0x0F0 /* 1 byte */
99#define PCI_POWER_SOURCE 0x0C8 105#define PCI_POWER_SOURCE 0x0C8
100#define PCI_REG_WUM8 0x0E8 106#define PCI_REG_WUM8 0x0E8
107
108/* PCI register values */
109#define PCI_LINK_VAL_L0S_EN 0x01
110#define PCI_LINK_VAL_L1_EN 0x02
101#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 111#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
102 112
103#define TFD_QUEUE_SIZE_MAX (256) 113#define TFD_QUEUE_SIZE_MAX (256)
@@ -131,10 +141,8 @@
131#define RTC_DATA_LOWER_BOUND (0x800000) 141#define RTC_DATA_LOWER_BOUND (0x800000)
132#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) 142#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
133 143
134#define IWL49_RTC_INST_SIZE \ 144#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
135 (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 145#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
136#define IWL49_RTC_DATA_SIZE \
137 (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
138 146
139#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE 147#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE
140#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE 148#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
@@ -785,579 +793,13 @@ enum {
785 793
786/********************* END TXPOWER *****************************************/ 794/********************* END TXPOWER *****************************************/
787 795
788/****************************/
789/* Flow Handler Definitions */
790/****************************/
791
792/**
793 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
794 * Addresses are offsets from device's PCI hardware base address.
795 */
796#define FH_MEM_LOWER_BOUND (0x1000)
797#define FH_MEM_UPPER_BOUND (0x1EF0)
798
799/**
800 * Keep-Warm (KW) buffer base address.
801 *
802 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
803 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
804 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
805 * from going into a power-savings mode that would cause higher DRAM latency,
806 * and possible data over/under-runs, before all Tx/Rx is complete.
807 *
808 * Driver loads IWL_FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
809 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
810 * automatically invokes keep-warm accesses when normal accesses might not
811 * be sufficient to maintain fast DRAM response.
812 *
813 * Bit fields:
814 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
815 */
816#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
817
818
819/**
820 * TFD Circular Buffers Base (CBBC) addresses
821 *
822 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
823 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
824 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
825 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
826 * aligned (address bits 0-7 must be 0).
827 *
828 * Bit fields in each pointer register:
829 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
830 */
831#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
832#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
833
834/* Find TFD CB base pointer for given queue (range 0-15). */
835#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
836
837
838/**
839 * Rx SRAM Control and Status Registers (RSCSR)
840 *
841 * These registers provide handshake between driver and 4965 for the Rx queue
842 * (this queue handles *all* command responses, notifications, Rx data, etc.
843 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
844 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
845 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
846 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
847 * mapping between RBDs and RBs.
848 *
849 * Driver must allocate host DRAM memory for the following, and set the
850 * physical address of each into 4965 registers:
851 *
852 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
853 * entries (although any power of 2, up to 4096, is selectable by driver).
854 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
855 * (typically 4K, although 8K or 16K are also selectable by driver).
856 * Driver sets up RB size and number of RBDs in the CB via Rx config
857 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
858 *
859 * Bit fields within one RBD:
860 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
861 *
862 * Driver sets physical address [35:8] of base of RBD circular buffer
863 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
864 *
865 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
866 * (RBs) have been filled, via a "write pointer", actually the index of
867 * the RB's corresponding RBD within the circular buffer. Driver sets
868 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
869 *
870 * Bit fields in lower dword of Rx status buffer (upper dword not used
871 * by driver; see struct iwl4965_shared, val0):
872 * 31-12: Not used by driver
873 * 11- 0: Index of last filled Rx buffer descriptor
874 * (4965 writes, driver reads this value)
875 *
876 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
877 * enter pointers to these RBs into contiguous RBD circular buffer entries,
878 * and update the 4965's "write" index register, FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
879 *
880 * This "write" index corresponds to the *next* RBD that the driver will make
881 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
882 * the circular buffer. This value should initially be 0 (before preparing any
883 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
884 * wrap back to 0 at the end of the circular buffer (but don't wrap before
885 * "read" index has advanced past 1! See below).
886 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
887 *
888 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
889 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
890 * to tell the driver the index of the latest filled RBD. The driver must
891 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
892 *
893 * The driver must also internally keep track of a third index, which is the
894 * next RBD to process. When receiving an Rx interrupt, driver should process
895 * all filled but unprocessed RBs up to, but not including, the RB
896 * corresponding to the "read" index. For example, if "read" index becomes "1",
897 * driver may process the RB pointed to by RBD 0. Depending on volume of
898 * traffic, there may be many RBs to process.
899 *
900 * If read index == write index, 4965 thinks there is no room to put new data.
901 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
902 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
903 * and "read" indexes; that is, make sure that there are no more than 254
904 * buffers waiting to be filled.
905 */
906#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
907#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
908#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
909
910/**
911 * Physical base address of 8-byte Rx Status buffer.
912 * Bit fields:
913 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
914 */
915#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
916
917/**
918 * Physical base address of Rx Buffer Descriptor Circular Buffer.
919 * Bit fields:
920 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
921 */
922#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
923
924/**
925 * Rx write pointer (index, really!).
926 * Bit fields:
927 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
928 * NOTE: For 256-entry circular buffer, use only bits [7:0].
929 */
930#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
931#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
932
933
934/**
935 * Rx Config/Status Registers (RCSR)
936 * Rx Config Reg for channel 0 (only channel used)
937 *
938 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
939 * normal operation (see bit fields).
940 *
941 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
942 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
943 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
944 *
945 * Bit fields:
946 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
947 * '10' operate normally
948 * 29-24: reserved
949 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
950 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
951 * 19-18: reserved
952 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
953 * '10' 12K, '11' 16K.
954 * 15-14: reserved
955 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
956 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
957 * typical value 0x10 (about 1/2 msec)
958 * 3- 0: reserved
959 */
960#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
961#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
962#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
963
964#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
965
966#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */
967#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */
968#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */
969#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */
970#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */
971#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */
972
973#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
974#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
975#define RX_RB_TIMEOUT (0x10)
976
977#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
978#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
979#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
980
981#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
982#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
983#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
984#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
985
986#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
987#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
988
989
990/**
991 * Rx Shared Status Registers (RSSR)
992 *
993 * After stopping Rx DMA channel (writing 0 to FH_MEM_RCSR_CHNL0_CONFIG_REG),
994 * driver must poll FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
995 *
996 * Bit fields:
997 * 24: 1 = Channel 0 is idle
998 *
999 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV contain
1000 * default values that should not be altered by the driver.
1001 */
1002#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
1003#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1004
1005#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
1006#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
1007#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008)
1008
1009#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1010
1011
1012/**
1013 * Transmit DMA Channel Control/Status Registers (TCSR)
1014 *
1015 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1016 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1017 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1018 *
1019 * To use a Tx DMA channel, driver must initialize its
1020 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1021 *
1022 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1023 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
1024 *
1025 * All other bits should be 0.
1026 *
1027 * Bit fields:
1028 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1029 * '10' operate normally
1030 * 29- 4: Reserved, set to "0"
1031 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1032 * 2- 0: Reserved, set to "0"
1033 */
1034#define IWL_FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1035#define IWL_FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
1036
1037/* Find Control/Status reg for given Tx DMA/FIFO channel */
1038#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1039 (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
1040
1041#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
1042#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
1043
1044#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1045#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1046#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1047
1048/**
1049 * Tx Shared Status Registers (TSSR)
1050 *
1051 * After stopping Tx DMA channel (writing 0 to
1052 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1053 * IWL_FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
1054 * (channel's buffers empty | no pending requests).
1055 *
1056 * Bit fields:
1057 * 31-24: 1 = Channel buffers empty (channel 7:0)
1058 * 23-16: 1 = No pending requests (channel 7:0)
1059 */
1060#define IWL_FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
1061#define IWL_FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
1062
1063#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010)
1064
1065#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \
1066 ((1 << (_chnl)) << 24)
1067#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \
1068 ((1 << (_chnl)) << 16)
1069
1070#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
1071 (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
1072 IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
1073
1074
1075/********************* START TX SCHEDULER *************************************/
1076
1077/**
1078 * 4965 Tx Scheduler
1079 *
1080 * The Tx Scheduler selects the next frame to be transmitted, chosing TFDs
1081 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
1082 * host DRAM. It steers each frame's Tx command (which contains the frame
1083 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
1084 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
1085 * but one DMA channel may take input from several queues.
1086 *
1087 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
1088 *
1089 * 0 -- EDCA BK (background) frames, lowest priority
1090 * 1 -- EDCA BE (best effort) frames, normal priority
1091 * 2 -- EDCA VI (video) frames, higher priority
1092 * 3 -- EDCA VO (voice) and management frames, highest priority
1093 * 4 -- Commands (e.g. RXON, etc.)
1094 * 5 -- HCCA short frames
1095 * 6 -- HCCA long frames
1096 * 7 -- not used by driver (device-internal only)
1097 *
1098 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
1099 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
1100 * support 11n aggregation via EDCA DMA channels.
1101 *
1102 * The driver sets up each queue to work in one of two modes:
1103 *
1104 * 1) Scheduler-Ack, in which the scheduler automatically supports a
1105 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
1106 * contains TFDs for a unique combination of Recipient Address (RA)
1107 * and Traffic Identifier (TID), that is, traffic of a given
1108 * Quality-Of-Service (QOS) priority, destined for a single station.
1109 *
1110 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
1111 * each frame within the BA window, including whether it's been transmitted,
1112 * and whether it's been acknowledged by the receiving station. The device
1113 * automatically processes block-acks received from the receiving STA,
1114 * and reschedules un-acked frames to be retransmitted (successful
1115 * Tx completion may end up being out-of-order).
1116 *
1117 * The driver must maintain the queue's Byte Count table in host DRAM
1118 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
1119 * This mode does not support fragmentation.
1120 *
1121 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
1122 * The device may automatically retry Tx, but will retry only one frame
1123 * at a time, until receiving ACK from receiving station, or reaching
1124 * retry limit and giving up.
1125 *
1126 * The command queue (#4) must use this mode!
1127 * This mode does not require use of the Byte Count table in host DRAM.
1128 *
1129 * Driver controls scheduler operation via 3 means:
1130 * 1) Scheduler registers
1131 * 2) Shared scheduler database in internal 4965 SRAM
1132 * 3) Shared data in host DRAM
1133 *
1134 * Initialization:
1135 *
1136 * When loading, driver should allocate memory for:
1137 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
1138 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
1139 * (1024 bytes for each queue).
1140 *
1141 * After receiving "Alive" response from uCode, driver must initialize
1142 * the scheduler (especially for queue #4, the command queue, otherwise
1143 * the driver can't issue commands!):
1144 */
1145
1146/**
1147 * Max Tx window size is the max number of contiguous TFDs that the scheduler
1148 * can keep track of at one time when creating block-ack chains of frames.
1149 * Note that "64" matches the number of ack bits in a block-ack packet.
1150 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
1151 * SCD_CONTEXT_QUEUE_OFFSET(x) values.
1152 */
1153#define SCD_WIN_SIZE 64
1154#define SCD_FRAME_LIMIT 64
1155
1156/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
1157#define SCD_START_OFFSET 0xa02c00
1158
1159/*
1160 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
1161 * Value is valid only after "Alive" response from uCode.
1162 */
1163#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0)
1164
1165/*
1166 * Driver may need to update queue-empty bits after changing queue's
1167 * write and read pointers (indexes) during (re-)initialization (i.e. when
1168 * scheduler is not tracking what's happening).
1169 * Bit fields:
1170 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
1171 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
1172 * NOTE: This register is not used by Linux driver.
1173 */
1174#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4)
1175
1176/*
1177 * Physical base address of array of byte count (BC) circular buffers (CBs).
1178 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
1179 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
1180 * Others are spaced by 1024 bytes.
1181 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
1182 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
1183 * Bit fields:
1184 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
1185 */
1186#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10)
1187
1188/*
1189 * Enables any/all Tx DMA/FIFO channels.
1190 * Scheduler generates requests for only the active channels.
1191 * Set this to 0xff to enable all 8 channels (normal usage).
1192 * Bit fields:
1193 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
1194 */
1195#define SCD_TXFACT (SCD_START_OFFSET + 0x1c)
1196
1197/* Mask to enable contiguous Tx DMA/FIFO channels between "lo" and "hi". */
1198#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \
1199 ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
1200
1201/*
1202 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
1203 * Initialized and updated by driver as new TFDs are added to queue.
1204 * NOTE: If using Block Ack, index must correspond to frame's
1205 * Start Sequence Number; index = (SSN & 0xff)
1206 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
1207 */
1208#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4)
1209
1210/*
1211 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
1212 * For FIFO mode, index indicates next frame to transmit.
1213 * For Scheduler-ACK mode, index indicates first frame in Tx window.
1214 * Initialized by driver, updated by scheduler.
1215 */
1216#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4)
1217
1218/*
1219 * Select which queues work in chain mode (1) vs. not (0).
1220 * Use chain mode to build chains of aggregated frames.
1221 * Bit fields:
1222 * 31-16: Reserved
1223 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
1224 * NOTE: If driver sets up queue for chain mode, it should also set up
1225 * Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
1226 */
1227#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0)
1228
1229/*
1230 * Select which queues interrupt driver when scheduler increments
1231 * a queue's read pointer (index).
1232 * Bit fields:
1233 * 31-16: Reserved
1234 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
1235 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
1236 * from Rx queue to read Tx command responses and update Tx queues.
1237 */
1238#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4)
1239
1240/*
1241 * Queue search status registers. One for each queue.
1242 * Sets up queue mode and assigns queue to Tx DMA channel.
1243 * Bit fields:
1244 * 19-10: Write mask/enable bits for bits 0-9
1245 * 9: Driver should init to "0"
1246 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
1247 * Driver should init to "1" for aggregation mode, or "0" otherwise.
1248 * 7-6: Driver should init to "0"
1249 * 5: Window Size Left; indicates whether scheduler can request
1250 * another TFD, based on window size, etc. Driver should init
1251 * this bit to "1" for aggregation mode, or "0" for non-agg.
1252 * 4-1: Tx FIFO to use (range 0-7).
1253 * 0: Queue is active (1), not active (0).
1254 * Other bits should be written as "0"
1255 *
1256 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
1257 * via SCD_QUEUECHAIN_SEL.
1258 */
1259#define SCD_QUEUE_STATUS_BITS(x) (SCD_START_OFFSET + 0x104 + (x) * 4)
1260
1261/* Bit field positions */
1262#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
1263#define SCD_QUEUE_STTS_REG_POS_TXF (1)
1264#define SCD_QUEUE_STTS_REG_POS_WSL (5)
1265#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
1266
1267/* Write masks */
1268#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
1269#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
1270
1271/**
1272 * 4965 internal SRAM structures for scheduler, shared with driver ...
1273 *
1274 * Driver should clear and initialize the following areas after receiving
1275 * "Alive" response from 4965 uCode, i.e. after initial
1276 * uCode load, or after a uCode load done for error recovery:
1277 *
1278 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
1279 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
1280 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
1281 *
1282 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
1283 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
1284 * All OFFSET values must be added to this base address.
1285 */
1286
1287/*
1288 * Queue context. One 8-byte entry for each of 16 queues.
1289 *
1290 * Driver should clear this entire area (size 0x80) to 0 after receiving
1291 * "Alive" notification from uCode. Additionally, driver should init
1292 * each queue's entry as follows:
1293 *
1294 * LS Dword bit fields:
1295 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
1296 *
1297 * MS Dword bit fields:
1298 * 16-22: Frame limit. Driver should init to 10 (0xa).
1299 *
1300 * Driver should init all other bits to 0.
1301 *
1302 * Init must be done after driver receives "Alive" response from 4965 uCode,
1303 * and when setting up queue for aggregation.
1304 */
1305#define SCD_CONTEXT_DATA_OFFSET 0x380
1306#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
1307
1308#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
1309#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
1310#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
1311#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
1312
1313/*
1314 * Tx Status Bitmap
1315 *
1316 * Driver should clear this entire area (size 0x100) to 0 after receiving
1317 * "Alive" notification from uCode. Area is used only by device itself;
1318 * no other support (besides clearing) is required from driver.
1319 */
1320#define SCD_TX_STTS_BITMAP_OFFSET 0x400
1321
1322/*
1323 * RAxTID to queue translation mapping.
1324 *
1325 * When queue is in Scheduler-ACK mode, frames placed in that queue must be
1326 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
1327 * one QOS priority level destined for one station (for this wireless link,
1328 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
1329 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
1330 * mode, the device ignores the mapping value.
1331 *
1332 * Bit fields, for each 16-bit map:
1333 * 15-9: Reserved, set to 0
1334 * 8-4: Index into device's station table for recipient station
1335 * 3-0: Traffic ID (tid), range 0-15
1336 *
1337 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
1338 * "Alive" notification from uCode. To update a 16-bit map value, driver
1339 * must read a dword-aligned value from device SRAM, replace the 16-bit map
1340 * value of interest, and write the dword value back into device SRAM.
1341 */
1342#define SCD_TRANSLATE_TBL_OFFSET 0x500
1343
1344/* Find translation table dword to read/write for given queue */
1345#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
1346 ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
1347
1348#define SCD_TXFIFO_POS_TID (0)
1349#define SCD_TXFIFO_POS_RA (4)
1350#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
1351
1352/*********************** END TX SCHEDULER *************************************/
1353
1354static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) 796static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
1355{ 797{
1356 return le32_to_cpu(rate_n_flags) & 0xFF; 798 return le32_to_cpu(rate_n_flags) & 0xFF;
1357} 799}
1358static inline u16 iwl4965_hw_get_rate_n_flags(__le32 rate_n_flags) 800static inline u32 iwl4965_hw_get_rate_n_flags(__le32 rate_n_flags)
1359{ 801{
1360 return le32_to_cpu(rate_n_flags) & 0xFFFF; 802 return le32_to_cpu(rate_n_flags) & 0x1FFFF;
1361} 803}
1362static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags) 804static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1363{ 805{
@@ -1385,14 +827,14 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1385 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array 827 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
1386 * in DRAM containing 256 Transmit Frame Descriptors (TFDs). 828 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
1387 */ 829 */
1388#define IWL4965_MAX_WIN_SIZE 64 830#define IWL49_MAX_WIN_SIZE 64
1389#define IWL4965_QUEUE_SIZE 256 831#define IWL49_QUEUE_SIZE 256
1390#define IWL4965_NUM_FIFOS 7 832#define IWL49_NUM_FIFOS 7
1391#define IWL4965_MAX_NUM_QUEUES 16 833#define IWL49_CMD_FIFO_NUM 4
1392 834#define IWL49_NUM_QUEUES 16
1393 835
1394/** 836/**
1395 * struct iwl4965_tfd_frame_data 837 * struct iwl_tfd_frame_data
1396 * 838 *
1397 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame. 839 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame.
1398 * Each buffer must be on dword boundary. 840 * Each buffer must be on dword boundary.
@@ -1411,7 +853,7 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1411 * 31-20: Tx buffer 2 length (bytes) 853 * 31-20: Tx buffer 2 length (bytes)
1412 * 19- 0: Tx buffer 2 address bits [35:16] 854 * 19- 0: Tx buffer 2 address bits [35:16]
1413 */ 855 */
1414struct iwl4965_tfd_frame_data { 856struct iwl_tfd_frame_data {
1415 __le32 tb1_addr; 857 __le32 tb1_addr;
1416 858
1417 __le32 val1; 859 __le32 val1;
@@ -1441,7 +883,7 @@ struct iwl4965_tfd_frame_data {
1441 883
1442 884
1443/** 885/**
1444 * struct iwl4965_tfd_frame 886 * struct iwl_tfd_frame
1445 * 887 *
1446 * Transmit Frame Descriptor (TFD) 888 * Transmit Frame Descriptor (TFD)
1447 * 889 *
@@ -1468,7 +910,7 @@ struct iwl4965_tfd_frame_data {
1468 * 910 *
1469 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. 911 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
1470 */ 912 */
1471struct iwl4965_tfd_frame { 913struct iwl_tfd_frame {
1472 __le32 val0; 914 __le32 val0;
1473 /* __le32 rsvd1:24; */ 915 /* __le32 rsvd1:24; */
1474 /* __le32 num_tbs:5; */ 916 /* __le32 num_tbs:5; */
@@ -1477,7 +919,7 @@ struct iwl4965_tfd_frame {
1477#define IWL_num_tbs_SYM val0 919#define IWL_num_tbs_SYM val0
1478 /* __le32 rsvd2:1; */ 920 /* __le32 rsvd2:1; */
1479 /* __le32 padding:2; */ 921 /* __le32 padding:2; */
1480 struct iwl4965_tfd_frame_data pa[10]; 922 struct iwl_tfd_frame_data pa[10];
1481 __le32 reserved; 923 __le32 reserved;
1482} __attribute__ ((packed)); 924} __attribute__ ((packed));
1483 925
@@ -1520,10 +962,10 @@ struct iwl4965_queue_byte_cnt_entry {
1520 * 4965 assumes tables are separated by 1024 bytes. 962 * 4965 assumes tables are separated by 1024 bytes.
1521 */ 963 */
1522struct iwl4965_sched_queue_byte_cnt_tbl { 964struct iwl4965_sched_queue_byte_cnt_tbl {
1523 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE + 965 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL49_QUEUE_SIZE +
1524 IWL4965_MAX_WIN_SIZE]; 966 IWL49_MAX_WIN_SIZE];
1525 u8 dont_care[1024 - 967 u8 dont_care[1024 -
1526 (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) * 968 (IWL49_QUEUE_SIZE + IWL49_MAX_WIN_SIZE) *
1527 sizeof(__le16)]; 969 sizeof(__le16)];
1528} __attribute__ ((packed)); 970} __attribute__ ((packed));
1529 971
@@ -1553,7 +995,7 @@ struct iwl4965_sched_queue_byte_cnt_tbl {
1553 */ 995 */
1554struct iwl4965_shared { 996struct iwl4965_shared {
1555 struct iwl4965_sched_queue_byte_cnt_tbl 997 struct iwl4965_sched_queue_byte_cnt_tbl
1556 queues_byte_cnt_tbls[IWL4965_MAX_NUM_QUEUES]; 998 queues_byte_cnt_tbls[IWL49_NUM_QUEUES];
1557 __le32 rb_closed; 999 __le32 rb_closed;
1558 1000
1559 /* __le32 rb_closed_stts_rb_num:12; */ 1001 /* __le32 rb_closed_stts_rb_num:12; */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 3a7f0cb710ec..d8f2b4d33fd9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -28,7 +28,6 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/wireless.h> 29#include <linux/wireless.h>
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <net/ieee80211.h>
32 31
33#include <linux/netdevice.h> 32#include <linux/netdevice.h>
34#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
@@ -38,13 +37,13 @@
38 37
39#include "../net/mac80211/rate.h" 38#include "../net/mac80211/rate.h"
40 39
41#include "iwl-4965.h" 40#include "iwl-dev.h"
42#include "iwl-core.h" 41#include "iwl-core.h"
43#include "iwl-helpers.h" 42#include "iwl-helpers.h"
44 43
45#define RS_NAME "iwl-4965-rs" 44#define RS_NAME "iwl-4965-rs"
46 45
47#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1 46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
48#define IWL_NUMBER_TRY 1 47#define IWL_NUMBER_TRY 1
49#define IWL_HT_NUMBER_TRY 3 48#define IWL_HT_NUMBER_TRY 3
50 49
@@ -65,9 +64,16 @@ static u8 rs_ht_to_legacy[] = {
65 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX 64 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
66}; 65};
67 66
68struct iwl4965_rate { 67static const u8 ant_toggle_lookup[] = {
69 u32 rate_n_flags; 68 /*ANT_NONE -> */ ANT_NONE,
70} __attribute__ ((packed)); 69 /*ANT_A -> */ ANT_B,
70 /*ANT_B -> */ ANT_C,
71 /*ANT_AB -> */ ANT_BC,
72 /*ANT_C -> */ ANT_A,
73 /*ANT_AC -> */ ANT_AB,
74 /*ANT_BC -> */ ANT_AC,
75 /*ANT_ABC -> */ ANT_ABC,
76};
71 77
72/** 78/**
73 * struct iwl4965_rate_scale_data -- tx success history for one rate 79 * struct iwl4965_rate_scale_data -- tx success history for one rate
@@ -88,14 +94,14 @@ struct iwl4965_rate_scale_data {
88 * one for "active", and one for "search". 94 * one for "active", and one for "search".
89 */ 95 */
90struct iwl4965_scale_tbl_info { 96struct iwl4965_scale_tbl_info {
91 enum iwl4965_table_type lq_type; 97 enum iwl_table_type lq_type;
92 enum iwl4965_antenna_type antenna_type; 98 u8 ant_type;
93 u8 is_SGI; /* 1 = short guard interval */ 99 u8 is_SGI; /* 1 = short guard interval */
94 u8 is_fat; /* 1 = 40 MHz channel width */ 100 u8 is_fat; /* 1 = 40 MHz channel width */
95 u8 is_dup; /* 1 = duplicated data streams */ 101 u8 is_dup; /* 1 = duplicated data streams */
96 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 102 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
97 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 103 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
98 struct iwl4965_rate current_rate; /* rate_n_flags, uCode API format */ 104 u32 current_rate; /* rate_n_flags, uCode API format */
99 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 105 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
100}; 106};
101 107
@@ -136,8 +142,6 @@ struct iwl4965_lq_sta {
136 u32 flush_timer; /* time staying in mode before new search */ 142 u32 flush_timer; /* time staying in mode before new search */
137 143
138 u8 action_counter; /* # mode-switch actions tried */ 144 u8 action_counter; /* # mode-switch actions tried */
139 u8 antenna;
140 u8 valid_antenna;
141 u8 is_green; 145 u8 is_green;
142 u8 is_dup; 146 u8 is_dup;
143 enum ieee80211_band band; 147 enum ieee80211_band band;
@@ -145,9 +149,10 @@ struct iwl4965_lq_sta {
145 149
146 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 150 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
147 u32 supp_rates; 151 u32 supp_rates;
148 u16 active_rate; 152 u16 active_legacy_rate;
149 u16 active_siso_rate; 153 u16 active_siso_rate;
150 u16 active_mimo_rate; 154 u16 active_mimo2_rate;
155 u16 active_mimo3_rate;
151 u16 active_rate_basic; 156 u16 active_rate_basic;
152 157
153 struct iwl_link_quality_cmd lq; 158 struct iwl_link_quality_cmd lq;
@@ -162,7 +167,7 @@ struct iwl4965_lq_sta {
162#ifdef CONFIG_IWL4965_HT 167#ifdef CONFIG_IWL4965_HT
163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 168 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
164#endif 169#endif
165 struct iwl4965_rate dbg_fixed; 170 u32 dbg_fixed_rate;
166#endif 171#endif
167 struct iwl_priv *drv; 172 struct iwl_priv *drv;
168}; 173};
@@ -171,17 +176,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
171 struct net_device *dev, 176 struct net_device *dev,
172 struct ieee80211_hdr *hdr, 177 struct ieee80211_hdr *hdr,
173 struct sta_info *sta); 178 struct sta_info *sta);
174static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 179static void rs_fill_link_cmd(const struct iwl_priv *priv,
175 struct iwl4965_rate *tx_mcs, 180 struct iwl4965_lq_sta *lq_sta,
176 struct iwl_link_quality_cmd *tbl); 181 u32 rate_n_flags);
177 182
178 183
179#ifdef CONFIG_MAC80211_DEBUGFS 184#ifdef CONFIG_MAC80211_DEBUGFS
180static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 185static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
181 struct iwl4965_rate *mcs, int index); 186 u32 *rate_n_flags, int index);
182#else 187#else
183static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 188static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
184 struct iwl4965_rate *mcs, int index) 189 u32 *rate_n_flags, int index)
185{} 190{}
186#endif 191#endif
187 192
@@ -190,6 +195,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
190 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 195 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
191 * "G" is the only table that supports CCK (the first 4 rates). 196 * "G" is the only table that supports CCK (the first 4 rates).
192 */ 197 */
198/*FIXME:RS:need to separate tables for MIMO2/MIMO3*/
193static s32 expected_tpt_A[IWL_RATE_COUNT] = { 199static s32 expected_tpt_A[IWL_RATE_COUNT] = {
194 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 200 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
195}; 201};
@@ -230,7 +236,7 @@ static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
230 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 236 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
231}; 237};
232 238
233static inline u8 iwl4965_rate_get_rate(u32 rate_n_flags) 239static inline u8 rs_extract_rate(u32 rate_n_flags)
234{ 240{
235 return (u8)(rate_n_flags & 0xFF); 241 return (u8)(rate_n_flags & 0xFF);
236} 242}
@@ -245,6 +251,11 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
245 window->stamp = 0; 251 window->stamp = 0;
246} 252}
247 253
254static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
255{
256 return ((ant_type & valid_antenna) == ant_type);
257}
258
248#ifdef CONFIG_IWL4965_HT 259#ifdef CONFIG_IWL4965_HT
249/* 260/*
250 * removes the old data from the statistics. All data that is older than 261 * removes the old data from the statistics. All data that is older than
@@ -271,14 +282,20 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
271 * increment traffic load value for tid and also remove 282 * increment traffic load value for tid and also remove
272 * any old values if passed the certain time period 283 * any old values if passed the certain time period
273 */ 284 */
274static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid) 285static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
286 struct ieee80211_hdr *hdr)
275{ 287{
276 u32 curr_time = jiffies_to_msecs(jiffies); 288 u32 curr_time = jiffies_to_msecs(jiffies);
277 u32 time_diff; 289 u32 time_diff;
278 s32 index; 290 s32 index;
279 struct iwl4965_traffic_load *tl = NULL; 291 struct iwl4965_traffic_load *tl = NULL;
292 u16 fc = le16_to_cpu(hdr->frame_control);
293 u8 tid;
280 294
281 if (tid >= TID_MAX_LOAD_COUNT) 295 if (ieee80211_is_qos_data(fc)) {
296 u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
297 tid = qc[0] & 0xf;
298 } else
282 return; 299 return;
283 300
284 tl = &lq_data->load[tid]; 301 tl = &lq_data->load[tid];
@@ -349,9 +366,9 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
349 unsigned long state; 366 unsigned long state;
350 DECLARE_MAC_BUF(mac); 367 DECLARE_MAC_BUF(mac);
351 368
352 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 369 spin_lock_bh(&sta->lock);
353 state = sta->ampdu_mlme.tid_state_tx[tid]; 370 state = sta->ampdu_mlme.tid_state_tx[tid];
354 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 371 spin_unlock_bh(&sta->lock);
355 372
356 if (state == HT_AGG_STATE_IDLE && 373 if (state == HT_AGG_STATE_IDLE &&
357 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 374 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
@@ -374,6 +391,13 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
374 391
375#endif /* CONFIG_IWLWIFI_HT */ 392#endif /* CONFIG_IWLWIFI_HT */
376 393
394static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
395{
396 return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
398 !!(rate_n_flags & RATE_MCS_ANT_C_MSK));
399}
400
377/** 401/**
378 * rs_collect_tx_data - Update the success/failure sliding window 402 * rs_collect_tx_data - Update the success/failure sliding window
379 * 403 *
@@ -386,8 +410,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
386 int successes) 410 int successes)
387{ 411{
388 struct iwl4965_rate_scale_data *window = NULL; 412 struct iwl4965_rate_scale_data *window = NULL;
389 u64 mask; 413 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
390 u8 win_size = IWL_RATE_MAX_WINDOW;
391 s32 fail_count; 414 s32 fail_count;
392 415
393 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 416 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
@@ -405,14 +428,14 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
405 * we keep these bitmaps!). 428 * we keep these bitmaps!).
406 */ 429 */
407 while (retries > 0) { 430 while (retries > 0) {
408 if (window->counter >= win_size) { 431 if (window->counter >= IWL_RATE_MAX_WINDOW) {
409 window->counter = win_size - 1; 432
410 mask = 1; 433 /* remove earliest */
411 mask = (mask << (win_size - 1)); 434 window->counter = IWL_RATE_MAX_WINDOW - 1;
435
412 if (window->data & mask) { 436 if (window->data & mask) {
413 window->data &= ~mask; 437 window->data &= ~mask;
414 window->success_counter = 438 window->success_counter--;
415 window->success_counter - 1;
416 } 439 }
417 } 440 }
418 441
@@ -422,10 +445,9 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
422 /* Shift bitmap by one frame (throw away oldest history), 445 /* Shift bitmap by one frame (throw away oldest history),
423 * OR in "1", and increment "success" if this 446 * OR in "1", and increment "success" if this
424 * frame was successful. */ 447 * frame was successful. */
425 mask = window->data; 448 window->data <<= 1;;
426 window->data = (mask << 1);
427 if (successes > 0) { 449 if (successes > 0) {
428 window->success_counter = window->success_counter + 1; 450 window->success_counter++;
429 window->data |= 0x1; 451 window->data |= 0x1;
430 successes--; 452 successes--;
431 } 453 }
@@ -458,170 +480,166 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
458/* 480/*
459 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 481 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
460 */ 482 */
461static void rs_mcs_from_tbl(struct iwl4965_rate *mcs_rate, 483/* FIXME:RS:remove this function and put the flags statically in the table */
462 struct iwl4965_scale_tbl_info *tbl, 484static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
463 int index, u8 use_green) 485 int index, u8 use_green)
464{ 486{
487 u32 rate_n_flags = 0;
488
465 if (is_legacy(tbl->lq_type)) { 489 if (is_legacy(tbl->lq_type)) {
466 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp; 490 rate_n_flags = iwl_rates[index].plcp;
467 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) 491 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
468 mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK; 492 rate_n_flags |= RATE_MCS_CCK_MSK;
469 493
470 } else if (is_siso(tbl->lq_type)) { 494 } else if (is_Ht(tbl->lq_type)) {
471 if (index > IWL_LAST_OFDM_RATE) 495 if (index > IWL_LAST_OFDM_RATE) {
496 IWL_ERROR("invalid HT rate index %d\n", index);
472 index = IWL_LAST_OFDM_RATE; 497 index = IWL_LAST_OFDM_RATE;
473 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_siso | 498 }
474 RATE_MCS_HT_MSK; 499 rate_n_flags = RATE_MCS_HT_MSK;
475 } else {
476 if (index > IWL_LAST_OFDM_RATE)
477 index = IWL_LAST_OFDM_RATE;
478 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_mimo |
479 RATE_MCS_HT_MSK;
480 }
481
482 switch (tbl->antenna_type) {
483 case ANT_BOTH:
484 mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK;
485 break;
486 case ANT_MAIN:
487 mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK;
488 break;
489 case ANT_AUX:
490 mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK;
491 break;
492 case ANT_NONE:
493 break;
494 }
495
496 if (is_legacy(tbl->lq_type))
497 return;
498 500
499 if (tbl->is_fat) { 501 if (is_siso(tbl->lq_type))
500 if (tbl->is_dup) 502 rate_n_flags |= iwl_rates[index].plcp_siso;
501 mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK; 503 else if (is_mimo2(tbl->lq_type))
504 rate_n_flags |= iwl_rates[index].plcp_mimo2;
502 else 505 else
503 mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK; 506 rate_n_flags |= iwl_rates[index].plcp_mimo3;
507 } else {
508 IWL_ERROR("Invalid tbl->lq_type %d\n", tbl->lq_type);
504 } 509 }
505 if (tbl->is_SGI)
506 mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK;
507 510
508 if (use_green) { 511 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
509 mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK; 512 RATE_MCS_ANT_ABC_MSK);
510 if (is_siso(tbl->lq_type)) 513
511 mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK; 514 if (is_Ht(tbl->lq_type)) {
515 if (tbl->is_fat) {
516 if (tbl->is_dup)
517 rate_n_flags |= RATE_MCS_DUP_MSK;
518 else
519 rate_n_flags |= RATE_MCS_FAT_MSK;
520 }
521 if (tbl->is_SGI)
522 rate_n_flags |= RATE_MCS_SGI_MSK;
523
524 if (use_green) {
525 rate_n_flags |= RATE_MCS_GF_MSK;
526 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
527 rate_n_flags &= ~RATE_MCS_SGI_MSK;
528 IWL_ERROR("GF was set with SGI:SISO\n");
529 }
530 }
512 } 531 }
532 return rate_n_flags;
513} 533}
514 534
515/* 535/*
516 * Interpret uCode API's rate_n_flags format, 536 * Interpret uCode API's rate_n_flags format,
517 * fill "search" or "active" tx mode table. 537 * fill "search" or "active" tx mode table.
518 */ 538 */
519static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate, 539static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
520 enum ieee80211_band band, 540 enum ieee80211_band band,
521 struct iwl4965_scale_tbl_info *tbl, 541 struct iwl4965_scale_tbl_info *tbl,
522 int *rate_idx) 542 int *rate_idx)
523{ 543{
524 int index; 544 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
525 u32 ant_msk; 545 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
546 u8 mcs;
526 547
527 index = iwl4965_hwrate_to_plcp_idx(mcs_rate->rate_n_flags); 548 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
528 549
529 if (index == IWL_RATE_INVALID) { 550 if (*rate_idx == IWL_RATE_INVALID) {
530 *rate_idx = -1; 551 *rate_idx = -1;
531 return -EINVAL; 552 return -EINVAL;
532 } 553 }
533 tbl->is_SGI = 0; /* default legacy setup */ 554 tbl->is_SGI = 0; /* default legacy setup */
534 tbl->is_fat = 0; 555 tbl->is_fat = 0;
535 tbl->is_dup = 0; 556 tbl->is_dup = 0;
536 tbl->antenna_type = ANT_BOTH; /* default MIMO setup */ 557 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
558 tbl->lq_type = LQ_NONE;
537 559
538 /* legacy rate format */ 560 /* legacy rate format */
539 if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) { 561 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
540 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); 562 if (num_of_ant == 1) {
541
542 if (ant_msk == RATE_MCS_ANT_AB_MSK)
543 tbl->lq_type = LQ_NONE;
544 else {
545
546 if (band == IEEE80211_BAND_5GHZ) 563 if (band == IEEE80211_BAND_5GHZ)
547 tbl->lq_type = LQ_A; 564 tbl->lq_type = LQ_A;
548 else 565 else
549 tbl->lq_type = LQ_G; 566 tbl->lq_type = LQ_G;
550
551 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
552 tbl->antenna_type = ANT_MAIN;
553 else
554 tbl->antenna_type = ANT_AUX;
555 } 567 }
556 *rate_idx = index; 568 /* HT rate format */
557
558 /* HT rate format, SISO (might be 20 MHz legacy or 40 MHz fat width) */
559 } else if (iwl4965_rate_get_rate(mcs_rate->rate_n_flags)
560 <= IWL_RATE_SISO_60M_PLCP) {
561 tbl->lq_type = LQ_SISO;
562
563 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK);
564 if (ant_msk == RATE_MCS_ANT_AB_MSK)
565 tbl->lq_type = LQ_NONE;
566 else {
567 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
568 tbl->antenna_type = ANT_MAIN;
569 else
570 tbl->antenna_type = ANT_AUX;
571 }
572 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
573 tbl->is_SGI = 1;
574
575 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) ||
576 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK))
577 tbl->is_fat = 1;
578
579 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)
580 tbl->is_dup = 1;
581
582 *rate_idx = index;
583
584 /* HT rate format, MIMO (might be 20 MHz legacy or 40 MHz fat width) */
585 } else { 569 } else {
586 tbl->lq_type = LQ_MIMO; 570 if (rate_n_flags & RATE_MCS_SGI_MSK)
587 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
588 tbl->is_SGI = 1; 571 tbl->is_SGI = 1;
589 572
590 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || 573 if ((rate_n_flags & RATE_MCS_FAT_MSK) ||
591 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) 574 (rate_n_flags & RATE_MCS_DUP_MSK))
592 tbl->is_fat = 1; 575 tbl->is_fat = 1;
593 576
594 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) 577 if (rate_n_flags & RATE_MCS_DUP_MSK)
595 tbl->is_dup = 1; 578 tbl->is_dup = 1;
596 *rate_idx = index; 579
580 mcs = rs_extract_rate(rate_n_flags);
581
582 /* SISO */
583 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
584 if (num_of_ant == 1)
585 tbl->lq_type = LQ_SISO; /*else NONE*/
586 /* MIMO2 */
587 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
588 if (num_of_ant == 2)
589 tbl->lq_type = LQ_MIMO2;
590 /* MIMO3 */
591 } else {
592 if (num_of_ant == 3)
593 tbl->lq_type = LQ_MIMO3;
594 }
597 } 595 }
598 return 0; 596 return 0;
599} 597}
600 598
601static inline void rs_toggle_antenna(struct iwl4965_rate *new_rate, 599/* switch to another antenna/antennas and return 1 */
602 struct iwl4965_scale_tbl_info *tbl) 600/* if no other valid antenna found, return 0 */
601static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
602 struct iwl4965_scale_tbl_info *tbl)
603{ 603{
604 if (tbl->antenna_type == ANT_AUX) { 604 u8 new_ant_type;
605 tbl->antenna_type = ANT_MAIN; 605
606 new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK; 606 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
607 new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; 607 return 0;
608 } else { 608
609 tbl->antenna_type = ANT_AUX; 609 if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
610 new_rate->rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 610 return 0;
611 new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; 611
612 } 612 new_ant_type = ant_toggle_lookup[tbl->ant_type];
613
614 while ((new_ant_type != tbl->ant_type) &&
615 !rs_is_valid_ant(valid_ant, new_ant_type))
616 new_ant_type = ant_toggle_lookup[new_ant_type];
617
618 if (new_ant_type == tbl->ant_type)
619 return 0;
620
621 tbl->ant_type = new_ant_type;
622 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
623 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
624 return 1;
613} 625}
614 626
615static inline u8 rs_use_green(struct iwl_priv *priv, 627/* FIXME:RS: in 4965 we don't use greenfield at all */
616 struct ieee80211_conf *conf) 628/* FIXME:RS: don't use greenfield for now in TX */
629/* #ifdef CONFIG_IWL4965_HT */
630#if 0
631static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
617{ 632{
618#ifdef CONFIG_IWL4965_HT
619 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 633 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
620 priv->current_ht_config.is_green_field && 634 priv->current_ht_config.is_green_field &&
621 !priv->current_ht_config.non_GF_STA_present); 635 !priv->current_ht_config.non_GF_STA_present);
622#endif /* CONFIG_IWL4965_HT */ 636}
637#else
638static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
639{
623 return 0; 640 return 0;
624} 641}
642#endif /* CONFIG_IWL4965_HT */
625 643
626/** 644/**
627 * rs_get_supported_rates - get the available rates 645 * rs_get_supported_rates - get the available rates
@@ -630,27 +648,28 @@ static inline u8 rs_use_green(struct iwl_priv *priv,
630 * basic available rates. 648 * basic available rates.
631 * 649 *
632 */ 650 */
633static void rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta, 651static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta,
634 struct ieee80211_hdr *hdr, 652 struct ieee80211_hdr *hdr,
635 enum iwl4965_table_type rate_type, 653 enum iwl_table_type rate_type)
636 u16 *data_rate)
637{ 654{
638 if (is_legacy(rate_type)) 655 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
639 *data_rate = lq_sta->active_rate; 656 lq_sta->active_rate_basic)
640 else { 657 return lq_sta->active_rate_basic;
658
659 if (is_legacy(rate_type)) {
660 return lq_sta->active_legacy_rate;
661 } else {
641 if (is_siso(rate_type)) 662 if (is_siso(rate_type))
642 *data_rate = lq_sta->active_siso_rate; 663 return lq_sta->active_siso_rate;
664 else if (is_mimo2(rate_type))
665 return lq_sta->active_mimo2_rate;
643 else 666 else
644 *data_rate = lq_sta->active_mimo_rate; 667 return lq_sta->active_mimo3_rate;
645 }
646
647 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
648 lq_sta->active_rate_basic) {
649 *data_rate = lq_sta->active_rate_basic;
650 } 668 }
651} 669}
652 670
653static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type) 671static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
654{ 673{
655 u8 high = IWL_RATE_INVALID; 674 u8 high = IWL_RATE_INVALID;
656 u8 low = IWL_RATE_INVALID; 675 u8 low = IWL_RATE_INVALID;
@@ -684,7 +703,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
684 703
685 low = index; 704 low = index;
686 while (low != IWL_RATE_INVALID) { 705 while (low != IWL_RATE_INVALID) {
687 low = iwl4965_rates[low].prev_rs; 706 low = iwl_rates[low].prev_rs;
688 if (low == IWL_RATE_INVALID) 707 if (low == IWL_RATE_INVALID)
689 break; 708 break;
690 if (rate_mask & (1 << low)) 709 if (rate_mask & (1 << low))
@@ -694,7 +713,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
694 713
695 high = index; 714 high = index;
696 while (high != IWL_RATE_INVALID) { 715 while (high != IWL_RATE_INVALID) {
697 high = iwl4965_rates[high].next_rs; 716 high = iwl_rates[high].next_rs;
698 if (high == IWL_RATE_INVALID) 717 if (high == IWL_RATE_INVALID)
699 break; 718 break;
700 if (rate_mask & (1 << high)) 719 if (rate_mask & (1 << high))
@@ -705,9 +724,9 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
705 return (high << 8) | low; 724 return (high << 8) | low;
706} 725}
707 726
708static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta, 727static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
709 struct iwl4965_scale_tbl_info *tbl, u8 scale_index, 728 struct iwl4965_scale_tbl_info *tbl, u8 scale_index,
710 u8 ht_possible, struct iwl4965_rate *mcs_rate) 729 u8 ht_possible)
711{ 730{
712 s32 low; 731 s32 low;
713 u16 rate_mask; 732 u16 rate_mask;
@@ -726,15 +745,14 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
726 else 745 else
727 tbl->lq_type = LQ_G; 746 tbl->lq_type = LQ_G;
728 747
729 if ((tbl->antenna_type == ANT_BOTH) || 748 if (num_of_ant(tbl->ant_type) > 1)
730 (tbl->antenna_type == ANT_NONE)) 749 tbl->ant_type = ANT_A;/*FIXME:RS*/
731 tbl->antenna_type = ANT_MAIN;
732 750
733 tbl->is_fat = 0; 751 tbl->is_fat = 0;
734 tbl->is_SGI = 0; 752 tbl->is_SGI = 0;
735 } 753 }
736 754
737 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, &rate_mask); 755 rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
738 756
739 /* Mask with station rate restriction */ 757 /* Mask with station rate restriction */
740 if (is_legacy(tbl->lq_type)) { 758 if (is_legacy(tbl->lq_type)) {
@@ -748,25 +766,26 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
748 766
749 /* If we switched from HT to legacy, check current rate */ 767 /* If we switched from HT to legacy, check current rate */
750 if (switch_to_legacy && (rate_mask & (1 << scale_index))) { 768 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
751 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 769 low = scale_index;
752 return; 770 goto out;
753 } 771 }
754 772
755 high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type); 773 high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
774 tbl->lq_type);
756 low = high_low & 0xff; 775 low = high_low & 0xff;
757 776
758 if (low != IWL_RATE_INVALID) 777 if (low == IWL_RATE_INVALID)
759 rs_mcs_from_tbl(mcs_rate, tbl, low, is_green); 778 low = scale_index;
760 else 779
761 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 780out:
781 return rate_n_flags_from_tbl(tbl, low, is_green);
762} 782}
763 783
764/* 784/*
765 * mac80211 sends us Tx status 785 * mac80211 sends us Tx status
766 */ 786 */
767static void rs_tx_status(void *priv_rate, struct net_device *dev, 787static void rs_tx_status(void *priv_rate, struct net_device *dev,
768 struct sk_buff *skb, 788 struct sk_buff *skb)
769 struct ieee80211_tx_status *tx_resp)
770{ 789{
771 int status; 790 int status;
772 u8 retries; 791 u8 retries;
@@ -778,9 +797,10 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
778 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 797 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
779 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 798 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
780 struct ieee80211_hw *hw = local_to_hw(local); 799 struct ieee80211_hw *hw = local_to_hw(local);
800 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
781 struct iwl4965_rate_scale_data *window = NULL; 801 struct iwl4965_rate_scale_data *window = NULL;
782 struct iwl4965_rate_scale_data *search_win = NULL; 802 struct iwl4965_rate_scale_data *search_win = NULL;
783 struct iwl4965_rate tx_mcs; 803 u32 tx_rate;
784 struct iwl4965_scale_tbl_info tbl_type; 804 struct iwl4965_scale_tbl_info tbl_type;
785 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl; 805 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl;
786 u8 active_index = 0; 806 u8 active_index = 0;
@@ -793,11 +813,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
793 return; 813 return;
794 814
795 /* This packet was aggregated but doesn't carry rate scale info */ 815 /* This packet was aggregated but doesn't carry rate scale info */
796 if ((tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) && 816 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
797 !(tx_resp->flags & IEEE80211_TX_STATUS_AMPDU)) 817 !(info->flags & IEEE80211_TX_STAT_AMPDU))
798 return; 818 return;
799 819
800 retries = tx_resp->retry_count; 820 retries = info->status.retry_count;
801 821
802 if (retries > 15) 822 if (retries > 15)
803 retries = 15; 823 retries = 15;
@@ -822,15 +842,6 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
822 table = &lq_sta->lq; 842 table = &lq_sta->lq;
823 active_index = lq_sta->active_tbl; 843 active_index = lq_sta->active_tbl;
824 844
825 /* Get mac80211 antenna info */
826 lq_sta->antenna =
827 (lq_sta->valid_antenna & local->hw.conf.antenna_sel_tx);
828 if (!lq_sta->antenna)
829 lq_sta->antenna = lq_sta->valid_antenna;
830
831 /* Ignore mac80211 antenna info for now */
832 lq_sta->antenna = lq_sta->valid_antenna;
833
834 curr_tbl = &(lq_sta->lq_info[active_index]); 845 curr_tbl = &(lq_sta->lq_info[active_index]);
835 search_tbl = &(lq_sta->lq_info[(1 - active_index)]); 846 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
836 window = (struct iwl4965_rate_scale_data *) 847 window = (struct iwl4965_rate_scale_data *)
@@ -846,28 +857,26 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
846 * to check "search" mode, or a prior "search" mode after we've moved 857 * to check "search" mode, or a prior "search" mode after we've moved
847 * to a new "search" mode (which might become the new "active" mode). 858 * to a new "search" mode (which might become the new "active" mode).
848 */ 859 */
849 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[0].rate_n_flags); 860 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
850 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 861 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
851 if (priv->band == IEEE80211_BAND_5GHZ) 862 if (priv->band == IEEE80211_BAND_5GHZ)
852 rs_index -= IWL_FIRST_OFDM_RATE; 863 rs_index -= IWL_FIRST_OFDM_RATE;
853 864
854 if ((tx_resp->control.tx_rate == NULL) || 865 if ((info->tx_rate_idx < 0) ||
855 (tbl_type.is_SGI ^ 866 (tbl_type.is_SGI ^
856 !!(tx_resp->control.flags & IEEE80211_TXCTL_SHORT_GI)) || 867 !!(info->flags & IEEE80211_TX_CTL_SHORT_GI)) ||
857 (tbl_type.is_fat ^ 868 (tbl_type.is_fat ^
858 !!(tx_resp->control.flags & IEEE80211_TXCTL_40_MHZ_WIDTH)) || 869 !!(info->flags & IEEE80211_TX_CTL_40_MHZ_WIDTH)) ||
859 (tbl_type.is_dup ^ 870 (tbl_type.is_dup ^
860 !!(tx_resp->control.flags & IEEE80211_TXCTL_DUP_DATA)) || 871 !!(info->flags & IEEE80211_TX_CTL_DUP_DATA)) ||
861 (tbl_type.antenna_type ^ 872 (tbl_type.ant_type ^ info->antenna_sel_tx) ||
862 tx_resp->control.antenna_sel_tx) || 873 (!!(tx_rate & RATE_MCS_HT_MSK) ^
863 (!!(tx_mcs.rate_n_flags & RATE_MCS_HT_MSK) ^ 874 !!(info->flags & IEEE80211_TX_CTL_OFDM_HT)) ||
864 !!(tx_resp->control.flags & IEEE80211_TXCTL_OFDM_HT)) || 875 (!!(tx_rate & RATE_MCS_GF_MSK) ^
865 (!!(tx_mcs.rate_n_flags & RATE_MCS_GF_MSK) ^ 876 !!(info->flags & IEEE80211_TX_CTL_GREEN_FIELD)) ||
866 !!(tx_resp->control.flags & IEEE80211_TXCTL_GREEN_FIELD)) ||
867 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate != 877 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
868 tx_resp->control.tx_rate->bitrate)) { 878 hw->wiphy->bands[info->band]->bitrates[info->tx_rate_idx].bitrate)) {
869 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", 879 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate);
870 tx_mcs.rate_n_flags);
871 goto out; 880 goto out;
872 } 881 }
873 882
@@ -875,15 +884,14 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
875 while (retries) { 884 while (retries) {
876 /* Look up the rate and other info used for each tx attempt. 885 /* Look up the rate and other info used for each tx attempt.
877 * Each tx attempt steps one entry deeper in the rate table. */ 886 * Each tx attempt steps one entry deeper in the rate table. */
878 tx_mcs.rate_n_flags = 887 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
879 le32_to_cpu(table->rs_table[index].rate_n_flags); 888 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
880 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band,
881 &tbl_type, &rs_index); 889 &tbl_type, &rs_index);
882 890
883 /* If type matches "search" table, 891 /* If type matches "search" table,
884 * add failure to "search" history */ 892 * add failure to "search" history */
885 if ((tbl_type.lq_type == search_tbl->lq_type) && 893 if ((tbl_type.lq_type == search_tbl->lq_type) &&
886 (tbl_type.antenna_type == search_tbl->antenna_type) && 894 (tbl_type.ant_type == search_tbl->ant_type) &&
887 (tbl_type.is_SGI == search_tbl->is_SGI)) { 895 (tbl_type.is_SGI == search_tbl->is_SGI)) {
888 if (search_tbl->expected_tpt) 896 if (search_tbl->expected_tpt)
889 tpt = search_tbl->expected_tpt[rs_index]; 897 tpt = search_tbl->expected_tpt[rs_index];
@@ -894,7 +902,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
894 /* Else if type matches "current/active" table, 902 /* Else if type matches "current/active" table,
895 * add failure to "current/active" history */ 903 * add failure to "current/active" history */
896 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 904 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
897 (tbl_type.antenna_type == curr_tbl->antenna_type) && 905 (tbl_type.ant_type == curr_tbl->ant_type) &&
898 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 906 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
899 if (curr_tbl->expected_tpt) 907 if (curr_tbl->expected_tpt)
900 tpt = curr_tbl->expected_tpt[rs_index]; 908 tpt = curr_tbl->expected_tpt[rs_index];
@@ -917,44 +925,41 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
917 * if Tx was successful first try, use original rate, 925 * if Tx was successful first try, use original rate,
918 * else look up the rate that was, finally, successful. 926 * else look up the rate that was, finally, successful.
919 */ 927 */
920 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[index].rate_n_flags); 928 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
921 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 929 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
922 930
923 /* Update frame history window with "success" if Tx got ACKed ... */ 931 /* Update frame history window with "success" if Tx got ACKed ... */
924 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) 932 status = !!(info->flags & IEEE80211_TX_STAT_ACK);
925 status = 1;
926 else
927 status = 0;
928 933
929 /* If type matches "search" table, 934 /* If type matches "search" table,
930 * add final tx status to "search" history */ 935 * add final tx status to "search" history */
931 if ((tbl_type.lq_type == search_tbl->lq_type) && 936 if ((tbl_type.lq_type == search_tbl->lq_type) &&
932 (tbl_type.antenna_type == search_tbl->antenna_type) && 937 (tbl_type.ant_type == search_tbl->ant_type) &&
933 (tbl_type.is_SGI == search_tbl->is_SGI)) { 938 (tbl_type.is_SGI == search_tbl->is_SGI)) {
934 if (search_tbl->expected_tpt) 939 if (search_tbl->expected_tpt)
935 tpt = search_tbl->expected_tpt[rs_index]; 940 tpt = search_tbl->expected_tpt[rs_index];
936 else 941 else
937 tpt = 0; 942 tpt = 0;
938 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 943 if (info->flags & IEEE80211_TX_CTL_AMPDU)
939 rs_collect_tx_data(search_win, rs_index, tpt, 944 rs_collect_tx_data(search_win, rs_index, tpt,
940 tx_resp->ampdu_ack_len, 945 info->status.ampdu_ack_len,
941 tx_resp->ampdu_ack_map); 946 info->status.ampdu_ack_map);
942 else 947 else
943 rs_collect_tx_data(search_win, rs_index, tpt, 948 rs_collect_tx_data(search_win, rs_index, tpt,
944 1, status); 949 1, status);
945 /* Else if type matches "current/active" table, 950 /* Else if type matches "current/active" table,
946 * add final tx status to "current/active" history */ 951 * add final tx status to "current/active" history */
947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 952 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
948 (tbl_type.antenna_type == curr_tbl->antenna_type) && 953 (tbl_type.ant_type == curr_tbl->ant_type) &&
949 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 954 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
950 if (curr_tbl->expected_tpt) 955 if (curr_tbl->expected_tpt)
951 tpt = curr_tbl->expected_tpt[rs_index]; 956 tpt = curr_tbl->expected_tpt[rs_index];
952 else 957 else
953 tpt = 0; 958 tpt = 0;
954 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 959 if (info->flags & IEEE80211_TX_CTL_AMPDU)
955 rs_collect_tx_data(window, rs_index, tpt, 960 rs_collect_tx_data(window, rs_index, tpt,
956 tx_resp->ampdu_ack_len, 961 info->status.ampdu_ack_len,
957 tx_resp->ampdu_ack_map); 962 info->status.ampdu_ack_map);
958 else 963 else
959 rs_collect_tx_data(window, rs_index, tpt, 964 rs_collect_tx_data(window, rs_index, tpt,
960 1, status); 965 1, status);
@@ -963,10 +968,10 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
963 /* If not searching for new mode, increment success/failed counter 968 /* If not searching for new mode, increment success/failed counter
964 * ... these help determine when to start searching again */ 969 * ... these help determine when to start searching again */
965 if (lq_sta->stay_in_tbl) { 970 if (lq_sta->stay_in_tbl) {
966 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) { 971 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
967 lq_sta->total_success += tx_resp->ampdu_ack_map; 972 lq_sta->total_success += info->status.ampdu_ack_map;
968 lq_sta->total_failed += 973 lq_sta->total_failed +=
969 (tx_resp->ampdu_ack_len - tx_resp->ampdu_ack_map); 974 (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
970 } else { 975 } else {
971 if (status) 976 if (status)
972 lq_sta->total_success++; 977 lq_sta->total_success++;
@@ -982,30 +987,6 @@ out:
982 return; 987 return;
983} 988}
984 989
985static u8 rs_is_ant_connected(u8 valid_antenna,
986 enum iwl4965_antenna_type antenna_type)
987{
988 if (antenna_type == ANT_AUX)
989 return ((valid_antenna & 0x2) ? 1:0);
990 else if (antenna_type == ANT_MAIN)
991 return ((valid_antenna & 0x1) ? 1:0);
992 else if (antenna_type == ANT_BOTH)
993 return ((valid_antenna & 0x3) == 0x3);
994
995 return 1;
996}
997
998static u8 rs_is_other_ant_connected(u8 valid_antenna,
999 enum iwl4965_antenna_type antenna_type)
1000{
1001 if (antenna_type == ANT_AUX)
1002 return rs_is_ant_connected(valid_antenna, ANT_MAIN);
1003 else
1004 return rs_is_ant_connected(valid_antenna, ANT_AUX);
1005
1006 return 0;
1007}
1008
1009/* 990/*
1010 * Begin a period of staying with a selected modulation mode. 991 * Begin a period of staying with a selected modulation mode.
1011 * Set "stay_in_tbl" flag to prevent any mode switches. 992 * Set "stay_in_tbl" flag to prevent any mode switches.
@@ -1014,10 +995,10 @@ static u8 rs_is_other_ant_connected(u8 valid_antenna,
1014 * These control how long we stay using same modulation mode before 995 * These control how long we stay using same modulation mode before
1015 * searching for a new mode. 996 * searching for a new mode.
1016 */ 997 */
1017static void rs_set_stay_in_table(u8 is_legacy, 998static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1018 struct iwl4965_lq_sta *lq_sta) 999 struct iwl4965_lq_sta *lq_sta)
1019{ 1000{
1020 IWL_DEBUG_HT("we are staying in the same table\n"); 1001 IWL_DEBUG_RATE("we are staying in the same table\n");
1021 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 1002 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1022 if (is_legacy) { 1003 if (is_legacy) {
1023 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; 1004 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
@@ -1036,7 +1017,7 @@ static void rs_set_stay_in_table(u8 is_legacy,
1036/* 1017/*
1037 * Find correct throughput table for given mode of modulation 1018 * Find correct throughput table for given mode of modulation
1038 */ 1019 */
1039static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta, 1020static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1040 struct iwl4965_scale_tbl_info *tbl) 1021 struct iwl4965_scale_tbl_info *tbl)
1041{ 1022{
1042 if (is_legacy(tbl->lq_type)) { 1023 if (is_legacy(tbl->lq_type)) {
@@ -1055,7 +1036,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1055 else 1036 else
1056 tbl->expected_tpt = expected_tpt_siso20MHz; 1037 tbl->expected_tpt = expected_tpt_siso20MHz;
1057 1038
1058 } else if (is_mimo(tbl->lq_type)) { 1039 } else if (is_mimo(tbl->lq_type)) { /* FIXME:need to separate mimo2/3 */
1059 if (tbl->is_fat && !lq_sta->is_dup) 1040 if (tbl->is_fat && !lq_sta->is_dup)
1060 if (tbl->is_SGI) 1041 if (tbl->is_SGI)
1061 tbl->expected_tpt = expected_tpt_mimo40MHzSGI; 1042 tbl->expected_tpt = expected_tpt_mimo40MHzSGI;
@@ -1085,7 +1066,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1085static s32 rs_get_best_rate(struct iwl_priv *priv, 1066static s32 rs_get_best_rate(struct iwl_priv *priv,
1086 struct iwl4965_lq_sta *lq_sta, 1067 struct iwl4965_lq_sta *lq_sta,
1087 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1068 struct iwl4965_scale_tbl_info *tbl, /* "search" */
1088 u16 rate_mask, s8 index, s8 rate) 1069 u16 rate_mask, s8 index)
1089{ 1070{
1090 /* "active" values */ 1071 /* "active" values */
1091 struct iwl4965_scale_tbl_info *active_tbl = 1072 struct iwl4965_scale_tbl_info *active_tbl =
@@ -1098,11 +1079,13 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1098 1079
1099 s32 new_rate, high, low, start_hi; 1080 s32 new_rate, high, low, start_hi;
1100 u16 high_low; 1081 u16 high_low;
1082 s8 rate = index;
1101 1083
1102 new_rate = high = low = start_hi = IWL_RATE_INVALID; 1084 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1103 1085
1104 for (; ;) { 1086 for (; ;) {
1105 high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type); 1087 high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
1088 tbl->lq_type);
1106 1089
1107 low = high_low & 0xff; 1090 low = high_low & 0xff;
1108 high = (high_low >> 8) & 0xff; 1091 high = (high_low >> 8) & 0xff;
@@ -1171,21 +1154,16 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1171} 1154}
1172#endif /* CONFIG_IWL4965_HT */ 1155#endif /* CONFIG_IWL4965_HT */
1173 1156
1174static inline u8 rs_is_both_ant_supp(u8 valid_antenna)
1175{
1176 return (rs_is_ant_connected(valid_antenna, ANT_BOTH));
1177}
1178
1179/* 1157/*
1180 * Set up search table for MIMO 1158 * Set up search table for MIMO
1181 */ 1159 */
1182static int rs_switch_to_mimo(struct iwl_priv *priv, 1160#ifdef CONFIG_IWL4965_HT
1161static int rs_switch_to_mimo2(struct iwl_priv *priv,
1183 struct iwl4965_lq_sta *lq_sta, 1162 struct iwl4965_lq_sta *lq_sta,
1184 struct ieee80211_conf *conf, 1163 struct ieee80211_conf *conf,
1185 struct sta_info *sta, 1164 struct sta_info *sta,
1186 struct iwl4965_scale_tbl_info *tbl, int index) 1165 struct iwl4965_scale_tbl_info *tbl, int index)
1187{ 1166{
1188#ifdef CONFIG_IWL4965_HT
1189 u16 rate_mask; 1167 u16 rate_mask;
1190 s32 rate; 1168 s32 rate;
1191 s8 is_green = lq_sta->is_green; 1169 s8 is_green = lq_sta->is_green;
@@ -1194,26 +1172,27 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1194 !sta->ht_info.ht_supported) 1172 !sta->ht_info.ht_supported)
1195 return -1; 1173 return -1;
1196 1174
1197 IWL_DEBUG_HT("LQ: try to switch to MIMO\n");
1198 tbl->lq_type = LQ_MIMO;
1199 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type,
1200 &rate_mask);
1201
1202 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) 1175 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC)
1203 return -1; 1176 return -1;
1204 1177
1205 /* Need both Tx chains/antennas to support MIMO */ 1178 /* Need both Tx chains/antennas to support MIMO */
1206 if (!rs_is_both_ant_supp(lq_sta->antenna)) 1179 if (priv->hw_params.tx_chains_num < 2)
1207 return -1; 1180 return -1;
1208 1181
1182 IWL_DEBUG_RATE("LQ: try to switch to MIMO2\n");
1183
1184 tbl->lq_type = LQ_MIMO2;
1209 tbl->is_dup = lq_sta->is_dup; 1185 tbl->is_dup = lq_sta->is_dup;
1210 tbl->action = 0; 1186 tbl->action = 0;
1187 rate_mask = lq_sta->active_mimo2_rate;
1188
1211 if (priv->current_ht_config.supported_chan_width 1189 if (priv->current_ht_config.supported_chan_width
1212 == IWL_CHANNEL_WIDTH_40MHZ) 1190 == IWL_CHANNEL_WIDTH_40MHZ)
1213 tbl->is_fat = 1; 1191 tbl->is_fat = 1;
1214 else 1192 else
1215 tbl->is_fat = 0; 1193 tbl->is_fat = 0;
1216 1194
1195 /* FIXME: - don't toggle SGI here
1217 if (tbl->is_fat) { 1196 if (tbl->is_fat) {
1218 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1197 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1219 tbl->is_SGI = 1; 1198 tbl->is_SGI = 1;
@@ -1223,23 +1202,35 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1223 tbl->is_SGI = 1; 1202 tbl->is_SGI = 1;
1224 else 1203 else
1225 tbl->is_SGI = 0; 1204 tbl->is_SGI = 0;
1205 */
1226 1206
1227 rs_get_expected_tpt_table(lq_sta, tbl); 1207 rs_set_expected_tpt_table(lq_sta, tbl);
1228 1208
1229 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1209 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1230 1210
1231 IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask); 1211 IWL_DEBUG_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1232 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) 1212
1213 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1214 IWL_DEBUG_RATE("Can't switch with index %d rate mask %x\n",
1215 rate, rate_mask);
1233 return -1; 1216 return -1;
1234 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1217 }
1218 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1235 1219
1236 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1220 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1237 tbl->current_rate.rate_n_flags, is_green); 1221 tbl->current_rate, is_green);
1238 return 0; 1222 return 0;
1223}
1239#else 1224#else
1225static int rs_switch_to_mimo2(struct iwl_priv *priv,
1226 struct iwl4965_lq_sta *lq_sta,
1227 struct ieee80211_conf *conf,
1228 struct sta_info *sta,
1229 struct iwl4965_scale_tbl_info *tbl, int index)
1230{
1240 return -1; 1231 return -1;
1241#endif /*CONFIG_IWL4965_HT */
1242} 1232}
1233#endif /*CONFIG_IWL4965_HT */
1243 1234
1244/* 1235/*
1245 * Set up search table for SISO 1236 * Set up search table for SISO
@@ -1255,16 +1246,16 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1255 u8 is_green = lq_sta->is_green; 1246 u8 is_green = lq_sta->is_green;
1256 s32 rate; 1247 s32 rate;
1257 1248
1258 IWL_DEBUG_HT("LQ: try to switch to SISO\n");
1259 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1249 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) ||
1260 !sta->ht_info.ht_supported) 1250 !sta->ht_info.ht_supported)
1261 return -1; 1251 return -1;
1262 1252
1253 IWL_DEBUG_RATE("LQ: try to switch to SISO\n");
1254
1263 tbl->is_dup = lq_sta->is_dup; 1255 tbl->is_dup = lq_sta->is_dup;
1264 tbl->lq_type = LQ_SISO; 1256 tbl->lq_type = LQ_SISO;
1265 tbl->action = 0; 1257 tbl->action = 0;
1266 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, 1258 rate_mask = lq_sta->active_siso_rate;
1267 &rate_mask);
1268 1259
1269 if (priv->current_ht_config.supported_chan_width 1260 if (priv->current_ht_config.supported_chan_width
1270 == IWL_CHANNEL_WIDTH_40MHZ) 1261 == IWL_CHANNEL_WIDTH_40MHZ)
@@ -1272,6 +1263,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1272 else 1263 else
1273 tbl->is_fat = 0; 1264 tbl->is_fat = 0;
1274 1265
1266 /* FIXME: - don't toggle SGI here
1275 if (tbl->is_fat) { 1267 if (tbl->is_fat) {
1276 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1268 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1277 tbl->is_SGI = 1; 1269 tbl->is_SGI = 1;
@@ -1281,26 +1273,26 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1281 tbl->is_SGI = 1; 1273 tbl->is_SGI = 1;
1282 else 1274 else
1283 tbl->is_SGI = 0; 1275 tbl->is_SGI = 0;
1276 */
1284 1277
1285 if (is_green) 1278 if (is_green)
1286 tbl->is_SGI = 0; 1279 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1287 1280
1288 rs_get_expected_tpt_table(lq_sta, tbl); 1281 rs_set_expected_tpt_table(lq_sta, tbl);
1289 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1282 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1290 1283
1291 IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask); 1284 IWL_DEBUG_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1292 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { 1285 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1293 IWL_DEBUG_HT("can not switch with index %d rate mask %x\n", 1286 IWL_DEBUG_RATE("can not switch with index %d rate mask %x\n",
1294 rate, rate_mask); 1287 rate, rate_mask);
1295 return -1; 1288 return -1;
1296 } 1289 }
1297 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1290 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1298 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1291 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1299 tbl->current_rate.rate_n_flags, is_green); 1292 tbl->current_rate, is_green);
1300 return 0; 1293 return 0;
1301#else 1294#else
1302 return -1; 1295 return -1;
1303
1304#endif /*CONFIG_IWL4965_HT */ 1296#endif /*CONFIG_IWL4965_HT */
1305} 1297}
1306 1298
@@ -1313,7 +1305,6 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1313 struct sta_info *sta, 1305 struct sta_info *sta,
1314 int index) 1306 int index)
1315{ 1307{
1316 int ret = 0;
1317 struct iwl4965_scale_tbl_info *tbl = 1308 struct iwl4965_scale_tbl_info *tbl =
1318 &(lq_sta->lq_info[lq_sta->active_tbl]); 1309 &(lq_sta->lq_info[lq_sta->active_tbl]);
1319 struct iwl4965_scale_tbl_info *search_tbl = 1310 struct iwl4965_scale_tbl_info *search_tbl =
@@ -1322,41 +1313,35 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1322 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1313 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1323 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1314 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1324 u8 start_action = tbl->action; 1315 u8 start_action = tbl->action;
1316 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1317 int ret = 0;
1325 1318
1326 for (; ;) { 1319 for (; ;) {
1327 switch (tbl->action) { 1320 switch (tbl->action) {
1328 case IWL_LEGACY_SWITCH_ANTENNA: 1321 case IWL_LEGACY_SWITCH_ANTENNA:
1329 IWL_DEBUG_HT("LQ Legacy switch Antenna\n"); 1322 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
1330 1323
1331 search_tbl->lq_type = LQ_NONE;
1332 lq_sta->action_counter++; 1324 lq_sta->action_counter++;
1333 1325
1334 /* Don't change antenna if success has been great */ 1326 /* Don't change antenna if success has been great */
1335 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1327 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1336 break; 1328 break;
1337 1329
1338 /* Don't change antenna if other one is not connected */
1339 if (!rs_is_other_ant_connected(lq_sta->antenna,
1340 tbl->antenna_type))
1341 break;
1342
1343 /* Set up search table to try other antenna */ 1330 /* Set up search table to try other antenna */
1344 memcpy(search_tbl, tbl, sz); 1331 memcpy(search_tbl, tbl, sz);
1345 1332
1346 rs_toggle_antenna(&(search_tbl->current_rate), 1333 if (rs_toggle_antenna(valid_tx_ant,
1347 search_tbl); 1334 &search_tbl->current_rate, search_tbl)) {
1348 rs_get_expected_tpt_table(lq_sta, search_tbl); 1335 lq_sta->search_better_tbl = 1;
1349 lq_sta->search_better_tbl = 1; 1336 goto out;
1350 goto out; 1337 }
1351 1338 break;
1352 case IWL_LEGACY_SWITCH_SISO: 1339 case IWL_LEGACY_SWITCH_SISO:
1353 IWL_DEBUG_HT("LQ: Legacy switch to SISO\n"); 1340 IWL_DEBUG_RATE("LQ: Legacy switch to SISO\n");
1354 1341
1355 /* Set up search table to try SISO */ 1342 /* Set up search table to try SISO */
1356 memcpy(search_tbl, tbl, sz); 1343 memcpy(search_tbl, tbl, sz);
1357 search_tbl->lq_type = LQ_SISO;
1358 search_tbl->is_SGI = 0; 1344 search_tbl->is_SGI = 0;
1359 search_tbl->is_fat = 0;
1360 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1345 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1361 search_tbl, index); 1346 search_tbl, index);
1362 if (!ret) { 1347 if (!ret) {
@@ -1366,16 +1351,15 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1366 } 1351 }
1367 1352
1368 break; 1353 break;
1369 case IWL_LEGACY_SWITCH_MIMO: 1354 case IWL_LEGACY_SWITCH_MIMO2:
1370 IWL_DEBUG_HT("LQ: Legacy switch MIMO\n"); 1355 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
1371 1356
1372 /* Set up search table to try MIMO */ 1357 /* Set up search table to try MIMO */
1373 memcpy(search_tbl, tbl, sz); 1358 memcpy(search_tbl, tbl, sz);
1374 search_tbl->lq_type = LQ_MIMO;
1375 search_tbl->is_SGI = 0; 1359 search_tbl->is_SGI = 0;
1376 search_tbl->is_fat = 0; 1360 search_tbl->ant_type = ANT_AB;/*FIXME:RS*/
1377 search_tbl->antenna_type = ANT_BOTH; 1361 /*FIXME:RS:need to check ant validity*/
1378 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta, 1362 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1379 search_tbl, index); 1363 search_tbl, index);
1380 if (!ret) { 1364 if (!ret) {
1381 lq_sta->search_better_tbl = 1; 1365 lq_sta->search_better_tbl = 1;
@@ -1385,7 +1369,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1385 break; 1369 break;
1386 } 1370 }
1387 tbl->action++; 1371 tbl->action++;
1388 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1372 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1389 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1373 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1390 1374
1391 if (tbl->action == start_action) 1375 if (tbl->action == start_action)
@@ -1396,7 +1380,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1396 1380
1397 out: 1381 out:
1398 tbl->action++; 1382 tbl->action++;
1399 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1383 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1400 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1384 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1401 return 0; 1385 return 0;
1402 1386
@@ -1411,7 +1395,6 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1411 struct sta_info *sta, 1395 struct sta_info *sta,
1412 int index) 1396 int index)
1413{ 1397{
1414 int ret;
1415 u8 is_green = lq_sta->is_green; 1398 u8 is_green = lq_sta->is_green;
1416 struct iwl4965_scale_tbl_info *tbl = 1399 struct iwl4965_scale_tbl_info *tbl =
1417 &(lq_sta->lq_info[lq_sta->active_tbl]); 1400 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1421,35 +1404,30 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1421 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1404 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1422 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1405 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1423 u8 start_action = tbl->action; 1406 u8 start_action = tbl->action;
1407 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1408 int ret;
1424 1409
1425 for (;;) { 1410 for (;;) {
1426 lq_sta->action_counter++; 1411 lq_sta->action_counter++;
1427 switch (tbl->action) { 1412 switch (tbl->action) {
1428 case IWL_SISO_SWITCH_ANTENNA: 1413 case IWL_SISO_SWITCH_ANTENNA:
1429 IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n"); 1414 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
1430 search_tbl->lq_type = LQ_NONE;
1431 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1415 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1432 break; 1416 break;
1433 if (!rs_is_other_ant_connected(lq_sta->antenna,
1434 tbl->antenna_type))
1435 break;
1436 1417
1437 memcpy(search_tbl, tbl, sz); 1418 memcpy(search_tbl, tbl, sz);
1438 search_tbl->action = IWL_SISO_SWITCH_MIMO; 1419 if (rs_toggle_antenna(valid_tx_ant,
1439 rs_toggle_antenna(&(search_tbl->current_rate), 1420 &search_tbl->current_rate, search_tbl)) {
1440 search_tbl); 1421 lq_sta->search_better_tbl = 1;
1441 lq_sta->search_better_tbl = 1; 1422 goto out;
1442 1423 }
1443 goto out; 1424 break;
1444 1425 case IWL_SISO_SWITCH_MIMO2:
1445 case IWL_SISO_SWITCH_MIMO: 1426 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
1446 IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n");
1447 memcpy(search_tbl, tbl, sz); 1427 memcpy(search_tbl, tbl, sz);
1448 search_tbl->lq_type = LQ_MIMO;
1449 search_tbl->is_SGI = 0; 1428 search_tbl->is_SGI = 0;
1450 search_tbl->is_fat = 0; 1429 search_tbl->ant_type = ANT_AB; /*FIXME:RS*/
1451 search_tbl->antenna_type = ANT_BOTH; 1430 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1452 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta,
1453 search_tbl, index); 1431 search_tbl, index);
1454 if (!ret) { 1432 if (!ret) {
1455 lq_sta->search_better_tbl = 1; 1433 lq_sta->search_better_tbl = 1;
@@ -1457,29 +1435,34 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1457 } 1435 }
1458 break; 1436 break;
1459 case IWL_SISO_SWITCH_GI: 1437 case IWL_SISO_SWITCH_GI:
1460 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); 1438 if (!tbl->is_fat &&
1439 !(priv->current_ht_config.sgf &
1440 HT_SHORT_GI_20MHZ))
1441 break;
1442 if (tbl->is_fat &&
1443 !(priv->current_ht_config.sgf &
1444 HT_SHORT_GI_40MHZ))
1445 break;
1446
1447 IWL_DEBUG_RATE("LQ: SISO toggle SGI/NGI\n");
1461 1448
1462 memcpy(search_tbl, tbl, sz); 1449 memcpy(search_tbl, tbl, sz);
1463 search_tbl->action = 0; 1450 if (is_green) {
1464 if (search_tbl->is_SGI) 1451 if (!tbl->is_SGI)
1465 search_tbl->is_SGI = 0; 1452 break;
1466 else if (!is_green) 1453 else
1467 search_tbl->is_SGI = 1; 1454 IWL_ERROR("SGI was set in GF+SISO\n");
1468 else 1455 }
1469 break; 1456 search_tbl->is_SGI = !tbl->is_SGI;
1470 lq_sta->search_better_tbl = 1; 1457 rs_set_expected_tpt_table(lq_sta, search_tbl);
1471 if ((tbl->lq_type == LQ_SISO) && 1458 if (tbl->is_SGI) {
1472 (tbl->is_SGI)) {
1473 s32 tpt = lq_sta->last_tpt / 100; 1459 s32 tpt = lq_sta->last_tpt / 100;
1474 if (((!tbl->is_fat) && 1460 if (tpt >= search_tbl->expected_tpt[index])
1475 (tpt >= expected_tpt_siso20MHz[index])) || 1461 break;
1476 ((tbl->is_fat) &&
1477 (tpt >= expected_tpt_siso40MHz[index])))
1478 lq_sta->search_better_tbl = 0;
1479 } 1462 }
1480 rs_get_expected_tpt_table(lq_sta, search_tbl); 1463 search_tbl->current_rate = rate_n_flags_from_tbl(
1481 rs_mcs_from_tbl(&search_tbl->current_rate, 1464 search_tbl, index, is_green);
1482 search_tbl, index, is_green); 1465 lq_sta->search_better_tbl = 1;
1483 goto out; 1466 goto out;
1484 } 1467 }
1485 tbl->action++; 1468 tbl->action++;
@@ -1507,7 +1490,6 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1507 struct sta_info *sta, 1490 struct sta_info *sta,
1508 int index) 1491 int index)
1509{ 1492{
1510 int ret;
1511 s8 is_green = lq_sta->is_green; 1493 s8 is_green = lq_sta->is_green;
1512 struct iwl4965_scale_tbl_info *tbl = 1494 struct iwl4965_scale_tbl_info *tbl =
1513 &(lq_sta->lq_info[lq_sta->active_tbl]); 1495 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1516,24 +1498,24 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1516 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1498 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1517 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1499 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1518 u8 start_action = tbl->action; 1500 u8 start_action = tbl->action;
1501 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
1502 int ret;
1519 1503
1520 for (;;) { 1504 for (;;) {
1521 lq_sta->action_counter++; 1505 lq_sta->action_counter++;
1522 switch (tbl->action) { 1506 switch (tbl->action) {
1523 case IWL_MIMO_SWITCH_ANTENNA_A: 1507 case IWL_MIMO_SWITCH_ANTENNA_A:
1524 case IWL_MIMO_SWITCH_ANTENNA_B: 1508 case IWL_MIMO_SWITCH_ANTENNA_B:
1525 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); 1509 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
1526
1527 1510
1528 /* Set up new search table for SISO */ 1511 /* Set up new search table for SISO */
1529 memcpy(search_tbl, tbl, sz); 1512 memcpy(search_tbl, tbl, sz);
1530 search_tbl->lq_type = LQ_SISO; 1513
1531 search_tbl->is_SGI = 0; 1514 /*FIXME:RS:need to check ant validity + C*/
1532 search_tbl->is_fat = 0;
1533 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A) 1515 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1534 search_tbl->antenna_type = ANT_MAIN; 1516 search_tbl->ant_type = ANT_A;
1535 else 1517 else
1536 search_tbl->antenna_type = ANT_AUX; 1518 search_tbl->ant_type = ANT_B;
1537 1519
1538 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1520 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1539 search_tbl, index); 1521 search_tbl, index);
@@ -1544,37 +1526,35 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1544 break; 1526 break;
1545 1527
1546 case IWL_MIMO_SWITCH_GI: 1528 case IWL_MIMO_SWITCH_GI:
1547 IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n"); 1529 if (!tbl->is_fat &&
1530 !(priv->current_ht_config.sgf &
1531 HT_SHORT_GI_20MHZ))
1532 break;
1533 if (tbl->is_fat &&
1534 !(priv->current_ht_config.sgf &
1535 HT_SHORT_GI_40MHZ))
1536 break;
1537
1538 IWL_DEBUG_RATE("LQ: MIMO toggle SGI/NGI\n");
1548 1539
1549 /* Set up new search table for MIMO */ 1540 /* Set up new search table for MIMO */
1550 memcpy(search_tbl, tbl, sz); 1541 memcpy(search_tbl, tbl, sz);
1551 search_tbl->lq_type = LQ_MIMO; 1542 search_tbl->is_SGI = !tbl->is_SGI;
1552 search_tbl->antenna_type = ANT_BOTH; 1543 rs_set_expected_tpt_table(lq_sta, search_tbl);
1553 search_tbl->action = 0;
1554 if (search_tbl->is_SGI)
1555 search_tbl->is_SGI = 0;
1556 else
1557 search_tbl->is_SGI = 1;
1558 lq_sta->search_better_tbl = 1;
1559
1560 /* 1544 /*
1561 * If active table already uses the fastest possible 1545 * If active table already uses the fastest possible
1562 * modulation (dual stream with short guard interval), 1546 * modulation (dual stream with short guard interval),
1563 * and it's working well, there's no need to look 1547 * and it's working well, there's no need to look
1564 * for a better type of modulation! 1548 * for a better type of modulation!
1565 */ 1549 */
1566 if ((tbl->lq_type == LQ_MIMO) && 1550 if (tbl->is_SGI) {
1567 (tbl->is_SGI)) {
1568 s32 tpt = lq_sta->last_tpt / 100; 1551 s32 tpt = lq_sta->last_tpt / 100;
1569 if (((!tbl->is_fat) && 1552 if (tpt >= search_tbl->expected_tpt[index])
1570 (tpt >= expected_tpt_mimo20MHz[index])) || 1553 break;
1571 ((tbl->is_fat) &&
1572 (tpt >= expected_tpt_mimo40MHz[index])))
1573 lq_sta->search_better_tbl = 0;
1574 } 1554 }
1575 rs_get_expected_tpt_table(lq_sta, search_tbl); 1555 search_tbl->current_rate = rate_n_flags_from_tbl(
1576 rs_mcs_from_tbl(&search_tbl->current_rate, 1556 search_tbl, index, is_green);
1577 search_tbl, index, is_green); 1557 lq_sta->search_better_tbl = 1;
1578 goto out; 1558 goto out;
1579 1559
1580 } 1560 }
@@ -1608,7 +1588,9 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1608 int i; 1588 int i;
1609 int active_tbl; 1589 int active_tbl;
1610 int flush_interval_passed = 0; 1590 int flush_interval_passed = 0;
1591 struct iwl_priv *priv;
1611 1592
1593 priv = lq_sta->drv;
1612 active_tbl = lq_sta->active_tbl; 1594 active_tbl = lq_sta->active_tbl;
1613 1595
1614 tbl = &(lq_sta->lq_info[active_tbl]); 1596 tbl = &(lq_sta->lq_info[active_tbl]);
@@ -1623,9 +1605,6 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1623 (unsigned long)(lq_sta->flush_timer + 1605 (unsigned long)(lq_sta->flush_timer +
1624 IWL_RATE_SCALE_FLUSH_INTVL)); 1606 IWL_RATE_SCALE_FLUSH_INTVL));
1625 1607
1626 /* For now, disable the elapsed time criterion */
1627 flush_interval_passed = 0;
1628
1629 /* 1608 /*
1630 * Check if we should allow search for new modulation mode. 1609 * Check if we should allow search for new modulation mode.
1631 * If many frames have failed or succeeded, or we've used 1610 * If many frames have failed or succeeded, or we've used
@@ -1638,7 +1617,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1638 (lq_sta->total_success > lq_sta->max_success_limit) || 1617 (lq_sta->total_success > lq_sta->max_success_limit) ||
1639 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 1618 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1640 && (flush_interval_passed))) { 1619 && (flush_interval_passed))) {
1641 IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n:", 1620 IWL_DEBUG_RATE("LQ: stay is expired %d %d %d\n:",
1642 lq_sta->total_failed, 1621 lq_sta->total_failed,
1643 lq_sta->total_success, 1622 lq_sta->total_success,
1644 flush_interval_passed); 1623 flush_interval_passed);
@@ -1661,7 +1640,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1661 lq_sta->table_count_limit) { 1640 lq_sta->table_count_limit) {
1662 lq_sta->table_count = 0; 1641 lq_sta->table_count = 0;
1663 1642
1664 IWL_DEBUG_HT("LQ: stay in table clear win\n"); 1643 IWL_DEBUG_RATE("LQ: stay in table clear win\n");
1665 for (i = 0; i < IWL_RATE_COUNT; i++) 1644 for (i = 0; i < IWL_RATE_COUNT; i++)
1666 rs_rate_scale_clear_window( 1645 rs_rate_scale_clear_window(
1667 &(tbl->win[i])); 1646 &(tbl->win[i]));
@@ -1704,14 +1683,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1704 struct iwl4965_lq_sta *lq_sta; 1683 struct iwl4965_lq_sta *lq_sta;
1705 struct iwl4965_scale_tbl_info *tbl, *tbl1; 1684 struct iwl4965_scale_tbl_info *tbl, *tbl1;
1706 u16 rate_scale_index_msk = 0; 1685 u16 rate_scale_index_msk = 0;
1707 struct iwl4965_rate mcs_rate; 1686 u32 rate;
1708 u8 is_green = 0; 1687 u8 is_green = 0;
1709 u8 active_tbl = 0; 1688 u8 active_tbl = 0;
1710 u8 done_search = 0; 1689 u8 done_search = 0;
1711 u16 high_low; 1690 u16 high_low;
1691 s32 sr;
1712#ifdef CONFIG_IWL4965_HT 1692#ifdef CONFIG_IWL4965_HT
1713 u8 tid = MAX_TID_COUNT; 1693 u8 tid = MAX_TID_COUNT;
1714 __le16 *qc;
1715#endif 1694#endif
1716 1695
1717 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1696 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
@@ -1734,11 +1713,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1734 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1713 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
1735 1714
1736#ifdef CONFIG_IWL4965_HT 1715#ifdef CONFIG_IWL4965_HT
1737 qc = ieee80211_get_qos_ctrl(hdr); 1716 rs_tl_add_packet(lq_sta, hdr);
1738 if (qc) {
1739 tid = (u8)(le16_to_cpu(*qc) & 0xf);
1740 rs_tl_add_packet(lq_sta, tid);
1741 }
1742#endif 1717#endif
1743 /* 1718 /*
1744 * Select rate-scale / modulation-mode table to work with in 1719 * Select rate-scale / modulation-mode table to work with in
@@ -1760,8 +1735,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1760 tbl->lq_type); 1735 tbl->lq_type);
1761 1736
1762 /* rates available for this association, and for modulation mode */ 1737 /* rates available for this association, and for modulation mode */
1763 rs_get_supported_rates(lq_sta, hdr, tbl->lq_type, 1738 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1764 &rate_mask);
1765 1739
1766 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); 1740 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask);
1767 1741
@@ -1781,27 +1755,16 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1781 if (!rate_scale_index_msk) 1755 if (!rate_scale_index_msk)
1782 rate_scale_index_msk = rate_mask; 1756 rate_scale_index_msk = rate_mask;
1783 1757
1784 /* If current rate is no longer supported on current association, 1758 if (!((1 << index) & rate_scale_index_msk)) {
1785 * or user changed preferences for rates, find a new supported rate. */ 1759 IWL_ERROR("Current Rate is not valid\n");
1786 if (index < 0 || !((1 << index) & rate_scale_index_msk)) { 1760 return;
1787 index = IWL_INVALID_VALUE;
1788 update_lq = 1;
1789
1790 /* get the highest available rate */
1791 for (i = 0; i <= IWL_RATE_COUNT; i++) {
1792 if ((1 << i) & rate_scale_index_msk)
1793 index = i;
1794 }
1795
1796 if (index == IWL_INVALID_VALUE) {
1797 IWL_WARNING("Can not find a suitable rate\n");
1798 return;
1799 }
1800 } 1761 }
1801 1762
1802 /* Get expected throughput table and history window for current rate */ 1763 /* Get expected throughput table and history window for current rate */
1803 if (!tbl->expected_tpt) 1764 if (!tbl->expected_tpt) {
1804 rs_get_expected_tpt_table(lq_sta, tbl); 1765 IWL_ERROR("tbl->expected_tpt is NULL\n");
1766 return;
1767 }
1805 1768
1806 window = &(tbl->win[index]); 1769 window = &(tbl->win[index]);
1807 1770
@@ -1813,10 +1776,9 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1813 * in current association (use new rate found above). 1776 * in current association (use new rate found above).
1814 */ 1777 */
1815 fail_count = window->counter - window->success_counter; 1778 fail_count = window->counter - window->success_counter;
1816 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && 1779 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1817 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) 1780 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1818 || (tbl->expected_tpt == NULL)) { 1781 IWL_DEBUG_RATE("LQ: still below TH. succ=%d total=%d "
1819 IWL_DEBUG_RATE("LQ: still below TH succ %d total %d "
1820 "for index %d\n", 1782 "for index %d\n",
1821 window->success_counter, window->counter, index); 1783 window->success_counter, window->counter, index);
1822 1784
@@ -1827,44 +1789,51 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1827 * or search for a new one? */ 1789 * or search for a new one? */
1828 rs_stay_in_table(lq_sta); 1790 rs_stay_in_table(lq_sta);
1829 1791
1830 /* Set up new rate table in uCode, if needed */
1831 if (update_lq) {
1832 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1833 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
1834 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1835 }
1836 goto out; 1792 goto out;
1837 1793
1838 /* Else we have enough samples; calculate estimate of 1794 /* Else we have enough samples; calculate estimate of
1839 * actual average throughput */ 1795 * actual average throughput */
1840 } else 1796 } else {
1841 window->average_tpt = ((window->success_ratio * 1797 /*FIXME:RS remove this else if we don't get this error*/
1798 if (window->average_tpt != ((window->success_ratio *
1799 tbl->expected_tpt[index] + 64) / 128)) {
1800 IWL_ERROR("expected_tpt should have been calculated"
1801 " by now\n");
1802 window->average_tpt = ((window->success_ratio *
1842 tbl->expected_tpt[index] + 64) / 128); 1803 tbl->expected_tpt[index] + 64) / 128);
1804 }
1805 }
1843 1806
1844 /* If we are searching for better modulation mode, check success. */ 1807 /* If we are searching for better modulation mode, check success. */
1845 if (lq_sta->search_better_tbl) { 1808 if (lq_sta->search_better_tbl) {
1846 int success_limit = IWL_RATE_SCALE_SWITCH;
1847 1809
1848 /* If good success, continue using the "search" mode; 1810 /* If good success, continue using the "search" mode;
1849 * no need to send new link quality command, since we're 1811 * no need to send new link quality command, since we're
1850 * continuing to use the setup that we've been trying. */ 1812 * continuing to use the setup that we've been trying. */
1851 if ((window->success_ratio > success_limit) || 1813 if (window->average_tpt > lq_sta->last_tpt) {
1852 (window->average_tpt > lq_sta->last_tpt)) { 1814
1853 if (!is_legacy(tbl->lq_type)) { 1815 IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE "
1854 IWL_DEBUG_HT("LQ: we are switching to HT" 1816 "suc=%d cur-tpt=%d old-tpt=%d\n",
1855 " rate suc %d current tpt %d" 1817 window->success_ratio,
1856 " old tpt %d\n", 1818 window->average_tpt,
1857 window->success_ratio, 1819 lq_sta->last_tpt);
1858 window->average_tpt, 1820
1859 lq_sta->last_tpt); 1821 if (!is_legacy(tbl->lq_type))
1860 lq_sta->enable_counter = 1; 1822 lq_sta->enable_counter = 1;
1861 } 1823
1862 /* Swap tables; "search" becomes "active" */ 1824 /* Swap tables; "search" becomes "active" */
1863 lq_sta->active_tbl = active_tbl; 1825 lq_sta->active_tbl = active_tbl;
1864 current_tpt = window->average_tpt; 1826 current_tpt = window->average_tpt;
1865 1827
1866 /* Else poor success; go back to mode in "active" table */ 1828 /* Else poor success; go back to mode in "active" table */
1867 } else { 1829 } else {
1830
1831 IWL_DEBUG_RATE("LQ: GOING BACK TO THE OLD TABLE "
1832 "suc=%d cur-tpt=%d old-tpt=%d\n",
1833 window->success_ratio,
1834 window->average_tpt,
1835 lq_sta->last_tpt);
1836
1868 /* Nullify "search" table */ 1837 /* Nullify "search" table */
1869 tbl->lq_type = LQ_NONE; 1838 tbl->lq_type = LQ_NONE;
1870 1839
@@ -1874,12 +1843,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1874 1843
1875 /* Revert to "active" rate and throughput info */ 1844 /* Revert to "active" rate and throughput info */
1876 index = iwl4965_hwrate_to_plcp_idx( 1845 index = iwl4965_hwrate_to_plcp_idx(
1877 tbl->current_rate.rate_n_flags); 1846 tbl->current_rate);
1878 current_tpt = lq_sta->last_tpt; 1847 current_tpt = lq_sta->last_tpt;
1879 1848
1880 /* Need to set up a new rate table in uCode */ 1849 /* Need to set up a new rate table in uCode */
1881 update_lq = 1; 1850 update_lq = 1;
1882 IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n");
1883 } 1851 }
1884 1852
1885 /* Either way, we've made a decision; modulation mode 1853 /* Either way, we've made a decision; modulation mode
@@ -1891,11 +1859,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1891 1859
1892 /* (Else) not in search of better modulation mode, try for better 1860 /* (Else) not in search of better modulation mode, try for better
1893 * starting rate, while staying in this mode. */ 1861 * starting rate, while staying in this mode. */
1894 high_low = rs_get_adjacent_rate(index, rate_scale_index_msk, 1862 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
1895 tbl->lq_type); 1863 tbl->lq_type);
1896 low = high_low & 0xff; 1864 low = high_low & 0xff;
1897 high = (high_low >> 8) & 0xff; 1865 high = (high_low >> 8) & 0xff;
1898 1866
1867 sr = window->success_ratio;
1868
1899 /* Collect measured throughputs for current and adjacent rates */ 1869 /* Collect measured throughputs for current and adjacent rates */
1900 current_tpt = window->average_tpt; 1870 current_tpt = window->average_tpt;
1901 if (low != IWL_RATE_INVALID) 1871 if (low != IWL_RATE_INVALID)
@@ -1903,19 +1873,22 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1903 if (high != IWL_RATE_INVALID) 1873 if (high != IWL_RATE_INVALID)
1904 high_tpt = tbl->win[high].average_tpt; 1874 high_tpt = tbl->win[high].average_tpt;
1905 1875
1906 /* Assume rate increase */ 1876 scale_action = 0;
1907 scale_action = 1;
1908 1877
1909 /* Too many failures, decrease rate */ 1878 /* Too many failures, decrease rate */
1910 if ((window->success_ratio <= IWL_RATE_DECREASE_TH) || 1879 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1911 (current_tpt == 0)) {
1912 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 1880 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
1913 scale_action = -1; 1881 scale_action = -1;
1914 1882
1915 /* No throughput measured yet for adjacent rates; try increase. */ 1883 /* No throughput measured yet for adjacent rates; try increase. */
1916 } else if ((low_tpt == IWL_INVALID_VALUE) && 1884 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1917 (high_tpt == IWL_INVALID_VALUE)) 1885 (high_tpt == IWL_INVALID_VALUE)) {
1918 scale_action = 1; 1886
1887 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
1888 scale_action = 1;
1889 else if (low != IWL_RATE_INVALID)
1890 scale_action = -1;
1891 }
1919 1892
1920 /* Both adjacent throughputs are measured, but neither one has better 1893 /* Both adjacent throughputs are measured, but neither one has better
1921 * throughput; we're using the best rate, don't change it! */ 1894 * throughput; we're using the best rate, don't change it! */
@@ -1931,9 +1904,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1931 /* Higher adjacent rate's throughput is measured */ 1904 /* Higher adjacent rate's throughput is measured */
1932 if (high_tpt != IWL_INVALID_VALUE) { 1905 if (high_tpt != IWL_INVALID_VALUE) {
1933 /* Higher rate has better throughput */ 1906 /* Higher rate has better throughput */
1934 if (high_tpt > current_tpt) 1907 if (high_tpt > current_tpt &&
1908 sr >= IWL_RATE_INCREASE_TH) {
1935 scale_action = 1; 1909 scale_action = 1;
1936 else { 1910 } else {
1937 IWL_DEBUG_RATE 1911 IWL_DEBUG_RATE
1938 ("decrease rate because of high tpt\n"); 1912 ("decrease rate because of high tpt\n");
1939 scale_action = -1; 1913 scale_action = -1;
@@ -1946,23 +1920,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1946 IWL_DEBUG_RATE 1920 IWL_DEBUG_RATE
1947 ("decrease rate because of low tpt\n"); 1921 ("decrease rate because of low tpt\n");
1948 scale_action = -1; 1922 scale_action = -1;
1949 } else 1923 } else if (sr >= IWL_RATE_INCREASE_TH) {
1950 scale_action = 1; 1924 scale_action = 1;
1925 }
1951 } 1926 }
1952 } 1927 }
1953 1928
1954 /* Sanity check; asked for decrease, but success rate or throughput 1929 /* Sanity check; asked for decrease, but success rate or throughput
1955 * has been good at old rate. Don't change it. */ 1930 * has been good at old rate. Don't change it. */
1956 if (scale_action == -1) { 1931 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
1957 if ((low != IWL_RATE_INVALID) && 1932 ((sr > IWL_RATE_HIGH_TH) ||
1958 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
1959 (current_tpt > (100 * tbl->expected_tpt[low])))) 1933 (current_tpt > (100 * tbl->expected_tpt[low]))))
1960 scale_action = 0;
1961
1962 /* Sanity check; asked for increase, but success rate has not been great
1963 * even at old rate, higher rate will be worse. Don't change it. */
1964 } else if ((scale_action == 1) &&
1965 (window->success_ratio < IWL_RATE_INCREASE_TH))
1966 scale_action = 0; 1934 scale_action = 0;
1967 1935
1968 switch (scale_action) { 1936 switch (scale_action) {
@@ -1987,15 +1955,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1987 break; 1955 break;
1988 } 1956 }
1989 1957
1990 IWL_DEBUG_HT("choose rate scale index %d action %d low %d " 1958 IWL_DEBUG_RATE("choose rate scale index %d action %d low %d "
1991 "high %d type %d\n", 1959 "high %d type %d\n",
1992 index, scale_action, low, high, tbl->lq_type); 1960 index, scale_action, low, high, tbl->lq_type);
1993 1961
1994 lq_update: 1962lq_update:
1995 /* Replace uCode's rate table for the destination station. */ 1963 /* Replace uCode's rate table for the destination station. */
1996 if (update_lq) { 1964 if (update_lq) {
1997 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); 1965 rate = rate_n_flags_from_tbl(tbl, index, is_green);
1998 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 1966 rs_fill_link_cmd(priv, lq_sta, rate);
1999 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1967 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2000 } 1968 }
2001 1969
@@ -2030,12 +1998,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2030 1998
2031 /* Use new "search" start rate */ 1999 /* Use new "search" start rate */
2032 index = iwl4965_hwrate_to_plcp_idx( 2000 index = iwl4965_hwrate_to_plcp_idx(
2033 tbl->current_rate.rate_n_flags); 2001 tbl->current_rate);
2034 2002
2035 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", 2003 IWL_DEBUG_RATE("Switch current mcs: %X index: %d\n",
2036 tbl->current_rate.rate_n_flags, index); 2004 tbl->current_rate, index);
2037 rs_fill_link_cmd(lq_sta, &tbl->current_rate, 2005 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2038 &lq_sta->lq);
2039 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2006 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2040 } 2007 }
2041 2008
@@ -2051,8 +2018,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2051#endif 2018#endif
2052 (lq_sta->action_counter >= 1)) { 2019 (lq_sta->action_counter >= 1)) {
2053 lq_sta->action_counter = 0; 2020 lq_sta->action_counter = 0;
2054 IWL_DEBUG_HT("LQ: STAY in legacy table\n"); 2021 IWL_DEBUG_RATE("LQ: STAY in legacy table\n");
2055 rs_set_stay_in_table(1, lq_sta); 2022 rs_set_stay_in_table(priv, 1, lq_sta);
2056 } 2023 }
2057 2024
2058 /* If we're in an HT mode, and all 3 mode switch actions 2025 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2064,12 +2031,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2064 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && 2031 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2065 (lq_sta->tx_agg_tid_en & (1 << tid)) && 2032 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2066 (tid != MAX_TID_COUNT)) { 2033 (tid != MAX_TID_COUNT)) {
2067 IWL_DEBUG_HT("try to aggregate tid %d\n", tid); 2034 IWL_DEBUG_RATE("try to aggregate tid %d\n", tid);
2068 rs_tl_turn_on_agg(priv, tid, lq_sta, sta); 2035 rs_tl_turn_on_agg(priv, tid, lq_sta, sta);
2069 } 2036 }
2070#endif /*CONFIG_IWL4965_HT */ 2037#endif /*CONFIG_IWL4965_HT */
2071 lq_sta->action_counter = 0; 2038 lq_sta->action_counter = 0;
2072 rs_set_stay_in_table(0, lq_sta); 2039 rs_set_stay_in_table(priv, 0, lq_sta);
2073 } 2040 }
2074 2041
2075 /* 2042 /*
@@ -2085,7 +2052,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2085 } 2052 }
2086 2053
2087out: 2054out:
2088 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); 2055 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
2089 i = index; 2056 i = index;
2090 sta->last_txrate_idx = i; 2057 sta->last_txrate_idx = i;
2091 2058
@@ -2105,13 +2072,14 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2105 struct ieee80211_conf *conf, 2072 struct ieee80211_conf *conf,
2106 struct sta_info *sta) 2073 struct sta_info *sta)
2107{ 2074{
2108 int i;
2109 struct iwl4965_lq_sta *lq_sta; 2075 struct iwl4965_lq_sta *lq_sta;
2110 struct iwl4965_scale_tbl_info *tbl; 2076 struct iwl4965_scale_tbl_info *tbl;
2111 u8 active_tbl = 0;
2112 int rate_idx; 2077 int rate_idx;
2078 int i;
2079 u32 rate;
2113 u8 use_green = rs_use_green(priv, conf); 2080 u8 use_green = rs_use_green(priv, conf);
2114 struct iwl4965_rate mcs_rate; 2081 u8 active_tbl = 0;
2082 u8 valid_tx_ant;
2115 2083
2116 if (!sta || !sta->rate_ctrl_priv) 2084 if (!sta || !sta->rate_ctrl_priv)
2117 goto out; 2085 goto out;
@@ -2123,6 +2091,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2123 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2091 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
2124 goto out; 2092 goto out;
2125 2093
2094 valid_tx_ant = priv->hw_params.valid_tx_ant;
2095
2126 if (!lq_sta->search_better_tbl) 2096 if (!lq_sta->search_better_tbl)
2127 active_tbl = lq_sta->active_tbl; 2097 active_tbl = lq_sta->active_tbl;
2128 else 2098 else
@@ -2133,22 +2103,23 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2133 if ((i < 0) || (i >= IWL_RATE_COUNT)) 2103 if ((i < 0) || (i >= IWL_RATE_COUNT))
2134 i = 0; 2104 i = 0;
2135 2105
2136 mcs_rate.rate_n_flags = iwl4965_rates[i].plcp ; 2106 /* FIXME:RS: This is also wrong in 4965 */
2137 mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK; 2107 rate = iwl_rates[i].plcp;
2138 mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 2108 rate |= RATE_MCS_ANT_B_MSK;
2109 rate &= ~RATE_MCS_ANT_A_MSK;
2139 2110
2140 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) 2111 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2141 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; 2112 rate |= RATE_MCS_CCK_MSK;
2142 2113
2143 tbl->antenna_type = ANT_AUX; 2114 tbl->ant_type = ANT_B;
2144 rs_get_tbl_info_from_mcs(&mcs_rate, priv->band, tbl, &rate_idx); 2115 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2145 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) 2116 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2146 rs_toggle_antenna(&mcs_rate, tbl); 2117 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2147 2118
2148 rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green); 2119 rate = rate_n_flags_from_tbl(tbl, rate_idx, use_green);
2149 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; 2120 tbl->current_rate = rate;
2150 rs_get_expected_tpt_table(lq_sta, tbl); 2121 rs_set_expected_tpt_table(lq_sta, tbl);
2151 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 2122 rs_fill_link_cmd(NULL, lq_sta, rate);
2152 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2123 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2153 out: 2124 out:
2154 return; 2125 return;
@@ -2180,7 +2151,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2180 fc = le16_to_cpu(hdr->frame_control); 2151 fc = le16_to_cpu(hdr->frame_control);
2181 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2152 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
2182 !sta || !sta->rate_ctrl_priv) { 2153 !sta || !sta->rate_ctrl_priv) {
2183 sel->rate = rate_lowest(local, sband, sta); 2154 sel->rate_idx = rate_lowest_index(local, sband, sta);
2184 goto out; 2155 goto out;
2185 } 2156 }
2186 2157
@@ -2189,13 +2160,13 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2189 2160
2190 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2161 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2191 !lq_sta->ibss_sta_added) { 2162 !lq_sta->ibss_sta_added) {
2192 u8 sta_id = iwl4965_hw_find_station(priv, hdr->addr1); 2163 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2193 DECLARE_MAC_BUF(mac); 2164 DECLARE_MAC_BUF(mac);
2194 2165
2195 if (sta_id == IWL_INVALID_STATION) { 2166 if (sta_id == IWL_INVALID_STATION) {
2196 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2167 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2197 print_mac(mac, hdr->addr1)); 2168 print_mac(mac, hdr->addr1));
2198 sta_id = iwl4965_add_station_flags(priv, hdr->addr1, 2169 sta_id = iwl_add_station_flags(priv, hdr->addr1,
2199 0, CMD_ASYNC, NULL); 2170 0, CMD_ASYNC, NULL);
2200 } 2171 }
2201 if ((sta_id != IWL_INVALID_STATION)) { 2172 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2210,20 +2181,24 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2210 2181
2211done: 2182done:
2212 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2183 if ((i < 0) || (i > IWL_RATE_COUNT)) {
2213 sel->rate = rate_lowest(local, sband, sta); 2184 sel->rate_idx = rate_lowest_index(local, sband, sta);
2214 goto out; 2185 goto out;
2215 } 2186 }
2216 2187
2217 sel->rate = &priv->ieee_rates[i]; 2188 if (sband->band == IEEE80211_BAND_5GHZ)
2189 i -= IWL_FIRST_OFDM_RATE;
2190 sel->rate_idx = i;
2218out: 2191out:
2219 rcu_read_unlock(); 2192 rcu_read_unlock();
2220} 2193}
2221 2194
2222static void *rs_alloc_sta(void *priv, gfp_t gfp) 2195static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2223{ 2196{
2224 struct iwl4965_lq_sta *lq_sta; 2197 struct iwl4965_lq_sta *lq_sta;
2198 struct iwl_priv *priv;
2225 int i, j; 2199 int i, j;
2226 2200
2201 priv = (struct iwl_priv *)priv_rate;
2227 IWL_DEBUG_RATE("create station rate scale window\n"); 2202 IWL_DEBUG_RATE("create station rate scale window\n");
2228 2203
2229 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp); 2204 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp);
@@ -2259,7 +2234,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2259 for (i = 0; i < IWL_RATE_COUNT; i++) 2234 for (i = 0; i < IWL_RATE_COUNT; i++)
2260 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2235 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
2261 2236
2262 IWL_DEBUG_RATE("rate scale global init\n"); 2237 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
2263 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2238 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2264 * the lowest or the highest rate.. Could consider using RSSI from 2239 * the lowest or the highest rate.. Could consider using RSSI from
2265 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2240 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -2267,17 +2242,17 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2267 2242
2268 lq_sta->ibss_sta_added = 0; 2243 lq_sta->ibss_sta_added = 0;
2269 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2244 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2270 u8 sta_id = iwl4965_hw_find_station(priv, sta->addr); 2245 u8 sta_id = iwl_find_station(priv, sta->addr);
2271 DECLARE_MAC_BUF(mac); 2246 DECLARE_MAC_BUF(mac);
2272 2247
2273 /* for IBSS the call are from tasklet */ 2248 /* for IBSS the call are from tasklet */
2274 IWL_DEBUG_HT("LQ: ADD station %s\n", 2249 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2275 print_mac(mac, sta->addr)); 2250 print_mac(mac, sta->addr));
2276 2251
2277 if (sta_id == IWL_INVALID_STATION) { 2252 if (sta_id == IWL_INVALID_STATION) {
2278 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2253 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2279 print_mac(mac, sta->addr)); 2254 print_mac(mac, sta->addr));
2280 sta_id = iwl4965_add_station_flags(priv, sta->addr, 2255 sta_id = iwl_add_station_flags(priv, sta->addr,
2281 0, CMD_ASYNC, NULL); 2256 0, CMD_ASYNC, NULL);
2282 } 2257 }
2283 if ((sta_id != IWL_INVALID_STATION)) { 2258 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2300,11 +2275,8 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2300 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2275 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2301 2276
2302 lq_sta->is_dup = 0; 2277 lq_sta->is_dup = 0;
2303 lq_sta->valid_antenna = priv->valid_antenna;
2304 lq_sta->antenna = priv->antenna;
2305 lq_sta->is_green = rs_use_green(priv, conf); 2278 lq_sta->is_green = rs_use_green(priv, conf);
2306 lq_sta->active_rate = priv->active_rate; 2279 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2307 lq_sta->active_rate &= ~(0x1000);
2308 lq_sta->active_rate_basic = priv->active_rate_basic; 2280 lq_sta->active_rate_basic = priv->active_rate_basic;
2309 lq_sta->band = priv->band; 2281 lq_sta->band = priv->band;
2310#ifdef CONFIG_IWL4965_HT 2282#ifdef CONFIG_IWL4965_HT
@@ -2312,23 +2284,37 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2312 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2284 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2313 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2285 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2314 */ 2286 */
2315 lq_sta->active_siso_rate = (priv->current_ht_config.supp_mcs_set[0] << 1); 2287 lq_sta->active_siso_rate =
2288 priv->current_ht_config.supp_mcs_set[0] << 1;
2316 lq_sta->active_siso_rate |= 2289 lq_sta->active_siso_rate |=
2317 (priv->current_ht_config.supp_mcs_set[0] & 0x1); 2290 priv->current_ht_config.supp_mcs_set[0] & 0x1;
2318 lq_sta->active_siso_rate &= ~((u16)0x2); 2291 lq_sta->active_siso_rate &= ~((u16)0x2);
2319 lq_sta->active_siso_rate = 2292 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2320 lq_sta->active_siso_rate << IWL_FIRST_OFDM_RATE;
2321 2293
2322 /* Same here */ 2294 /* Same here */
2323 lq_sta->active_mimo_rate = (priv->current_ht_config.supp_mcs_set[1] << 1); 2295 lq_sta->active_mimo2_rate =
2324 lq_sta->active_mimo_rate |= 2296 priv->current_ht_config.supp_mcs_set[1] << 1;
2325 (priv->current_ht_config.supp_mcs_set[1] & 0x1); 2297 lq_sta->active_mimo2_rate |=
2326 lq_sta->active_mimo_rate &= ~((u16)0x2); 2298 priv->current_ht_config.supp_mcs_set[1] & 0x1;
2327 lq_sta->active_mimo_rate = 2299 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2328 lq_sta->active_mimo_rate << IWL_FIRST_OFDM_RATE; 2300 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2329 IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n", 2301
2302 lq_sta->active_mimo3_rate =
2303 priv->current_ht_config.supp_mcs_set[2] << 1;
2304 lq_sta->active_mimo3_rate |=
2305 priv->current_ht_config.supp_mcs_set[2] & 0x1;
2306 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2307 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2308
2309 IWL_DEBUG_RATE("SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2330 lq_sta->active_siso_rate, 2310 lq_sta->active_siso_rate,
2331 lq_sta->active_mimo_rate); 2311 lq_sta->active_mimo2_rate,
2312 lq_sta->active_mimo3_rate);
2313
2314 /* These values will be overriden later */
2315 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
2316 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2317
2332 /* as default allow aggregation for all tids */ 2318 /* as default allow aggregation for all tids */
2333 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2319 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2334#endif /*CONFIG_IWL4965_HT*/ 2320#endif /*CONFIG_IWL4965_HT*/
@@ -2342,50 +2328,55 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2342 rs_initialize_lq(priv, conf, sta); 2328 rs_initialize_lq(priv, conf, sta);
2343} 2329}
2344 2330
2345static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 2331static void rs_fill_link_cmd(const struct iwl_priv *priv,
2346 struct iwl4965_rate *tx_mcs, 2332 struct iwl4965_lq_sta *lq_sta,
2347 struct iwl_link_quality_cmd *lq_cmd) 2333 u32 new_rate)
2348{ 2334{
2335 struct iwl4965_scale_tbl_info tbl_type;
2349 int index = 0; 2336 int index = 0;
2350 int rate_idx; 2337 int rate_idx;
2351 int repeat_rate = 0; 2338 int repeat_rate = 0;
2352 u8 ant_toggle_count = 0; 2339 u8 ant_toggle_cnt = 0;
2353 u8 use_ht_possible = 1; 2340 u8 use_ht_possible = 1;
2354 struct iwl4965_rate new_rate; 2341 u8 valid_tx_ant = 0;
2355 struct iwl4965_scale_tbl_info tbl_type = { 0 }; 2342 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2356 2343
2357 /* Override starting rate (index 0) if needed for debug purposes */ 2344 /* Override starting rate (index 0) if needed for debug purposes */
2358 rs_dbgfs_set_mcs(lq_sta, tx_mcs, index); 2345 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2359 2346
2360 /* Interpret rate_n_flags */ 2347 /* Interpret new_rate (rate_n_flags) */
2361 rs_get_tbl_info_from_mcs(tx_mcs, lq_sta->band, 2348 memset(&tbl_type, 0, sizeof(tbl_type));
2349 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2362 &tbl_type, &rate_idx); 2350 &tbl_type, &rate_idx);
2363 2351
2364 /* How many times should we repeat the initial rate? */ 2352 /* How many times should we repeat the initial rate? */
2365 if (is_legacy(tbl_type.lq_type)) { 2353 if (is_legacy(tbl_type.lq_type)) {
2366 ant_toggle_count = 1; 2354 ant_toggle_cnt = 1;
2367 repeat_rate = IWL_NUMBER_TRY; 2355 repeat_rate = IWL_NUMBER_TRY;
2368 } else 2356 } else {
2369 repeat_rate = IWL_HT_NUMBER_TRY; 2357 repeat_rate = IWL_HT_NUMBER_TRY;
2358 }
2370 2359
2371 lq_cmd->general_params.mimo_delimiter = 2360 lq_cmd->general_params.mimo_delimiter =
2372 is_mimo(tbl_type.lq_type) ? 1 : 0; 2361 is_mimo(tbl_type.lq_type) ? 1 : 0;
2373 2362
2374 /* Fill 1st table entry (index 0) */ 2363 /* Fill 1st table entry (index 0) */
2375 lq_cmd->rs_table[index].rate_n_flags = 2364 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2376 cpu_to_le32(tx_mcs->rate_n_flags);
2377 new_rate.rate_n_flags = tx_mcs->rate_n_flags;
2378 2365
2379 if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN)) 2366 if (num_of_ant(tbl_type.ant_type) == 1) {
2380 lq_cmd->general_params.single_stream_ant_msk 2367 lq_cmd->general_params.single_stream_ant_msk =
2381 = LINK_QUAL_ANT_A_MSK; 2368 tbl_type.ant_type;
2382 else 2369 } else if (num_of_ant(tbl_type.ant_type) == 2) {
2383 lq_cmd->general_params.single_stream_ant_msk 2370 lq_cmd->general_params.dual_stream_ant_msk =
2384 = LINK_QUAL_ANT_B_MSK; 2371 tbl_type.ant_type;
2372 } /* otherwise we don't modify the existing value */
2385 2373
2386 index++; 2374 index++;
2387 repeat_rate--; 2375 repeat_rate--;
2388 2376
2377 if (priv)
2378 valid_tx_ant = priv->hw_params.valid_tx_ant;
2379
2389 /* Fill rest of rate table */ 2380 /* Fill rest of rate table */
2390 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2381 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2391 /* Repeat initial/next rate. 2382 /* Repeat initial/next rate.
@@ -2393,26 +2384,25 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2393 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ 2384 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2394 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { 2385 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2395 if (is_legacy(tbl_type.lq_type)) { 2386 if (is_legacy(tbl_type.lq_type)) {
2396 if (ant_toggle_count < 2387 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2397 NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2388 ant_toggle_cnt++;
2398 ant_toggle_count++; 2389 else if (priv &&
2399 else { 2390 rs_toggle_antenna(valid_tx_ant,
2400 rs_toggle_antenna(&new_rate, &tbl_type); 2391 &new_rate, &tbl_type))
2401 ant_toggle_count = 1; 2392 ant_toggle_cnt = 1;
2402 } 2393}
2403 }
2404 2394
2405 /* Override next rate if needed for debug purposes */ 2395 /* Override next rate if needed for debug purposes */
2406 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2396 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2407 2397
2408 /* Fill next table entry */ 2398 /* Fill next table entry */
2409 lq_cmd->rs_table[index].rate_n_flags = 2399 lq_cmd->rs_table[index].rate_n_flags =
2410 cpu_to_le32(new_rate.rate_n_flags); 2400 cpu_to_le32(new_rate);
2411 repeat_rate--; 2401 repeat_rate--;
2412 index++; 2402 index++;
2413 } 2403 }
2414 2404
2415 rs_get_tbl_info_from_mcs(&new_rate, lq_sta->band, &tbl_type, 2405 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2416 &rate_idx); 2406 &rate_idx);
2417 2407
2418 /* Indicate to uCode which entries might be MIMO. 2408 /* Indicate to uCode which entries might be MIMO.
@@ -2422,20 +2412,22 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2422 lq_cmd->general_params.mimo_delimiter = index; 2412 lq_cmd->general_params.mimo_delimiter = index;
2423 2413
2424 /* Get next rate */ 2414 /* Get next rate */
2425 rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, 2415 new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
2426 use_ht_possible, &new_rate); 2416 use_ht_possible);
2427 2417
2428 /* How many times should we repeat the next rate? */ 2418 /* How many times should we repeat the next rate? */
2429 if (is_legacy(tbl_type.lq_type)) { 2419 if (is_legacy(tbl_type.lq_type)) {
2430 if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2420 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2431 ant_toggle_count++; 2421 ant_toggle_cnt++;
2432 else { 2422 else if (priv &&
2433 rs_toggle_antenna(&new_rate, &tbl_type); 2423 rs_toggle_antenna(valid_tx_ant,
2434 ant_toggle_count = 1; 2424 &new_rate, &tbl_type))
2435 } 2425 ant_toggle_cnt = 1;
2426
2436 repeat_rate = IWL_NUMBER_TRY; 2427 repeat_rate = IWL_NUMBER_TRY;
2437 } else 2428 } else {
2438 repeat_rate = IWL_HT_NUMBER_TRY; 2429 repeat_rate = IWL_HT_NUMBER_TRY;
2430 }
2439 2431
2440 /* Don't allow HT rates after next pass. 2432 /* Don't allow HT rates after next pass.
2441 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */ 2433 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
@@ -2445,14 +2437,13 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2445 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2437 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2446 2438
2447 /* Fill next table entry */ 2439 /* Fill next table entry */
2448 lq_cmd->rs_table[index].rate_n_flags = 2440 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2449 cpu_to_le32(new_rate.rate_n_flags);
2450 2441
2451 index++; 2442 index++;
2452 repeat_rate--; 2443 repeat_rate--;
2453 } 2444 }
2454 2445
2455 lq_cmd->general_params.dual_stream_ant_msk = 3; 2446 lq_cmd->agg_params.agg_frame_cnt_limit = 64;
2456 lq_cmd->agg_params.agg_dis_start_th = 3; 2447 lq_cmd->agg_params.agg_dis_start_th = 3;
2457 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); 2448 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
2458} 2449}
@@ -2478,10 +2469,12 @@ static void rs_clear(void *priv_rate)
2478 IWL_DEBUG_RATE("leave\n"); 2469 IWL_DEBUG_RATE("leave\n");
2479} 2470}
2480 2471
2481static void rs_free_sta(void *priv, void *priv_sta) 2472static void rs_free_sta(void *priv_rate, void *priv_sta)
2482{ 2473{
2483 struct iwl4965_lq_sta *lq_sta = priv_sta; 2474 struct iwl4965_lq_sta *lq_sta = priv_sta;
2475 struct iwl_priv *priv;
2484 2476
2477 priv = (struct iwl_priv *)priv_rate;
2485 IWL_DEBUG_RATE("enter\n"); 2478 IWL_DEBUG_RATE("enter\n");
2486 kfree(lq_sta); 2479 kfree(lq_sta);
2487 IWL_DEBUG_RATE("leave\n"); 2480 IWL_DEBUG_RATE("leave\n");
@@ -2495,54 +2488,56 @@ static int open_file_generic(struct inode *inode, struct file *file)
2495 return 0; 2488 return 0;
2496} 2489}
2497static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 2490static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2498 struct iwl4965_rate *mcs, int index) 2491 u32 *rate_n_flags, int index)
2499{ 2492{
2500 u32 base_rate; 2493 struct iwl_priv *priv;
2501 2494
2502 if (lq_sta->band == IEEE80211_BAND_5GHZ) 2495 priv = lq_sta->drv;
2503 base_rate = 0x800D; 2496 if (lq_sta->dbg_fixed_rate) {
2504 else 2497 if (index < 12) {
2505 base_rate = 0x820A; 2498 *rate_n_flags = lq_sta->dbg_fixed_rate;
2506 2499 } else {
2507 if (lq_sta->dbg_fixed.rate_n_flags) { 2500 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2508 if (index < 12) 2501 *rate_n_flags = 0x800D;
2509 mcs->rate_n_flags = lq_sta->dbg_fixed.rate_n_flags; 2502 else
2510 else 2503 *rate_n_flags = 0x820A;
2511 mcs->rate_n_flags = base_rate; 2504 }
2512 IWL_DEBUG_RATE("Fixed rate ON\n"); 2505 IWL_DEBUG_RATE("Fixed rate ON\n");
2513 return; 2506 } else {
2507 IWL_DEBUG_RATE("Fixed rate OFF\n");
2514 } 2508 }
2515
2516 IWL_DEBUG_RATE("Fixed rate OFF\n");
2517} 2509}
2518 2510
2519static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, 2511static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2520 const char __user *user_buf, size_t count, loff_t *ppos) 2512 const char __user *user_buf, size_t count, loff_t *ppos)
2521{ 2513{
2522 struct iwl4965_lq_sta *lq_sta = file->private_data; 2514 struct iwl4965_lq_sta *lq_sta = file->private_data;
2515 struct iwl_priv *priv;
2523 char buf[64]; 2516 char buf[64];
2524 int buf_size; 2517 int buf_size;
2525 u32 parsed_rate; 2518 u32 parsed_rate;
2526 2519
2520 priv = lq_sta->drv;
2527 memset(buf, 0, sizeof(buf)); 2521 memset(buf, 0, sizeof(buf));
2528 buf_size = min(count, sizeof(buf) - 1); 2522 buf_size = min(count, sizeof(buf) - 1);
2529 if (copy_from_user(buf, user_buf, buf_size)) 2523 if (copy_from_user(buf, user_buf, buf_size))
2530 return -EFAULT; 2524 return -EFAULT;
2531 2525
2532 if (sscanf(buf, "%x", &parsed_rate) == 1) 2526 if (sscanf(buf, "%x", &parsed_rate) == 1)
2533 lq_sta->dbg_fixed.rate_n_flags = parsed_rate; 2527 lq_sta->dbg_fixed_rate = parsed_rate;
2534 else 2528 else
2535 lq_sta->dbg_fixed.rate_n_flags = 0; 2529 lq_sta->dbg_fixed_rate = 0;
2536 2530
2537 lq_sta->active_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ 2531 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2538 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2532 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2539 lq_sta->active_mimo_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2533 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2534 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2540 2535
2541 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", 2536 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n",
2542 lq_sta->lq.sta_id, lq_sta->dbg_fixed.rate_n_flags); 2537 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2543 2538
2544 if (lq_sta->dbg_fixed.rate_n_flags) { 2539 if (lq_sta->dbg_fixed_rate) {
2545 rs_fill_link_cmd(lq_sta, &lq_sta->dbg_fixed, &lq_sta->lq); 2540 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2546 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2541 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
2547 } 2542 }
2548 2543
@@ -2561,9 +2556,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2561 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2556 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2562 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2557 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2563 lq_sta->total_failed, lq_sta->total_success, 2558 lq_sta->total_failed, lq_sta->total_success,
2564 lq_sta->active_rate); 2559 lq_sta->active_legacy_rate);
2565 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2560 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2566 lq_sta->dbg_fixed.rate_n_flags); 2561 lq_sta->dbg_fixed_rate);
2567 desc += sprintf(buff+desc, "general:" 2562 desc += sprintf(buff+desc, "general:"
2568 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", 2563 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2569 lq_sta->lq.general_params.flags, 2564 lq_sta->lq.general_params.flags,
@@ -2613,7 +2608,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2613 lq_sta->lq_info[i].is_SGI, 2608 lq_sta->lq_info[i].is_SGI,
2614 lq_sta->lq_info[i].is_fat, 2609 lq_sta->lq_info[i].is_fat,
2615 lq_sta->lq_info[i].is_dup, 2610 lq_sta->lq_info[i].is_dup,
2616 lq_sta->lq_info[i].current_rate.rate_n_flags); 2611 lq_sta->lq_info[i].current_rate);
2617 for (j = 0; j < IWL_RATE_COUNT; j++) { 2612 for (j = 0; j < IWL_RATE_COUNT; j++) {
2618 desc += sprintf(buff+desc, 2613 desc += sprintf(buff+desc,
2619 "counter=%d success=%d %%=%d\n", 2614 "counter=%d success=%d %%=%d\n",
@@ -2703,7 +2698,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2703 lq_sta = (void *)sta->rate_ctrl_priv; 2698 lq_sta = (void *)sta->rate_ctrl_priv;
2704 2699
2705 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type; 2700 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
2706 antenna = lq_sta->lq_info[lq_sta->active_tbl].antenna_type; 2701 antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
2707 2702
2708 if (is_legacy(lq_type)) 2703 if (is_legacy(lq_type))
2709 i = IWL_RATE_54M_INDEX; 2704 i = IWL_RATE_54M_INDEX;
@@ -2715,7 +2710,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2715 int active = lq_sta->active_tbl; 2710 int active = lq_sta->active_tbl;
2716 2711
2717 cnt += 2712 cnt +=
2718 sprintf(&buf[cnt], " %2dMbs: ", iwl4965_rates[i].ieee / 2); 2713 sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
2719 2714
2720 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); 2715 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2721 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) 2716 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
@@ -2726,7 +2721,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2726 samples += lq_sta->lq_info[active].win[i].counter; 2721 samples += lq_sta->lq_info[active].win[i].counter;
2727 good += lq_sta->lq_info[active].win[i].success_counter; 2722 good += lq_sta->lq_info[active].win[i].success_counter;
2728 success += lq_sta->lq_info[active].win[i].success_counter * 2723 success += lq_sta->lq_info[active].win[i].success_counter *
2729 iwl4965_rates[i].ieee; 2724 iwl_rates[i].ieee;
2730 2725
2731 if (lq_sta->lq_info[active].win[i].stamp) { 2726 if (lq_sta->lq_info[active].win[i].stamp) {
2732 int delta = 2727 int delta =
@@ -2746,10 +2741,11 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2746 i = j; 2741 i = j;
2747 } 2742 }
2748 2743
2749 /* Display the average rate of all samples taken. 2744 /*
2750 * 2745 * Display the average rate of all samples taken.
2751 * NOTE: We multiply # of samples by 2 since the IEEE measurement 2746 * NOTE: We multiply # of samples by 2 since the IEEE measurement
2752 * added from iwl4965_rates is actually 2X the rate */ 2747 * added from iwl_rates is actually 2X the rate.
2748 */
2753 if (samples) 2749 if (samples)
2754 cnt += sprintf(&buf[cnt], 2750 cnt += sprintf(&buf[cnt],
2755 "\nAverage rate is %3d.%02dMbs over last %4dms\n" 2751 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
index 866e378aa385..1dd4124227a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
@@ -27,12 +27,13 @@
27#ifndef __iwl_4965_rs_h__ 27#ifndef __iwl_4965_rs_h__
28#define __iwl_4965_rs_h__ 28#define __iwl_4965_rs_h__
29 29
30#include "iwl-4965.h" 30#include "iwl-dev.h"
31 31
32struct iwl4965_rate_info { 32struct iwl_rate_info {
33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
35 u8 plcp_mimo; /* uCode API: IWL_RATE_MIMO_6M_PLCP, etc. */ 35 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
36 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
36 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ 37 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
37 u8 prev_ieee; /* previous rate in IEEE speeds */ 38 u8 prev_ieee; /* previous rate in IEEE speeds */
38 u8 next_ieee; /* next rate in IEEE speeds */ 39 u8 next_ieee; /* next rate in IEEE speeds */
@@ -44,7 +45,7 @@ struct iwl4965_rate_info {
44 45
45/* 46/*
46 * These serve as indexes into 47 * These serve as indexes into
47 * struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 48 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
48 */ 49 */
49enum { 50enum {
50 IWL_RATE_1M_INDEX = 0, 51 IWL_RATE_1M_INDEX = 0,
@@ -60,9 +61,9 @@ enum {
60 IWL_RATE_48M_INDEX, 61 IWL_RATE_48M_INDEX,
61 IWL_RATE_54M_INDEX, 62 IWL_RATE_54M_INDEX,
62 IWL_RATE_60M_INDEX, 63 IWL_RATE_60M_INDEX,
63 IWL_RATE_COUNT, 64 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
64 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, 65 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
65 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX 66 IWL_RATE_INVALID = IWL_RATE_COUNT,
66}; 67};
67 68
68enum { 69enum {
@@ -97,11 +98,13 @@ enum {
97 IWL_RATE_36M_PLCP = 11, 98 IWL_RATE_36M_PLCP = 11,
98 IWL_RATE_48M_PLCP = 1, 99 IWL_RATE_48M_PLCP = 1,
99 IWL_RATE_54M_PLCP = 3, 100 IWL_RATE_54M_PLCP = 3,
100 IWL_RATE_60M_PLCP = 3, 101 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
101 IWL_RATE_1M_PLCP = 10, 102 IWL_RATE_1M_PLCP = 10,
102 IWL_RATE_2M_PLCP = 20, 103 IWL_RATE_2M_PLCP = 20,
103 IWL_RATE_5M_PLCP = 55, 104 IWL_RATE_5M_PLCP = 55,
104 IWL_RATE_11M_PLCP = 110, 105 IWL_RATE_11M_PLCP = 110,
106 /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
105}; 108};
106 109
107/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */ 110/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */
@@ -114,16 +117,25 @@ enum {
114 IWL_RATE_SISO_48M_PLCP = 5, 117 IWL_RATE_SISO_48M_PLCP = 5,
115 IWL_RATE_SISO_54M_PLCP = 6, 118 IWL_RATE_SISO_54M_PLCP = 6,
116 IWL_RATE_SISO_60M_PLCP = 7, 119 IWL_RATE_SISO_60M_PLCP = 7,
117 IWL_RATE_MIMO_6M_PLCP = 0x8, 120 IWL_RATE_MIMO2_6M_PLCP = 0x8,
118 IWL_RATE_MIMO_12M_PLCP = 0x9, 121 IWL_RATE_MIMO2_12M_PLCP = 0x9,
119 IWL_RATE_MIMO_18M_PLCP = 0xa, 122 IWL_RATE_MIMO2_18M_PLCP = 0xa,
120 IWL_RATE_MIMO_24M_PLCP = 0xb, 123 IWL_RATE_MIMO2_24M_PLCP = 0xb,
121 IWL_RATE_MIMO_36M_PLCP = 0xc, 124 IWL_RATE_MIMO2_36M_PLCP = 0xc,
122 IWL_RATE_MIMO_48M_PLCP = 0xd, 125 IWL_RATE_MIMO2_48M_PLCP = 0xd,
123 IWL_RATE_MIMO_54M_PLCP = 0xe, 126 IWL_RATE_MIMO2_54M_PLCP = 0xe,
124 IWL_RATE_MIMO_60M_PLCP = 0xf, 127 IWL_RATE_MIMO2_60M_PLCP = 0xf,
128 IWL_RATE_MIMO3_6M_PLCP = 0x10,
129 IWL_RATE_MIMO3_12M_PLCP = 0x11,
130 IWL_RATE_MIMO3_18M_PLCP = 0x12,
131 IWL_RATE_MIMO3_24M_PLCP = 0x13,
132 IWL_RATE_MIMO3_36M_PLCP = 0x14,
133 IWL_RATE_MIMO3_48M_PLCP = 0x15,
134 IWL_RATE_MIMO3_54M_PLCP = 0x16,
135 IWL_RATE_MIMO3_60M_PLCP = 0x17,
125 IWL_RATE_SISO_INVM_PLCP, 136 IWL_RATE_SISO_INVM_PLCP,
126 IWL_RATE_MIMO_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, 137 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
138 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
127}; 139};
128 140
129/* MAC header values for bit rates */ 141/* MAC header values for bit rates */
@@ -196,11 +208,11 @@ enum {
196/* possible actions when in legacy mode */ 208/* possible actions when in legacy mode */
197#define IWL_LEGACY_SWITCH_ANTENNA 0 209#define IWL_LEGACY_SWITCH_ANTENNA 0
198#define IWL_LEGACY_SWITCH_SISO 1 210#define IWL_LEGACY_SWITCH_SISO 1
199#define IWL_LEGACY_SWITCH_MIMO 2 211#define IWL_LEGACY_SWITCH_MIMO2 2
200 212
201/* possible actions when in siso mode */ 213/* possible actions when in siso mode */
202#define IWL_SISO_SWITCH_ANTENNA 0 214#define IWL_SISO_SWITCH_ANTENNA 0
203#define IWL_SISO_SWITCH_MIMO 1 215#define IWL_SISO_SWITCH_MIMO2 1
204#define IWL_SISO_SWITCH_GI 2 216#define IWL_SISO_SWITCH_GI 2
205 217
206/* possible actions when in mimo mode */ 218/* possible actions when in mimo mode */
@@ -208,6 +220,10 @@ enum {
208#define IWL_MIMO_SWITCH_ANTENNA_B 1 220#define IWL_MIMO_SWITCH_ANTENNA_B 1
209#define IWL_MIMO_SWITCH_GI 2 221#define IWL_MIMO_SWITCH_GI 2
210 222
223/*FIXME:RS:separate MIMO2/3 transitions*/
224
225/*FIXME:RS:add posible acctions for MIMO3*/
226
211#define IWL_ACTION_LIMIT 3 /* # possible actions */ 227#define IWL_ACTION_LIMIT 3 /* # possible actions */
212 228
213#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ 229#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -224,35 +240,46 @@ enum {
224#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) 240#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
225#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 241#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
226 242
227extern const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 243extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
228 244
229enum iwl4965_table_type { 245enum iwl_table_type {
230 LQ_NONE, 246 LQ_NONE,
231 LQ_G, /* legacy types */ 247 LQ_G, /* legacy types */
232 LQ_A, 248 LQ_A,
233 LQ_SISO, /* high-throughput types */ 249 LQ_SISO, /* high-throughput types */
234 LQ_MIMO, 250 LQ_MIMO2,
251 LQ_MIMO3,
235 LQ_MAX, 252 LQ_MAX,
236}; 253};
237 254
238#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) 255#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
239#define is_siso(tbl) (((tbl) == LQ_SISO)) 256#define is_siso(tbl) ((tbl) == LQ_SISO)
240#define is_mimo(tbl) (((tbl) == LQ_MIMO)) 257#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
258#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
259#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
241#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) 260#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
242#define is_a_band(tbl) (((tbl) == LQ_A)) 261#define is_a_band(tbl) ((tbl) == LQ_A)
243#define is_g_and(tbl) (((tbl) == LQ_G)) 262#define is_g_and(tbl) ((tbl) == LQ_G)
244 263
245/* 4965 has 2 antennas/chains for Tx (but 3 for Rx) */ 264#define ANT_NONE 0x0
246enum iwl4965_antenna_type { 265#define ANT_A BIT(0)
247 ANT_NONE, 266#define ANT_B BIT(1)
248 ANT_MAIN, 267#define ANT_AB (ANT_A | ANT_B)
249 ANT_AUX, 268#define ANT_C BIT(2)
250 ANT_BOTH, 269#define ANT_AC (ANT_A | ANT_C)
251}; 270#define ANT_BC (ANT_B | ANT_C)
271#define ANT_ABC (ANT_AB | ANT_C)
272
273static inline u8 num_of_ant(u8 mask)
274{
275 return !!((mask) & ANT_A) +
276 !!((mask) & ANT_B) +
277 !!((mask) & ANT_C);
278}
252 279
253static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index) 280static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
254{ 281{
255 u8 rate = iwl4965_rates[rate_index].prev_ieee; 282 u8 rate = iwl_rates[rate_index].prev_ieee;
256 283
257 if (rate == IWL_RATE_INVALID) 284 if (rate == IWL_RATE_INVALID)
258 rate = rate_index; 285 rate = rate_index;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index bf19eb8aafd0..aee7014bcb94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -39,81 +39,22 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h"
47#include "iwl-sta.h"
46 48
47/* module parameters */ 49/* module parameters */
48static struct iwl_mod_params iwl4965_mod_params = { 50static struct iwl_mod_params iwl4965_mod_params = {
49 .num_of_queues = IWL4965_MAX_NUM_QUEUES, 51 .num_of_queues = IWL49_NUM_QUEUES,
50 .enable_qos = 1, 52 .enable_qos = 1,
51 .amsdu_size_8K = 1, 53 .amsdu_size_8K = 1,
54 .restart_fw = 1,
52 /* the rest are 0 by default */ 55 /* the rest are 0 by default */
53}; 56};
54 57
55static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
56
57#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
58 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
59 IWL_RATE_SISO_##s##M_PLCP, \
60 IWL_RATE_MIMO_##s##M_PLCP, \
61 IWL_RATE_##r##M_IEEE, \
62 IWL_RATE_##ip##M_INDEX, \
63 IWL_RATE_##in##M_INDEX, \
64 IWL_RATE_##rp##M_INDEX, \
65 IWL_RATE_##rn##M_INDEX, \
66 IWL_RATE_##pp##M_INDEX, \
67 IWL_RATE_##np##M_INDEX }
68
69/*
70 * Parameter order:
71 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
72 *
73 * If there isn't a valid next or previous rate then INV is used which
74 * maps to IWL_RATE_INVALID
75 *
76 */
77const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
78 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
79 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
80 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
81 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
82 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
83 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
84 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
85 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
86 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
87 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
88 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
89 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
90 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
91};
92
93#ifdef CONFIG_IWL4965_HT
94
95static const u16 default_tid_to_tx_fifo[] = {
96 IWL_TX_FIFO_AC1,
97 IWL_TX_FIFO_AC0,
98 IWL_TX_FIFO_AC0,
99 IWL_TX_FIFO_AC1,
100 IWL_TX_FIFO_AC2,
101 IWL_TX_FIFO_AC2,
102 IWL_TX_FIFO_AC3,
103 IWL_TX_FIFO_AC3,
104 IWL_TX_FIFO_NONE,
105 IWL_TX_FIFO_NONE,
106 IWL_TX_FIFO_NONE,
107 IWL_TX_FIFO_NONE,
108 IWL_TX_FIFO_NONE,
109 IWL_TX_FIFO_NONE,
110 IWL_TX_FIFO_NONE,
111 IWL_TX_FIFO_NONE,
112 IWL_TX_FIFO_AC3
113};
114
115#endif /*CONFIG_IWL4965_HT */
116
117/* check contents of special bootstrap uCode SRAM */ 58/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv) 59static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{ 60{
@@ -192,15 +133,18 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
192 133
193 IWL_DEBUG_INFO("Begin load bsm\n"); 134 IWL_DEBUG_INFO("Begin load bsm\n");
194 135
136 priv->ucode_type = UCODE_RT;
137
195 /* make sure bootstrap program is no larger than BSM's SRAM size */ 138 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE) 139 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL; 140 return -EINVAL;
198 141
199 /* Tell bootstrap uCode where to find the "Initialize" uCode 142 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965. 143 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values, 144 * NOTE: iwl_init_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to 145 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */ 146 * runtime/protocol instructions and backup data cache.
147 */
204 pinst = priv->ucode_init.p_addr >> 4; 148 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4; 149 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len; 150 inst_len = priv->ucode_init.len;
@@ -259,99 +203,100 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
259 return 0; 203 return 0;
260} 204}
261 205
262static int iwl4965_init_drv(struct iwl_priv *priv) 206/**
207 * iwl4965_set_ucode_ptrs - Set uCode address location
208 *
209 * Tell initialization uCode where to find runtime uCode.
210 *
211 * BSM registers initially contain pointers to initialization uCode.
212 * We need to replace them to load runtime uCode inst and data,
213 * and to save runtime data when powering down.
214 */
215static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
263{ 216{
264 int ret; 217 dma_addr_t pinst;
265 int i; 218 dma_addr_t pdata;
266 219 unsigned long flags;
267 priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna; 220 int ret = 0;
268 priv->retry_rate = 1;
269 priv->ibss_beacon = NULL;
270
271 spin_lock_init(&priv->lock);
272 spin_lock_init(&priv->power_data.lock);
273 spin_lock_init(&priv->sta_lock);
274 spin_lock_init(&priv->hcmd_lock);
275 spin_lock_init(&priv->lq_mngr.lock);
276 221
277 priv->shared_virt = pci_alloc_consistent(priv->pci_dev, 222 /* bits 35:4 for 4965 */
278 sizeof(struct iwl4965_shared), 223 pinst = priv->ucode_code.p_addr >> 4;
279 &priv->shared_phys); 224 pdata = priv->ucode_data_backup.p_addr >> 4;
280 225
281 if (!priv->shared_virt) { 226 spin_lock_irqsave(&priv->lock, flags);
282 ret = -ENOMEM; 227 ret = iwl_grab_nic_access(priv);
283 goto err; 228 if (ret) {
229 spin_unlock_irqrestore(&priv->lock, flags);
230 return ret;
284 } 231 }
285 232
286 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared)); 233 /* Tell bootstrap uCode where to find image to load */
287 234 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
288 235 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
289 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) 236 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
290 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); 237 priv->ucode_data.len);
291
292 INIT_LIST_HEAD(&priv->free_frames);
293
294 mutex_init(&priv->mutex);
295
296 /* Clear the driver's (not device's) station table */
297 iwlcore_clear_stations_table(priv);
298
299 priv->data_retry_limit = -1;
300 priv->ieee_channels = NULL;
301 priv->ieee_rates = NULL;
302 priv->band = IEEE80211_BAND_2GHZ;
303
304 priv->iw_mode = IEEE80211_IF_TYPE_STA;
305
306 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
307 priv->valid_antenna = 0x7; /* assume all 3 connected */
308 priv->ps_mode = IWL_MIMO_PS_NONE;
309
310 /* Choose which receivers/antennas to use */
311 iwl4965_set_rxon_chain(priv);
312
313 iwlcore_reset_qos(priv);
314
315 priv->qos_data.qos_active = 0;
316 priv->qos_data.qos_cap.val = 0;
317 238
318 iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6); 239 /* Inst bytecount must be last to set up, bit 31 signals uCode
240 * that all new ptr/size info is in place */
241 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
242 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
243 iwl_release_nic_access(priv);
319 244
320 priv->rates_mask = IWL_RATES_MASK; 245 spin_unlock_irqrestore(&priv->lock, flags);
321 /* If power management is turned on, default to AC mode */
322 priv->power_mode = IWL_POWER_AC;
323 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
324 246
325 ret = iwl_init_channel_map(priv); 247 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
326 if (ret) {
327 IWL_ERROR("initializing regulatory failed: %d\n", ret);
328 goto err;
329 }
330 248
331 ret = iwl4965_init_geos(priv); 249 return ret;
332 if (ret) { 250}
333 IWL_ERROR("initializing geos failed: %d\n", ret);
334 goto err_free_channel_map;
335 }
336 251
337 ret = ieee80211_register_hw(priv->hw); 252/**
338 if (ret) { 253 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
339 IWL_ERROR("Failed to register network device (error %d)\n", 254 *
340 ret); 255 * Called after REPLY_ALIVE notification received from "initialize" uCode.
341 goto err_free_geos; 256 *
257 * The 4965 "initialize" ALIVE reply contains calibration data for:
258 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
259 * (3945 does not contain this data).
260 *
261 * Tell "initialize" uCode to go ahead and load the runtime uCode.
262*/
263static void iwl4965_init_alive_start(struct iwl_priv *priv)
264{
265 /* Check alive response for "valid" sign from uCode */
266 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
267 /* We had an error bringing up the hardware, so take it
268 * all the way back down so we can try again */
269 IWL_DEBUG_INFO("Initialize Alive failed.\n");
270 goto restart;
271 }
272
273 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
274 * This is a paranoid check, because we would not have gotten the
275 * "initialize" alive if code weren't properly loaded. */
276 if (iwl_verify_ucode(priv)) {
277 /* Runtime instruction load was bad;
278 * take it all the way back down so we can try again */
279 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
280 goto restart;
281 }
282
283 /* Calculate temperature */
284 priv->temperature = iwl4965_get_temperature(priv);
285
286 /* Send pointers to protocol/runtime uCode image ... init code will
287 * load and launch runtime uCode, which will send us another "Alive"
288 * notification. */
289 IWL_DEBUG_INFO("Initialization Alive received.\n");
290 if (iwl4965_set_ucode_ptrs(priv)) {
291 /* Runtime instruction load won't happen;
292 * take it all the way back down so we can try again */
293 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
294 goto restart;
342 } 295 }
296 return;
343 297
344 priv->hw->conf.beacon_int = 100; 298restart:
345 priv->mac80211_registered = 1; 299 queue_work(priv->workqueue, &priv->restart);
346
347 return 0;
348
349err_free_geos:
350 iwl4965_free_geos(priv);
351err_free_channel_map:
352 iwl_free_channel_map(priv);
353err:
354 return ret;
355} 300}
356 301
357static int is_fat_channel(__le32 rxon_flags) 302static int is_fat_channel(__le32 rxon_flags)
@@ -360,19 +305,6 @@ static int is_fat_channel(__le32 rxon_flags)
360 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); 305 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
361} 306}
362 307
363static u8 is_single_stream(struct iwl_priv *priv)
364{
365#ifdef CONFIG_IWL4965_HT
366 if (!priv->current_ht_config.is_ht ||
367 (priv->current_ht_config.supp_mcs_set[1] == 0) ||
368 (priv->ps_mode == IWL_MIMO_PS_STATIC))
369 return 1;
370#else
371 return 1;
372#endif /*CONFIG_IWL4965_HT */
373 return 0;
374}
375
376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) 308int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
377{ 309{
378 int idx = 0; 310 int idx = 0;
@@ -381,8 +313,8 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
381 if (rate_n_flags & RATE_MCS_HT_MSK) { 313 if (rate_n_flags & RATE_MCS_HT_MSK) {
382 idx = (rate_n_flags & 0xff); 314 idx = (rate_n_flags & 0xff);
383 315
384 if (idx >= IWL_RATE_MIMO_6M_PLCP) 316 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
385 idx = idx - IWL_RATE_MIMO_6M_PLCP; 317 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
386 318
387 idx += IWL_FIRST_OFDM_RATE; 319 idx += IWL_FIRST_OFDM_RATE;
388 /* skip 9M not supported in ht*/ 320 /* skip 9M not supported in ht*/
@@ -393,8 +325,8 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
393 325
394 /* 4965 legacy rate format, search for match in table */ 326 /* 4965 legacy rate format, search for match in table */
395 } else { 327 } else {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++) 328 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF)) 329 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
398 return idx; 330 return idx;
399 } 331 }
400 332
@@ -405,125 +337,54 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
405 * translate ucode response to mac80211 tx status control values 337 * translate ucode response to mac80211 tx status control values
406 */ 338 */
407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 339void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
408 struct ieee80211_tx_control *control) 340 struct ieee80211_tx_info *control)
409{ 341{
410 int rate_index; 342 int rate_index;
411 343
412 control->antenna_sel_tx = 344 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); 345 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK) 346 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT; 347 control->flags |= IEEE80211_TX_CTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK) 348 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD; 349 control->flags |= IEEE80211_TX_CTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK) 350 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH; 351 control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK) 352 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA; 353 control->flags |= IEEE80211_TX_CTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK) 354 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI; 355 control->flags |= IEEE80211_TX_CTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags); 356 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1) 357 if (control->band == IEEE80211_BAND_5GHZ)
428 control->tx_rate = NULL; 358 rate_index -= IWL_FIRST_OFDM_RATE;
429 else 359 control->tx_rate_idx = rate_index;
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432} 360}
433 361
434/* 362/*
435 * Determine how many receiver/antenna chains to use. 363 * EEPROM handlers
436 * More provides better reception via diversity. Fewer saves power.
437 * MIMO (dual stream) requires at least 2, but works better with 3.
438 * This does not determine *which* chains to use, just how many.
439 */ 364 */
440static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
441 u8 *idle_state, u8 *rx_state)
442{
443 u8 is_single = is_single_stream(priv);
444 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
445
446 /* # of Rx chains to use when expecting MIMO. */
447 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
448 *rx_state = 2;
449 else
450 *rx_state = 3;
451
452 /* # Rx chains when idling and maybe trying to save power */
453 switch (priv->ps_mode) {
454 case IWL_MIMO_PS_STATIC:
455 case IWL_MIMO_PS_DYNAMIC:
456 *idle_state = (is_cam) ? 2 : 1;
457 break;
458 case IWL_MIMO_PS_NONE:
459 *idle_state = (is_cam) ? *rx_state : 1;
460 break;
461 default:
462 *idle_state = 1;
463 break;
464 }
465
466 return 0;
467}
468 365
469int iwl4965_hw_rxq_stop(struct iwl_priv *priv) 366static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
470{ 367{
471 int rc; 368 u16 eeprom_ver;
472 unsigned long flags; 369 u16 calib_ver;
473 370
474 spin_lock_irqsave(&priv->lock, flags); 371 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
475 rc = iwl_grab_nic_access(priv);
476 if (rc) {
477 spin_unlock_irqrestore(&priv->lock, flags);
478 return rc;
479 }
480 372
481 /* stop Rx DMA */ 373 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
482 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
483 rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
484 (1 << 24), 1000);
485 if (rc < 0)
486 IWL_ERROR("Can't stop Rx DMA.\n");
487 374
488 iwl_release_nic_access(priv); 375 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
489 spin_unlock_irqrestore(&priv->lock, flags); 376 calib_ver < EEPROM_4965_TX_POWER_VERSION)
377 goto err;
490 378
491 return 0; 379 return 0;
492} 380err:
493 381 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
494u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr) 382 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
495{ 383 calib_ver, EEPROM_4965_TX_POWER_VERSION);
496 int i; 384 return -EINVAL;
497 int start = 0;
498 int ret = IWL_INVALID_STATION;
499 unsigned long flags;
500 DECLARE_MAC_BUF(mac);
501
502 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
503 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
504 start = IWL_STA_ID;
505
506 if (is_broadcast_ether_addr(addr))
507 return priv->hw_params.bcast_sta_id;
508
509 spin_lock_irqsave(&priv->sta_lock, flags);
510 for (i = start; i < priv->hw_params.max_stations; i++)
511 if ((priv->stations[i].used) &&
512 (!compare_ether_addr
513 (priv->stations[i].sta.sta.addr, addr))) {
514 ret = i;
515 goto out;
516 }
517
518 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
519 print_mac(mac, addr), priv->num_stations);
520 385
521 out:
522 spin_unlock_irqrestore(&priv->sta_lock, flags);
523 return ret;
524} 386}
525 387int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
526static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
527{ 388{
528 int ret; 389 int ret;
529 unsigned long flags; 390 unsigned long flags;
@@ -535,340 +396,130 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
535 return ret; 396 return ret;
536 } 397 }
537 398
538 if (!pwr_max) { 399 if (src == IWL_PWR_SRC_VAUX) {
539 u32 val; 400 u32 val;
540
541 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 401 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
542 &val); 402 &val);
543 403
544 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 404 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
545 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 405 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
546 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 406 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
547 ~APMG_PS_CTRL_MSK_PWR_SRC); 407 ~APMG_PS_CTRL_MSK_PWR_SRC);
548 } else 408 }
409 } else {
549 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 410 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
550 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 411 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
551 ~APMG_PS_CTRL_MSK_PWR_SRC); 412 ~APMG_PS_CTRL_MSK_PWR_SRC);
552
553 iwl_release_nic_access(priv);
554 spin_unlock_irqrestore(&priv->lock, flags);
555
556 return ret;
557}
558
559static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
560{
561 int ret;
562 unsigned long flags;
563 unsigned int rb_size;
564
565 spin_lock_irqsave(&priv->lock, flags);
566 ret = iwl_grab_nic_access(priv);
567 if (ret) {
568 spin_unlock_irqrestore(&priv->lock, flags);
569 return ret;
570 } 413 }
571 414
572 if (priv->cfg->mod_params->amsdu_size_8K)
573 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
574 else
575 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
576
577 /* Stop Rx DMA */
578 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
579
580 /* Reset driver's Rx queue write index */
581 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
582
583 /* Tell device where to find RBD circular buffer in DRAM */
584 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
585 rxq->dma_addr >> 8);
586
587 /* Tell device where in DRAM to update its Rx status */
588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
589 (priv->shared_phys +
590 offsetof(struct iwl4965_shared, rb_closed)) >> 4);
591
592 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
593 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
594 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
595 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
596 rb_size |
597 /* 0x10 << 4 | */
598 (RX_QUEUE_SIZE_LOG <<
599 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
600
601 /*
602 * iwl_write32(priv,CSR_INT_COAL_REG,0);
603 */
604
605 iwl_release_nic_access(priv);
606 spin_unlock_irqrestore(&priv->lock, flags);
607
608 return 0;
609}
610
611/* Tell 4965 where to find the "keep warm" buffer */
612static int iwl4965_kw_init(struct iwl_priv *priv)
613{
614 unsigned long flags;
615 int rc;
616
617 spin_lock_irqsave(&priv->lock, flags);
618 rc = iwl_grab_nic_access(priv);
619 if (rc)
620 goto out;
621
622 iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
623 priv->kw.dma_addr >> 4);
624 iwl_release_nic_access(priv); 415 iwl_release_nic_access(priv);
625out:
626 spin_unlock_irqrestore(&priv->lock, flags); 416 spin_unlock_irqrestore(&priv->lock, flags);
627 return rc;
628}
629
630static int iwl4965_kw_alloc(struct iwl_priv *priv)
631{
632 struct pci_dev *dev = priv->pci_dev;
633 struct iwl4965_kw *kw = &priv->kw;
634
635 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
636 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
637 if (!kw->v_addr)
638 return -ENOMEM;
639
640 return 0;
641}
642
643/**
644 * iwl4965_kw_free - Free the "keep warm" buffer
645 */
646static void iwl4965_kw_free(struct iwl_priv *priv)
647{
648 struct pci_dev *dev = priv->pci_dev;
649 struct iwl4965_kw *kw = &priv->kw;
650 417
651 if (kw->v_addr) { 418 return ret;
652 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
653 memset(kw, 0, sizeof(*kw));
654 }
655} 419}
656 420
657/** 421/*
658 * iwl4965_txq_ctx_reset - Reset TX queue context 422 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask
659 * Destroys all DMA structures and initialise them again 423 * must be called under priv->lock and mac access
660 *
661 * @param priv
662 * @return error code
663 */ 424 */
664static int iwl4965_txq_ctx_reset(struct iwl_priv *priv) 425static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
665{ 426{
666 int rc = 0; 427 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
667 int txq_id, slots_num;
668 unsigned long flags;
669
670 iwl4965_kw_free(priv);
671
672 /* Free all tx/cmd queues and keep-warm buffer */
673 iwl4965_hw_txq_ctx_free(priv);
674
675 /* Alloc keep-warm buffer */
676 rc = iwl4965_kw_alloc(priv);
677 if (rc) {
678 IWL_ERROR("Keep Warm allocation failed");
679 goto error_kw;
680 }
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 rc = iwl_grab_nic_access(priv);
685 if (unlikely(rc)) {
686 IWL_ERROR("TX reset failed");
687 spin_unlock_irqrestore(&priv->lock, flags);
688 goto error_reset;
689 }
690
691 /* Turn off all Tx DMA channels */
692 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
693 iwl_release_nic_access(priv);
694 spin_unlock_irqrestore(&priv->lock, flags);
695
696 /* Tell 4965 where to find the keep-warm buffer */
697 rc = iwl4965_kw_init(priv);
698 if (rc) {
699 IWL_ERROR("kw_init failed\n");
700 goto error_reset;
701 }
702
703 /* Alloc and init all (default 16) Tx queues,
704 * including the command queue (#4) */
705 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
706 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
707 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
708 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
709 txq_id);
710 if (rc) {
711 IWL_ERROR("Tx %d queue init failed\n", txq_id);
712 goto error;
713 }
714 }
715
716 return rc;
717
718 error:
719 iwl4965_hw_txq_ctx_free(priv);
720 error_reset:
721 iwl4965_kw_free(priv);
722 error_kw:
723 return rc;
724} 428}
725 429
726int iwl4965_hw_nic_init(struct iwl_priv *priv) 430static int iwl4965_apm_init(struct iwl_priv *priv)
727{ 431{
728 int rc; 432 int ret = 0;
729 unsigned long flags;
730 struct iwl4965_rx_queue *rxq = &priv->rxq;
731 u8 rev_id;
732 u32 val;
733 u8 val_link;
734
735 iwl4965_power_init_handle(priv);
736 433
737 /* nic_init */ 434 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
738 spin_lock_irqsave(&priv->lock, flags); 435 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
739 436
437 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
740 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 438 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
741 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 439 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
742 440
441 /* set "initialization complete" bit to move adapter
442 * D0U* --> D0A* state */
743 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 443 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
744 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
745 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
746 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
747 if (rc < 0) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 IWL_DEBUG_INFO("Failed to init the card\n");
750 return rc;
751 }
752 444
753 rc = iwl_grab_nic_access(priv); 445 /* wait for clock stabilization */
754 if (rc) { 446 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
755 spin_unlock_irqrestore(&priv->lock, flags); 447 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
756 return rc; 448 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
449 if (ret < 0) {
450 IWL_DEBUG_INFO("Failed to init the card\n");
451 goto out;
757 } 452 }
758 453
759 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 454 ret = iwl_grab_nic_access(priv);
455 if (ret)
456 goto out;
760 457
761 iwl_write_prph(priv, APMG_CLK_CTRL_REG, 458 /* enable DMA */
762 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 459 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
763 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 460 APMG_CLK_VAL_BSM_CLK_RQT);
764 461
765 udelay(20); 462 udelay(20);
766 463
464 /* disable L1-Active */
767 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 465 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
768 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 466 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
769 467
770 iwl_release_nic_access(priv); 468 iwl_release_nic_access(priv);
771 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); 469out:
772 spin_unlock_irqrestore(&priv->lock, flags); 470 return ret;
471}
773 472
774 /* Determine HW type */
775 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
776 if (rc)
777 return rc;
778 473
779 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); 474static void iwl4965_nic_config(struct iwl_priv *priv)
475{
476 unsigned long flags;
477 u32 val;
478 u16 radio_cfg;
479 u8 val_link;
780 480
781 iwl4965_nic_set_pwr_src(priv, 1);
782 spin_lock_irqsave(&priv->lock, flags); 481 spin_lock_irqsave(&priv->lock, flags);
783 482
784 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { 483 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
785 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); 484 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
786 /* Enable No Snoop field */ 485 /* Enable No Snoop field */
787 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, 486 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
788 val & ~(1 << 11)); 487 val & ~(1 << 11));
789 } 488 }
790 489
791 spin_unlock_irqrestore(&priv->lock, flags);
792
793 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
794 IWL_ERROR("Older EEPROM detected! Aborting.\n");
795 return -EINVAL;
796 }
797
798 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 490 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
799 491
800 /* disable L1 entry -- workaround for pre-B1 */ 492 /* L1 is enabled by BIOS */
801 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); 493 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
494 /* diable L0S disabled L1A enabled */
495 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
496 else
497 /* L0S enabled L1A disabled */
498 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
802 499
803 spin_lock_irqsave(&priv->lock, flags); 500 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
804 501
805 /* set CSR_HW_CONFIG_REG for uCode use */ 502 /* write radio config values to register */
503 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
504 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
505 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
506 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
507 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
806 508
509 /* set CSR_HW_CONFIG_REG for uCode use */
807 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 510 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
808 CSR49_HW_IF_CONFIG_REG_BIT_4965_R | 511 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
809 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI | 512 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
810 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
811
812 rc = iwl_grab_nic_access(priv);
813 if (rc < 0) {
814 spin_unlock_irqrestore(&priv->lock, flags);
815 IWL_DEBUG_INFO("Failed to init the card\n");
816 return rc;
817 }
818
819 iwl_read_prph(priv, APMG_PS_CTRL_REG);
820 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
821 udelay(5);
822 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
823
824 iwl_release_nic_access(priv);
825 spin_unlock_irqrestore(&priv->lock, flags);
826
827 iwl4965_hw_card_show_info(priv);
828
829 /* end nic_init */
830
831 /* Allocate the RX queue, or reset if it is already allocated */
832 if (!rxq->bd) {
833 rc = iwl4965_rx_queue_alloc(priv);
834 if (rc) {
835 IWL_ERROR("Unable to initialize Rx queue\n");
836 return -ENOMEM;
837 }
838 } else
839 iwl4965_rx_queue_reset(priv, rxq);
840
841 iwl4965_rx_replenish(priv);
842
843 iwl4965_rx_init(priv, rxq);
844
845 spin_lock_irqsave(&priv->lock, flags);
846 513
847 rxq->need_update = 1; 514 priv->calib_info = (struct iwl_eeprom_calib_info *)
848 iwl4965_rx_queue_update_write_ptr(priv, rxq); 515 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
849 516
850 spin_unlock_irqrestore(&priv->lock, flags); 517 spin_unlock_irqrestore(&priv->lock, flags);
851
852 /* Allocate and init all Tx and Command queues */
853 rc = iwl4965_txq_ctx_reset(priv);
854 if (rc)
855 return rc;
856
857 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
858 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
859
860 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
861 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
862
863 set_bit(STATUS_INIT, &priv->status);
864
865 return 0;
866} 518}
867 519
868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv) 520static int iwl4965_apm_stop_master(struct iwl_priv *priv)
869{ 521{
870 int rc = 0; 522 int ret = 0;
871 u32 reg_val;
872 unsigned long flags; 523 unsigned long flags;
873 524
874 spin_lock_irqsave(&priv->lock, flags); 525 spin_lock_irqsave(&priv->lock, flags);
@@ -876,64 +527,41 @@ int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
876 /* set stop master bit */ 527 /* set stop master bit */
877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 528 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
878 529
879 reg_val = iwl_read32(priv, CSR_GP_CNTRL); 530 ret = iwl_poll_bit(priv, CSR_RESET,
880
881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
883 IWL_DEBUG_INFO("Card in power save, master is already "
884 "stopped\n");
885 else {
886 rc = iwl_poll_bit(priv, CSR_RESET,
887 CSR_RESET_REG_FLAG_MASTER_DISABLED, 531 CSR_RESET_REG_FLAG_MASTER_DISABLED,
888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 532 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
889 if (rc < 0) { 533 if (ret < 0)
890 spin_unlock_irqrestore(&priv->lock, flags); 534 goto out;
891 return rc;
892 }
893 }
894 535
536out:
895 spin_unlock_irqrestore(&priv->lock, flags); 537 spin_unlock_irqrestore(&priv->lock, flags);
896 IWL_DEBUG_INFO("stop master\n"); 538 IWL_DEBUG_INFO("stop master\n");
897 539
898 return rc; 540 return ret;
899} 541}
900 542
901/** 543static void iwl4965_apm_stop(struct iwl_priv *priv)
902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
903 */
904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
905{ 544{
906
907 int txq_id;
908 unsigned long flags; 545 unsigned long flags;
909 546
910 /* Stop each Tx DMA channel, and wait for it to be idle */ 547 iwl4965_apm_stop_master(priv);
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 spin_lock_irqsave(&priv->lock, flags);
913 if (iwl_grab_nic_access(priv)) {
914 spin_unlock_irqrestore(&priv->lock, flags);
915 continue;
916 }
917 548
918 iwl_write_direct32(priv, 549 spin_lock_irqsave(&priv->lock, flags);
919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0); 550
920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG, 551 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
922 (txq_id), 200);
923 iwl_release_nic_access(priv);
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926 552
927 /* Deallocate memory for all Tx queues */ 553 udelay(10);
928 iwl4965_hw_txq_ctx_free(priv); 554
555 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
556 spin_unlock_irqrestore(&priv->lock, flags);
929} 557}
930 558
931int iwl4965_hw_nic_reset(struct iwl_priv *priv) 559static int iwl4965_apm_reset(struct iwl_priv *priv)
932{ 560{
933 int rc = 0; 561 int ret = 0;
934 unsigned long flags; 562 unsigned long flags;
935 563
936 iwl4965_hw_nic_stop_master(priv); 564 iwl4965_apm_stop_master(priv);
937 565
938 spin_lock_irqsave(&priv->lock, flags); 566 spin_lock_irqsave(&priv->lock, flags);
939 567
@@ -941,34 +569,41 @@ int iwl4965_hw_nic_reset(struct iwl_priv *priv)
941 569
942 udelay(10); 570 udelay(10);
943 571
572 /* FIXME: put here L1A -L0S w/a */
573
944 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 574 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
945 rc = iwl_poll_bit(priv, CSR_RESET, 575
576 ret = iwl_poll_bit(priv, CSR_RESET,
946 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 577 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
947 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); 578 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
948 579
580 if (ret)
581 goto out;
582
949 udelay(10); 583 udelay(10);
950 584
951 rc = iwl_grab_nic_access(priv); 585 ret = iwl_grab_nic_access(priv);
952 if (!rc) { 586 if (ret)
953 iwl_write_prph(priv, APMG_CLK_EN_REG, 587 goto out;
954 APMG_CLK_VAL_DMA_CLK_RQT | 588 /* Enable DMA and BSM Clock */
955 APMG_CLK_VAL_BSM_CLK_RQT); 589 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
590 APMG_CLK_VAL_BSM_CLK_RQT);
956 591
957 udelay(10); 592 udelay(10);
958 593
959 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 594 /* disable L1A */
960 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 595 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
596 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
961 597
962 iwl_release_nic_access(priv); 598 iwl_release_nic_access(priv);
963 }
964 599
965 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 600 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
966 wake_up_interruptible(&priv->wait_command_queue); 601 wake_up_interruptible(&priv->wait_command_queue);
967 602
603out:
968 spin_unlock_irqrestore(&priv->lock, flags); 604 spin_unlock_irqrestore(&priv->lock, flags);
969 605
970 return rc; 606 return ret;
971
972} 607}
973 608
974#define REG_RECALIB_PERIOD (60) 609#define REG_RECALIB_PERIOD (60)
@@ -993,15 +628,9 @@ static void iwl4965_bg_statistics_periodic(unsigned long data)
993 iwl_send_statistics_request(priv, CMD_ASYNC); 628 iwl_send_statistics_request(priv, CMD_ASYNC);
994} 629}
995 630
996#define CT_LIMIT_CONST 259
997#define TM_CT_KILL_THRESHOLD 110
998
999void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) 631void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1000{ 632{
1001 struct iwl4965_ct_kill_config cmd; 633 struct iwl4965_ct_kill_config cmd;
1002 u32 R1, R2, R3;
1003 u32 temp_th;
1004 u32 crit_temperature;
1005 unsigned long flags; 634 unsigned long flags;
1006 int ret = 0; 635 int ret = 0;
1007 636
@@ -1010,440 +639,28 @@ void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1010 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 639 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1011 spin_unlock_irqrestore(&priv->lock, flags); 640 spin_unlock_irqrestore(&priv->lock, flags);
1012 641
1013 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) { 642 cmd.critical_temperature_R =
1014 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 643 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1015 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1016 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1017 } else {
1018 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1019 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1020 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1021 }
1022
1023 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1024 644
1025 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1026 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
1027 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, 645 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1028 sizeof(cmd), &cmd); 646 sizeof(cmd), &cmd);
1029 if (ret) 647 if (ret)
1030 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); 648 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1031 else 649 else
1032 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); 650 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
1033} 651 "critical temperature is %d\n",
1034 652 cmd.critical_temperature_R);
1035#ifdef CONFIG_IWL4965_SENSITIVITY
1036
1037/* "false alarms" are signals that our DSP tries to lock onto,
1038 * but then determines that they are either noise, or transmissions
1039 * from a distant wireless network (also "noise", really) that get
1040 * "stepped on" by stronger transmissions within our own network.
1041 * This algorithm attempts to set a sensitivity level that is high
1042 * enough to receive all of our own network traffic, but not so
1043 * high that our DSP gets too busy trying to lock onto non-network
1044 * activity/noise. */
1045static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
1046 u32 norm_fa,
1047 u32 rx_enable_time,
1048 struct statistics_general_data *rx_info)
1049{
1050 u32 max_nrg_cck = 0;
1051 int i = 0;
1052 u8 max_silence_rssi = 0;
1053 u32 silence_ref = 0;
1054 u8 silence_rssi_a = 0;
1055 u8 silence_rssi_b = 0;
1056 u8 silence_rssi_c = 0;
1057 u32 val;
1058
1059 /* "false_alarms" values below are cross-multiplications to assess the
1060 * numbers of false alarms within the measured period of actual Rx
1061 * (Rx is off when we're txing), vs the min/max expected false alarms
1062 * (some should be expected if rx is sensitive enough) in a
1063 * hypothetical listening period of 200 time units (TU), 204.8 msec:
1064 *
1065 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
1066 *
1067 * */
1068 u32 false_alarms = norm_fa * 200 * 1024;
1069 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
1070 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
1071 struct iwl4965_sensitivity_data *data = NULL;
1072
1073 data = &(priv->sensitivity_data);
1074
1075 data->nrg_auto_corr_silence_diff = 0;
1076
1077 /* Find max silence rssi among all 3 receivers.
1078 * This is background noise, which may include transmissions from other
1079 * networks, measured during silence before our network's beacon */
1080 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
1081 ALL_BAND_FILTER) >> 8);
1082 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
1083 ALL_BAND_FILTER) >> 8);
1084 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
1085 ALL_BAND_FILTER) >> 8);
1086
1087 val = max(silence_rssi_b, silence_rssi_c);
1088 max_silence_rssi = max(silence_rssi_a, (u8) val);
1089
1090 /* Store silence rssi in 20-beacon history table */
1091 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
1092 data->nrg_silence_idx++;
1093 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
1094 data->nrg_silence_idx = 0;
1095
1096 /* Find max silence rssi across 20 beacon history */
1097 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
1098 val = data->nrg_silence_rssi[i];
1099 silence_ref = max(silence_ref, val);
1100 }
1101 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
1102 silence_rssi_a, silence_rssi_b, silence_rssi_c,
1103 silence_ref);
1104
1105 /* Find max rx energy (min value!) among all 3 receivers,
1106 * measured during beacon frame.
1107 * Save it in 10-beacon history table. */
1108 i = data->nrg_energy_idx;
1109 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
1110 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
1111
1112 data->nrg_energy_idx++;
1113 if (data->nrg_energy_idx >= 10)
1114 data->nrg_energy_idx = 0;
1115
1116 /* Find min rx energy (max value) across 10 beacon history.
1117 * This is the minimum signal level that we want to receive well.
1118 * Add backoff (margin so we don't miss slightly lower energy frames).
1119 * This establishes an upper bound (min value) for energy threshold. */
1120 max_nrg_cck = data->nrg_value[0];
1121 for (i = 1; i < 10; i++)
1122 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
1123 max_nrg_cck += 6;
1124
1125 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
1126 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
1127 rx_info->beacon_energy_c, max_nrg_cck - 6);
1128
1129 /* Count number of consecutive beacons with fewer-than-desired
1130 * false alarms. */
1131 if (false_alarms < min_false_alarms)
1132 data->num_in_cck_no_fa++;
1133 else
1134 data->num_in_cck_no_fa = 0;
1135 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
1136 data->num_in_cck_no_fa);
1137
1138 /* If we got too many false alarms this time, reduce sensitivity */
1139 if (false_alarms > max_false_alarms) {
1140 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
1141 false_alarms, max_false_alarms);
1142 IWL_DEBUG_CALIB("... reducing sensitivity\n");
1143 data->nrg_curr_state = IWL_FA_TOO_MANY;
1144
1145 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
1146 /* Store for "fewer than desired" on later beacon */
1147 data->nrg_silence_ref = silence_ref;
1148
1149 /* increase energy threshold (reduce nrg value)
1150 * to decrease sensitivity */
1151 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
1152 data->nrg_th_cck = data->nrg_th_cck
1153 - NRG_STEP_CCK;
1154 }
1155
1156 /* increase auto_corr values to decrease sensitivity */
1157 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
1158 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
1159 else {
1160 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
1161 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
1162 }
1163 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
1164 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
1165
1166 /* Else if we got fewer than desired, increase sensitivity */
1167 } else if (false_alarms < min_false_alarms) {
1168 data->nrg_curr_state = IWL_FA_TOO_FEW;
1169
1170 /* Compare silence level with silence level for most recent
1171 * healthy number or too many false alarms */
1172 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
1173 (s32)silence_ref;
1174
1175 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
1176 false_alarms, min_false_alarms,
1177 data->nrg_auto_corr_silence_diff);
1178
1179 /* Increase value to increase sensitivity, but only if:
1180 * 1a) previous beacon did *not* have *too many* false alarms
1181 * 1b) AND there's a significant difference in Rx levels
1182 * from a previous beacon with too many, or healthy # FAs
1183 * OR 2) We've seen a lot of beacons (100) with too few
1184 * false alarms */
1185 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1186 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1187 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1188
1189 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1190 /* Increase nrg value to increase sensitivity */
1191 val = data->nrg_th_cck + NRG_STEP_CCK;
1192 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1193
1194 /* Decrease auto_corr values to increase sensitivity */
1195 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1196 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1197
1198 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1199 data->auto_corr_cck_mrc =
1200 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1201
1202 } else
1203 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1204
1205 /* Else we got a healthy number of false alarms, keep status quo */
1206 } else {
1207 IWL_DEBUG_CALIB(" FA in safe zone\n");
1208 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1209
1210 /* Store for use in "fewer than desired" with later beacon */
1211 data->nrg_silence_ref = silence_ref;
1212
1213 /* If previous beacon had too many false alarms,
1214 * give it some extra margin by reducing sensitivity again
1215 * (but don't go below measured energy of desired Rx) */
1216 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1217 IWL_DEBUG_CALIB("... increasing margin\n");
1218 data->nrg_th_cck -= NRG_MARGIN;
1219 }
1220 }
1221
1222 /* Make sure the energy threshold does not go above the measured
1223 * energy of the desired Rx signals (reduced by backoff margin),
1224 * or else we might start missing Rx frames.
1225 * Lower value is higher energy, so we use max()!
1226 */
1227 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1228 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1229
1230 data->nrg_prev_state = data->nrg_curr_state;
1231
1232 return 0;
1233}
1234
1235
1236static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
1237 u32 norm_fa,
1238 u32 rx_enable_time)
1239{
1240 u32 val;
1241 u32 false_alarms = norm_fa * 200 * 1024;
1242 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1243 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1244 struct iwl4965_sensitivity_data *data = NULL;
1245
1246 data = &(priv->sensitivity_data);
1247
1248 /* If we got too many false alarms this time, reduce sensitivity */
1249 if (false_alarms > max_false_alarms) {
1250
1251 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1252 false_alarms, max_false_alarms);
1253
1254 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1255 data->auto_corr_ofdm =
1256 min((u32)AUTO_CORR_MAX_OFDM, val);
1257
1258 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1259 data->auto_corr_ofdm_mrc =
1260 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1261
1262 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1263 data->auto_corr_ofdm_x1 =
1264 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1265
1266 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1267 data->auto_corr_ofdm_mrc_x1 =
1268 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1269 }
1270
1271 /* Else if we got fewer than desired, increase sensitivity */
1272 else if (false_alarms < min_false_alarms) {
1273
1274 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1275 false_alarms, min_false_alarms);
1276
1277 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1278 data->auto_corr_ofdm =
1279 max((u32)AUTO_CORR_MIN_OFDM, val);
1280
1281 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1282 data->auto_corr_ofdm_mrc =
1283 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1284
1285 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1286 data->auto_corr_ofdm_x1 =
1287 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1288
1289 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1290 data->auto_corr_ofdm_mrc_x1 =
1291 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1292 }
1293
1294 else
1295 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1296 min_false_alarms, false_alarms, max_false_alarms);
1297
1298 return 0;
1299}
1300
1301static int iwl4965_sensitivity_callback(struct iwl_priv *priv,
1302 struct iwl_cmd *cmd, struct sk_buff *skb)
1303{
1304 /* We didn't cache the SKB; let the caller free it */
1305 return 1;
1306}
1307
1308/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1309static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1310{
1311 struct iwl4965_sensitivity_cmd cmd ;
1312 struct iwl4965_sensitivity_data *data = NULL;
1313 struct iwl_host_cmd cmd_out = {
1314 .id = SENSITIVITY_CMD,
1315 .len = sizeof(struct iwl4965_sensitivity_cmd),
1316 .meta.flags = flags,
1317 .data = &cmd,
1318 };
1319 int ret;
1320
1321 data = &(priv->sensitivity_data);
1322
1323 memset(&cmd, 0, sizeof(cmd));
1324
1325 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1326 cpu_to_le16((u16)data->auto_corr_ofdm);
1327 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1328 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1329 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1330 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1331 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1332 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1333
1334 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1335 cpu_to_le16((u16)data->auto_corr_cck);
1336 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1337 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1338
1339 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1340 cpu_to_le16((u16)data->nrg_th_cck);
1341 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1342 cpu_to_le16((u16)data->nrg_th_ofdm);
1343
1344 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1345 __constant_cpu_to_le16(190);
1346 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1347 __constant_cpu_to_le16(390);
1348 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1349 __constant_cpu_to_le16(62);
1350
1351 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1352 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1353 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1354 data->nrg_th_ofdm);
1355
1356 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1357 data->auto_corr_cck, data->auto_corr_cck_mrc,
1358 data->nrg_th_cck);
1359
1360 /* Update uCode's "work" table, and copy it to DSP */
1361 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1362
1363 if (flags & CMD_ASYNC)
1364 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1365
1366 /* Don't send command to uCode if nothing has changed */
1367 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1368 sizeof(u16)*HD_TABLE_SIZE)) {
1369 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1370 return 0;
1371 }
1372
1373 /* Copy table for comparison next time */
1374 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1375 sizeof(u16)*HD_TABLE_SIZE);
1376
1377 ret = iwl_send_cmd(priv, &cmd_out);
1378 if (ret)
1379 IWL_ERROR("SENSITIVITY_CMD failed\n");
1380
1381 return ret;
1382}
1383
1384void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1385{
1386 struct iwl4965_sensitivity_data *data = NULL;
1387 int i;
1388 int ret = 0;
1389
1390 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1391
1392 if (force)
1393 memset(&(priv->sensitivity_tbl[0]), 0,
1394 sizeof(u16)*HD_TABLE_SIZE);
1395
1396 /* Clear driver's sensitivity algo data */
1397 data = &(priv->sensitivity_data);
1398 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1399
1400 data->num_in_cck_no_fa = 0;
1401 data->nrg_curr_state = IWL_FA_TOO_MANY;
1402 data->nrg_prev_state = IWL_FA_TOO_MANY;
1403 data->nrg_silence_ref = 0;
1404 data->nrg_silence_idx = 0;
1405 data->nrg_energy_idx = 0;
1406
1407 for (i = 0; i < 10; i++)
1408 data->nrg_value[i] = 0;
1409
1410 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1411 data->nrg_silence_rssi[i] = 0;
1412
1413 data->auto_corr_ofdm = 90;
1414 data->auto_corr_ofdm_mrc = 170;
1415 data->auto_corr_ofdm_x1 = 105;
1416 data->auto_corr_ofdm_mrc_x1 = 220;
1417 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1418 data->auto_corr_cck_mrc = 200;
1419 data->nrg_th_cck = 100;
1420 data->nrg_th_ofdm = 100;
1421
1422 data->last_bad_plcp_cnt_ofdm = 0;
1423 data->last_fa_cnt_ofdm = 0;
1424 data->last_bad_plcp_cnt_cck = 0;
1425 data->last_fa_cnt_cck = 0;
1426
1427 /* Clear prior Sensitivity command data to force send to uCode */
1428 if (force)
1429 memset(&(priv->sensitivity_tbl[0]), 0,
1430 sizeof(u16)*HD_TABLE_SIZE);
1431
1432 ret |= iwl4965_sensitivity_write(priv, flags);
1433 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
1434
1435 return;
1436} 653}
1437 654
655#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
1438 656
1439/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 657/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1440 * Called after every association, but this runs only once! 658 * Called after every association, but this runs only once!
1441 * ... once chain noise is calibrated the first time, it's good forever. */ 659 * ... once chain noise is calibrated the first time, it's good forever. */
1442void iwl4965_chain_noise_reset(struct iwl_priv *priv) 660static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1443{ 661{
1444 struct iwl4965_chain_noise_data *data = NULL; 662 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
1445 663
1446 data = &(priv->chain_noise_data);
1447 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 664 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1448 struct iwl4965_calibration_cmd cmd; 665 struct iwl4965_calibration_cmd cmd;
1449 666
@@ -1452,357 +669,76 @@ void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1452 cmd.diff_gain_a = 0; 669 cmd.diff_gain_a = 0;
1453 cmd.diff_gain_b = 0; 670 cmd.diff_gain_b = 0;
1454 cmd.diff_gain_c = 0; 671 cmd.diff_gain_c = 0;
1455 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, 672 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1456 sizeof(cmd), &cmd, NULL); 673 sizeof(cmd), &cmd))
1457 msleep(4); 674 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
1458 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 675 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1459 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 676 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1460 } 677 }
1461 return;
1462} 678}
1463 679
1464/* 680static void iwl4965_gain_computation(struct iwl_priv *priv,
1465 * Accumulate 20 beacons of signal and noise statistics for each of 681 u32 *average_noise,
1466 * 3 receivers/antennas/rx-chains, then figure out: 682 u16 min_average_noise_antenna_i,
1467 * 1) Which antennas are connected. 683 u32 min_average_noise)
1468 * 2) Differential rx gain settings to balance the 3 receivers.
1469 */
1470static void iwl4965_noise_calibration(struct iwl_priv *priv,
1471 struct iwl4965_notif_statistics *stat_resp)
1472{ 684{
1473 struct iwl4965_chain_noise_data *data = NULL; 685 int i, ret;
1474 int ret = 0; 686 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1475
1476 u32 chain_noise_a;
1477 u32 chain_noise_b;
1478 u32 chain_noise_c;
1479 u32 chain_sig_a;
1480 u32 chain_sig_b;
1481 u32 chain_sig_c;
1482 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1483 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1484 u32 max_average_sig;
1485 u16 max_average_sig_antenna_i;
1486 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1487 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1488 u16 i = 0;
1489 u16 chan_num = INITIALIZATION_VALUE;
1490 u32 band = INITIALIZATION_VALUE;
1491 u32 active_chains = 0;
1492 unsigned long flags;
1493 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1494
1495 data = &(priv->chain_noise_data);
1496
1497 /* Accumulate just the first 20 beacons after the first association,
1498 * then we're done forever. */
1499 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1500 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1501 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1502 return;
1503 }
1504
1505 spin_lock_irqsave(&priv->lock, flags);
1506 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1507 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1508 spin_unlock_irqrestore(&priv->lock, flags);
1509 return;
1510 }
1511
1512 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1513 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1514
1515 /* Make sure we accumulate data for just the associated channel
1516 * (even if scanning). */
1517 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1518 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1519 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1520 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1521 chan_num, band);
1522 spin_unlock_irqrestore(&priv->lock, flags);
1523 return;
1524 }
1525
1526 /* Accumulate beacon statistics values across 20 beacons */
1527 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1528 IN_BAND_FILTER;
1529 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1530 IN_BAND_FILTER;
1531 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1532 IN_BAND_FILTER;
1533
1534 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1535 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1536 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1537
1538 spin_unlock_irqrestore(&priv->lock, flags);
1539
1540 data->beacon_count++;
1541
1542 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1543 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1544 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1545
1546 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1547 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1548 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1549
1550 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1551 data->beacon_count);
1552 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1553 chain_sig_a, chain_sig_b, chain_sig_c);
1554 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1555 chain_noise_a, chain_noise_b, chain_noise_c);
1556
1557 /* If this is the 20th beacon, determine:
1558 * 1) Disconnected antennas (using signal strengths)
1559 * 2) Differential gain (using silence noise) to balance receivers */
1560 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1561
1562 /* Analyze signal for disconnected antenna */
1563 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1564 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1565 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1566
1567 if (average_sig[0] >= average_sig[1]) {
1568 max_average_sig = average_sig[0];
1569 max_average_sig_antenna_i = 0;
1570 active_chains = (1 << max_average_sig_antenna_i);
1571 } else {
1572 max_average_sig = average_sig[1];
1573 max_average_sig_antenna_i = 1;
1574 active_chains = (1 << max_average_sig_antenna_i);
1575 }
1576
1577 if (average_sig[2] >= max_average_sig) {
1578 max_average_sig = average_sig[2];
1579 max_average_sig_antenna_i = 2;
1580 active_chains = (1 << max_average_sig_antenna_i);
1581 }
1582
1583 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1584 average_sig[0], average_sig[1], average_sig[2]);
1585 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1586 max_average_sig, max_average_sig_antenna_i);
1587
1588 /* Compare signal strengths for all 3 receivers. */
1589 for (i = 0; i < NUM_RX_CHAINS; i++) {
1590 if (i != max_average_sig_antenna_i) {
1591 s32 rssi_delta = (max_average_sig -
1592 average_sig[i]);
1593
1594 /* If signal is very weak, compared with
1595 * strongest, mark it as disconnected. */
1596 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1597 data->disconn_array[i] = 1;
1598 else
1599 active_chains |= (1 << i);
1600 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1601 "disconn_array[i] = %d\n",
1602 i, rssi_delta, data->disconn_array[i]);
1603 }
1604 }
1605
1606 /*If both chains A & B are disconnected -
1607 * connect B and leave A as is */
1608 if (data->disconn_array[CHAIN_A] &&
1609 data->disconn_array[CHAIN_B]) {
1610 data->disconn_array[CHAIN_B] = 0;
1611 active_chains |= (1 << CHAIN_B);
1612 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1613 "W/A - declare B as connected\n");
1614 }
1615
1616 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1617 active_chains);
1618
1619 /* Save for use within RXON, TX, SCAN commands, etc. */
1620 priv->valid_antenna = active_chains;
1621
1622 /* Analyze noise for rx balance */
1623 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1624 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1625 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1626
1627 for (i = 0; i < NUM_RX_CHAINS; i++) {
1628 if (!(data->disconn_array[i]) &&
1629 (average_noise[i] <= min_average_noise)) {
1630 /* This means that chain i is active and has
1631 * lower noise values so far: */
1632 min_average_noise = average_noise[i];
1633 min_average_noise_antenna_i = i;
1634 }
1635 }
1636
1637 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1638 687
1639 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n", 688 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1640 average_noise[0], average_noise[1],
1641 average_noise[2]);
1642 689
1643 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n", 690 for (i = 0; i < NUM_RX_CHAINS; i++) {
1644 min_average_noise, min_average_noise_antenna_i); 691 s32 delta_g = 0;
1645 692
1646 for (i = 0; i < NUM_RX_CHAINS; i++) { 693 if (!(data->disconn_array[i]) &&
1647 s32 delta_g = 0; 694 (data->delta_gain_code[i] ==
1648
1649 if (!(data->disconn_array[i]) &&
1650 (data->delta_gain_code[i] ==
1651 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 695 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1652 delta_g = average_noise[i] - min_average_noise; 696 delta_g = average_noise[i] - min_average_noise;
1653 data->delta_gain_code[i] = (u8)((delta_g * 697 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
1654 10) / 15); 698 data->delta_gain_code[i] =
1655 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < 699 min(data->delta_gain_code[i],
1656 data->delta_gain_code[i]) 700 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
1657 data->delta_gain_code[i] = 701
1658 CHAIN_NOISE_MAX_DELTA_GAIN_CODE; 702 data->delta_gain_code[i] =
1659 703 (data->delta_gain_code[i] | (1 << 2));
1660 data->delta_gain_code[i] = 704 } else {
1661 (data->delta_gain_code[i] | (1 << 2)); 705 data->delta_gain_code[i] = 0;
1662 } else
1663 data->delta_gain_code[i] = 0;
1664 }
1665 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1666 data->delta_gain_code[0],
1667 data->delta_gain_code[1],
1668 data->delta_gain_code[2]);
1669
1670 /* Differential gain gets sent to uCode only once */
1671 if (!data->radio_write) {
1672 struct iwl4965_calibration_cmd cmd;
1673 data->radio_write = 1;
1674
1675 memset(&cmd, 0, sizeof(cmd));
1676 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1677 cmd.diff_gain_a = data->delta_gain_code[0];
1678 cmd.diff_gain_b = data->delta_gain_code[1];
1679 cmd.diff_gain_c = data->delta_gain_code[2];
1680 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1681 sizeof(cmd), &cmd);
1682 if (ret)
1683 IWL_DEBUG_CALIB("fail sending cmd "
1684 "REPLY_PHY_CALIBRATION_CMD \n");
1685
1686 /* TODO we might want recalculate
1687 * rx_chain in rxon cmd */
1688
1689 /* Mark so we run this algo only once! */
1690 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1691 } 706 }
1692 data->chain_noise_a = 0;
1693 data->chain_noise_b = 0;
1694 data->chain_noise_c = 0;
1695 data->chain_signal_a = 0;
1696 data->chain_signal_b = 0;
1697 data->chain_signal_c = 0;
1698 data->beacon_count = 0;
1699 }
1700 return;
1701}
1702
1703static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1704 struct iwl4965_notif_statistics *resp)
1705{
1706 u32 rx_enable_time;
1707 u32 fa_cck;
1708 u32 fa_ofdm;
1709 u32 bad_plcp_cck;
1710 u32 bad_plcp_ofdm;
1711 u32 norm_fa_ofdm;
1712 u32 norm_fa_cck;
1713 struct iwl4965_sensitivity_data *data = NULL;
1714 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1715 struct statistics_rx *statistics = &(resp->rx);
1716 unsigned long flags;
1717 struct statistics_general_data statis;
1718 int ret;
1719
1720 data = &(priv->sensitivity_data);
1721
1722 if (!iwl_is_associated(priv)) {
1723 IWL_DEBUG_CALIB("<< - not associated\n");
1724 return;
1725 }
1726
1727 spin_lock_irqsave(&priv->lock, flags);
1728 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1729 IWL_DEBUG_CALIB("<< invalid data.\n");
1730 spin_unlock_irqrestore(&priv->lock, flags);
1731 return;
1732 }
1733
1734 /* Extract Statistics: */
1735 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1736 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1737 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1738 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1739 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1740
1741 statis.beacon_silence_rssi_a =
1742 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1743 statis.beacon_silence_rssi_b =
1744 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1745 statis.beacon_silence_rssi_c =
1746 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1747 statis.beacon_energy_a =
1748 le32_to_cpu(statistics->general.beacon_energy_a);
1749 statis.beacon_energy_b =
1750 le32_to_cpu(statistics->general.beacon_energy_b);
1751 statis.beacon_energy_c =
1752 le32_to_cpu(statistics->general.beacon_energy_c);
1753
1754 spin_unlock_irqrestore(&priv->lock, flags);
1755
1756 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1757
1758 if (!rx_enable_time) {
1759 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1760 return;
1761 }
1762
1763 /* These statistics increase monotonically, and do not reset
1764 * at each beacon. Calculate difference from last value, or just
1765 * use the new statistics value if it has reset or wrapped around. */
1766 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1767 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1768 else {
1769 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1770 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1771 } 707 }
708 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
709 data->delta_gain_code[0],
710 data->delta_gain_code[1],
711 data->delta_gain_code[2]);
1772 712
1773 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) 713 /* Differential gain gets sent to uCode only once */
1774 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; 714 if (!data->radio_write) {
1775 else { 715 struct iwl4965_calibration_cmd cmd;
1776 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; 716 data->radio_write = 1;
1777 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1778 }
1779
1780 if (data->last_fa_cnt_ofdm > fa_ofdm)
1781 data->last_fa_cnt_ofdm = fa_ofdm;
1782 else {
1783 fa_ofdm -= data->last_fa_cnt_ofdm;
1784 data->last_fa_cnt_ofdm += fa_ofdm;
1785 }
1786
1787 if (data->last_fa_cnt_cck > fa_cck)
1788 data->last_fa_cnt_cck = fa_cck;
1789 else {
1790 fa_cck -= data->last_fa_cnt_cck;
1791 data->last_fa_cnt_cck += fa_cck;
1792 }
1793
1794 /* Total aborted signal locks */
1795 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1796 norm_fa_cck = fa_cck + bad_plcp_cck;
1797
1798 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1799 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1800
1801 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1802 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1803 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
1804 717
1805 return; 718 memset(&cmd, 0, sizeof(cmd));
719 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
720 cmd.diff_gain_a = data->delta_gain_code[0];
721 cmd.diff_gain_b = data->delta_gain_code[1];
722 cmd.diff_gain_c = data->delta_gain_code[2];
723 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
724 sizeof(cmd), &cmd);
725 if (ret)
726 IWL_DEBUG_CALIB("fail sending cmd "
727 "REPLY_PHY_CALIBRATION_CMD \n");
728
729 /* TODO we might want recalculate
730 * rx_chain in rxon cmd */
731
732 /* Mark so we run this algo only once! */
733 data->state = IWL_CHAIN_NOISE_CALIBRATED;
734 }
735 data->chain_noise_a = 0;
736 data->chain_noise_b = 0;
737 data->chain_noise_c = 0;
738 data->chain_signal_a = 0;
739 data->chain_signal_b = 0;
740 data->chain_signal_c = 0;
741 data->beacon_count = 0;
1806} 742}
1807 743
1808static void iwl4965_bg_sensitivity_work(struct work_struct *work) 744static void iwl4965_bg_sensitivity_work(struct work_struct *work)
@@ -1819,21 +755,15 @@ static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1819 } 755 }
1820 756
1821 if (priv->start_calib) { 757 if (priv->start_calib) {
1822 iwl4965_noise_calibration(priv, &priv->statistics); 758 iwl_chain_noise_calibration(priv, &priv->statistics);
1823 759
1824 if (priv->sensitivity_data.state == 760 iwl_sensitivity_calibration(priv, &priv->statistics);
1825 IWL_SENS_CALIB_NEED_REINIT) {
1826 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1827 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1828 } else
1829 iwl4965_sensitivity_calibration(priv,
1830 &priv->statistics);
1831 } 761 }
1832 762
1833 mutex_unlock(&priv->mutex); 763 mutex_unlock(&priv->mutex);
1834 return; 764 return;
1835} 765}
1836#endif /*CONFIG_IWL4965_SENSITIVITY*/ 766#endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
1837 767
1838static void iwl4965_bg_txpower_work(struct work_struct *work) 768static void iwl4965_bg_txpower_work(struct work_struct *work)
1839{ 769{
@@ -1880,7 +810,7 @@ static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1880 * NOTE: Acquire priv->lock before calling this function ! 810 * NOTE: Acquire priv->lock before calling this function !
1881 */ 811 */
1882static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, 812static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1883 struct iwl4965_tx_queue *txq, 813 struct iwl_tx_queue *txq,
1884 int tx_fifo_id, int scd_retry) 814 int tx_fifo_id, int scd_retry)
1885{ 815{
1886 int txq_id = txq->q.id; 816 int txq_id = txq->q.id;
@@ -1890,11 +820,11 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1890 820
1891 /* Set up and activate */ 821 /* Set up and activate */
1892 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 822 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1893 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 823 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1894 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 824 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
1895 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | 825 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
1896 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | 826 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1897 SCD_QUEUE_STTS_REG_MSK); 827 IWL49_SCD_QUEUE_STTS_REG_MSK);
1898 828
1899 txq->sched_retry = scd_retry; 829 txq->sched_retry = scd_retry;
1900 830
@@ -1908,21 +838,11 @@ static const u16 default_queue_to_tx_fifo[] = {
1908 IWL_TX_FIFO_AC2, 838 IWL_TX_FIFO_AC2,
1909 IWL_TX_FIFO_AC1, 839 IWL_TX_FIFO_AC1,
1910 IWL_TX_FIFO_AC0, 840 IWL_TX_FIFO_AC0,
1911 IWL_CMD_FIFO_NUM, 841 IWL49_CMD_FIFO_NUM,
1912 IWL_TX_FIFO_HCCA_1, 842 IWL_TX_FIFO_HCCA_1,
1913 IWL_TX_FIFO_HCCA_2 843 IWL_TX_FIFO_HCCA_2
1914}; 844};
1915 845
1916static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1917{
1918 set_bit(txq_id, &priv->txq_ctx_active_msk);
1919}
1920
1921static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1922{
1923 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1924}
1925
1926int iwl4965_alive_notify(struct iwl_priv *priv) 846int iwl4965_alive_notify(struct iwl_priv *priv)
1927{ 847{
1928 u32 a; 848 u32 a;
@@ -1932,15 +852,6 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1932 852
1933 spin_lock_irqsave(&priv->lock, flags); 853 spin_lock_irqsave(&priv->lock, flags);
1934 854
1935#ifdef CONFIG_IWL4965_SENSITIVITY
1936 memset(&(priv->sensitivity_data), 0,
1937 sizeof(struct iwl4965_sensitivity_data));
1938 memset(&(priv->chain_noise_data), 0,
1939 sizeof(struct iwl4965_chain_noise_data));
1940 for (i = 0; i < NUM_RX_CHAINS; i++)
1941 priv->chain_noise_data.delta_gain_code[i] =
1942 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1943#endif /* CONFIG_IWL4965_SENSITIVITY*/
1944 ret = iwl_grab_nic_access(priv); 855 ret = iwl_grab_nic_access(priv);
1945 if (ret) { 856 if (ret) {
1946 spin_unlock_irqrestore(&priv->lock, flags); 857 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1949,10 +860,10 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1949 860
1950 /* Clear 4965's internal Tx Scheduler data base */ 861 /* Clear 4965's internal Tx Scheduler data base */
1951 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); 862 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1952 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 863 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1953 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 864 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1954 iwl_write_targ_mem(priv, a, 0); 865 iwl_write_targ_mem(priv, a, 0);
1955 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) 866 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1956 iwl_write_targ_mem(priv, a, 0); 867 iwl_write_targ_mem(priv, a, 0);
1957 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 868 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1958 iwl_write_targ_mem(priv, a, 0); 869 iwl_write_targ_mem(priv, a, 0);
@@ -1974,45 +885,66 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1974 885
1975 /* Max Tx Window size for Scheduler-ACK mode */ 886 /* Max Tx Window size for Scheduler-ACK mode */
1976 iwl_write_targ_mem(priv, priv->scd_base_addr + 887 iwl_write_targ_mem(priv, priv->scd_base_addr +
1977 SCD_CONTEXT_QUEUE_OFFSET(i), 888 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1978 (SCD_WIN_SIZE << 889 (SCD_WIN_SIZE <<
1979 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 890 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1980 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 891 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1981 892
1982 /* Frame limit */ 893 /* Frame limit */
1983 iwl_write_targ_mem(priv, priv->scd_base_addr + 894 iwl_write_targ_mem(priv, priv->scd_base_addr +
1984 SCD_CONTEXT_QUEUE_OFFSET(i) + 895 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1985 sizeof(u32), 896 sizeof(u32),
1986 (SCD_FRAME_LIMIT << 897 (SCD_FRAME_LIMIT <<
1987 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 898 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1988 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 899 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1989 900
1990 } 901 }
1991 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, 902 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1992 (1 << priv->hw_params.max_txq_num) - 1); 903 (1 << priv->hw_params.max_txq_num) - 1);
1993 904
1994 /* Activate all Tx DMA/FIFO channels */ 905 /* Activate all Tx DMA/FIFO channels */
1995 iwl_write_prph(priv, IWL49_SCD_TXFACT, 906 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
1996 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1997 907
1998 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 908 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1999 909
2000 /* Map each Tx/cmd queue to its corresponding fifo */ 910 /* Map each Tx/cmd queue to its corresponding fifo */
2001 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 911 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2002 int ac = default_queue_to_tx_fifo[i]; 912 int ac = default_queue_to_tx_fifo[i];
2003 iwl4965_txq_ctx_activate(priv, i); 913 iwl_txq_ctx_activate(priv, i);
2004 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 914 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2005 } 915 }
2006 916
2007 iwl_release_nic_access(priv); 917 iwl_release_nic_access(priv);
2008 spin_unlock_irqrestore(&priv->lock, flags); 918 spin_unlock_irqrestore(&priv->lock, flags);
2009 919
2010 /* Ask for statistics now, the uCode will send statistics notification
2011 * periodically after association */
2012 iwl_send_statistics_request(priv, CMD_ASYNC);
2013 return ret; 920 return ret;
2014} 921}
2015 922
923#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
924static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
925 .min_nrg_cck = 97,
926 .max_nrg_cck = 0,
927
928 .auto_corr_min_ofdm = 85,
929 .auto_corr_min_ofdm_mrc = 170,
930 .auto_corr_min_ofdm_x1 = 105,
931 .auto_corr_min_ofdm_mrc_x1 = 220,
932
933 .auto_corr_max_ofdm = 120,
934 .auto_corr_max_ofdm_mrc = 210,
935 .auto_corr_max_ofdm_x1 = 140,
936 .auto_corr_max_ofdm_mrc_x1 = 270,
937
938 .auto_corr_min_cck = 125,
939 .auto_corr_max_cck = 200,
940 .auto_corr_min_cck_mrc = 200,
941 .auto_corr_max_cck_mrc = 400,
942
943 .nrg_th_cck = 100,
944 .nrg_th_ofdm = 100,
945};
946#endif
947
2016/** 948/**
2017 * iwl4965_hw_set_hw_params 949 * iwl4965_hw_set_hw_params
2018 * 950 *
@@ -2021,15 +953,15 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
2021int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 953int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2022{ 954{
2023 955
2024 if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) || 956 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
2025 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 957 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
2026 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 958 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2027 IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES); 959 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
2028 return -EINVAL; 960 return -EINVAL;
2029 } 961 }
2030 962
2031 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 963 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
2032 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); 964 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
2033 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 965 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2034 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 966 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2035 if (priv->cfg->mod_params->amsdu_size_8K) 967 if (priv->cfg->mod_params->amsdu_size_8K)
@@ -2040,90 +972,35 @@ int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2040 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 972 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
2041 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 973 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
2042 974
975 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
976 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
977 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
978 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
979
2043 priv->hw_params.tx_chains_num = 2; 980 priv->hw_params.tx_chains_num = 2;
2044 priv->hw_params.rx_chains_num = 2; 981 priv->hw_params.rx_chains_num = 2;
2045 priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 982 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
2046 priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 983 priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
2047 984 priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
2048 return 0;
2049}
2050
2051/**
2052 * iwl4965_hw_txq_ctx_free - Free TXQ Context
2053 *
2054 * Destroy all TX DMA queues and structures
2055 */
2056void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
2057{
2058 int txq_id;
2059 985
2060 /* Tx queues */ 986#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
2061 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 987 priv->hw_params.sens = &iwl4965_sensitivity;
2062 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]); 988#endif
2063 989
2064 /* Keep-warm buffer */ 990 return 0;
2065 iwl4965_kw_free(priv);
2066} 991}
2067 992
2068/** 993/* set card power command */
2069 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 994static int iwl4965_set_power(struct iwl_priv *priv,
2070 * 995 void *cmd)
2071 * Does NOT advance any TFD circular buffer read/write indexes
2072 * Does NOT free the TFD itself (which is within circular buffer)
2073 */
2074int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2075{ 996{
2076 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0]; 997 int ret = 0;
2077 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
2078 struct pci_dev *dev = priv->pci_dev;
2079 int i;
2080 int counter = 0;
2081 int index, is_odd;
2082
2083 /* Host command buffers stay mapped in memory, nothing to clean */
2084 if (txq->q.id == IWL_CMD_QUEUE_NUM)
2085 return 0;
2086
2087 /* Sanity check on number of chunks */
2088 counter = IWL_GET_BITS(*bd, num_tbs);
2089 if (counter > MAX_NUM_OF_TBS) {
2090 IWL_ERROR("Too many chunks: %i\n", counter);
2091 /* @todo issue fatal error, it is quite serious situation */
2092 return 0;
2093 }
2094 998
2095 /* Unmap chunks, if any. 999 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
2096 * TFD info for odd chunks is different format than for even chunks. */ 1000 sizeof(struct iwl4965_powertable_cmd),
2097 for (i = 0; i < counter; i++) { 1001 cmd, NULL);
2098 index = i / 2; 1002 return ret;
2099 is_odd = i & 0x1;
2100
2101 if (is_odd)
2102 pci_unmap_single(
2103 dev,
2104 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
2105 (IWL_GET_BITS(bd->pa[index],
2106 tb2_addr_hi20) << 16),
2107 IWL_GET_BITS(bd->pa[index], tb2_len),
2108 PCI_DMA_TODEVICE);
2109
2110 else if (i > 0)
2111 pci_unmap_single(dev,
2112 le32_to_cpu(bd->pa[index].tb1_addr),
2113 IWL_GET_BITS(bd->pa[index], tb1_len),
2114 PCI_DMA_TODEVICE);
2115
2116 /* Free SKB, if any, for this chunk */
2117 if (txq->txb[txq->q.read_ptr].skb[i]) {
2118 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
2119
2120 dev_kfree_skb(skb);
2121 txq->txb[txq->q.read_ptr].skb[i] = NULL;
2122 }
2123 }
2124 return 0;
2125} 1003}
2126
2127int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) 1004int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
2128{ 1005{
2129 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); 1006 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
@@ -2224,11 +1101,11 @@ static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
2224 s32 b = -1; 1101 s32 b = -1;
2225 1102
2226 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { 1103 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2227 if (priv->eeprom.calib_info.band_info[b].ch_from == 0) 1104 if (priv->calib_info->band_info[b].ch_from == 0)
2228 continue; 1105 continue;
2229 1106
2230 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) 1107 if ((channel >= priv->calib_info->band_info[b].ch_from)
2231 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) 1108 && (channel <= priv->calib_info->band_info[b].ch_to))
2232 break; 1109 break;
2233 } 1110 }
2234 1111
@@ -2256,14 +1133,14 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2256 * in channel number. 1133 * in channel number.
2257 */ 1134 */
2258static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, 1135static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2259 struct iwl4965_eeprom_calib_ch_info *chan_info) 1136 struct iwl_eeprom_calib_ch_info *chan_info)
2260{ 1137{
2261 s32 s = -1; 1138 s32 s = -1;
2262 u32 c; 1139 u32 c;
2263 u32 m; 1140 u32 m;
2264 const struct iwl4965_eeprom_calib_measure *m1; 1141 const struct iwl_eeprom_calib_measure *m1;
2265 const struct iwl4965_eeprom_calib_measure *m2; 1142 const struct iwl_eeprom_calib_measure *m2;
2266 struct iwl4965_eeprom_calib_measure *omeas; 1143 struct iwl_eeprom_calib_measure *omeas;
2267 u32 ch_i1; 1144 u32 ch_i1;
2268 u32 ch_i2; 1145 u32 ch_i2;
2269 1146
@@ -2273,8 +1150,8 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2273 return -1; 1150 return -1;
2274 } 1151 }
2275 1152
2276 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; 1153 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
2277 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; 1154 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
2278 chan_info->ch_num = (u8) channel; 1155 chan_info->ch_num = (u8) channel;
2279 1156
2280 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", 1157 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
@@ -2282,9 +1159,9 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2282 1159
2283 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { 1160 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2284 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { 1161 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2285 m1 = &(priv->eeprom.calib_info.band_info[s].ch1. 1162 m1 = &(priv->calib_info->band_info[s].ch1.
2286 measurements[c][m]); 1163 measurements[c][m]);
2287 m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 1164 m2 = &(priv->calib_info->band_info[s].ch2.
2288 measurements[c][m]); 1165 measurements[c][m]);
2289 omeas = &(chan_info->measurements[c][m]); 1166 omeas = &(chan_info->measurements[c][m]);
2290 1167
@@ -2603,8 +1480,8 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2603 int i; 1480 int i;
2604 int c; 1481 int c;
2605 const struct iwl_channel_info *ch_info = NULL; 1482 const struct iwl_channel_info *ch_info = NULL;
2606 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; 1483 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
2607 const struct iwl4965_eeprom_calib_measure *measurement; 1484 const struct iwl_eeprom_calib_measure *measurement;
2608 s16 voltage; 1485 s16 voltage;
2609 s32 init_voltage; 1486 s32 init_voltage;
2610 s32 voltage_compensation; 1487 s32 voltage_compensation;
@@ -2661,9 +1538,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2661 /* hardware txpower limits ... 1538 /* hardware txpower limits ...
2662 * saturation (clipping distortion) txpowers are in half-dBm */ 1539 * saturation (clipping distortion) txpowers are in half-dBm */
2663 if (band) 1540 if (band)
2664 saturation_power = priv->eeprom.calib_info.saturation_power24; 1541 saturation_power = priv->calib_info->saturation_power24;
2665 else 1542 else
2666 saturation_power = priv->eeprom.calib_info.saturation_power52; 1543 saturation_power = priv->calib_info->saturation_power52;
2667 1544
2668 if (saturation_power < IWL_TX_POWER_SATURATION_MIN || 1545 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2669 saturation_power > IWL_TX_POWER_SATURATION_MAX) { 1546 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
@@ -2693,7 +1570,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2693 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1570 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2694 1571
2695 /* calculate tx gain adjustment based on power supply voltage */ 1572 /* calculate tx gain adjustment based on power supply voltage */
2696 voltage = priv->eeprom.calib_info.voltage; 1573 voltage = priv->calib_info->voltage;
2697 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1574 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2698 voltage_compensation = 1575 voltage_compensation =
2699 iwl4965_get_voltage_compensation(voltage, init_voltage); 1576 iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -2888,8 +1765,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2888{ 1765{
2889 int ret = 0; 1766 int ret = 0;
2890 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1767 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2891 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon; 1768 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
2892 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon; 1769 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
2893 1770
2894 if ((rxon1->flags == rxon2->flags) && 1771 if ((rxon1->flags == rxon2->flags) &&
2895 (rxon1->filter_flags == rxon2->filter_flags) && 1772 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -2965,77 +1842,7 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2965 return rc; 1842 return rc;
2966} 1843}
2967 1844
2968#define RTS_HCCA_RETRY_LIMIT 3 1845static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
2969#define RTS_DFAULT_RETRY_LIMIT 60
2970
2971void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2972 struct iwl_cmd *cmd,
2973 struct ieee80211_tx_control *ctrl,
2974 struct ieee80211_hdr *hdr, int sta_id,
2975 int is_hcca)
2976{
2977 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2978 u8 rts_retry_limit = 0;
2979 u8 data_retry_limit = 0;
2980 u16 fc = le16_to_cpu(hdr->frame_control);
2981 u8 rate_plcp;
2982 u16 rate_flags = 0;
2983 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2984
2985 rate_plcp = iwl4965_rates[rate_idx].plcp;
2986
2987 rts_retry_limit = (is_hcca) ?
2988 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2989
2990 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2991 rate_flags |= RATE_MCS_CCK_MSK;
2992
2993
2994 if (ieee80211_is_probe_response(fc)) {
2995 data_retry_limit = 3;
2996 if (data_retry_limit < rts_retry_limit)
2997 rts_retry_limit = data_retry_limit;
2998 } else
2999 data_retry_limit = IWL_DEFAULT_TX_RETRY;
3000
3001 if (priv->data_retry_limit != -1)
3002 data_retry_limit = priv->data_retry_limit;
3003
3004
3005 if (ieee80211_is_data(fc)) {
3006 tx->initial_rate_index = 0;
3007 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
3008 } else {
3009 switch (fc & IEEE80211_FCTL_STYPE) {
3010 case IEEE80211_STYPE_AUTH:
3011 case IEEE80211_STYPE_DEAUTH:
3012 case IEEE80211_STYPE_ASSOC_REQ:
3013 case IEEE80211_STYPE_REASSOC_REQ:
3014 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
3015 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
3016 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
3017 }
3018 break;
3019 default:
3020 break;
3021 }
3022
3023 /* Alternate between antenna A and B for successive frames */
3024 if (priv->use_ant_b_for_management_frame) {
3025 priv->use_ant_b_for_management_frame = 0;
3026 rate_flags |= RATE_MCS_ANT_B_MSK;
3027 } else {
3028 priv->use_ant_b_for_management_frame = 1;
3029 rate_flags |= RATE_MCS_ANT_A_MSK;
3030 }
3031 }
3032
3033 tx->rts_retry_limit = rts_retry_limit;
3034 tx->data_retry_limit = data_retry_limit;
3035 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
3036}
3037
3038int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
3039{ 1846{
3040 struct iwl4965_shared *s = priv->shared_virt; 1847 struct iwl4965_shared *s = priv->shared_virt;
3041 return le32_to_cpu(s->rb_closed) & 0xFFF; 1848 return le32_to_cpu(s->rb_closed) & 0xFFF;
@@ -3047,7 +1854,7 @@ int iwl4965_hw_get_temperature(struct iwl_priv *priv)
3047} 1854}
3048 1855
3049unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 1856unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3050 struct iwl4965_frame *frame, u8 rate) 1857 struct iwl_frame *frame, u8 rate)
3051{ 1858{
3052 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; 1859 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
3053 unsigned int frame_size; 1860 unsigned int frame_size;
@@ -3060,7 +1867,7 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3060 1867
3061 frame_size = iwl4965_fill_beacon_frame(priv, 1868 frame_size = iwl4965_fill_beacon_frame(priv,
3062 tx_beacon_cmd->frame, 1869 tx_beacon_cmd->frame,
3063 iwl4965_broadcast_addr, 1870 iwl_bcast_addr,
3064 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 1871 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3065 1872
3066 BUG_ON(frame_size > MAX_MPDU_SIZE); 1873 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -3078,95 +1885,35 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3078 return (sizeof(*tx_beacon_cmd) + frame_size); 1885 return (sizeof(*tx_beacon_cmd) + frame_size);
3079} 1886}
3080 1887
3081/* 1888static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
3082 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
3083 * given Tx queue, and enable the DMA channel used for that queue.
3084 *
3085 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3086 * channels supported in hardware.
3087 */
3088int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
3089{
3090 int rc;
3091 unsigned long flags;
3092 int txq_id = txq->q.id;
3093
3094 spin_lock_irqsave(&priv->lock, flags);
3095 rc = iwl_grab_nic_access(priv);
3096 if (rc) {
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098 return rc;
3099 }
3100
3101 /* Circular buffer (TFD queue in DRAM) physical base address */
3102 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
3103 txq->q.dma_addr >> 8);
3104
3105 /* Enable DMA channel, using same id as for TFD queue */
3106 iwl_write_direct32(
3107 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
3108 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3109 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3110 iwl_release_nic_access(priv);
3111 spin_unlock_irqrestore(&priv->lock, flags);
3112
3113 return 0;
3114}
3115
3116int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
3117 dma_addr_t addr, u16 len)
3118{ 1889{
3119 int index, is_odd; 1890 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
3120 struct iwl4965_tfd_frame *tfd = ptr; 1891 sizeof(struct iwl4965_shared),
3121 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 1892 &priv->shared_phys);
3122 1893 if (!priv->shared_virt)
3123 /* Each TFD can point to a maximum 20 Tx buffers */ 1894 return -ENOMEM;
3124 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3125 IWL_ERROR("Error can not send more than %d chunks\n",
3126 MAX_NUM_OF_TBS);
3127 return -EINVAL;
3128 }
3129
3130 index = num_tbs / 2;
3131 is_odd = num_tbs & 0x1;
3132 1895
3133 if (!is_odd) { 1896 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
3134 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3135 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
3136 iwl_get_dma_hi_address(addr));
3137 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3138 } else {
3139 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3140 (u32) (addr & 0xffff));
3141 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3142 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3143 }
3144 1897
3145 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); 1898 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
3146 1899
3147 return 0; 1900 return 0;
3148} 1901}
3149 1902
3150static void iwl4965_hw_card_show_info(struct iwl_priv *priv) 1903static void iwl4965_free_shared_mem(struct iwl_priv *priv)
3151{ 1904{
3152 u16 hw_version = priv->eeprom.board_revision_4965; 1905 if (priv->shared_virt)
3153 1906 pci_free_consistent(priv->pci_dev,
3154 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", 1907 sizeof(struct iwl4965_shared),
3155 ((hw_version >> 8) & 0x0F), 1908 priv->shared_virt,
3156 ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); 1909 priv->shared_phys);
3157
3158 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
3159 priv->eeprom.board_pba_number_4965);
3160} 1910}
3161 1911
3162#define IWL_TX_CRC_SIZE 4
3163#define IWL_TX_DELIMITER_SIZE 4
3164
3165/** 1912/**
3166 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1913 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
3167 */ 1914 */
3168static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 1915static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3169 struct iwl4965_tx_queue *txq, 1916 struct iwl_tx_queue *txq,
3170 u16 byte_cnt) 1917 u16 byte_cnt)
3171{ 1918{
3172 int len; 1919 int len;
@@ -3180,50 +1927,13 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3180 tfd_offset[txq->q.write_ptr], byte_cnt, len); 1927 tfd_offset[txq->q.write_ptr], byte_cnt, len);
3181 1928
3182 /* If within first 64 entries, duplicate at end */ 1929 /* If within first 64 entries, duplicate at end */
3183 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) 1930 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
3184 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1931 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
3185 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 1932 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
3186 byte_cnt, len); 1933 byte_cnt, len);
3187} 1934}
3188 1935
3189/** 1936/**
3190 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
3191 *
3192 * Selects how many and which Rx receivers/antennas/chains to use.
3193 * This should not be used for scan command ... it puts data in wrong place.
3194 */
3195void iwl4965_set_rxon_chain(struct iwl_priv *priv)
3196{
3197 u8 is_single = is_single_stream(priv);
3198 u8 idle_state, rx_state;
3199
3200 priv->staging_rxon.rx_chain = 0;
3201 rx_state = idle_state = 3;
3202
3203 /* Tell uCode which antennas are actually connected.
3204 * Before first association, we assume all antennas are connected.
3205 * Just after first association, iwl4965_noise_calibration()
3206 * checks which antennas actually *are* connected. */
3207 priv->staging_rxon.rx_chain |=
3208 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
3209
3210 /* How many receivers should we use? */
3211 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
3212 priv->staging_rxon.rx_chain |=
3213 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3214 priv->staging_rxon.rx_chain |=
3215 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3216
3217 if (!is_single && (rx_state >= 2) &&
3218 !test_bit(STATUS_POWER_PMI, &priv->status))
3219 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3220 else
3221 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3222
3223 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3224}
3225
3226/**
3227 * sign_extend - Sign extend a value using specified bit as sign-bit 1937 * sign_extend - Sign extend a value using specified bit as sign-bit
3228 * 1938 *
3229 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 1939 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
@@ -3383,9 +2093,10 @@ static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3383 priv->last_rx_noise); 2093 priv->last_rx_noise);
3384} 2094}
3385 2095
3386void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 2096void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
2097 struct iwl_rx_mem_buffer *rxb)
3387{ 2098{
3388 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2099 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3389 int change; 2100 int change;
3390 s32 temp; 2101 s32 temp;
3391 2102
@@ -3412,7 +2123,7 @@ void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffe
3412 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 2123 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3413 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { 2124 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3414 iwl4965_rx_calc_noise(priv); 2125 iwl4965_rx_calc_noise(priv);
3415#ifdef CONFIG_IWL4965_SENSITIVITY 2126#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3416 queue_work(priv->workqueue, &priv->sensitivity_work); 2127 queue_work(priv->workqueue, &priv->sensitivity_work);
3417#endif 2128#endif
3418 } 2129 }
@@ -3455,7 +2166,7 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
3455 struct ieee80211_rx_status *stats, 2166 struct ieee80211_rx_status *stats,
3456 u32 ampdu_status) 2167 u32 ampdu_status)
3457{ 2168{
3458 s8 signal = stats->ssi; 2169 s8 signal = stats->signal;
3459 s8 noise = 0; 2170 s8 noise = 0;
3460 int rate = stats->rate_idx; 2171 int rate = stats->rate_idx;
3461 u64 tsf = stats->mactime; 2172 u64 tsf = stats->mactime;
@@ -3529,7 +2240,7 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
3529 if (rate == -1) 2240 if (rate == -1)
3530 iwl4965_rt->rt_rate = 0; 2241 iwl4965_rt->rt_rate = 0;
3531 else 2242 else
3532 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee; 2243 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
3533 2244
3534 /* 2245 /*
3535 * "antenna number" 2246 * "antenna number"
@@ -3562,7 +2273,54 @@ static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3562 priv->rx_stats[idx].bytes += len; 2273 priv->rx_stats[idx].bytes += len;
3563} 2274}
3564 2275
3565static u32 iwl4965_translate_rx_status(u32 decrypt_in) 2276/*
2277 * returns non-zero if packet should be dropped
2278 */
2279static int iwl4965_set_decrypted_flag(struct iwl_priv *priv,
2280 struct ieee80211_hdr *hdr,
2281 u32 decrypt_res,
2282 struct ieee80211_rx_status *stats)
2283{
2284 u16 fc = le16_to_cpu(hdr->frame_control);
2285
2286 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2287 return 0;
2288
2289 if (!(fc & IEEE80211_FCTL_PROTECTED))
2290 return 0;
2291
2292 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2293 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2294 case RX_RES_STATUS_SEC_TYPE_TKIP:
2295 /* The uCode has got a bad phase 1 Key, pushes the packet.
2296 * Decryption will be done in SW. */
2297 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2298 RX_RES_STATUS_BAD_KEY_TTAK)
2299 break;
2300
2301 case RX_RES_STATUS_SEC_TYPE_WEP:
2302 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2303 RX_RES_STATUS_BAD_ICV_MIC) {
2304 /* bad ICV, the packet is destroyed since the
2305 * decryption is inplace, drop it */
2306 IWL_DEBUG_RX("Packet destroyed\n");
2307 return -1;
2308 }
2309 case RX_RES_STATUS_SEC_TYPE_CCMP:
2310 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2311 RX_RES_STATUS_DECRYPT_OK) {
2312 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2313 stats->flag |= RX_FLAG_DECRYPTED;
2314 }
2315 break;
2316
2317 default:
2318 break;
2319 }
2320 return 0;
2321}
2322
2323static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
3566{ 2324{
3567 u32 decrypt_out = 0; 2325 u32 decrypt_out = 0;
3568 2326
@@ -3623,10 +2381,10 @@ static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3623 2381
3624static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, 2382static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3625 int include_phy, 2383 int include_phy,
3626 struct iwl4965_rx_mem_buffer *rxb, 2384 struct iwl_rx_mem_buffer *rxb,
3627 struct ieee80211_rx_status *stats) 2385 struct ieee80211_rx_status *stats)
3628{ 2386{
3629 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 2387 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3630 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 2388 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3631 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; 2389 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3632 struct ieee80211_hdr *hdr; 2390 struct ieee80211_hdr *hdr;
@@ -3663,7 +2421,9 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3663 rx_start->byte_count = amsdu->byte_count; 2421 rx_start->byte_count = amsdu->byte_count;
3664 rx_end = (__le32 *) (((u8 *) hdr) + len); 2422 rx_end = (__le32 *) (((u8 *) hdr) + len);
3665 } 2423 }
3666 if (len > priv->hw_params.max_pkt_size || len < 16) { 2424 /* In monitor mode allow 802.11 ACk frames (10 bytes) */
2425 if (len > priv->hw_params.max_pkt_size ||
2426 len < ((priv->iw_mode == IEEE80211_IF_TYPE_MNTR) ? 10 : 16)) {
3667 IWL_WARNING("byte count out of range [16,4K] : %d\n", len); 2427 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3668 return; 2428 return;
3669 } 2429 }
@@ -3674,7 +2434,7 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3674 if (!include_phy) { 2434 if (!include_phy) {
3675 /* New status scheme, need to translate */ 2435 /* New status scheme, need to translate */
3676 ampdu_status_legacy = ampdu_status; 2436 ampdu_status_legacy = ampdu_status;
3677 ampdu_status = iwl4965_translate_rx_status(ampdu_status); 2437 ampdu_status = iwl4965_translate_rx_status(priv, ampdu_status);
3678 } 2438 }
3679 2439
3680 /* start from MAC */ 2440 /* start from MAC */
@@ -3691,8 +2451,10 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3691 stats->flag = 0; 2451 stats->flag = 0;
3692 hdr = (struct ieee80211_hdr *)rxb->skb->data; 2452 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3693 2453
3694 if (!priv->cfg->mod_params->sw_crypto) 2454 /* in case of HW accelerated crypto and bad decryption, drop */
3695 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); 2455 if (!priv->hw_params.sw_crypto &&
2456 iwl4965_set_decrypted_flag(priv, hdr, ampdu_status, stats))
2457 return;
3696 2458
3697 if (priv->add_radiotap) 2459 if (priv->add_radiotap)
3698 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status); 2460 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
@@ -3704,7 +2466,8 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3704} 2466}
3705 2467
3706/* Calc max signal level (dBm) among 3 possible receivers */ 2468/* Calc max signal level (dBm) among 3 possible receivers */
3707static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp) 2469static int iwl4965_calc_rssi(struct iwl_priv *priv,
2470 struct iwl4965_rx_phy_res *rx_resp)
3708{ 2471{
3709 /* data from PHY/DSP regarding signal strength, etc., 2472 /* data from PHY/DSP regarding signal strength, etc.,
3710 * contents are always there, not configurable by host. */ 2473 * contents are always there, not configurable by host. */
@@ -3737,38 +2500,6 @@ static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3737 return (max_rssi - agc - IWL_RSSI_OFFSET); 2500 return (max_rssi - agc - IWL_RSSI_OFFSET);
3738} 2501}
3739 2502
3740#ifdef CONFIG_IWL4965_HT
3741
3742void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3743 struct ieee80211_ht_info *ht_info,
3744 enum ieee80211_band band)
3745{
3746 ht_info->cap = 0;
3747 memset(ht_info->supp_mcs_set, 0, 16);
3748
3749 ht_info->ht_supported = 1;
3750
3751 if (band == IEEE80211_BAND_5GHZ) {
3752 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3753 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3754 ht_info->supp_mcs_set[4] = 0x01;
3755 }
3756 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3757 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3758 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3759 (IWL_MIMO_PS_NONE << 2));
3760
3761 if (priv->cfg->mod_params->amsdu_size_8K)
3762 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3763
3764 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3765 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3766
3767 ht_info->supp_mcs_set[0] = 0xFF;
3768 ht_info->supp_mcs_set[1] = 0xFF;
3769}
3770#endif /* CONFIG_IWL4965_HT */
3771
3772static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 2503static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3773{ 2504{
3774 unsigned long flags; 2505 unsigned long flags;
@@ -3780,13 +2511,13 @@ static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3780 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 2511 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3781 spin_unlock_irqrestore(&priv->sta_lock, flags); 2512 spin_unlock_irqrestore(&priv->sta_lock, flags);
3782 2513
3783 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 2514 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3784} 2515}
3785 2516
3786static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) 2517static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3787{ 2518{
3788 /* FIXME: need locking over ps_status ??? */ 2519 /* FIXME: need locking over ps_status ??? */
3789 u8 sta_id = iwl4965_hw_find_station(priv, addr); 2520 u8 sta_id = iwl_find_station(priv, addr);
3790 2521
3791 if (sta_id != IWL_INVALID_STATION) { 2522 if (sta_id != IWL_INVALID_STATION) {
3792 u8 sta_awake = priv->stations[sta_id]. 2523 u8 sta_awake = priv->stations[sta_id].
@@ -3813,7 +2544,7 @@ static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3813 * proper operation with 4965. 2544 * proper operation with 4965.
3814 */ 2545 */
3815static void iwl4965_dbg_report_frame(struct iwl_priv *priv, 2546static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3816 struct iwl4965_rx_packet *pkt, 2547 struct iwl_rx_packet *pkt,
3817 struct ieee80211_hdr *header, int group100) 2548 struct ieee80211_hdr *header, int group100)
3818{ 2549{
3819 u32 to_us; 2550 u32 to_us;
@@ -3840,7 +2571,7 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3840 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt); 2571 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3841 u8 *data = IWL_RX_DATA(pkt); 2572 u8 *data = IWL_RX_DATA(pkt);
3842 2573
3843 if (likely(!(iwl_debug_level & IWL_DL_RX))) 2574 if (likely(!(priv->debug_level & IWL_DL_RX)))
3844 return; 2575 return;
3845 2576
3846 /* MAC header */ 2577 /* MAC header */
@@ -3921,7 +2652,7 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3921 if (unlikely(rate_idx == -1)) 2652 if (unlikely(rate_idx == -1))
3922 bitrate = 0; 2653 bitrate = 0;
3923 else 2654 else
3924 bitrate = iwl4965_rates[rate_idx].ieee / 2; 2655 bitrate = iwl_rates[rate_idx].ieee / 2;
3925 2656
3926 /* print frame summary. 2657 /* print frame summary.
3927 * MAC addresses show just the last byte (for brevity), 2658 * MAC addresses show just the last byte (for brevity),
@@ -3943,11 +2674,11 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3943 } 2674 }
3944 } 2675 }
3945 if (print_dump) 2676 if (print_dump)
3946 iwl_print_hex_dump(IWL_DL_RX, data, length); 2677 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
3947} 2678}
3948#else 2679#else
3949static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv, 2680static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3950 struct iwl4965_rx_packet *pkt, 2681 struct iwl_rx_packet *pkt,
3951 struct ieee80211_hdr *header, 2682 struct ieee80211_hdr *header,
3952 int group100) 2683 int group100)
3953{ 2684{
@@ -3958,12 +2689,12 @@ static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3958 2689
3959/* Called for REPLY_RX (legacy ABG frames), or 2690/* Called for REPLY_RX (legacy ABG frames), or
3960 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ 2691 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3961static void iwl4965_rx_reply_rx(struct iwl_priv *priv, 2692void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3962 struct iwl4965_rx_mem_buffer *rxb) 2693 struct iwl_rx_mem_buffer *rxb)
3963{ 2694{
3964 struct ieee80211_hdr *header; 2695 struct ieee80211_hdr *header;
3965 struct ieee80211_rx_status rx_status; 2696 struct ieee80211_rx_status rx_status;
3966 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2697 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3967 /* Use phy data (Rx signal strength, etc.) contained within 2698 /* Use phy data (Rx signal strength, etc.) contained within
3968 * this rx packet for legacy frames, 2699 * this rx packet for legacy frames,
3969 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ 2700 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
@@ -4036,7 +2767,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4036 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); 2767 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4037 2768
4038 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 2769 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
4039 rx_status.ssi = iwl4965_calc_rssi(rx_start); 2770 rx_status.signal = iwl4965_calc_rssi(priv, rx_start);
4040 2771
4041 /* Meaningful noise values are available only from beacon statistics, 2772 /* Meaningful noise values are available only from beacon statistics,
4042 * which are gathered only when associated, and indicate noise 2773 * which are gathered only when associated, and indicate noise
@@ -4045,11 +2776,11 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4045 if (iwl_is_associated(priv) && 2776 if (iwl_is_associated(priv) &&
4046 !test_bit(STATUS_SCANNING, &priv->status)) { 2777 !test_bit(STATUS_SCANNING, &priv->status)) {
4047 rx_status.noise = priv->last_rx_noise; 2778 rx_status.noise = priv->last_rx_noise;
4048 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 2779 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal,
4049 rx_status.noise); 2780 rx_status.noise);
4050 } else { 2781 } else {
4051 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 2782 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4052 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0); 2783 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal, 0);
4053 } 2784 }
4054 2785
4055 /* Reset beacon noise level if not associated. */ 2786 /* Reset beacon noise level if not associated. */
@@ -4061,12 +2792,19 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4061 iwl4965_dbg_report_frame(priv, pkt, header, 1); 2792 iwl4965_dbg_report_frame(priv, pkt, header, 1);
4062 2793
4063 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n", 2794 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4064 rx_status.ssi, rx_status.noise, rx_status.signal, 2795 rx_status.signal, rx_status.noise, rx_status.signal,
4065 (unsigned long long)rx_status.mactime); 2796 (unsigned long long)rx_status.mactime);
4066 2797
2798
2799 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2800 iwl4965_handle_data_packet(priv, 1, include_phy,
2801 rxb, &rx_status);
2802 return;
2803 }
2804
4067 network_packet = iwl4965_is_network_packet(priv, header); 2805 network_packet = iwl4965_is_network_packet(priv, header);
4068 if (network_packet) { 2806 if (network_packet) {
4069 priv->last_rx_rssi = rx_status.ssi; 2807 priv->last_rx_rssi = rx_status.signal;
4070 priv->last_beacon_time = priv->ucode_beacon_time; 2808 priv->last_beacon_time = priv->ucode_beacon_time;
4071 priv->last_tsf = le64_to_cpu(rx_start->timestamp); 2809 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4072 } 2810 }
@@ -4125,65 +2863,16 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4125 } 2863 }
4126} 2864}
4127 2865
4128/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4129 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4130static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
4131 struct iwl4965_rx_mem_buffer *rxb)
4132{
4133 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4134 priv->last_phy_res[0] = 1;
4135 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4136 sizeof(struct iwl4965_rx_phy_res));
4137}
4138static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
4139 struct iwl4965_rx_mem_buffer *rxb)
4140
4141{
4142#ifdef CONFIG_IWL4965_SENSITIVITY
4143 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4144 struct iwl4965_missed_beacon_notif *missed_beacon;
4145
4146 missed_beacon = &pkt->u.missed_beacon;
4147 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4148 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4149 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4150 le32_to_cpu(missed_beacon->total_missed_becons),
4151 le32_to_cpu(missed_beacon->num_recvd_beacons),
4152 le32_to_cpu(missed_beacon->num_expected_beacons));
4153 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4154 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4155 queue_work(priv->workqueue, &priv->sensitivity_work);
4156 }
4157#endif /*CONFIG_IWL4965_SENSITIVITY*/
4158}
4159#ifdef CONFIG_IWL4965_HT 2866#ifdef CONFIG_IWL4965_HT
4160 2867
4161/** 2868/**
4162 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4163 */
4164static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4165 int sta_id, int tid)
4166{
4167 unsigned long flags;
4168
4169 /* Remove "disable" flag, to enable Tx for this TID */
4170 spin_lock_irqsave(&priv->sta_lock, flags);
4171 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4172 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4173 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4174 spin_unlock_irqrestore(&priv->sta_lock, flags);
4175
4176 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4177}
4178
4179/**
4180 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack 2869 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4181 * 2870 *
4182 * Go through block-ack's bitmap of ACK'd frames, update driver's record of 2871 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4183 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 2872 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4184 */ 2873 */
4185static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, 2874static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4186 struct iwl4965_ht_agg *agg, 2875 struct iwl_ht_agg *agg,
4187 struct iwl4965_compressed_ba_resp* 2876 struct iwl4965_compressed_ba_resp*
4188 ba_resp) 2877 ba_resp)
4189 2878
@@ -4193,7 +2882,7 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4193 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 2882 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4194 u64 bitmap; 2883 u64 bitmap;
4195 int successes = 0; 2884 int successes = 0;
4196 struct ieee80211_tx_status *tx_status; 2885 struct ieee80211_tx_info *info;
4197 2886
4198 if (unlikely(!agg->wait_for_ba)) { 2887 if (unlikely(!agg->wait_for_ba)) {
4199 IWL_ERROR("Received BA when not expected\n"); 2888 IWL_ERROR("Received BA when not expected\n");
@@ -4231,13 +2920,13 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4231 agg->start_idx + i); 2920 agg->start_idx + i);
4232 } 2921 }
4233 2922
4234 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status; 2923 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
4235 tx_status->flags = IEEE80211_TX_STATUS_ACK; 2924 memset(&info->status, 0, sizeof(info->status));
4236 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU; 2925 info->flags = IEEE80211_TX_STAT_ACK;
4237 tx_status->ampdu_ack_map = successes; 2926 info->flags |= IEEE80211_TX_STAT_AMPDU;
4238 tx_status->ampdu_ack_len = agg->frame_count; 2927 info->status.ampdu_ack_map = successes;
4239 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, 2928 info->status.ampdu_ack_len = agg->frame_count;
4240 &tx_status->control); 2929 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
4241 2930
4242 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); 2931 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4243 2932
@@ -4254,16 +2943,16 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
4254 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 2943 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4255 iwl_write_prph(priv, 2944 iwl_write_prph(priv,
4256 IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 2945 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
4257 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 2946 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4258 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 2947 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4259} 2948}
4260 2949
4261/** 2950/**
4262 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID 2951 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4263 * priv->lock must be held by the caller 2952 * priv->lock must be held by the caller
4264 */ 2953 */
4265static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, 2954static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
4266 u16 ssn_idx, u8 tx_fifo) 2955 u16 ssn_idx, u8 tx_fifo)
4267{ 2956{
4268 int ret = 0; 2957 int ret = 0;
4269 2958
@@ -4287,7 +2976,7 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4287 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 2976 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4288 2977
4289 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 2978 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4290 iwl4965_txq_ctx_deactivate(priv, txq_id); 2979 iwl_txq_ctx_deactivate(priv, txq_id);
4291 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 2980 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4292 2981
4293 iwl_release_nic_access(priv); 2982 iwl_release_nic_access(priv);
@@ -4295,49 +2984,6 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4295 return 0; 2984 return 0;
4296} 2985}
4297 2986
4298int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
4299 u8 tid, int txq_id)
4300{
4301 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4302 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4303 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4304
4305 switch (priv->stations[sta_id].tid[tid].agg.state) {
4306 case IWL_EMPTYING_HW_QUEUE_DELBA:
4307 /* We are reclaiming the last packet of the */
4308 /* aggregated HW queue */
4309 if (txq_id == tid_data->agg.txq_id &&
4310 q->read_ptr == q->write_ptr) {
4311 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4312 int tx_fifo = default_tid_to_tx_fifo[tid];
4313 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4314 iwl4965_tx_queue_agg_disable(priv, txq_id,
4315 ssn, tx_fifo);
4316 tid_data->agg.state = IWL_AGG_OFF;
4317 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4318 }
4319 break;
4320 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4321 /* We are reclaiming the last packet of the queue */
4322 if (tid_data->tfds_in_queue == 0) {
4323 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4324 tid_data->agg.state = IWL_AGG_ON;
4325 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4326 }
4327 break;
4328 }
4329 return 0;
4330}
4331
4332/**
4333 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4334 * @index -- current index
4335 * @n_bd -- total number of entries in queue (s/b power of 2)
4336 */
4337static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4338{
4339 return (index == 0) ? n_bd - 1 : index - 1;
4340}
4341 2987
4342/** 2988/**
4343 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA 2989 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
@@ -4346,13 +2992,13 @@ static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4346 * of frames sent via aggregation. 2992 * of frames sent via aggregation.
4347 */ 2993 */
4348static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, 2994static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4349 struct iwl4965_rx_mem_buffer *rxb) 2995 struct iwl_rx_mem_buffer *rxb)
4350{ 2996{
4351 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2997 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4352 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 2998 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4353 int index; 2999 int index;
4354 struct iwl4965_tx_queue *txq = NULL; 3000 struct iwl_tx_queue *txq = NULL;
4355 struct iwl4965_ht_agg *agg; 3001 struct iwl_ht_agg *agg;
4356 DECLARE_MAC_BUF(mac); 3002 DECLARE_MAC_BUF(mac);
4357 3003
4358 /* "flow" corresponds to Tx queue */ 3004 /* "flow" corresponds to Tx queue */
@@ -4371,7 +3017,7 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4371 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 3017 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4372 3018
4373 /* Find index just before block-ack window */ 3019 /* Find index just before block-ack window */
4374 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 3020 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4375 3021
4376 /* TODO: Need to get this copy more safely - now good for debug */ 3022 /* TODO: Need to get this copy more safely - now good for debug */
4377 3023
@@ -4398,15 +3044,19 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4398 * block-ack window (we assume that they've been successfully 3044 * block-ack window (we assume that they've been successfully
4399 * transmitted ... if not, it's too late anyway). */ 3045 * transmitted ... if not, it's too late anyway). */
4400 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 3046 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4401 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); 3047 /* calculate mac80211 ampdu sw queue to wake */
3048 int ampdu_q =
3049 scd_flow - IWL_BACK_QUEUE_FIRST_ID + priv->hw->queues;
3050 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
4402 priv->stations[ba_resp->sta_id]. 3051 priv->stations[ba_resp->sta_id].
4403 tid[ba_resp->tid].tfds_in_queue -= freed; 3052 tid[ba_resp->tid].tfds_in_queue -= freed;
4404 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark && 3053 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4405 priv->mac80211_registered && 3054 priv->mac80211_registered &&
4406 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) 3055 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4407 ieee80211_wake_queue(priv->hw, scd_flow); 3056 ieee80211_wake_queue(priv->hw, ampdu_q);
4408 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id, 3057
4409 ba_resp->tid, scd_flow); 3058 iwl_txq_check_empty(priv, ba_resp->sta_id,
3059 ba_resp->tid, scd_flow);
4410 } 3060 }
4411} 3061}
4412 3062
@@ -4420,10 +3070,10 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4420 u32 tbl_dw; 3070 u32 tbl_dw;
4421 u16 scd_q2ratid; 3071 u16 scd_q2ratid;
4422 3072
4423 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 3073 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4424 3074
4425 tbl_dw_addr = priv->scd_base_addr + 3075 tbl_dw_addr = priv->scd_base_addr +
4426 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 3076 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4427 3077
4428 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 3078 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
4429 3079
@@ -4444,12 +3094,11 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4444 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, 3094 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4445 * i.e. it must be one of the higher queues used for aggregation 3095 * i.e. it must be one of the higher queues used for aggregation
4446 */ 3096 */
4447static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, 3097static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
4448 int tx_fifo, int sta_id, int tid, 3098 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
4449 u16 ssn_idx)
4450{ 3099{
4451 unsigned long flags; 3100 unsigned long flags;
4452 int rc; 3101 int ret;
4453 u16 ra_tid; 3102 u16 ra_tid;
4454 3103
4455 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) 3104 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
@@ -4459,13 +3108,13 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4459 ra_tid = BUILD_RAxTID(sta_id, tid); 3108 ra_tid = BUILD_RAxTID(sta_id, tid);
4460 3109
4461 /* Modify device's station table to Tx this TID */ 3110 /* Modify device's station table to Tx this TID */
4462 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 3111 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
4463 3112
4464 spin_lock_irqsave(&priv->lock, flags); 3113 spin_lock_irqsave(&priv->lock, flags);
4465 rc = iwl_grab_nic_access(priv); 3114 ret = iwl_grab_nic_access(priv);
4466 if (rc) { 3115 if (ret) {
4467 spin_unlock_irqrestore(&priv->lock, flags); 3116 spin_unlock_irqrestore(&priv->lock, flags);
4468 return rc; 3117 return ret;
4469 } 3118 }
4470 3119
4471 /* Stop this Tx queue before configuring it */ 3120 /* Stop this Tx queue before configuring it */
@@ -4485,14 +3134,14 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4485 3134
4486 /* Set up Tx window size and frame limit for this queue */ 3135 /* Set up Tx window size and frame limit for this queue */
4487 iwl_write_targ_mem(priv, 3136 iwl_write_targ_mem(priv,
4488 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 3137 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4489 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 3138 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4490 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 3139 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4491 3140
4492 iwl_write_targ_mem(priv, priv->scd_base_addr + 3141 iwl_write_targ_mem(priv, priv->scd_base_addr +
4493 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 3142 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4494 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) 3143 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4495 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 3144 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4496 3145
4497 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 3146 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4498 3147
@@ -4507,209 +3156,17 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4507 3156
4508#endif /* CONFIG_IWL4965_HT */ 3157#endif /* CONFIG_IWL4965_HT */
4509 3158
4510/**
4511 * iwl4965_add_station - Initialize a station's hardware rate table
4512 *
4513 * The uCode's station table contains a table of fallback rates
4514 * for automatic fallback during transmission.
4515 *
4516 * NOTE: This sets up a default set of values. These will be replaced later
4517 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4518 * rc80211_simple.
4519 *
4520 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4521 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4522 * which requires station table entry to exist).
4523 */
4524void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4525{
4526 int i, r;
4527 struct iwl_link_quality_cmd link_cmd = {
4528 .reserved1 = 0,
4529 };
4530 u16 rate_flags;
4531
4532 /* Set up the rate scaling to start at selected rate, fall back
4533 * all the way down to 1M in IEEE order, and then spin on 1M */
4534 if (is_ap)
4535 r = IWL_RATE_54M_INDEX;
4536 else if (priv->band == IEEE80211_BAND_5GHZ)
4537 r = IWL_RATE_6M_INDEX;
4538 else
4539 r = IWL_RATE_1M_INDEX;
4540
4541 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4542 rate_flags = 0;
4543 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4544 rate_flags |= RATE_MCS_CCK_MSK;
4545
4546 /* Use Tx antenna B only */
4547 rate_flags |= RATE_MCS_ANT_B_MSK;
4548 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4549
4550 link_cmd.rs_table[i].rate_n_flags =
4551 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4552 r = iwl4965_get_prev_ieee_rate(r);
4553 }
4554
4555 link_cmd.general_params.single_stream_ant_msk = 2;
4556 link_cmd.general_params.dual_stream_ant_msk = 3;
4557 link_cmd.agg_params.agg_dis_start_th = 3;
4558 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4559
4560 /* Update the rate scaling for control frame Tx to AP */
4561 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
4562
4563 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4564 sizeof(link_cmd), &link_cmd, NULL);
4565}
4566 3159
4567#ifdef CONFIG_IWL4965_HT 3160#ifdef CONFIG_IWL4965_HT
4568 3161static int iwl4965_rx_agg_start(struct iwl_priv *priv,
4569static u8 iwl4965_is_channel_extension(struct iwl_priv *priv, 3162 const u8 *addr, int tid, u16 ssn)
4570 enum ieee80211_band band,
4571 u16 channel, u8 extension_chan_offset)
4572{
4573 const struct iwl_channel_info *ch_info;
4574
4575 ch_info = iwl_get_channel_info(priv, band, channel);
4576 if (!is_channel_valid(ch_info))
4577 return 0;
4578
4579 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4580 return 0;
4581
4582 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4583 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4584 return 1;
4585
4586 return 0;
4587}
4588
4589static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
4590 struct ieee80211_ht_info *sta_ht_inf)
4591{
4592 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4593
4594 if ((!iwl_ht_conf->is_ht) ||
4595 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4596 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4597 return 0;
4598
4599 if (sta_ht_inf) {
4600 if ((!sta_ht_inf->ht_supported) ||
4601 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4602 return 0;
4603 }
4604
4605 return (iwl4965_is_channel_extension(priv, priv->band,
4606 iwl_ht_conf->control_channel,
4607 iwl_ht_conf->extension_chan_offset));
4608}
4609
4610void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
4611{
4612 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4613 u32 val;
4614
4615 if (!ht_info->is_ht)
4616 return;
4617
4618 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4619 if (iwl4965_is_fat_tx_allowed(priv, NULL))
4620 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4621 else
4622 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4623 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4624
4625 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4626 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4627 le16_to_cpu(rxon->channel),
4628 ht_info->control_channel);
4629 rxon->channel = cpu_to_le16(ht_info->control_channel);
4630 return;
4631 }
4632
4633 /* Note: control channel is opposite of extension channel */
4634 switch (ht_info->extension_chan_offset) {
4635 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4636 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4637 break;
4638 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4639 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4640 break;
4641 case IWL_EXT_CHANNEL_OFFSET_NONE:
4642 default:
4643 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4644 break;
4645 }
4646
4647 val = ht_info->ht_protection;
4648
4649 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4650
4651 iwl4965_set_rxon_chain(priv);
4652
4653 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4654 "rxon flags 0x%X operation mode :0x%X "
4655 "extension channel offset 0x%x "
4656 "control chan %d\n",
4657 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4658 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4659 ht_info->extension_chan_offset,
4660 ht_info->control_channel);
4661 return;
4662}
4663
4664void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
4665 struct ieee80211_ht_info *sta_ht_inf)
4666{
4667 __le32 sta_flags;
4668 u8 mimo_ps_mode;
4669
4670 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4671 goto done;
4672
4673 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4674
4675 sta_flags = priv->stations[index].sta.station_flags;
4676
4677 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4678
4679 switch (mimo_ps_mode) {
4680 case WLAN_HT_CAP_MIMO_PS_STATIC:
4681 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4682 break;
4683 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4684 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4685 break;
4686 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4687 break;
4688 default:
4689 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4690 break;
4691 }
4692
4693 sta_flags |= cpu_to_le32(
4694 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4695
4696 sta_flags |= cpu_to_le32(
4697 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4698
4699 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4700 sta_flags |= STA_FLG_FAT_EN_MSK;
4701 else
4702 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4703
4704 priv->stations[index].sta.station_flags = sta_flags;
4705 done:
4706 return;
4707}
4708
4709static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4710 int sta_id, int tid, u16 ssn)
4711{ 3163{
4712 unsigned long flags; 3164 unsigned long flags;
3165 int sta_id;
3166
3167 sta_id = iwl_find_station(priv, addr);
3168 if (sta_id == IWL_INVALID_STATION)
3169 return -ENXIO;
4713 3170
4714 spin_lock_irqsave(&priv->sta_lock, flags); 3171 spin_lock_irqsave(&priv->sta_lock, flags);
4715 priv->stations[sta_id].sta.station_flags_msk = 0; 3172 priv->stations[sta_id].sta.station_flags_msk = 0;
@@ -4719,13 +3176,19 @@ static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4719 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3176 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4720 spin_unlock_irqrestore(&priv->sta_lock, flags); 3177 spin_unlock_irqrestore(&priv->sta_lock, flags);
4721 3178
4722 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3179 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
3180 CMD_ASYNC);
4723} 3181}
4724 3182
4725static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, 3183static int iwl4965_rx_agg_stop(struct iwl_priv *priv,
4726 int sta_id, int tid) 3184 const u8 *addr, int tid)
4727{ 3185{
4728 unsigned long flags; 3186 unsigned long flags;
3187 int sta_id;
3188
3189 sta_id = iwl_find_station(priv, addr);
3190 if (sta_id == IWL_INVALID_STATION)
3191 return -ENXIO;
4729 3192
4730 spin_lock_irqsave(&priv->sta_lock, flags); 3193 spin_lock_irqsave(&priv->sta_lock, flags);
4731 priv->stations[sta_id].sta.station_flags_msk = 0; 3194 priv->stations[sta_id].sta.station_flags_msk = 0;
@@ -4734,193 +3197,322 @@ static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
4734 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3197 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4735 spin_unlock_irqrestore(&priv->sta_lock, flags); 3198 spin_unlock_irqrestore(&priv->sta_lock, flags);
4736 3199
4737 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3200 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
4738} 3201 CMD_ASYNC);
4739
4740/*
4741 * Find first available (lowest unused) Tx Queue, mark it "active".
4742 * Called only when finding queue for aggregation.
4743 * Should never return anything < 7, because they should already
4744 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4745 */
4746static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
4747{
4748 int txq_id;
4749
4750 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
4751 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4752 return txq_id;
4753 return -1;
4754} 3202}
4755 3203
4756static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da, 3204int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4757 u16 tid, u16 *start_seq_num) 3205 enum ieee80211_ampdu_mlme_action action,
3206 const u8 *addr, u16 tid, u16 *ssn)
4758{ 3207{
4759 struct iwl_priv *priv = hw->priv; 3208 struct iwl_priv *priv = hw->priv;
4760 int sta_id;
4761 int tx_fifo;
4762 int txq_id;
4763 int ssn = -1;
4764 int ret = 0;
4765 unsigned long flags;
4766 struct iwl4965_tid_data *tid_data;
4767 DECLARE_MAC_BUF(mac); 3209 DECLARE_MAC_BUF(mac);
4768 3210
4769 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 3211 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
4770 tx_fifo = default_tid_to_tx_fifo[tid]; 3212 print_mac(mac, addr), tid);
4771 else
4772 return -EINVAL;
4773 3213
4774 IWL_WARNING("%s on da = %s tid = %d\n", 3214 switch (action) {
4775 __func__, print_mac(mac, da), tid); 3215 case IEEE80211_AMPDU_RX_START:
3216 IWL_DEBUG_HT("start Rx\n");
3217 return iwl4965_rx_agg_start(priv, addr, tid, *ssn);
3218 case IEEE80211_AMPDU_RX_STOP:
3219 IWL_DEBUG_HT("stop Rx\n");
3220 return iwl4965_rx_agg_stop(priv, addr, tid);
3221 case IEEE80211_AMPDU_TX_START:
3222 IWL_DEBUG_HT("start Tx\n");
3223 return iwl_tx_agg_start(priv, addr, tid, ssn);
3224 case IEEE80211_AMPDU_TX_STOP:
3225 IWL_DEBUG_HT("stop Tx\n");
3226 return iwl_tx_agg_stop(priv, addr, tid);
3227 default:
3228 IWL_DEBUG_HT("unknown\n");
3229 return -EINVAL;
3230 break;
3231 }
3232 return 0;
3233}
3234#endif /* CONFIG_IWL4965_HT */
4776 3235
4777 sta_id = iwl4965_hw_find_station(priv, da);
4778 if (sta_id == IWL_INVALID_STATION)
4779 return -ENXIO;
4780 3236
4781 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 3237static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
4782 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 3238{
4783 return -ENXIO; 3239 switch (cmd_id) {
3240 case REPLY_RXON:
3241 return (u16) sizeof(struct iwl4965_rxon_cmd);
3242 default:
3243 return len;
4784 } 3244 }
3245}
4785 3246
4786 txq_id = iwl4965_txq_ctx_activate_free(priv); 3247static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
4787 if (txq_id == -1) 3248{
4788 return -ENXIO; 3249 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
3250 addsta->mode = cmd->mode;
3251 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
3252 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
3253 addsta->station_flags = cmd->station_flags;
3254 addsta->station_flags_msk = cmd->station_flags_msk;
3255 addsta->tid_disable_tx = cmd->tid_disable_tx;
3256 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
3257 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
3258 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
3259 addsta->reserved1 = __constant_cpu_to_le16(0);
3260 addsta->reserved2 = __constant_cpu_to_le32(0);
4789 3261
4790 spin_lock_irqsave(&priv->sta_lock, flags); 3262 return (u16)sizeof(struct iwl4965_addsta_cmd);
4791 tid_data = &priv->stations[sta_id].tid[tid]; 3263}
4792 ssn = SEQ_TO_SN(tid_data->seq_number);
4793 tid_data->agg.txq_id = txq_id;
4794 spin_unlock_irqrestore(&priv->sta_lock, flags);
4795 3264
4796 *start_seq_num = ssn; 3265#ifdef CONFIG_IWL4965_HT
4797 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 3266static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
4798 sta_id, tid, ssn); 3267{
4799 if (ret) 3268 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
4800 return ret; 3269 tx_resp->frame_count);
3270 return le32_to_cpu(*scd_ssn) & MAX_SN;
4801 3271
4802 ret = 0;
4803 if (tid_data->tfds_in_queue == 0) {
4804 printk(KERN_ERR "HW queue is empty\n");
4805 tid_data->agg.state = IWL_AGG_ON;
4806 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4807 } else {
4808 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4809 tid_data->tfds_in_queue);
4810 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4811 }
4812 return ret;
4813} 3272}
4814 3273
4815static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, 3274/**
4816 u16 tid) 3275 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue
3276 */
3277static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
3278 struct iwl_ht_agg *agg,
3279 struct iwl4965_tx_resp_agg *tx_resp,
3280 u16 start_idx)
4817{ 3281{
3282 u16 status;
3283 struct agg_tx_status *frame_status = &tx_resp->status;
3284 struct ieee80211_tx_info *info = NULL;
3285 struct ieee80211_hdr *hdr = NULL;
3286 int i, sh;
3287 int txq_id, idx;
3288 u16 seq;
3289
3290 if (agg->wait_for_ba)
3291 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
3292
3293 agg->frame_count = tx_resp->frame_count;
3294 agg->start_idx = start_idx;
3295 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3296 agg->bitmap = 0;
3297
3298 /* # frames attempted by Tx command */
3299 if (agg->frame_count == 1) {
3300 /* Only one frame was attempted; no block-ack will arrive */
3301 status = le16_to_cpu(frame_status[0].status);
3302 seq = le16_to_cpu(frame_status[0].sequence);
3303 idx = SEQ_TO_INDEX(seq);
3304 txq_id = SEQ_TO_QUEUE(seq);
3305
3306 /* FIXME: code repetition */
3307 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
3308 agg->frame_count, agg->start_idx, idx);
3309
3310 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
3311 info->status.retry_count = tx_resp->failure_frame;
3312 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3313 info->flags |= iwl_is_tx_success(status)?
3314 IEEE80211_TX_STAT_ACK : 0;
3315 iwl4965_hwrate_to_tx_control(priv,
3316 le32_to_cpu(tx_resp->rate_n_flags),
3317 info);
3318 /* FIXME: code repetition end */
3319
3320 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3321 status & 0xff, tx_resp->failure_frame);
3322 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
3323 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
3324
3325 agg->wait_for_ba = 0;
3326 } else {
3327 /* Two or more frames were attempted; expect block-ack */
3328 u64 bitmap = 0;
3329 int start = agg->start_idx;
3330
3331 /* Construct bit-map of pending frames within Tx window */
3332 for (i = 0; i < agg->frame_count; i++) {
3333 u16 sc;
3334 status = le16_to_cpu(frame_status[i].status);
3335 seq = le16_to_cpu(frame_status[i].sequence);
3336 idx = SEQ_TO_INDEX(seq);
3337 txq_id = SEQ_TO_QUEUE(seq);
3338
3339 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3340 AGG_TX_STATE_ABORT_MSK))
3341 continue;
3342
3343 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3344 agg->frame_count, txq_id, idx);
3345
3346 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
3347
3348 sc = le16_to_cpu(hdr->seq_ctrl);
3349 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3350 IWL_ERROR("BUG_ON idx doesn't match seq control"
3351 " idx=%d, seq_idx=%d, seq=%d\n",
3352 idx, SEQ_TO_SN(sc),
3353 hdr->seq_ctrl);
3354 return -1;
3355 }
4818 3356
4819 struct iwl_priv *priv = hw->priv; 3357 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
4820 int tx_fifo_id, txq_id, sta_id, ssn = -1; 3358 i, idx, SEQ_TO_SN(sc));
4821 struct iwl4965_tid_data *tid_data; 3359
4822 int ret, write_ptr, read_ptr; 3360 sh = idx - start;
4823 unsigned long flags; 3361 if (sh > 64) {
4824 DECLARE_MAC_BUF(mac); 3362 sh = (start - idx) + 0xff;
3363 bitmap = bitmap << sh;
3364 sh = 0;
3365 start = idx;
3366 } else if (sh < -64)
3367 sh = 0xff - (start - idx);
3368 else if (sh < 0) {
3369 sh = start - idx;
3370 start = idx;
3371 bitmap = bitmap << sh;
3372 sh = 0;
3373 }
3374 bitmap |= (1 << sh);
3375 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3376 start, (u32)(bitmap & 0xFFFFFFFF));
3377 }
4825 3378
4826 if (!da) { 3379 agg->bitmap = bitmap;
4827 IWL_ERROR("da = NULL\n"); 3380 agg->start_idx = start;
4828 return -EINVAL; 3381 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
4829 } 3382 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
3383 agg->frame_count, agg->start_idx,
3384 (unsigned long long)agg->bitmap);
4830 3385
4831 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 3386 if (bitmap)
4832 tx_fifo_id = default_tid_to_tx_fifo[tid]; 3387 agg->wait_for_ba = 1;
4833 else 3388 }
4834 return -EINVAL; 3389 return 0;
3390}
3391#endif
4835 3392
4836 sta_id = iwl4965_hw_find_station(priv, da); 3393/**
3394 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3395 */
3396static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3397 struct iwl_rx_mem_buffer *rxb)
3398{
3399 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3400 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3401 int txq_id = SEQ_TO_QUEUE(sequence);
3402 int index = SEQ_TO_INDEX(sequence);
3403 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3404 struct ieee80211_tx_info *info;
3405 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3406 u32 status = le32_to_cpu(tx_resp->status);
3407#ifdef CONFIG_IWL4965_HT
3408 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
3409 u16 fc;
3410 struct ieee80211_hdr *hdr;
3411 u8 *qc = NULL;
3412#endif
4837 3413
4838 if (sta_id == IWL_INVALID_STATION) 3414 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
4839 return -ENXIO; 3415 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3416 "is out of range [0-%d] %d %d\n", txq_id,
3417 index, txq->q.n_bd, txq->q.write_ptr,
3418 txq->q.read_ptr);
3419 return;
3420 }
4840 3421
4841 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 3422 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
4842 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n"); 3423 memset(&info->status, 0, sizeof(info->status));
4843 3424
4844 tid_data = &priv->stations[sta_id].tid[tid]; 3425#ifdef CONFIG_IWL4965_HT
4845 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 3426 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
4846 txq_id = tid_data->agg.txq_id; 3427 fc = le16_to_cpu(hdr->frame_control);
4847 write_ptr = priv->txq[txq_id].q.write_ptr; 3428 if (ieee80211_is_qos_data(fc)) {
4848 read_ptr = priv->txq[txq_id].q.read_ptr; 3429 qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
3430 tid = qc[0] & 0xf;
3431 }
4849 3432
4850 /* The queue is not empty */ 3433 sta_id = iwl_get_ra_sta_id(priv, hdr);
4851 if (write_ptr != read_ptr) { 3434 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
4852 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); 3435 IWL_ERROR("Station not known\n");
4853 priv->stations[sta_id].tid[tid].agg.state = 3436 return;
4854 IWL_EMPTYING_HW_QUEUE_DELBA;
4855 return 0;
4856 } 3437 }
4857 3438
4858 IWL_DEBUG_HT("HW queue empty\n");; 3439 if (txq->sched_retry) {
4859 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 3440 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
3441 struct iwl_ht_agg *agg = NULL;
4860 3442
4861 spin_lock_irqsave(&priv->lock, flags); 3443 if (!qc)
4862 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 3444 return;
4863 spin_unlock_irqrestore(&priv->lock, flags);
4864 3445
4865 if (ret) 3446 agg = &priv->stations[sta_id].tid[tid].agg;
4866 return ret;
4867 3447
4868 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid); 3448 iwl4965_tx_status_reply_tx(priv, agg,
3449 (struct iwl4965_tx_resp_agg *)tx_resp, index);
4869 3450
4870 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", 3451 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) {
4871 print_mac(mac, da), tid); 3452 /* TODO: send BAR */
3453 }
4872 3454
4873 return 0; 3455 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
4874} 3456 int freed, ampdu_q;
3457 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
3458 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3459 "%d index %d\n", scd_ssn , index);
3460 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3461 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3462
3463 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
3464 txq_id >= 0 && priv->mac80211_registered &&
3465 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
3466 /* calculate mac80211 ampdu sw queue to wake */
3467 ampdu_q = txq_id - IWL_BACK_QUEUE_FIRST_ID +
3468 priv->hw->queues;
3469 if (agg->state == IWL_AGG_OFF)
3470 ieee80211_wake_queue(priv->hw, txq_id);
3471 else
3472 ieee80211_wake_queue(priv->hw, ampdu_q);
3473 }
3474 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
3475 }
3476 } else {
3477#endif /* CONFIG_IWL4965_HT */
4875 3478
4876int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 3479 info->status.retry_count = tx_resp->failure_frame;
4877 enum ieee80211_ampdu_mlme_action action, 3480 info->flags |= iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
4878 const u8 *addr, u16 tid, u16 *ssn) 3481 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
4879{ 3482 info);
4880 struct iwl_priv *priv = hw->priv;
4881 int sta_id;
4882 DECLARE_MAC_BUF(mac);
4883 3483
4884 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ", 3484 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
4885 print_mac(mac, addr), tid); 3485 "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
4886 sta_id = iwl4965_hw_find_station(priv, addr); 3486 status, le32_to_cpu(tx_resp->rate_n_flags),
4887 switch (action) { 3487 tx_resp->failure_frame);
4888 case IEEE80211_AMPDU_RX_START: 3488
4889 IWL_DEBUG_HT("start Rx\n"); 3489 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
4890 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn); 3490#ifdef CONFIG_IWL4965_HT
4891 break; 3491 if (index != -1) {
4892 case IEEE80211_AMPDU_RX_STOP: 3492 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
4893 IWL_DEBUG_HT("stop Rx\n"); 3493 if (tid != MAX_TID_COUNT)
4894 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); 3494 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
4895 break; 3495 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4896 case IEEE80211_AMPDU_TX_START: 3496 (txq_id >= 0) && priv->mac80211_registered)
4897 IWL_DEBUG_HT("start Tx\n"); 3497 ieee80211_wake_queue(priv->hw, txq_id);
4898 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn); 3498 if (tid != MAX_TID_COUNT)
4899 case IEEE80211_AMPDU_TX_STOP: 3499 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
4900 IWL_DEBUG_HT("stop Tx\n");
4901 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4902 default:
4903 IWL_DEBUG_HT("unknown\n");
4904 return -EINVAL;
4905 break;
4906 } 3500 }
4907 return 0; 3501 }
3502#endif /* CONFIG_IWL4965_HT */
3503
3504 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3505 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
4908} 3506}
4909 3507
4910#endif /* CONFIG_IWL4965_HT */
4911 3508
4912/* Set up 4965-specific Rx frame reply handlers */ 3509/* Set up 4965-specific Rx frame reply handlers */
4913void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv) 3510static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
4914{ 3511{
4915 /* Legacy Rx frames */ 3512 /* Legacy Rx frames */
4916 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; 3513 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
4917 3514 /* Tx response */
4918 /* High-throughput (HT) Rx frames */ 3515 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
4919 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4920 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4921
4922 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4923 iwl4965_rx_missed_beacon_notif;
4924 3516
4925#ifdef CONFIG_IWL4965_HT 3517#ifdef CONFIG_IWL4965_HT
4926 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; 3518 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
@@ -4930,7 +3522,7 @@ void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv)
4930void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv) 3522void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
4931{ 3523{
4932 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); 3524 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4933#ifdef CONFIG_IWL4965_SENSITIVITY 3525#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
4934 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); 3526 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4935#endif 3527#endif
4936 init_timer(&priv->statistics_periodic); 3528 init_timer(&priv->statistics_periodic);
@@ -4951,23 +3543,56 @@ static struct iwl_hcmd_ops iwl4965_hcmd = {
4951}; 3543};
4952 3544
4953static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 3545static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4954 .enqueue_hcmd = iwl4965_enqueue_hcmd, 3546 .get_hcmd_size = iwl4965_get_hcmd_size,
3547 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
3548#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3549 .chain_noise_reset = iwl4965_chain_noise_reset,
3550 .gain_computation = iwl4965_gain_computation,
3551#endif
4955}; 3552};
4956 3553
4957static struct iwl_lib_ops iwl4965_lib = { 3554static struct iwl_lib_ops iwl4965_lib = {
4958 .init_drv = iwl4965_init_drv,
4959 .set_hw_params = iwl4965_hw_set_hw_params, 3555 .set_hw_params = iwl4965_hw_set_hw_params,
3556 .alloc_shared_mem = iwl4965_alloc_shared_mem,
3557 .free_shared_mem = iwl4965_free_shared_mem,
3558 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
4960 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 3559 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4961 .hw_nic_init = iwl4965_hw_nic_init, 3560 .txq_set_sched = iwl4965_txq_set_sched,
3561#ifdef CONFIG_IWL4965_HT
3562 .txq_agg_enable = iwl4965_txq_agg_enable,
3563 .txq_agg_disable = iwl4965_txq_agg_disable,
3564#endif
3565 .rx_handler_setup = iwl4965_rx_handler_setup,
4962 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 3566 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4963 .alive_notify = iwl4965_alive_notify, 3567 .alive_notify = iwl4965_alive_notify,
3568 .init_alive_start = iwl4965_init_alive_start,
4964 .load_ucode = iwl4965_load_bsm, 3569 .load_ucode = iwl4965_load_bsm,
3570 .apm_ops = {
3571 .init = iwl4965_apm_init,
3572 .reset = iwl4965_apm_reset,
3573 .stop = iwl4965_apm_stop,
3574 .config = iwl4965_nic_config,
3575 .set_pwr_src = iwl4965_set_pwr_src,
3576 },
4965 .eeprom_ops = { 3577 .eeprom_ops = {
3578 .regulatory_bands = {
3579 EEPROM_REGULATORY_BAND_1_CHANNELS,
3580 EEPROM_REGULATORY_BAND_2_CHANNELS,
3581 EEPROM_REGULATORY_BAND_3_CHANNELS,
3582 EEPROM_REGULATORY_BAND_4_CHANNELS,
3583 EEPROM_REGULATORY_BAND_5_CHANNELS,
3584 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
3585 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
3586 },
4966 .verify_signature = iwlcore_eeprom_verify_signature, 3587 .verify_signature = iwlcore_eeprom_verify_signature,
4967 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 3588 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4968 .release_semaphore = iwlcore_eeprom_release_semaphore, 3589 .release_semaphore = iwlcore_eeprom_release_semaphore,
3590 .check_version = iwl4965_eeprom_check_version,
3591 .query_addr = iwlcore_eeprom_query_addr,
4969 }, 3592 },
4970 .radio_kill_sw = iwl4965_radio_kill_sw, 3593 .radio_kill_sw = iwl4965_radio_kill_sw,
3594 .set_power = iwl4965_set_power,
3595 .update_chain_flags = iwl4965_update_chain_flags,
4971}; 3596};
4972 3597
4973static struct iwl_ops iwl4965_ops = { 3598static struct iwl_ops iwl4965_ops = {
@@ -4980,6 +3605,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
4980 .name = "4965AGN", 3605 .name = "4965AGN",
4981 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", 3606 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4982 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 3607 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
3608 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
4983 .ops = &iwl4965_ops, 3609 .ops = &iwl4965_ops,
4984 .mod_params = &iwl4965_mod_params, 3610 .mod_params = &iwl4965_mod_params,
4985}; 3611};
@@ -5004,4 +3630,5 @@ module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
5004MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); 3630MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
5005module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 3631module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
5006MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 3632MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
5007 3633module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
3634MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
new file mode 100644
index 000000000000..9e557ce315b7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -0,0 +1,133 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions.
66 */
67
68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__
70
71#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
72#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
75
76/* EERPROM */
77#define IWL_5000_EEPROM_IMG_SIZE 2048
78
79
80#define IWL50_MAX_WIN_SIZE 64
81#define IWL50_QUEUE_SIZE 256
82#define IWL50_CMD_FIFO_NUM 7
83#define IWL50_NUM_QUEUES 20
84#define IWL50_BACK_QUEUE_FIRST_ID 10
85
86#define IWL_sta_id_POS 12
87#define IWL_sta_id_LEN 4
88#define IWL_sta_id_SYM val
89
90/* Fixed (non-configurable) rx data from phy */
91
92/* Base physical address of iwl5000_shared is provided to SCD_DRAM_BASE_ADDR
93 * and &iwl5000_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */
94struct iwl5000_sched_queue_byte_cnt_tbl {
95 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL50_QUEUE_SIZE +
96 IWL50_MAX_WIN_SIZE];
97} __attribute__ ((packed));
98
99struct iwl5000_shared {
100 struct iwl5000_sched_queue_byte_cnt_tbl
101 queues_byte_cnt_tbls[IWL50_NUM_QUEUES];
102 __le32 rb_closed;
103
104 /* __le32 rb_closed_stts_rb_num:12; */
105#define IWL_rb_closed_stts_rb_num_POS 0
106#define IWL_rb_closed_stts_rb_num_LEN 12
107#define IWL_rb_closed_stts_rb_num_SYM rb_closed
108 /* __le32 rsrv1:4; */
109 /* __le32 rb_closed_stts_rx_frame_num:12; */
110#define IWL_rb_closed_stts_rx_frame_num_POS 16
111#define IWL_rb_closed_stts_rx_frame_num_LEN 12
112#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
113 /* __le32 rsrv2:4; */
114
115 __le32 frm_finished;
116 /* __le32 frame_finished_stts_rb_num:12; */
117#define IWL_frame_finished_stts_rb_num_POS 0
118#define IWL_frame_finished_stts_rb_num_LEN 12
119#define IWL_frame_finished_stts_rb_num_SYM frm_finished
120 /* __le32 rsrv3:4; */
121 /* __le32 frame_finished_stts_rx_frame_num:12; */
122#define IWL_frame_finished_stts_rx_frame_num_POS 16
123#define IWL_frame_finished_stts_rx_frame_num_LEN 12
124#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
125 /* __le32 rsrv4:4; */
126
127 __le32 padding1; /* so that allocation will be aligned to 16B */
128 __le32 padding2;
129} __attribute__ ((packed));
130
131
132#endif /* __iwl_5000_hw_h__ */
133
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
new file mode 100644
index 000000000000..7e525ad45135
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -0,0 +1,1417 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 *
24 *****************************************************************************/
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/version.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-helpers.h"
45#include "iwl-5000-hw.h"
46
47#define IWL5000_UCODE_API "-1"
48
49static const u16 iwl5000_default_queue_to_tx_fifo[] = {
50 IWL_TX_FIFO_AC3,
51 IWL_TX_FIFO_AC2,
52 IWL_TX_FIFO_AC1,
53 IWL_TX_FIFO_AC0,
54 IWL50_CMD_FIFO_NUM,
55 IWL_TX_FIFO_HCCA_1,
56 IWL_TX_FIFO_HCCA_2
57};
58
59/* FIXME: same implementation as 4965 */
60static int iwl5000_apm_stop_master(struct iwl_priv *priv)
61{
62 int ret = 0;
63 unsigned long flags;
64
65 spin_lock_irqsave(&priv->lock, flags);
66
67 /* set stop master bit */
68 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
69
70 ret = iwl_poll_bit(priv, CSR_RESET,
71 CSR_RESET_REG_FLAG_MASTER_DISABLED,
72 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
73 if (ret < 0)
74 goto out;
75
76out:
77 spin_unlock_irqrestore(&priv->lock, flags);
78 IWL_DEBUG_INFO("stop master\n");
79
80 return ret;
81}
82
83
84static int iwl5000_apm_init(struct iwl_priv *priv)
85{
86 int ret = 0;
87
88 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
89 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
90
91 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
92 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
93 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
94
95 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
96
97 /* set "initialization complete" bit to move adapter
98 * D0U* --> D0A* state */
99 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
100
101 /* wait for clock stabilization */
102 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
103 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
104 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
105 if (ret < 0) {
106 IWL_DEBUG_INFO("Failed to init the card\n");
107 return ret;
108 }
109
110 ret = iwl_grab_nic_access(priv);
111 if (ret)
112 return ret;
113
114 /* enable DMA */
115 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
116
117 udelay(20);
118
119 /* disable L1-Active */
120 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
121 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
122
123 iwl_release_nic_access(priv);
124
125 return ret;
126}
127
128/* FIXME: this is indentical to 4965 */
129static void iwl5000_apm_stop(struct iwl_priv *priv)
130{
131 unsigned long flags;
132
133 iwl5000_apm_stop_master(priv);
134
135 spin_lock_irqsave(&priv->lock, flags);
136
137 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
138
139 udelay(10);
140
141 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
142
143 spin_unlock_irqrestore(&priv->lock, flags);
144}
145
146
147static int iwl5000_apm_reset(struct iwl_priv *priv)
148{
149 int ret = 0;
150 unsigned long flags;
151
152 iwl5000_apm_stop_master(priv);
153
154 spin_lock_irqsave(&priv->lock, flags);
155
156 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
157
158 udelay(10);
159
160
161 /* FIXME: put here L1A -L0S w/a */
162
163 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
164
165 /* set "initialization complete" bit to move adapter
166 * D0U* --> D0A* state */
167 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
168
169 /* wait for clock stabilization */
170 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
171 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
172 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
173 if (ret < 0) {
174 IWL_DEBUG_INFO("Failed to init the card\n");
175 goto out;
176 }
177
178 ret = iwl_grab_nic_access(priv);
179 if (ret)
180 goto out;
181
182 /* enable DMA */
183 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
184
185 udelay(20);
186
187 /* disable L1-Active */
188 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
189 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
190
191 iwl_release_nic_access(priv);
192
193out:
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 return ret;
197}
198
199
200static void iwl5000_nic_config(struct iwl_priv *priv)
201{
202 unsigned long flags;
203 u16 radio_cfg;
204 u8 val_link;
205
206 spin_lock_irqsave(&priv->lock, flags);
207
208 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
209
210 /* L1 is enabled by BIOS */
211 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
212 /* diable L0S disabled L1A enabled */
213 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
214 else
215 /* L0S enabled L1A disabled */
216 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
217
218 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
219
220 /* write radio config values to register */
221 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
222 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
223 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
224 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
225 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
226
227 /* set CSR_HW_CONFIG_REG for uCode use */
228 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
229 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
230 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
231
232 spin_unlock_irqrestore(&priv->lock, flags);
233}
234
235
236
237/*
238 * EEPROM
239 */
240static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
241{
242 u16 offset = 0;
243
244 if ((address & INDIRECT_ADDRESS) == 0)
245 return address;
246
247 switch (address & INDIRECT_TYPE_MSK) {
248 case INDIRECT_HOST:
249 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
250 break;
251 case INDIRECT_GENERAL:
252 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
253 break;
254 case INDIRECT_REGULATORY:
255 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
256 break;
257 case INDIRECT_CALIBRATION:
258 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
259 break;
260 case INDIRECT_PROCESS_ADJST:
261 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
262 break;
263 case INDIRECT_OTHERS:
264 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
265 break;
266 default:
267 IWL_ERROR("illegal indirect type: 0x%X\n",
268 address & INDIRECT_TYPE_MSK);
269 break;
270 }
271
272 /* translate the offset from words to byte */
273 return (address & ADDRESS_MSK) + (offset << 1);
274}
275
276static int iwl5000_eeprom_check_version(struct iwl_priv *priv)
277{
278 u16 eeprom_ver;
279 struct iwl_eeprom_calib_hdr {
280 u8 version;
281 u8 pa_type;
282 u16 voltage;
283 } *hdr;
284
285 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
286
287 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
288 EEPROM_5000_CALIB_ALL);
289
290 if (eeprom_ver < EEPROM_5000_EEPROM_VERSION ||
291 hdr->version < EEPROM_5000_TX_POWER_VERSION)
292 goto err;
293
294 return 0;
295err:
296 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
297 eeprom_ver, EEPROM_5000_EEPROM_VERSION,
298 hdr->version, EEPROM_5000_TX_POWER_VERSION);
299 return -EINVAL;
300
301}
302
303#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
304
305static void iwl5000_gain_computation(struct iwl_priv *priv,
306 u32 average_noise[NUM_RX_CHAINS],
307 u16 min_average_noise_antenna_i,
308 u32 min_average_noise)
309{
310 int i;
311 s32 delta_g;
312 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
313
314 /* Find Gain Code for the antennas B and C */
315 for (i = 1; i < NUM_RX_CHAINS; i++) {
316 if ((data->disconn_array[i])) {
317 data->delta_gain_code[i] = 0;
318 continue;
319 }
320 delta_g = (1000 * ((s32)average_noise[0] -
321 (s32)average_noise[i])) / 1500;
322 /* bound gain by 2 bits value max, 3rd bit is sign */
323 data->delta_gain_code[i] =
324 min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
325
326 if (delta_g < 0)
327 /* set negative sign */
328 data->delta_gain_code[i] |= (1 << 2);
329 }
330
331 IWL_DEBUG_CALIB("Delta gains: ANT_B = %d ANT_C = %d\n",
332 data->delta_gain_code[1], data->delta_gain_code[2]);
333
334 if (!data->radio_write) {
335 struct iwl5000_calibration_chain_noise_gain_cmd cmd;
336 memset(&cmd, 0, sizeof(cmd));
337
338 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
339 cmd.delta_gain_1 = data->delta_gain_code[1];
340 cmd.delta_gain_2 = data->delta_gain_code[2];
341 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
342 sizeof(cmd), &cmd, NULL);
343
344 data->radio_write = 1;
345 data->state = IWL_CHAIN_NOISE_CALIBRATED;
346 }
347
348 data->chain_noise_a = 0;
349 data->chain_noise_b = 0;
350 data->chain_noise_c = 0;
351 data->chain_signal_a = 0;
352 data->chain_signal_b = 0;
353 data->chain_signal_c = 0;
354 data->beacon_count = 0;
355}
356
357
358static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
359{
360 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
361
362 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
363 struct iwl5000_calibration_chain_noise_reset_cmd cmd;
364
365 memset(&cmd, 0, sizeof(cmd));
366 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
367 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
368 sizeof(cmd), &cmd))
369 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
370 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
371 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
372 }
373}
374
375static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
376 .min_nrg_cck = 95,
377 .max_nrg_cck = 0,
378 .auto_corr_min_ofdm = 90,
379 .auto_corr_min_ofdm_mrc = 170,
380 .auto_corr_min_ofdm_x1 = 120,
381 .auto_corr_min_ofdm_mrc_x1 = 240,
382
383 .auto_corr_max_ofdm = 120,
384 .auto_corr_max_ofdm_mrc = 210,
385 .auto_corr_max_ofdm_x1 = 155,
386 .auto_corr_max_ofdm_mrc_x1 = 290,
387
388 .auto_corr_min_cck = 125,
389 .auto_corr_max_cck = 200,
390 .auto_corr_min_cck_mrc = 170,
391 .auto_corr_max_cck_mrc = 400,
392 .nrg_th_cck = 95,
393 .nrg_th_ofdm = 95,
394};
395
396#endif /* CONFIG_IWL5000_RUN_TIME_CALIB */
397
398
399
400static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
401 size_t offset)
402{
403 u32 address = eeprom_indirect_address(priv, offset);
404 BUG_ON(address >= priv->cfg->eeprom_size);
405 return &priv->eeprom[address];
406}
407
408/*
409 * Calibration
410 */
411static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
412{
413 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
414
415 struct iwl5000_calibration cal_cmd = {
416 .op_code = IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD,
417 .data = {
418 (u8)xtal_calib[0],
419 (u8)xtal_calib[1],
420 }
421 };
422
423 return iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
424 sizeof(cal_cmd), &cal_cmd);
425}
426
427static int iwl5000_send_calib_results(struct iwl_priv *priv)
428{
429 int ret = 0;
430
431 struct iwl_host_cmd hcmd = {
432 .id = REPLY_PHY_CALIBRATION_CMD,
433 .meta.flags = CMD_SIZE_HUGE,
434 };
435
436 if (priv->calib_results.lo_res) {
437 hcmd.len = priv->calib_results.lo_res_len;
438 hcmd.data = priv->calib_results.lo_res;
439 ret = iwl_send_cmd_sync(priv, &hcmd);
440
441 if (ret)
442 goto err;
443 }
444
445 if (priv->calib_results.tx_iq_res) {
446 hcmd.len = priv->calib_results.tx_iq_res_len;
447 hcmd.data = priv->calib_results.tx_iq_res;
448 ret = iwl_send_cmd_sync(priv, &hcmd);
449
450 if (ret)
451 goto err;
452 }
453
454 if (priv->calib_results.tx_iq_perd_res) {
455 hcmd.len = priv->calib_results.tx_iq_perd_res_len;
456 hcmd.data = priv->calib_results.tx_iq_perd_res;
457 ret = iwl_send_cmd_sync(priv, &hcmd);
458
459 if (ret)
460 goto err;
461 }
462
463 return 0;
464err:
465 IWL_ERROR("Error %d\n", ret);
466 return ret;
467}
468
469static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
470{
471 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
472 struct iwl_host_cmd cmd = {
473 .id = CALIBRATION_CFG_CMD,
474 .len = sizeof(struct iwl5000_calib_cfg_cmd),
475 .data = &calib_cfg_cmd,
476 };
477
478 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
479 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
480 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
481 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
482 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
483
484 return iwl_send_cmd(priv, &cmd);
485}
486
487static void iwl5000_rx_calib_result(struct iwl_priv *priv,
488 struct iwl_rx_mem_buffer *rxb)
489{
490 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
491 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
492 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
493
494 iwl_free_calib_results(priv);
495
496 /* reduce the size of the length field itself */
497 len -= 4;
498
499 switch (hdr->op_code) {
500 case IWL5000_PHY_CALIBRATE_LO_CMD:
501 priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
502 priv->calib_results.lo_res_len = len;
503 memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
504 break;
505 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
506 priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
507 priv->calib_results.tx_iq_res_len = len;
508 memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
509 break;
510 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
511 priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
512 priv->calib_results.tx_iq_perd_res_len = len;
513 memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
514 break;
515 default:
516 IWL_ERROR("Unknown calibration notification %d\n",
517 hdr->op_code);
518 return;
519 }
520}
521
522static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
523 struct iwl_rx_mem_buffer *rxb)
524{
525 IWL_DEBUG_INFO("Init. calibration is completed, restarting fw.\n");
526 queue_work(priv->workqueue, &priv->restart);
527}
528
529/*
530 * ucode
531 */
532static int iwl5000_load_section(struct iwl_priv *priv,
533 struct fw_desc *image,
534 u32 dst_addr)
535{
536 int ret = 0;
537 unsigned long flags;
538
539 dma_addr_t phy_addr = image->p_addr;
540 u32 byte_cnt = image->len;
541
542 spin_lock_irqsave(&priv->lock, flags);
543 ret = iwl_grab_nic_access(priv);
544 if (ret) {
545 spin_unlock_irqrestore(&priv->lock, flags);
546 return ret;
547 }
548
549 iwl_write_direct32(priv,
550 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
551 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
552
553 iwl_write_direct32(priv,
554 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
555
556 iwl_write_direct32(priv,
557 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
558 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
559
560 /* FIME: write the MSB of the phy_addr in CTRL1
561 * iwl_write_direct32(priv,
562 IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
563 ((phy_addr & MSB_MSK)
564 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
565 */
566 iwl_write_direct32(priv,
567 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt);
568 iwl_write_direct32(priv,
569 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
570 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
571 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
572 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
573
574 iwl_write_direct32(priv,
575 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
576 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
577 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
578 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
579
580 iwl_release_nic_access(priv);
581 spin_unlock_irqrestore(&priv->lock, flags);
582 return 0;
583}
584
585static int iwl5000_load_given_ucode(struct iwl_priv *priv,
586 struct fw_desc *inst_image,
587 struct fw_desc *data_image)
588{
589 int ret = 0;
590
591 ret = iwl5000_load_section(
592 priv, inst_image, RTC_INST_LOWER_BOUND);
593 if (ret)
594 return ret;
595
596 IWL_DEBUG_INFO("INST uCode section being loaded...\n");
597 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
598 priv->ucode_write_complete, 5 * HZ);
599 if (ret == -ERESTARTSYS) {
600 IWL_ERROR("Could not load the INST uCode section due "
601 "to interrupt\n");
602 return ret;
603 }
604 if (!ret) {
605 IWL_ERROR("Could not load the INST uCode section\n");
606 return -ETIMEDOUT;
607 }
608
609 priv->ucode_write_complete = 0;
610
611 ret = iwl5000_load_section(
612 priv, data_image, RTC_DATA_LOWER_BOUND);
613 if (ret)
614 return ret;
615
616 IWL_DEBUG_INFO("DATA uCode section being loaded...\n");
617
618 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
619 priv->ucode_write_complete, 5 * HZ);
620 if (ret == -ERESTARTSYS) {
621 IWL_ERROR("Could not load the INST uCode section due "
622 "to interrupt\n");
623 return ret;
624 } else if (!ret) {
625 IWL_ERROR("Could not load the DATA uCode section\n");
626 return -ETIMEDOUT;
627 } else
628 ret = 0;
629
630 priv->ucode_write_complete = 0;
631
632 return ret;
633}
634
635static int iwl5000_load_ucode(struct iwl_priv *priv)
636{
637 int ret = 0;
638
639 /* check whether init ucode should be loaded, or rather runtime ucode */
640 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
641 IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n");
642 ret = iwl5000_load_given_ucode(priv,
643 &priv->ucode_init, &priv->ucode_init_data);
644 if (!ret) {
645 IWL_DEBUG_INFO("Init ucode load complete.\n");
646 priv->ucode_type = UCODE_INIT;
647 }
648 } else {
649 IWL_DEBUG_INFO("Init ucode not found, or already loaded. "
650 "Loading runtime ucode...\n");
651 ret = iwl5000_load_given_ucode(priv,
652 &priv->ucode_code, &priv->ucode_data);
653 if (!ret) {
654 IWL_DEBUG_INFO("Runtime ucode load complete.\n");
655 priv->ucode_type = UCODE_RT;
656 }
657 }
658
659 return ret;
660}
661
/*
 * iwl5000_init_alive_start - handle the "initialize" alive notification
 *
 * Validates the alive response and the loaded instruction image, resets
 * the station table, runs the per-device alive_notify hook and requests
 * calibration.  Any failure schedules a full restart (which reloads the
 * init ucode first).
 */
static void iwl5000_init_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* initialize uCode was loaded... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	iwlcore_clear_stations_table(priv);
	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARNING("Could not complete ALIVE transition: %d\n", ret);
		goto restart;
	}

	iwl5000_send_calib_cfg(priv);
	return;

restart:
	/* real restart (first load init_ucode) */
	queue_work(priv->workqueue, &priv->restart);
}
698
699static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
700 int txq_id, u32 index)
701{
702 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
703 (index & 0xff) | (txq_id << 8));
704 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
705}
706
707static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
708 struct iwl_tx_queue *txq,
709 int tx_fifo_id, int scd_retry)
710{
711 int txq_id = txq->q.id;
712 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
713
714 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
715 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
716 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
717 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
718 IWL50_SCD_QUEUE_STTS_REG_MSK);
719
720 txq->sched_retry = scd_retry;
721
722 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
723 active ? "Activate" : "Deactivate",
724 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
725}
726
727static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
728{
729 struct iwl_wimax_coex_cmd coex_cmd;
730
731 memset(&coex_cmd, 0, sizeof(coex_cmd));
732
733 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
734 sizeof(coex_cmd), &coex_cmd);
735}
736
737static int iwl5000_alive_notify(struct iwl_priv *priv)
738{
739 u32 a;
740 int i = 0;
741 unsigned long flags;
742 int ret;
743
744 spin_lock_irqsave(&priv->lock, flags);
745
746 ret = iwl_grab_nic_access(priv);
747 if (ret) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 return ret;
750 }
751
752 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
753 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
754 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
755 a += 4)
756 iwl_write_targ_mem(priv, a, 0);
757 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
758 a += 4)
759 iwl_write_targ_mem(priv, a, 0);
760 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
761 iwl_write_targ_mem(priv, a, 0);
762
763 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
764 (priv->shared_phys +
765 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
766 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
767 IWL50_SCD_QUEUECHAIN_SEL_ALL(
768 priv->hw_params.max_txq_num));
769 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
770
771 /* initiate the queues */
772 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
773 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
774 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
775 iwl_write_targ_mem(priv, priv->scd_base_addr +
776 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
777 iwl_write_targ_mem(priv, priv->scd_base_addr +
778 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
779 sizeof(u32),
780 ((SCD_WIN_SIZE <<
781 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
782 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
783 ((SCD_FRAME_LIMIT <<
784 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
785 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
786 }
787
788 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
789 IWL_MASK(0, priv->hw_params.max_txq_num));
790
791 /* Activate all Tx DMA/FIFO channels */
792 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
793
794 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
795 /* map qos queues to fifos one-to-one */
796 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
797 int ac = iwl5000_default_queue_to_tx_fifo[i];
798 iwl_txq_ctx_activate(priv, i);
799 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
800 }
801 /* TODO - need to initialize those FIFOs inside the loop above,
802 * not only mark them as active */
803 iwl_txq_ctx_activate(priv, 4);
804 iwl_txq_ctx_activate(priv, 7);
805 iwl_txq_ctx_activate(priv, 8);
806 iwl_txq_ctx_activate(priv, 9);
807
808 iwl_release_nic_access(priv);
809 spin_unlock_irqrestore(&priv->lock, flags);
810
811
812 iwl5000_send_wimax_coex(priv);
813
814 iwl5000_send_Xtal_calib(priv);
815
816 if (priv->ucode_type == UCODE_RT) {
817 iwl5000_send_calib_results(priv);
818 set_bit(STATUS_READY, &priv->status);
819 priv->is_open = 1;
820 }
821
822 return 0;
823}
824
/*
 * iwl5000_hw_set_hw_params - fill priv->hw_params for the 5000 family
 *
 * Validates the module-supplied queue count and derives queue sizes,
 * station limits, antenna configuration and the critical-temperature
 * threshold from the hardware revision.  Returns 0, or -EINVAL when the
 * requested number of queues is out of range.
 */
static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Rx buffer size follows the amsdu_size_8K module parameter */
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
	else
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel =  BIT(IEEE80211_BAND_2GHZ) |
					BIT(IEEE80211_BAND_5GHZ);
#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
	priv->hw_params.sens = &iwl5000_sensitivity;
#endif

	/* Antenna/chain configuration depends on the exact chip */
	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5150:
		priv->hw_params.tx_chains_num = 1;
		priv->hw_params.rx_chains_num = 2;
		/* FIXME: move to ANT_A, ANT_B, ANT_C enum */
		priv->hw_params.valid_tx_ant = ANT_A;
		priv->hw_params.valid_rx_ant = ANT_AB;
		break;
	case CSR_HW_REV_TYPE_5300:
	case CSR_HW_REV_TYPE_5350:
		priv->hw_params.tx_chains_num = 3;
		priv->hw_params.rx_chains_num = 3;
		priv->hw_params.valid_tx_ant = ANT_ABC;
		priv->hw_params.valid_rx_ant = ANT_ABC;
		break;
	}

	/* The ucode's thermal-kill units differ per chip */
	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5300:
		/* 5X00 wants in Celsius */
		priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
		break;
	case CSR_HW_REV_TYPE_5150:
	case CSR_HW_REV_TYPE_5350:
		/* 5X50 wants in Kelvin */
		priv->hw_params.ct_kill_threshold =
				CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
		break;
	}

	return 0;
}
888
889static int iwl5000_alloc_shared_mem(struct iwl_priv *priv)
890{
891 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
892 sizeof(struct iwl5000_shared),
893 &priv->shared_phys);
894 if (!priv->shared_virt)
895 return -ENOMEM;
896
897 memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared));
898
899 priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);
900
901 return 0;
902}
903
904static void iwl5000_free_shared_mem(struct iwl_priv *priv)
905{
906 if (priv->shared_virt)
907 pci_free_consistent(priv->pci_dev,
908 sizeof(struct iwl5000_shared),
909 priv->shared_virt,
910 priv->shared_phys);
911}
912
913static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
914{
915 struct iwl5000_shared *s = priv->shared_virt;
916 return le32_to_cpu(s->rb_closed) & 0xFFF;
917}
918
/**
 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 * @priv:     driver private data (shared_virt holds the DRAM tables)
 * @txq:      Tx queue; the entry at q.write_ptr is filled in
 * @byte_cnt: frame payload length before crypto/framing overhead
 *
 * The scheduler reads per-TFD byte counts from a DRAM table.  For data
 * queues the length is padded with CRC/delimiter and the per-cipher
 * overhead taken from the queued Tx command.  Entries in the first
 * IWL50_MAX_WIN_SIZE slots are mirrored at IWL50_QUEUE_SIZE + index so
 * the hardware can read past the wrap point.
 */
static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl_tx_queue *txq,
					    u16 byte_cnt)
{
	struct iwl5000_shared *shared_data = priv->shared_virt;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta = 0;
	int len;

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Host commands carry no Tx command; only data queues add the
	 * station id and security overhead */
	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], sta_id, sta);

	/* Mirror the entry into the duplicate window past the queue end */
	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			sta_id, sta);
	}
}
966
967static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
968 struct iwl_tx_queue *txq)
969{
970 int txq_id = txq->q.id;
971 struct iwl5000_shared *shared_data = priv->shared_virt;
972 u8 sta = 0;
973
974 if (txq_id != IWL_CMD_QUEUE_NUM)
975 sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
976
977 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
978 val = cpu_to_le16(1 | (sta << 12));
979
980 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
981 shared_data->queues_byte_cnt_tbls[txq_id].
982 tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr].
983 val = cpu_to_le16(1 | (sta << 12));
984 }
985}
986
987static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
988{
989 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
990 memcpy(data, cmd, size);
991 return size;
992}
993
994
/*
 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Must be called under priv->lock and with NIC access held.
 */
static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}
1003
1004
/*
 * iwl5000_get_scd_ssn - extract the scheduler SSN from a Tx response
 *
 * The SSN word is appended after the per-frame aggregation status
 * array, i.e. frame_count 32-bit words past &tx_resp->status.
 */
static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
	__le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
				tx_resp->frame_count);
	return le32_to_cpu(*scd_ssn) & MAX_SN;

}
1012
/*
 * iwl5000_tx_status_reply_tx - process a Tx response for an aggregation
 * session
 * @priv:      driver private data
 * @agg:       per-TID aggregation state to update
 * @tx_resp:   device Tx response (frame_count entries in ->status)
 * @start_idx: queue index of the first frame of this Tx command
 *
 * For a single frame, completes the frame immediately (no block-ack will
 * follow).  For multiple frames, builds agg->bitmap of frames still
 * awaiting a block-ack, anchored at agg->start_idx.  Returns 0, or -1 if
 * a frame's sequence number disagrees with its queue index.
 */
static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwl5000_tx_resp *tx_resp,
				      u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	int i, sh;
	int txq_id, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		seq = le16_to_cpu(frame_status[0].sequence);
		idx = SEQ_TO_INDEX(seq);
		txq_id = SEQ_TO_QUEUE(seq);

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.retry_count = tx_resp->failure_frame;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_is_tx_success(status)?
			IEEE80211_TX_STAT_ACK : 0;
		iwl4965_hwrate_to_tx_control(priv,
					     le32_to_cpu(tx_resp->rate_n_flags),
					     info);
		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
				    status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
		iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq  = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Frames aborted before transmission never reached
			 * the air and are not part of the BA window */
			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERROR("BUG_ON idx doesn't match seq control"
					  " idx=%d, seq_idx=%d, seq=%d\n",
					  idx, SEQ_TO_SN(sc),
					  hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/* Re-anchor the 64-bit window when an index falls
			 * outside it (indices wrap modulo 256) */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh  = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= (1 << sh);
			IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
					   start, (u32)(bitmap & 0xFFFFFFFF));
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
		IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
1127
/*
 * iwl5000_rx_reply_tx - handle a REPLY_TX notification from the device
 *
 * Validates the queue index, then either feeds the aggregation state
 * machine (scheduler-retry queues, CONFIG_IWL4965_HT only) or completes
 * the single frame to mac80211, reclaiming TFDs and waking the queue
 * when space frees up.
 */
static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32  status = le16_to_cpu(tx_resp->status.status);
#ifdef CONFIG_IWL4965_HT
	int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
	u16 fc;
	struct ieee80211_hdr *hdr;
	u8 *qc = NULL;
#endif

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	memset(&info->status, 0, sizeof(info->status));

#ifdef CONFIG_IWL4965_HT
	/* Extract TID from the QoS control field, if present */
	hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
	fc = le16_to_cpu(hdr->frame_control);
	if (ieee80211_is_qos_data(fc)) {
		qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
		tid = qc[0] & 0xf;
	}

	sta_id = iwl_get_ra_sta_id(priv, hdr);
	if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
		IWL_ERROR("Station not known\n");
		return;
	}

	if (txq->sched_retry) {
		/* Aggregation queue: update BA state and reclaim up to
		 * the scheduler's sequence number */
		const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg = NULL;

		if (!qc)
			return;

		agg = &priv->stations[sta_id].tid[tid].agg;

		iwl5000_tx_status_reply_tx(priv, agg, tx_resp, index);

		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) {
			/* TODO: send BAR */
		}

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			int freed, ampdu_q;
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
					   "%d index %d\n", scd_ssn , index);
			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

			if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
			    txq_id >= 0 && priv->mac80211_registered &&
			    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
				/* calculate mac80211 ampdu sw queue to wake */
				ampdu_q = txq_id - IWL_BACK_QUEUE_FIRST_ID +
					  priv->hw->queues;
				if (agg->state == IWL_AGG_OFF)
					ieee80211_wake_queue(priv->hw, txq_id);
				else
					ieee80211_wake_queue(priv->hw, ampdu_q);
			}
			iwl_txq_check_empty(priv, sta_id, tid, txq_id);
		}
	} else {
#endif /* CONFIG_IWL4965_HT */

	/* Non-aggregation path: report status for the single frame */
	info->status.retry_count = tx_resp->failure_frame;
	info->flags = iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
	iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				     info);

	IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
		     "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
		     status, le32_to_cpu(tx_resp->rate_n_flags),
		     tx_resp->failure_frame);

	IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
#ifdef CONFIG_IWL4965_HT
	if (index != -1) {
		int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
		if (tid != MAX_TID_COUNT)
			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
			(txq_id >= 0) && priv->mac80211_registered)
			ieee80211_wake_queue(priv->hw, txq_id);
		if (tid != MAX_TID_COUNT)
			iwl_txq_check_empty(priv, sta_id, tid, txq_id);
	}
	}
#endif /* CONFIG_IWL4965_HT */

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERROR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
}
1238
1239/* Currently 5000 is the supperset of everything */
1240static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1241{
1242 return len;
1243}
1244
1245static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1246{
1247 /* init calibration handlers */
1248 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1249 iwl5000_rx_calib_result;
1250 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
1251 iwl5000_rx_calib_complete;
1252 priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
1253}
1254
1255
1256static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1257{
1258 return (addr >= RTC_DATA_LOWER_BOUND) &&
1259 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1260}
1261
1262static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1263{
1264 int ret = 0;
1265 struct iwl5000_rxon_assoc_cmd rxon_assoc;
1266 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1267 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1268
1269 if ((rxon1->flags == rxon2->flags) &&
1270 (rxon1->filter_flags == rxon2->filter_flags) &&
1271 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1272 (rxon1->ofdm_ht_single_stream_basic_rates ==
1273 rxon2->ofdm_ht_single_stream_basic_rates) &&
1274 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1275 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1276 (rxon1->ofdm_ht_triple_stream_basic_rates ==
1277 rxon2->ofdm_ht_triple_stream_basic_rates) &&
1278 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1279 (rxon1->rx_chain == rxon2->rx_chain) &&
1280 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1281 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1282 return 0;
1283 }
1284
1285 rxon_assoc.flags = priv->staging_rxon.flags;
1286 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1287 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1288 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1289 rxon_assoc.reserved1 = 0;
1290 rxon_assoc.reserved2 = 0;
1291 rxon_assoc.reserved3 = 0;
1292 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1293 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1294 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1295 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1296 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1297 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
1298 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
1299 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
1300
1301 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1302 sizeof(rxon_assoc), &rxon_assoc, NULL);
1303 if (ret)
1304 return ret;
1305
1306 return ret;
1307}
1308
/* Host-command ops specific to the 5000 family */
static struct iwl_hcmd_ops iwl5000_hcmd = {
	.rxon_assoc = iwl5000_send_rxon_assoc,
};
1312
/* Host-command helper ops (sizing/serialization, optional calibration) */
static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
	.get_hcmd_size = iwl5000_get_hcmd_size,
	.build_addsta_hcmd = iwl5000_build_addsta_hcmd,
#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
	.gain_computation = iwl5000_gain_computation,
	.chain_noise_reset = iwl5000_chain_noise_reset,
#endif
};
1321
/* Core library ops for the 5000 family (note: apm set_pwr_src is shared
 * with the 4965 implementation) */
static struct iwl_lib_ops iwl5000_lib = {
	.set_hw_params = iwl5000_hw_set_hw_params,
	.alloc_shared_mem = iwl5000_alloc_shared_mem,
	.free_shared_mem = iwl5000_free_shared_mem,
	.shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
	.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
	.txq_set_sched = iwl5000_txq_set_sched,
	.rx_handler_setup = iwl5000_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
	.load_ucode = iwl5000_load_ucode,
	.init_alive_start = iwl5000_init_alive_start,
	.alive_notify = iwl5000_alive_notify,
	.apm_ops = {
		.init =	iwl5000_apm_init,
		.reset = iwl5000_apm_reset,
		.stop = iwl5000_apm_stop,
		.config = iwl5000_nic_config,
		.set_pwr_src = iwl4965_set_pwr_src,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_5000_REG_BAND_1_CHANNELS,
			EEPROM_5000_REG_BAND_2_CHANNELS,
			EEPROM_5000_REG_BAND_3_CHANNELS,
			EEPROM_5000_REG_BAND_4_CHANNELS,
			EEPROM_5000_REG_BAND_5_CHANNELS,
			EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
			EEPROM_5000_REG_BAND_52_FAT_CHANNELS
		},
		.verify_signature  = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
		.check_version	= iwl5000_eeprom_check_version,
		.query_addr = iwl5000_eeprom_query_addr,
	},
};
1359
/* Top-level ops bundle referenced by the iwl_cfg entries below */
static struct iwl_ops iwl5000_ops = {
	.lib = &iwl5000_lib,
	.hcmd = &iwl5000_hcmd,
	.utils = &iwl5000_hcmd_utils,
};
1365
/* Defaults for the 50xx module parameters (overridable at load time via
 * the module_param_named() declarations at the bottom of this file) */
static struct iwl_mod_params iwl50_mod_params = {
	.num_of_queues = IWL50_NUM_QUEUES,
	.enable_qos = 1,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
1373
1374
/* Device configuration for the 5300 AGN; matched by PCI id elsewhere */
struct iwl_cfg iwl5300_agn_cfg = {
	.name = "5300AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};
1383
/* Device configuration for the 5100 AGN; shares ops/firmware with 5300 */
struct iwl_cfg iwl5100_agn_cfg = {
	.name = "5100AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};
1392
/* Device configuration for the 5350 AGN; shares ops/firmware with 5300 */
struct iwl_cfg iwl5350_agn_cfg = {
	.name = "5350AGN",
	.fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.mod_params = &iwl50_mod_params,
};
1401
1402module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1403MODULE_PARM_DESC(disable50,
1404 "manually disable the 50XX radio (default 0 [radio on])");
1405module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
1406MODULE_PARM_DESC(swcrypto50,
1407 "using software crypto engine (default 0 [hardware])\n");
1408module_param_named(debug50, iwl50_mod_params.debug, int, 0444);
1409MODULE_PARM_DESC(debug50, "50XX debug output mask");
1410module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
1411MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1412module_param_named(qos_enable50, iwl50_mod_params.enable_qos, int, 0444);
1413MODULE_PARM_DESC(qos_enable50, "enable all 50XX QoS functionality");
1414module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
1415MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1416module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
1417MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
new file mode 100644
index 000000000000..a6c7f0d9a414
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -0,0 +1,806 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/kernel.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-calib.h"
69#include "iwl-eeprom.h"
70
/* "false alarms" are signals that our DSP tries to lock onto,
 * but then determines that they are either noise, or transmissions
 * from a distant wireless network (also "noise", really) that get
 * "stepped on" by stronger transmissions within our own network.
 * This algorithm attempts to set a sensitivity level that is high
 * enough to receive all of our own network traffic, but not so
 * high that our DSP gets too busy trying to lock onto non-network
 * activity/noise.
 *
 * Adjusts both the CCK energy threshold (nrg_th_cck; lower value means
 * HIGHER energy, i.e. less sensitive) and the CCK auto-correlation
 * thresholds, based on the normalized false-alarm count (norm_fa) seen
 * during rx_enable_time microseconds of actual Rx.  Always returns 0. */
static int iwl_sens_energy_cck(struct iwl_priv *priv,
				   u32 norm_fa,
				   u32 rx_enable_time,
				   struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl_sensitivity_data *data = NULL;
	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			    ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			    ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;	/* backoff margin (energy scale is inverted) */

	IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if ((false_alarms > max_false_alarms) &&
		(data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
		IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
		     false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB("... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;
		/* Store for "fewer than desired" on later beacon */
		data->nrg_silence_ref = silence_ref;

		/* increase energy threshold (reduce nrg value)
		 * to decrease sensitivity */
		if (data->nrg_th_cck >
			(ranges->max_nrg_cck + NRG_STEP_CCK))
			data->nrg_th_cck = data->nrg_th_cck
						 - NRG_STEP_CCK;
		else
			data->nrg_th_cck = ranges->max_nrg_cck;
	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
			 false_alarms, min_false_alarms,
			 data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *      from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB("... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
		} else {
			IWL_DEBUG_CALIB("... but not changing sensitivity\n");
		}

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(" FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB("... increasing margin\n");
			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
				data->nrg_th_cck -= NRG_MARGIN;
			else
				data->nrg_th_cck = max_nrg_cck;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	/* Auto-correlation CCK algorithm */
	if (false_alarms > min_false_alarms) {

		/* increase auto_corr values to decrease sensitivity
		 * so the DSP won't be disturbed by the noise
		 */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck =
				min((u32)ranges->auto_corr_max_cck, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			min((u32)ranges->auto_corr_max_cck_mrc, val);
	} else if ((false_alarms < min_false_alarms) &&
	((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
	(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

		/* Decrease auto_corr values to increase sensitivity */
		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck =
			max((u32)ranges->auto_corr_min_cck, val);
		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc =
			max((u32)ranges->auto_corr_min_cck_mrc, val);
	}

	return 0;
}
283
284
285static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
286 u32 norm_fa,
287 u32 rx_enable_time)
288{
289 u32 val;
290 u32 false_alarms = norm_fa * 200 * 1024;
291 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
292 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
293 struct iwl_sensitivity_data *data = NULL;
294 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
295
296 data = &(priv->sensitivity_data);
297
298 /* If we got too many false alarms this time, reduce sensitivity */
299 if (false_alarms > max_false_alarms) {
300
301 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
302 false_alarms, max_false_alarms);
303
304 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
305 data->auto_corr_ofdm =
306 min((u32)ranges->auto_corr_max_ofdm, val);
307
308 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
309 data->auto_corr_ofdm_mrc =
310 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
311
312 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
313 data->auto_corr_ofdm_x1 =
314 min((u32)ranges->auto_corr_max_ofdm_x1, val);
315
316 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
317 data->auto_corr_ofdm_mrc_x1 =
318 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
319 }
320
321 /* Else if we got fewer than desired, increase sensitivity */
322 else if (false_alarms < min_false_alarms) {
323
324 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
325 false_alarms, min_false_alarms);
326
327 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
328 data->auto_corr_ofdm =
329 max((u32)ranges->auto_corr_min_ofdm, val);
330
331 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
332 data->auto_corr_ofdm_mrc =
333 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
334
335 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
336 data->auto_corr_ofdm_x1 =
337 max((u32)ranges->auto_corr_min_ofdm_x1, val);
338
339 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
340 data->auto_corr_ofdm_mrc_x1 =
341 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
342 } else {
343 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
344 min_false_alarms, false_alarms, max_false_alarms);
345 }
346 return 0;
347}
348
349/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
350static int iwl_sensitivity_write(struct iwl_priv *priv)
351{
352 int ret = 0;
353 struct iwl_sensitivity_cmd cmd ;
354 struct iwl_sensitivity_data *data = NULL;
355 struct iwl_host_cmd cmd_out = {
356 .id = SENSITIVITY_CMD,
357 .len = sizeof(struct iwl_sensitivity_cmd),
358 .meta.flags = CMD_ASYNC,
359 .data = &cmd,
360 };
361
362 data = &(priv->sensitivity_data);
363
364 memset(&cmd, 0, sizeof(cmd));
365
366 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
367 cpu_to_le16((u16)data->auto_corr_ofdm);
368 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
369 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
370 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
371 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
372 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
373 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
374
375 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
376 cpu_to_le16((u16)data->auto_corr_cck);
377 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_cck_mrc);
379
380 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
381 cpu_to_le16((u16)data->nrg_th_cck);
382 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
383 cpu_to_le16((u16)data->nrg_th_ofdm);
384
385 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
386 __constant_cpu_to_le16(190);
387 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
388 __constant_cpu_to_le16(390);
389 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
390 __constant_cpu_to_le16(62);
391
392 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
393 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
394 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
395 data->nrg_th_ofdm);
396
397 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
398 data->auto_corr_cck, data->auto_corr_cck_mrc,
399 data->nrg_th_cck);
400
401 /* Update uCode's "work" table, and copy it to DSP */
402 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
403
404 /* Don't send command to uCode if nothing has changed */
405 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
406 sizeof(u16)*HD_TABLE_SIZE)) {
407 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
408 return 0;
409 }
410
411 /* Copy table for comparison next time */
412 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
413 sizeof(u16)*HD_TABLE_SIZE);
414
415 ret = iwl_send_cmd(priv, &cmd_out);
416 if (ret)
417 IWL_ERROR("SENSITIVITY_CMD failed\n");
418
419 return ret;
420}
421
422void iwl_init_sensitivity(struct iwl_priv *priv)
423{
424 int ret = 0;
425 int i;
426 struct iwl_sensitivity_data *data = NULL;
427 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
428
429 if (priv->disable_sens_cal)
430 return;
431
432 IWL_DEBUG_CALIB("Start iwl_init_sensitivity\n");
433
434 /* Clear driver's sensitivity algo data */
435 data = &(priv->sensitivity_data);
436
437 if (ranges == NULL)
438 /* can happen if IWLWIFI_RUN_TIME_CALIB is selected
439 * but no IWLXXXX_RUN_TIME_CALIB for specific is selected */
440 return;
441
442 memset(data, 0, sizeof(struct iwl_sensitivity_data));
443
444 data->num_in_cck_no_fa = 0;
445 data->nrg_curr_state = IWL_FA_TOO_MANY;
446 data->nrg_prev_state = IWL_FA_TOO_MANY;
447 data->nrg_silence_ref = 0;
448 data->nrg_silence_idx = 0;
449 data->nrg_energy_idx = 0;
450
451 for (i = 0; i < 10; i++)
452 data->nrg_value[i] = 0;
453
454 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
455 data->nrg_silence_rssi[i] = 0;
456
457 data->auto_corr_ofdm = 90;
458 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
459 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
460 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
461 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
462 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
463 data->nrg_th_cck = ranges->nrg_th_cck;
464 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
465
466 data->last_bad_plcp_cnt_ofdm = 0;
467 data->last_fa_cnt_ofdm = 0;
468 data->last_bad_plcp_cnt_cck = 0;
469 data->last_fa_cnt_cck = 0;
470
471 ret |= iwl_sensitivity_write(priv);
472 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
473}
474EXPORT_SYMBOL(iwl_init_sensitivity);
475
/**
 * iwl_sensitivity_calibration - run one round of Rx sensitivity tuning
 * @priv: driver private data
 * @resp: statistics notification from the uCode (4965 layout)
 *
 * Extracts false-alarm and PLCP-error counters from a periodic
 * statistics notification (under priv->lock, since the notification
 * buffer is shared with the interrupt path), converts the monotonically
 * increasing counters into per-interval deltas, then feeds the
 * normalized counts to the OFDM and CCK adjustment algorithms and
 * writes any resulting threshold changes to the uCode.
 * Bails out early when calibration is disabled, we are not associated,
 * the interference data is flagged invalid, or no Rx time elapsed.
 */
void iwl_sensitivity_calibration(struct iwl_priv *priv,
				    struct iwl4965_notif_statistics *resp)
{
	u32 rx_enable_time;
	u32 fa_cck;
	u32 fa_ofdm;
	u32 bad_plcp_cck;
	u32 bad_plcp_ofdm;
	u32 norm_fa_ofdm;
	u32 norm_fa_cck;
	struct iwl_sensitivity_data *data = NULL;
	struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
	struct statistics_rx *statistics = &(resp->rx);
	unsigned long flags;
	struct statistics_general_data statis;

	if (priv->disable_sens_cal)
		return;

	data = &(priv->sensitivity_data);

	if (!iwl_is_associated(priv)) {
		IWL_DEBUG_CALIB("<< - not associated\n");
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB("<< invalid data.\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Extract Statistics: */
	rx_enable_time = le32_to_cpu(rx_info->channel_load);
	fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
	fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
	bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
	bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);

	/* Copy the beacon silence/energy readings to a CPU-order struct
	 * so the CCK algorithm can run outside the lock */
	statis.beacon_silence_rssi_a =
			le32_to_cpu(statistics->general.beacon_silence_rssi_a);
	statis.beacon_silence_rssi_b =
			le32_to_cpu(statistics->general.beacon_silence_rssi_b);
	statis.beacon_silence_rssi_c =
			le32_to_cpu(statistics->general.beacon_silence_rssi_c);
	statis.beacon_energy_a =
			le32_to_cpu(statistics->general.beacon_energy_a);
	statis.beacon_energy_b =
			le32_to_cpu(statistics->general.beacon_energy_b);
	statis.beacon_energy_c =
			le32_to_cpu(statistics->general.beacon_energy_c);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);

	if (!rx_enable_time) {
		IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
		return;
	}

	/* These statistics increase monotonically, and do not reset
	 * at each beacon.  Calculate difference from last value, or just
	 * use the new statistics value if it has reset or wrapped around. */
	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
	else {
		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
	}

	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
	else {
		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
	}

	if (data->last_fa_cnt_ofdm > fa_ofdm)
		data->last_fa_cnt_ofdm = fa_ofdm;
	else {
		fa_ofdm -= data->last_fa_cnt_ofdm;
		data->last_fa_cnt_ofdm += fa_ofdm;
	}

	if (data->last_fa_cnt_cck > fa_cck)
		data->last_fa_cnt_cck = fa_cck;
	else {
		fa_cck -= data->last_fa_cnt_cck;
		data->last_fa_cnt_cck += fa_cck;
	}

	/* Total aborted signal locks */
	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
	norm_fa_cck = fa_cck + bad_plcp_cck;

	IWL_DEBUG_CALIB("cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);

	iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
	iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
	iwl_sensitivity_write(priv);

	return;
}
EXPORT_SYMBOL(iwl_sensitivity_calibration);
583
/*
 * Accumulate 20 beacons of signal and noise statistics for each of
 *   3 receivers/antennas/rx-chains, then figure out:
 * 1)  Which antennas are connected.
 * 2)  Differential rx gain settings to balance the 3 receivers.
 *
 * Runs only while the state machine is in IWL_CHAIN_NOISE_ACCUMULATE;
 * once CAL_NUM_OF_BEACONS beacons have been folded in, the analysis
 * runs once and the per-hardware gain_computation hook finishes the job.
 */
void iwl_chain_noise_calibration(struct iwl_priv *priv,
				 struct iwl4965_notif_statistics *stat_resp)
{
	struct iwl_chain_noise_data *data = NULL;

	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_sig_a;
	u32 chain_sig_b;
	u32 chain_sig_c;
	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
	u32 max_average_sig;
	u16 max_average_sig_antenna_i;
	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
	u16 i = 0;
	u16 rxon_chnum = INITIALIZATION_VALUE;
	u16 stat_chnum = INITIALIZATION_VALUE;
	u8 rxon_band24;
	u8 stat_band24;
	u32 active_chains = 0;
	u8 num_tx_chains;
	unsigned long flags;
	struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);

	if (priv->disable_chain_noise_cal)
		return;

	data = &(priv->chain_noise_data);

	/* Accumulate just the first 20 beacons after the first association,
	 * then we're done forever. */
	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
		if (data->state == IWL_CHAIN_NOISE_ALIVE)
			IWL_DEBUG_CALIB("Wait for noise calib reset\n");
		return;
	}

	/* The notification buffer is shared with the irq path: hold the
	 * lock while reading from it */
	spin_lock_irqsave(&priv->lock, flags);
	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
		IWL_DEBUG_CALIB(" << Interference data unavailable\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
	rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
	stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
	/* channel number lives in the upper 16 bits of the flag word */
	stat_chnum = le32_to_cpu(stat_resp->flag) >> 16;

	/* Make sure we accumulate data for just the associated channel
	 * (even if scanning). */
	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
		IWL_DEBUG_CALIB("Stats not from chan=%d, band24=%d\n",
				rxon_chnum, rxon_band24);
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	/* Accumulate beacon statistics values across 20 beacons */
	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
				IN_BAND_FILTER;
	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
				IN_BAND_FILTER;
	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
				IN_BAND_FILTER;

	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;

	spin_unlock_irqrestore(&priv->lock, flags);

	data->beacon_count++;

	/* Running sums; divided by CAL_NUM_OF_BEACONS below */
	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);

	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);

	IWL_DEBUG_CALIB("chan=%d, band24=%d, beacon=%d\n",
			rxon_chnum, rxon_band24, data->beacon_count);
	IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
			chain_sig_a, chain_sig_b, chain_sig_c);
	IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
			chain_noise_a, chain_noise_b, chain_noise_c);

	/* If this is the 20th beacon, determine:
	 * 1)  Disconnected antennas (using signal strengths)
	 * 2)  Differential gain (using silence noise) to balance receivers */
	if (data->beacon_count != CAL_NUM_OF_BEACONS)
		return;

	/* Analyze signal for disconnected antenna */
	average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
	average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
	average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;

	/* Pick the strongest chain as the reference antenna */
	if (average_sig[0] >= average_sig[1]) {
		max_average_sig = average_sig[0];
		max_average_sig_antenna_i = 0;
		active_chains = (1 << max_average_sig_antenna_i);
	} else {
		max_average_sig = average_sig[1];
		max_average_sig_antenna_i = 1;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	if (average_sig[2] >= max_average_sig) {
		max_average_sig = average_sig[2];
		max_average_sig_antenna_i = 2;
		active_chains = (1 << max_average_sig_antenna_i);
	}

	IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
		     average_sig[0], average_sig[1], average_sig[2]);
	IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
		     max_average_sig, max_average_sig_antenna_i);

	/* Compare signal strengths for all 3 receivers. */
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (i != max_average_sig_antenna_i) {
			s32 rssi_delta = (max_average_sig - average_sig[i]);

			/* If signal is very weak, compared with
			 * strongest, mark it as disconnected. */
			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
				data->disconn_array[i] = 1;
			else
				active_chains |= (1 << i);
			IWL_DEBUG_CALIB("i = %d  rssiDelta = %d  "
			     "disconn_array[i] = %d\n",
			     i, rssi_delta, data->disconn_array[i]);
		}
	}

	/* Workaround: never declare ALL Tx antennas disconnected — if we
	 * reach the last valid Tx chain and it too looked disconnected,
	 * force it back to connected so Tx remains possible. */
	num_tx_chains = 0;
	for (i = 0; i < NUM_RX_CHAINS; i++) {
		/* loops on all the bits of
		 * priv->hw_setting.valid_tx_ant */
		u8 ant_msk = (1 << i);
		if (!(priv->hw_params.valid_tx_ant & ant_msk))
			continue;

		num_tx_chains++;
		if (data->disconn_array[i] == 0)
			/* there is a Tx antenna connected */
			break;
		if (num_tx_chains == priv->hw_params.tx_chains_num &&
		    data->disconn_array[i]) {
			/* This is the last TX antenna and is also
			 * disconnected connect it anyway */
			data->disconn_array[i] = 0;
			active_chains |= ant_msk;
			IWL_DEBUG_CALIB("All Tx chains are disconnected W/A - "
				"declare %d as connected\n", i);
			break;
		}
	}

	IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
			active_chains);

	/* Save for use within RXON, TX, SCAN commands, etc. */
	/*priv->valid_antenna = active_chains;*/
	/*FIXME: should be reflected in RX chains in RXON */

	/* Analyze noise for rx balance: find the quietest connected chain */
	average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
	average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
	average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (!(data->disconn_array[i]) &&
		   (average_noise[i] <= min_average_noise)) {
			/* This means that chain i is active and has
			 * lower noise values so far: */
			min_average_noise = average_noise[i];
			min_average_noise_antenna_i = i;
		}
	}

	IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
			average_noise[0], average_noise[1],
			average_noise[2]);

	IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
			min_average_noise, min_average_noise_antenna_i);

	/* Hand off to the hardware-specific differential-gain computation */
	priv->cfg->ops->utils->gain_computation(priv, average_noise,
			min_average_noise_antenna_i, min_average_noise);
}
EXPORT_SYMBOL(iwl_chain_noise_calibration);
788
789
790void iwl_reset_run_time_calib(struct iwl_priv *priv)
791{
792 int i;
793 memset(&(priv->sensitivity_data), 0,
794 sizeof(struct iwl_sensitivity_data));
795 memset(&(priv->chain_noise_data), 0,
796 sizeof(struct iwl_chain_noise_data));
797 for (i = 0; i < NUM_RX_CHAINS; i++)
798 priv->chain_noise_data.delta_gain_code[i] =
799 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
800
801 /* Ask for statistics now, the uCode will send notification
802 * periodically after association */
803 iwl_send_statistics_request(priv, CMD_ASYNC);
804}
805EXPORT_SYMBOL(iwl_reset_run_time_calib);
806
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
new file mode 100644
index 000000000000..b8e57c59eac8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -0,0 +1,109 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/version.h>
68
69#include <net/mac80211.h>
70#include "iwl-eeprom.h"
71#include "iwl-core.h"
72#include "iwl-dev.h"
73
74#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
75void iwl_chain_noise_calibration(struct iwl_priv *priv,
76 struct iwl4965_notif_statistics *stat_resp);
77void iwl_sensitivity_calibration(struct iwl_priv *priv,
78 struct iwl4965_notif_statistics *resp);
79
80void iwl_init_sensitivity(struct iwl_priv *priv);
81void iwl_reset_run_time_calib(struct iwl_priv *priv);
82static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
83{
84
85 if (!priv->disable_chain_noise_cal &&
86 priv->cfg->ops->utils->chain_noise_reset)
87 priv->cfg->ops->utils->chain_noise_reset(priv);
88}
89#else
90static inline void iwl_chain_noise_calibration(struct iwl_priv *priv,
91 struct iwl4965_notif_statistics *stat_resp)
92{
93}
94static inline void iwl_sensitivity_calibration(struct iwl_priv *priv,
95 struct iwl4965_notif_statistics *resp)
96{
97}
98static inline void iwl_init_sensitivity(struct iwl_priv *priv)
99{
100}
101static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
102{
103}
104static inline void iwl_reset_run_time_calib(struct iwl_priv *priv)
105{
106}
107#endif
108
109#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 3bcd107e2d71..fb6f5ffb9f1d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -61,9 +61,9 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-commands.h) only for uCode API definitions. 64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-4965-hw.h for hardware-related definitions. 65 * Please use iwl-4965-hw.h for hardware-related definitions.
66 * Please use iwl-4965.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl4965_commands_h__ 69#ifndef __iwl4965_commands_h__
@@ -93,6 +93,11 @@ enum {
93 REPLY_LEDS_CMD = 0x48, 93 REPLY_LEDS_CMD = 0x48,
94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
95 95
96 /* WiMAX coexistence */
97 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */
98 COEX_MEDIUM_NOTIFICATION = 0x5b,
99 COEX_EVENT_CMD = 0x5c,
100
96 /* 802.11h related */ 101 /* 802.11h related */
97 RADAR_NOTIFICATION = 0x70, /* not used */ 102 RADAR_NOTIFICATION = 0x70, /* not used */
98 REPLY_QUIET_CMD = 0x71, /* not used */ 103 REPLY_QUIET_CMD = 0x71, /* not used */
@@ -269,10 +274,11 @@ struct iwl_cmd_header {
269 * 10 B active, A inactive 274 * 10 B active, A inactive
270 * 11 Both active 275 * 11 Both active
271 */ 276 */
272#define RATE_MCS_ANT_POS 14 277#define RATE_MCS_ANT_POS 14
273#define RATE_MCS_ANT_A_MSK 0x04000 278#define RATE_MCS_ANT_A_MSK 0x04000
274#define RATE_MCS_ANT_B_MSK 0x08000 279#define RATE_MCS_ANT_B_MSK 0x08000
275#define RATE_MCS_ANT_AB_MSK 0x0C000 280#define RATE_MCS_ANT_C_MSK 0x10000
281#define RATE_MCS_ANT_ABC_MSK 0x1C000
276 282
277 283
278/** 284/**
@@ -367,7 +373,7 @@ struct iwl4965_tx_power_db {
367 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 373 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
368 * for each of 5 frequency ranges. 374 * for each of 5 frequency ranges.
369 */ 375 */
370struct iwl4965_init_alive_resp { 376struct iwl_init_alive_resp {
371 u8 ucode_minor; 377 u8 ucode_minor;
372 u8 ucode_major; 378 u8 ucode_major;
373 __le16 reserved1; 379 __le16 reserved1;
@@ -443,7 +449,7 @@ struct iwl4965_init_alive_resp {
443 * The Linux driver can print both logs to the system log when a uCode error 449 * The Linux driver can print both logs to the system log when a uCode error
444 * occurs. 450 * occurs.
445 */ 451 */
446struct iwl4965_alive_resp { 452struct iwl_alive_resp {
447 u8 ucode_minor; 453 u8 ucode_minor;
448 u8 ucode_major; 454 u8 ucode_major;
449 __le16 reserved1; 455 __le16 reserved1;
@@ -467,7 +473,7 @@ union tsf {
467/* 473/*
468 * REPLY_ERROR = 0x2 (response only, not a command) 474 * REPLY_ERROR = 0x2 (response only, not a command)
469 */ 475 */
470struct iwl4965_error_resp { 476struct iwl_error_resp {
471 __le32 error_type; 477 __le32 error_type;
472 u8 cmd_id; 478 u8 cmd_id;
473 u8 reserved1; 479 u8 reserved1;
@@ -599,6 +605,46 @@ struct iwl4965_rxon_cmd {
599 u8 ofdm_ht_dual_stream_basic_rates; 605 u8 ofdm_ht_dual_stream_basic_rates;
600} __attribute__ ((packed)); 606} __attribute__ ((packed));
601 607
608/* 5000 HW just extend this cmmand */
609struct iwl_rxon_cmd {
610 u8 node_addr[6];
611 __le16 reserved1;
612 u8 bssid_addr[6];
613 __le16 reserved2;
614 u8 wlap_bssid_addr[6];
615 __le16 reserved3;
616 u8 dev_type;
617 u8 air_propagation;
618 __le16 rx_chain;
619 u8 ofdm_basic_rates;
620 u8 cck_basic_rates;
621 __le16 assoc_id;
622 __le32 flags;
623 __le32 filter_flags;
624 __le16 channel;
625 u8 ofdm_ht_single_stream_basic_rates;
626 u8 ofdm_ht_dual_stream_basic_rates;
627 u8 ofdm_ht_triple_stream_basic_rates;
628 u8 reserved5;
629 __le16 acquisition_data;
630 __le16 reserved6;
631} __attribute__ ((packed));
632
633struct iwl5000_rxon_assoc_cmd {
634 __le32 flags;
635 __le32 filter_flags;
636 u8 ofdm_basic_rates;
637 u8 cck_basic_rates;
638 __le16 reserved1;
639 u8 ofdm_ht_single_stream_basic_rates;
640 u8 ofdm_ht_dual_stream_basic_rates;
641 u8 ofdm_ht_triple_stream_basic_rates;
642 u8 reserved2;
643 __le16 rx_chain_select_flags;
644 __le16 acquisition_data;
645 __le32 reserved3;
646} __attribute__ ((packed));
647
602/* 648/*
603 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 649 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
604 */ 650 */
@@ -613,6 +659,9 @@ struct iwl4965_rxon_assoc_cmd {
613 __le16 reserved; 659 __le16 reserved;
614} __attribute__ ((packed)); 660} __attribute__ ((packed));
615 661
662
663
664
616/* 665/*
617 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 666 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
618 */ 667 */
@@ -711,6 +760,8 @@ struct iwl4965_qosparam_cmd {
711#define IWL_STA_ID 2 760#define IWL_STA_ID 2
712#define IWL4965_BROADCAST_ID 31 761#define IWL4965_BROADCAST_ID 31
713#define IWL4965_STATION_COUNT 32 762#define IWL4965_STATION_COUNT 32
763#define IWL5000_BROADCAST_ID 15
764#define IWL5000_STATION_COUNT 16
714 765
715#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 766#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
716#define IWL_INVALID_STATION 255 767#define IWL_INVALID_STATION 255
@@ -766,6 +817,20 @@ struct iwl4965_keyinfo {
766 u8 key[16]; /* 16-byte unicast decryption key */ 817 u8 key[16]; /* 16-byte unicast decryption key */
767} __attribute__ ((packed)); 818} __attribute__ ((packed));
768 819
820/* 5000 */
821struct iwl_keyinfo {
822 __le16 key_flags;
823 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
824 u8 reserved1;
825 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
826 u8 key_offset;
827 u8 reserved2;
828 u8 key[16]; /* 16-byte unicast decryption key */
829 __le64 tx_secur_seq_cnt;
830 __le64 hw_tkip_mic_rx_key;
831 __le64 hw_tkip_mic_tx_key;
832} __attribute__ ((packed));
833
769/** 834/**
770 * struct sta_id_modify 835 * struct sta_id_modify
771 * @addr[ETH_ALEN]: station's MAC address 836 * @addr[ETH_ALEN]: station's MAC address
@@ -841,6 +906,38 @@ struct iwl4965_addsta_cmd {
841 __le32 reserved2; 906 __le32 reserved2;
842} __attribute__ ((packed)); 907} __attribute__ ((packed));
843 908
909/* 5000 */
910struct iwl_addsta_cmd {
911 u8 mode; /* 1: modify existing, 0: add new station */
912 u8 reserved[3];
913 struct sta_id_modify sta;
914 struct iwl_keyinfo key;
915 __le32 station_flags; /* STA_FLG_* */
916 __le32 station_flags_msk; /* STA_FLG_* */
917
918 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
919 * corresponding to bit (e.g. bit 5 controls TID 5).
920 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
921 __le16 tid_disable_tx;
922
923 __le16 reserved1;
924
925 /* TID for which to add block-ack support.
926 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
927 u8 add_immediate_ba_tid;
928
929 /* TID for which to remove block-ack support.
930 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
931 u8 remove_immediate_ba_tid;
932
933 /* Starting Sequence Number for added block-ack support.
934 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
935 __le16 add_immediate_ba_ssn;
936
937 __le32 reserved2;
938} __attribute__ ((packed));
939
940
844#define ADD_STA_SUCCESS_MSK 0x1 941#define ADD_STA_SUCCESS_MSK 0x1
845#define ADD_STA_NO_ROOM_IN_TABLE 0x2 942#define ADD_STA_NO_ROOM_IN_TABLE 0x2
846#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 943#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
@@ -848,10 +945,28 @@ struct iwl4965_addsta_cmd {
848/* 945/*
849 * REPLY_ADD_STA = 0x18 (response) 946 * REPLY_ADD_STA = 0x18 (response)
850 */ 947 */
851struct iwl4965_add_sta_resp { 948struct iwl_add_sta_resp {
852 u8 status; /* ADD_STA_* */ 949 u8 status; /* ADD_STA_* */
853} __attribute__ ((packed)); 950} __attribute__ ((packed));
854 951
952#define REM_STA_SUCCESS_MSK 0x1
953/*
954 * REPLY_REM_STA = 0x19 (response)
955 */
956struct iwl_rem_sta_resp {
957 u8 status;
958} __attribute__ ((packed));
959
960/*
961 * REPLY_REM_STA = 0x19 (command)
962 */
963struct iwl_rem_sta_cmd {
964 u8 num_sta; /* number of removed stations */
965 u8 reserved[3];
966 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
967 u8 reserved2[2];
968} __attribute__ ((packed));
969
855/* 970/*
856 * REPLY_WEP_KEY = 0x20 971 * REPLY_WEP_KEY = 0x20
857 */ 972 */
@@ -1100,6 +1215,14 @@ struct iwl4965_rx_mpdu_res_start {
1100#define TX_CMD_SEC_KEY128 0x08 1215#define TX_CMD_SEC_KEY128 0x08
1101 1216
1102/* 1217/*
1218 * security overhead sizes
1219 */
1220#define WEP_IV_LEN 4
1221#define WEP_ICV_LEN 4
1222#define CCMP_MIC_LEN 8
1223#define TKIP_ICV_LEN 4
1224
1225/*
1103 * 4965 uCode updates these Tx attempt count values in host DRAM. 1226 * 4965 uCode updates these Tx attempt count values in host DRAM.
1104 * Used for managing Tx retries when expecting block-acks. 1227 * Used for managing Tx retries when expecting block-acks.
1105 * Driver should set these fields to 0. 1228 * Driver should set these fields to 0.
@@ -1113,7 +1236,7 @@ struct iwl4965_dram_scratch {
1113/* 1236/*
1114 * REPLY_TX = 0x1c (command) 1237 * REPLY_TX = 0x1c (command)
1115 */ 1238 */
1116struct iwl4965_tx_cmd { 1239struct iwl_tx_cmd {
1117 /* 1240 /*
1118 * MPDU byte count: 1241 * MPDU byte count:
1119 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1242 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1259,6 +1382,15 @@ enum {
1259 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1382 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1260}; 1383};
1261 1384
1385static inline int iwl_is_tx_success(u32 status)
1386{
1387 status &= TX_STATUS_MSK;
1388 return (status == TX_STATUS_SUCCESS)
1389 || (status == TX_STATUS_DIRECT_DONE);
1390}
1391
1392
1393
1262/* ******************************* 1394/* *******************************
1263 * TX aggregation status 1395 * TX aggregation status
1264 ******************************* */ 1396 ******************************* */
@@ -1313,6 +1445,11 @@ enum {
1313 * within the sending station (this 4965), rather than whether it was 1445 * within the sending station (this 4965), rather than whether it was
1314 * received successfully by the destination station. 1446 * received successfully by the destination station.
1315 */ 1447 */
1448struct agg_tx_status {
1449 __le16 status;
1450 __le16 sequence;
1451} __attribute__ ((packed));
1452
1316struct iwl4965_tx_resp { 1453struct iwl4965_tx_resp {
1317 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1454 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1318 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1455 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
@@ -1347,11 +1484,6 @@ struct iwl4965_tx_resp {
1347 __le32 status; /* TX status (for aggregation status of 1st frame) */ 1484 __le32 status; /* TX status (for aggregation status of 1st frame) */
1348} __attribute__ ((packed)); 1485} __attribute__ ((packed));
1349 1486
1350struct agg_tx_status {
1351 __le16 status;
1352 __le16 sequence;
1353} __attribute__ ((packed));
1354
1355struct iwl4965_tx_resp_agg { 1487struct iwl4965_tx_resp_agg {
1356 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1488 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1357 u8 reserved1; 1489 u8 reserved1;
@@ -1366,6 +1498,44 @@ struct iwl4965_tx_resp_agg {
1366 /* of 1st frame) */ 1498 /* of 1st frame) */
1367} __attribute__ ((packed)); 1499} __attribute__ ((packed));
1368 1500
1501struct iwl5000_tx_resp {
1502 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1503 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1504 u8 failure_rts; /* # failures due to unsuccessful RTS */
1505 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1506
1507 /* For non-agg: Rate at which frame was successful.
1508 * For agg: Rate at which all frames were transmitted. */
1509 __le32 rate_n_flags; /* RATE_MCS_* */
1510
1511 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1512 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1513 __le16 wireless_media_time; /* uSecs */
1514
1515 __le16 reserved;
1516 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1517 __le32 pa_power2;
1518
1519 __le32 tfd_info;
1520 __le16 seq_ctl;
1521 __le16 byte_cnt;
1522 __le32 tlc_info;
1523 /*
1524 * For non-agg: frame status TX_STATUS_*
1525 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1526 * fields follow this one, up to frame_count.
1527 * Bit fields:
1528 * 11- 0: AGG_TX_STATE_* status code
1529 * 15-12: Retry count for 1st frame in aggregation (retries
1530 * occur if tx failed for this frame when it was a
1531 * member of a previous aggregation block). If rate
1532 * scaling is used, retry count indicates the rate
1533 * table entry used for all frames in the new agg.
1534 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1535 */
1536 struct agg_tx_status status; /* TX status (in aggregation -
1537 * status of 1st frame) */
1538} __attribute__ ((packed));
1369/* 1539/*
1370 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1540 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1371 * 1541 *
@@ -1853,6 +2023,7 @@ struct iwl4965_spectrum_notification {
1853#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0) 2023#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0)
1854#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2) 2024#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2)
1855#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2025#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
2026#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
1856 2027
1857struct iwl4965_powertable_cmd { 2028struct iwl4965_powertable_cmd {
1858 __le16 flags; 2029 __le16 flags;
@@ -2051,7 +2222,7 @@ struct iwl4965_scan_cmd {
2051 2222
2052 /* For active scans (set to all-0s for passive scans). 2223 /* For active scans (set to all-0s for passive scans).
2053 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2224 * Does not include payload. Must specify Tx rate; no rate scaling. */
2054 struct iwl4965_tx_cmd tx_cmd; 2225 struct iwl_tx_cmd tx_cmd;
2055 2226
2056 /* For directed active scans (set to all-0s otherwise) */ 2227 /* For directed active scans (set to all-0s otherwise) */
2057 struct iwl4965_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2228 struct iwl4965_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2148,7 +2319,7 @@ struct iwl4965_beacon_notif {
2148 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2319 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2149 */ 2320 */
2150struct iwl4965_tx_beacon_cmd { 2321struct iwl4965_tx_beacon_cmd {
2151 struct iwl4965_tx_cmd tx; 2322 struct iwl_tx_cmd tx;
2152 __le16 tim_idx; 2323 __le16 tim_idx;
2153 u8 tim_size; 2324 u8 tim_size;
2154 u8 reserved1; 2325 u8 reserved1;
@@ -2559,7 +2730,7 @@ struct iwl4965_missed_beacon_notif {
2559 */ 2730 */
2560 2731
2561/* 2732/*
2562 * Table entries in SENSITIVITY_CMD (struct iwl4965_sensitivity_cmd) 2733 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
2563 */ 2734 */
2564#define HD_TABLE_SIZE (11) /* number of entries */ 2735#define HD_TABLE_SIZE (11) /* number of entries */
2565#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 2736#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
@@ -2574,18 +2745,18 @@ struct iwl4965_missed_beacon_notif {
2574#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 2745#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
2575#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 2746#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
2576 2747
2577/* Control field in struct iwl4965_sensitivity_cmd */ 2748/* Control field in struct iwl_sensitivity_cmd */
2578#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) 2749#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0)
2579#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) 2750#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1)
2580 2751
2581/** 2752/**
2582 * struct iwl4965_sensitivity_cmd 2753 * struct iwl_sensitivity_cmd
2583 * @control: (1) updates working table, (0) updates default table 2754 * @control: (1) updates working table, (0) updates default table
2584 * @table: energy threshold values, use HD_* as index into table 2755 * @table: energy threshold values, use HD_* as index into table
2585 * 2756 *
2586 * Always use "1" in "control" to update uCode's working table and DSP. 2757 * Always use "1" in "control" to update uCode's working table and DSP.
2587 */ 2758 */
2588struct iwl4965_sensitivity_cmd { 2759struct iwl_sensitivity_cmd {
2589 __le16 control; /* always use "1" */ 2760 __le16 control; /* always use "1" */
2590 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 2761 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
2591} __attribute__ ((packed)); 2762} __attribute__ ((packed));
@@ -2659,6 +2830,86 @@ struct iwl4965_calibration_cmd {
2659 u8 reserved1; 2830 u8 reserved1;
2660} __attribute__ ((packed)); 2831} __attribute__ ((packed));
2661 2832
2833/* Phy calibration command for 5000 series */
2834
2835enum {
2836 IWL5000_PHY_CALIBRATE_DC_CMD = 8,
2837 IWL5000_PHY_CALIBRATE_LO_CMD = 9,
2838 IWL5000_PHY_CALIBRATE_RX_BB_CMD = 10,
2839 IWL5000_PHY_CALIBRATE_TX_IQ_CMD = 11,
2840 IWL5000_PHY_CALIBRATE_RX_IQ_CMD = 12,
2841 IWL5000_PHY_CALIBRATION_NOISE_CMD = 13,
2842 IWL5000_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
2843 IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
2844 IWL5000_PHY_CALIBRATE_BASE_BAND_CMD = 16,
2845 IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
2846 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18,
2847 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
2848};
2849
2850enum {
2851 CALIBRATION_CFG_CMD = 0x65,
2852 CALIBRATION_RES_NOTIFICATION = 0x66,
2853 CALIBRATION_COMPLETE_NOTIFICATION = 0x67
2854};
2855
2856struct iwl_cal_crystal_freq_cmd {
2857 u8 cap_pin1;
2858 u8 cap_pin2;
2859} __attribute__ ((packed));
2860
2861struct iwl5000_calibration {
2862 u8 op_code;
2863 u8 first_group;
2864 u8 num_groups;
2865 u8 all_data_valid;
2866 struct iwl_cal_crystal_freq_cmd data;
2867} __attribute__ ((packed));
2868
2869#define IWL_CALIB_INIT_CFG_ALL __constant_cpu_to_le32(0xffffffff)
2870
2871struct iwl_calib_cfg_elmnt_s {
2872 __le32 is_enable;
2873 __le32 start;
2874 __le32 send_res;
2875 __le32 apply_res;
2876 __le32 reserved;
2877} __attribute__ ((packed));
2878
2879struct iwl_calib_cfg_status_s {
2880 struct iwl_calib_cfg_elmnt_s once;
2881 struct iwl_calib_cfg_elmnt_s perd;
2882 __le32 flags;
2883} __attribute__ ((packed));
2884
2885struct iwl5000_calib_cfg_cmd {
2886 struct iwl_calib_cfg_status_s ucd_calib_cfg;
2887 struct iwl_calib_cfg_status_s drv_calib_cfg;
2888 __le32 reserved1;
2889} __attribute__ ((packed));
2890
2891struct iwl5000_calib_hdr {
2892 u8 op_code;
2893 u8 first_group;
2894 u8 groups_num;
2895 u8 data_valid;
2896} __attribute__ ((packed));
2897
2898struct iwl5000_calibration_chain_noise_reset_cmd {
2899 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
2900 u8 flags; /* not used */
2901 __le16 reserved;
2902} __attribute__ ((packed));
2903
2904struct iwl5000_calibration_chain_noise_gain_cmd {
2905 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
2906 u8 flags; /* not used */
2907 __le16 reserved;
2908 u8 delta_gain_1;
2909 u8 delta_gain_2;
2910 __le16 reserved1;
2911} __attribute__ ((packed));
2912
2662/****************************************************************************** 2913/******************************************************************************
2663 * (12) 2914 * (12)
2664 * Miscellaneous Commands: 2915 * Miscellaneous Commands:
@@ -2682,30 +2933,81 @@ struct iwl4965_led_cmd {
2682 u8 reserved; 2933 u8 reserved;
2683} __attribute__ ((packed)); 2934} __attribute__ ((packed));
2684 2935
2936/*
2937 * Coexistence WIFI/WIMAX Command
2938 * COEX_PRIORITY_TABLE_CMD = 0x5a
2939 *
2940 */
2941enum {
2942 COEX_UNASSOC_IDLE = 0,
2943 COEX_UNASSOC_MANUAL_SCAN = 1,
2944 COEX_UNASSOC_AUTO_SCAN = 2,
2945 COEX_CALIBRATION = 3,
2946 COEX_PERIODIC_CALIBRATION = 4,
2947 COEX_CONNECTION_ESTAB = 5,
2948 COEX_ASSOCIATED_IDLE = 6,
2949 COEX_ASSOC_MANUAL_SCAN = 7,
2950 COEX_ASSOC_AUTO_SCAN = 8,
2951 COEX_ASSOC_ACTIVE_LEVEL = 9,
2952 COEX_RF_ON = 10,
2953 COEX_RF_OFF = 11,
2954 COEX_STAND_ALONE_DEBUG = 12,
2955 COEX_IPAN_ASSOC_LEVEL = 13,
2956 COEX_RSRVD1 = 14,
2957 COEX_RSRVD2 = 15,
2958 COEX_NUM_OF_EVENTS = 16
2959};
2960
2961struct iwl_wimax_coex_event_entry {
2962 u8 request_prio;
2963 u8 win_medium_prio;
2964 u8 reserved;
2965 u8 flags;
2966} __attribute__ ((packed));
2967
2968/* COEX flag masks */
2969
2970/* Staion table is valid */
2971#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1)
2972/* UnMask wakeup src at unassociated sleep */
2973#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4)
2974/* UnMask wakeup src at associated sleep */
2975#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8)
2976/* Enable CoEx feature. */
2977#define COEX_FLAGS_COEX_ENABLE_MSK (0x80)
2978
2979struct iwl_wimax_coex_cmd {
2980 u8 flags;
2981 u8 reserved[3];
2982 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
2983} __attribute__ ((packed));
2984
2685/****************************************************************************** 2985/******************************************************************************
2686 * (13) 2986 * (13)
2687 * Union of all expected notifications/responses: 2987 * Union of all expected notifications/responses:
2688 * 2988 *
2689 *****************************************************************************/ 2989 *****************************************************************************/
2690 2990
2691struct iwl4965_rx_packet { 2991struct iwl_rx_packet {
2692 __le32 len; 2992 __le32 len;
2693 struct iwl_cmd_header hdr; 2993 struct iwl_cmd_header hdr;
2694 union { 2994 union {
2695 struct iwl4965_alive_resp alive_frame; 2995 struct iwl_alive_resp alive_frame;
2696 struct iwl4965_rx_frame rx_frame; 2996 struct iwl4965_rx_frame rx_frame;
2697 struct iwl4965_tx_resp tx_resp; 2997 struct iwl4965_tx_resp tx_resp;
2698 struct iwl4965_spectrum_notification spectrum_notif; 2998 struct iwl4965_spectrum_notification spectrum_notif;
2699 struct iwl4965_csa_notification csa_notif; 2999 struct iwl4965_csa_notification csa_notif;
2700 struct iwl4965_error_resp err_resp; 3000 struct iwl_error_resp err_resp;
2701 struct iwl4965_card_state_notif card_state_notif; 3001 struct iwl4965_card_state_notif card_state_notif;
2702 struct iwl4965_beacon_notif beacon_status; 3002 struct iwl4965_beacon_notif beacon_status;
2703 struct iwl4965_add_sta_resp add_sta; 3003 struct iwl_add_sta_resp add_sta;
3004 struct iwl_rem_sta_resp rem_sta;
2704 struct iwl4965_sleep_notification sleep_notif; 3005 struct iwl4965_sleep_notification sleep_notif;
2705 struct iwl4965_spectrum_resp spectrum; 3006 struct iwl4965_spectrum_resp spectrum;
2706 struct iwl4965_notif_statistics stats; 3007 struct iwl4965_notif_statistics stats;
2707 struct iwl4965_compressed_ba_resp compressed_ba; 3008 struct iwl4965_compressed_ba_resp compressed_ba;
2708 struct iwl4965_missed_beacon_notif missed_beacon; 3009 struct iwl4965_missed_beacon_notif missed_beacon;
3010 struct iwl5000_calibration calib;
2709 __le32 status; 3011 __le32 status;
2710 u8 raw[0]; 3012 u8 raw[0];
2711 } u; 3013 } u;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2dfd982d7d1f..61716ba90427 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -34,9 +34,11 @@
34struct iwl_priv; /* FIXME: remove */ 34struct iwl_priv; /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-4965.h" /* FIXME: remove */ 37#include "iwl-dev.h" /* FIXME: remove */
38#include "iwl-core.h" 38#include "iwl-core.h"
39#include "iwl-io.h"
39#include "iwl-rfkill.h" 40#include "iwl-rfkill.h"
41#include "iwl-power.h"
40 42
41 43
42MODULE_DESCRIPTION("iwl core"); 44MODULE_DESCRIPTION("iwl core");
@@ -44,10 +46,49 @@ MODULE_VERSION(IWLWIFI_VERSION);
44MODULE_AUTHOR(DRV_COPYRIGHT); 46MODULE_AUTHOR(DRV_COPYRIGHT);
45MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
46 48
47#ifdef CONFIG_IWLWIFI_DEBUG 49#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
48u32 iwl_debug_level; 50 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
49EXPORT_SYMBOL(iwl_debug_level); 51 IWL_RATE_SISO_##s##M_PLCP, \
50#endif 52 IWL_RATE_MIMO2_##s##M_PLCP,\
53 IWL_RATE_MIMO3_##s##M_PLCP,\
54 IWL_RATE_##r##M_IEEE, \
55 IWL_RATE_##ip##M_INDEX, \
56 IWL_RATE_##in##M_INDEX, \
57 IWL_RATE_##rp##M_INDEX, \
58 IWL_RATE_##rn##M_INDEX, \
59 IWL_RATE_##pp##M_INDEX, \
60 IWL_RATE_##np##M_INDEX }
61
62/*
63 * Parameter order:
64 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
65 *
66 * If there isn't a valid next or previous rate then INV is used which
67 * maps to IWL_RATE_INVALID
68 *
69 */
70const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
71 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
72 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
73 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
74 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
75 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
76 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
77 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
78 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
79 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
80 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
81 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
82 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
83 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
84 /* FIXME:RS: ^^ should be INV (legacy) */
85};
86EXPORT_SYMBOL(iwl_rates);
87
88
89const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
90EXPORT_SYMBOL(iwl_bcast_addr);
91
51 92
52/* This function both allocates and initializes hw and priv. */ 93/* This function both allocates and initializes hw and priv. */
53struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 94struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
@@ -72,6 +113,108 @@ out:
72} 113}
73EXPORT_SYMBOL(iwl_alloc_all); 114EXPORT_SYMBOL(iwl_alloc_all);
74 115
116void iwl_hw_detect(struct iwl_priv *priv)
117{
118 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
119 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
120 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
121}
122EXPORT_SYMBOL(iwl_hw_detect);
123
124/* Tell nic where to find the "keep warm" buffer */
125int iwl_kw_init(struct iwl_priv *priv)
126{
127 unsigned long flags;
128 int ret;
129
130 spin_lock_irqsave(&priv->lock, flags);
131 ret = iwl_grab_nic_access(priv);
132 if (ret)
133 goto out;
134
135 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
136 priv->kw.dma_addr >> 4);
137 iwl_release_nic_access(priv);
138out:
139 spin_unlock_irqrestore(&priv->lock, flags);
140 return ret;
141}
142
143int iwl_kw_alloc(struct iwl_priv *priv)
144{
145 struct pci_dev *dev = priv->pci_dev;
146 struct iwl_kw *kw = &priv->kw;
147
148 kw->size = IWL_KW_SIZE;
149 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
150 if (!kw->v_addr)
151 return -ENOMEM;
152
153 return 0;
154}
155
156/**
157 * iwl_kw_free - Free the "keep warm" buffer
158 */
159void iwl_kw_free(struct iwl_priv *priv)
160{
161 struct pci_dev *dev = priv->pci_dev;
162 struct iwl_kw *kw = &priv->kw;
163
164 if (kw->v_addr) {
165 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
166 memset(kw, 0, sizeof(*kw));
167 }
168}
169
170int iwl_hw_nic_init(struct iwl_priv *priv)
171{
172 unsigned long flags;
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 int ret;
175
176 /* nic_init */
177 spin_lock_irqsave(&priv->lock, flags);
178 priv->cfg->ops->lib->apm_ops.init(priv);
179 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
180 spin_unlock_irqrestore(&priv->lock, flags);
181
182 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
183
184 priv->cfg->ops->lib->apm_ops.config(priv);
185
186 /* Allocate the RX queue, or reset if it is already allocated */
187 if (!rxq->bd) {
188 ret = iwl_rx_queue_alloc(priv);
189 if (ret) {
190 IWL_ERROR("Unable to initialize Rx queue\n");
191 return -ENOMEM;
192 }
193 } else
194 iwl_rx_queue_reset(priv, rxq);
195
196 iwl_rx_replenish(priv);
197
198 iwl_rx_init(priv, rxq);
199
200 spin_lock_irqsave(&priv->lock, flags);
201
202 rxq->need_update = 1;
203 iwl_rx_queue_update_write_ptr(priv, rxq);
204
205 spin_unlock_irqrestore(&priv->lock, flags);
206
207 /* Allocate and init all Tx and Command queues */
208 ret = iwl_txq_ctx_reset(priv);
209 if (ret)
210 return ret;
211
212 set_bit(STATUS_INIT, &priv->status);
213
214 return 0;
215}
216EXPORT_SYMBOL(iwl_hw_nic_init);
217
75/** 218/**
76 * iwlcore_clear_stations_table - Clear the driver's station table 219 * iwlcore_clear_stations_table - Clear the driver's station table
77 * 220 *
@@ -90,7 +233,7 @@ void iwlcore_clear_stations_table(struct iwl_priv *priv)
90} 233}
91EXPORT_SYMBOL(iwlcore_clear_stations_table); 234EXPORT_SYMBOL(iwlcore_clear_stations_table);
92 235
93void iwlcore_reset_qos(struct iwl_priv *priv) 236void iwl_reset_qos(struct iwl_priv *priv)
94{ 237{
95 u16 cw_min = 15; 238 u16 cw_min = 15;
96 u16 cw_max = 1023; 239 u16 cw_max = 1023;
@@ -176,7 +319,427 @@ void iwlcore_reset_qos(struct iwl_priv *priv)
176 319
177 spin_unlock_irqrestore(&priv->lock, flags); 320 spin_unlock_irqrestore(&priv->lock, flags);
178} 321}
179EXPORT_SYMBOL(iwlcore_reset_qos); 322EXPORT_SYMBOL(iwl_reset_qos);
323
324#ifdef CONFIG_IWL4965_HT
325#define MAX_BIT_RATE_40_MHZ 0x96; /* 150 Mbps */
326#define MAX_BIT_RATE_20_MHZ 0x48; /* 72 Mbps */
327static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
328 struct ieee80211_ht_info *ht_info,
329 enum ieee80211_band band)
330{
331 u16 max_bit_rate = 0;
332 u8 rx_chains_num = priv->hw_params.rx_chains_num;
333 u8 tx_chains_num = priv->hw_params.tx_chains_num;
334
335 ht_info->cap = 0;
336 memset(ht_info->supp_mcs_set, 0, 16);
337
338 ht_info->ht_supported = 1;
339
340 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
341 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
342 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
343 (IWL_MIMO_PS_NONE << 2));
344
345 max_bit_rate = MAX_BIT_RATE_20_MHZ;
346 if (priv->hw_params.fat_channel & BIT(band)) {
347 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
348 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
349 ht_info->supp_mcs_set[4] = 0x01;
350 max_bit_rate = MAX_BIT_RATE_40_MHZ;
351 }
352
353 if (priv->cfg->mod_params->amsdu_size_8K)
354 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
355
356 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
357 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
358
359 ht_info->supp_mcs_set[0] = 0xFF;
360 if (rx_chains_num >= 2)
361 ht_info->supp_mcs_set[1] = 0xFF;
362 if (rx_chains_num >= 3)
363 ht_info->supp_mcs_set[2] = 0xFF;
364
365 /* Highest supported Rx data rate */
366 max_bit_rate *= rx_chains_num;
367 ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF);
368 ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8);
369
370 /* Tx MCS capabilities */
371 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
372 if (tx_chains_num != rx_chains_num) {
373 ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF;
374 ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2);
375 }
376}
377#else
378static inline void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
379 struct ieee80211_ht_info *ht_info,
380 enum ieee80211_band band)
381{
382}
383#endif /* CONFIG_IWL4965_HT */
384
385static void iwlcore_init_hw_rates(struct iwl_priv *priv,
386 struct ieee80211_rate *rates)
387{
388 int i;
389
390 for (i = 0; i < IWL_RATE_COUNT; i++) {
391 rates[i].bitrate = iwl_rates[i].ieee * 5;
392 rates[i].hw_value = i; /* Rate scaling will work on indexes */
393 rates[i].hw_value_short = i;
394 rates[i].flags = 0;
395 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
396 /*
397 * If CCK != 1M then set short preamble rate flag.
398 */
399 rates[i].flags |=
400 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
401 0 : IEEE80211_RATE_SHORT_PREAMBLE;
402 }
403 }
404}
405
406/**
407 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
408 */
409static int iwlcore_init_geos(struct iwl_priv *priv)
410{
411 struct iwl_channel_info *ch;
412 struct ieee80211_supported_band *sband;
413 struct ieee80211_channel *channels;
414 struct ieee80211_channel *geo_ch;
415 struct ieee80211_rate *rates;
416 int i = 0;
417
418 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
419 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
420 IWL_DEBUG_INFO("Geography modes already initialized.\n");
421 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
422 return 0;
423 }
424
425 channels = kzalloc(sizeof(struct ieee80211_channel) *
426 priv->channel_count, GFP_KERNEL);
427 if (!channels)
428 return -ENOMEM;
429
430 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
431 GFP_KERNEL);
432 if (!rates) {
433 kfree(channels);
434 return -ENOMEM;
435 }
436
437 /* 5.2GHz channels start after the 2.4GHz channels */
438 sband = &priv->bands[IEEE80211_BAND_5GHZ];
439 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
440 /* just OFDM */
441 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
442 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
443
444 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
445
446 sband = &priv->bands[IEEE80211_BAND_2GHZ];
447 sband->channels = channels;
448 /* OFDM & CCK */
449 sband->bitrates = rates;
450 sband->n_bitrates = IWL_RATE_COUNT;
451
452 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
453
454 priv->ieee_channels = channels;
455 priv->ieee_rates = rates;
456
457 iwlcore_init_hw_rates(priv, rates);
458
459 for (i = 0; i < priv->channel_count; i++) {
460 ch = &priv->channel_info[i];
461
462 /* FIXME: might be removed if scan is OK */
463 if (!is_channel_valid(ch))
464 continue;
465
466 if (is_channel_a_band(ch))
467 sband = &priv->bands[IEEE80211_BAND_5GHZ];
468 else
469 sband = &priv->bands[IEEE80211_BAND_2GHZ];
470
471 geo_ch = &sband->channels[sband->n_channels++];
472
473 geo_ch->center_freq =
474 ieee80211_channel_to_frequency(ch->channel);
475 geo_ch->max_power = ch->max_power_avg;
476 geo_ch->max_antenna_gain = 0xff;
477 geo_ch->hw_value = ch->channel;
478
479 if (is_channel_valid(ch)) {
480 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
481 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
482
483 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
484 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
485
486 if (ch->flags & EEPROM_CHANNEL_RADAR)
487 geo_ch->flags |= IEEE80211_CHAN_RADAR;
488
489 switch (ch->fat_extension_channel) {
490 case HT_IE_EXT_CHANNEL_ABOVE:
491 /* only above is allowed, disable below */
492 geo_ch->flags |= IEEE80211_CHAN_NO_FAT_BELOW;
493 break;
494 case HT_IE_EXT_CHANNEL_BELOW:
495 /* only below is allowed, disable above */
496 geo_ch->flags |= IEEE80211_CHAN_NO_FAT_ABOVE;
497 break;
498 case HT_IE_EXT_CHANNEL_NONE:
499 /* fat not allowed: disable both*/
500 geo_ch->flags |= (IEEE80211_CHAN_NO_FAT_ABOVE |
501 IEEE80211_CHAN_NO_FAT_BELOW);
502 break;
503 case HT_IE_EXT_CHANNEL_MAX:
504 /* both above and below are permitted */
505 break;
506 }
507
508 if (ch->max_power_avg > priv->max_channel_txpower_limit)
509 priv->max_channel_txpower_limit =
510 ch->max_power_avg;
511 } else {
512 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
513 }
514
515 /* Save flags for reg domain usage */
516 geo_ch->orig_flags = geo_ch->flags;
517
518 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
519 ch->channel, geo_ch->center_freq,
520 is_channel_a_band(ch) ? "5.2" : "2.4",
521 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
522 "restricted" : "valid",
523 geo_ch->flags);
524 }
525
526 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
527 priv->cfg->sku & IWL_SKU_A) {
528 printk(KERN_INFO DRV_NAME
529 ": Incorrectly detected BG card as ABG. Please send "
530 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
531 priv->pci_dev->device, priv->pci_dev->subsystem_device);
532 priv->cfg->sku &= ~IWL_SKU_A;
533 }
534
535 printk(KERN_INFO DRV_NAME
536 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
537 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
538 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
539
540
541 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
542
543 return 0;
544}
545
546/*
547 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
548 */
549static void iwlcore_free_geos(struct iwl_priv *priv)
550{
551 kfree(priv->ieee_channels);
552 kfree(priv->ieee_rates);
553 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
554}
555
556#ifdef CONFIG_IWL4965_HT
557static u8 is_single_rx_stream(struct iwl_priv *priv)
558{
559 return !priv->current_ht_config.is_ht ||
560 ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
561 (priv->current_ht_config.supp_mcs_set[2] == 0)) ||
562 priv->ps_mode == IWL_MIMO_PS_STATIC;
563}
564static u8 iwl_is_channel_extension(struct iwl_priv *priv,
565 enum ieee80211_band band,
566 u16 channel, u8 extension_chan_offset)
567{
568 const struct iwl_channel_info *ch_info;
569
570 ch_info = iwl_get_channel_info(priv, band, channel);
571 if (!is_channel_valid(ch_info))
572 return 0;
573
574 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
575 return 0;
576
577 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
578 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
579 return 1;
580
581 return 0;
582}
583
584u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
585 struct ieee80211_ht_info *sta_ht_inf)
586{
587 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
588
589 if ((!iwl_ht_conf->is_ht) ||
590 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
591 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
592 return 0;
593
594 if (sta_ht_inf) {
595 if ((!sta_ht_inf->ht_supported) ||
596 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
597 return 0;
598 }
599
600 return iwl_is_channel_extension(priv, priv->band,
601 iwl_ht_conf->control_channel,
602 iwl_ht_conf->extension_chan_offset);
603}
604EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
605
606void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
607{
608 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
609 u32 val;
610
611 if (!ht_info->is_ht)
612 return;
613
614 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
615 if (iwl_is_fat_tx_allowed(priv, NULL))
616 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
617 else
618 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
619 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
620
621 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
622 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
623 le16_to_cpu(rxon->channel),
624 ht_info->control_channel);
625 rxon->channel = cpu_to_le16(ht_info->control_channel);
626 return;
627 }
628
629 /* Note: control channel is opposite of extension channel */
630 switch (ht_info->extension_chan_offset) {
631 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
632 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
633 break;
634 case IWL_EXT_CHANNEL_OFFSET_BELOW:
635 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
636 break;
637 case IWL_EXT_CHANNEL_OFFSET_NONE:
638 default:
639 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
640 break;
641 }
642
643 val = ht_info->ht_protection;
644
645 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
646
647 iwl_set_rxon_chain(priv);
648
649 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
650 "rxon flags 0x%X operation mode :0x%X "
651 "extension channel offset 0x%x "
652 "control chan %d\n",
653 ht_info->supp_mcs_set[0],
654 ht_info->supp_mcs_set[1],
655 ht_info->supp_mcs_set[2],
656 le32_to_cpu(rxon->flags), ht_info->ht_protection,
657 ht_info->extension_chan_offset,
658 ht_info->control_channel);
659 return;
660}
661EXPORT_SYMBOL(iwl_set_rxon_ht);
662
663#else
664static inline u8 is_single_rx_stream(struct iwl_priv *priv)
665{
666 return 1;
667}
668#endif /*CONFIG_IWL4965_HT */
669
670/*
671 * Determine how many receiver/antenna chains to use.
672 * More provides better reception via diversity. Fewer saves power.
673 * MIMO (dual stream) requires at least 2, but works better with 3.
674 * This does not determine *which* chains to use, just how many.
675 */
676static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
677 u8 *idle_state, u8 *rx_state)
678{
679 u8 is_single = is_single_rx_stream(priv);
680 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
681
682 /* # of Rx chains to use when expecting MIMO. */
683 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
684 *rx_state = 2;
685 else
686 *rx_state = 3;
687
688 /* # Rx chains when idling and maybe trying to save power */
689 switch (priv->ps_mode) {
690 case IWL_MIMO_PS_STATIC:
691 case IWL_MIMO_PS_DYNAMIC:
692 *idle_state = (is_cam) ? 2 : 1;
693 break;
694 case IWL_MIMO_PS_NONE:
695 *idle_state = (is_cam) ? *rx_state : 1;
696 break;
697 default:
698 *idle_state = 1;
699 break;
700 }
701
702 return 0;
703}
704
705/**
706 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
707 *
708 * Selects how many and which Rx receivers/antennas/chains to use.
709 * This should not be used for scan command ... it puts data in wrong place.
710 */
711void iwl_set_rxon_chain(struct iwl_priv *priv)
712{
713 u8 is_single = is_single_rx_stream(priv);
714 u8 idle_state, rx_state;
715
716 priv->staging_rxon.rx_chain = 0;
717 rx_state = idle_state = 3;
718
719 /* Tell uCode which antennas are actually connected.
720 * Before first association, we assume all antennas are connected.
721 * Just after first association, iwl_chain_noise_calibration()
722 * checks which antennas actually *are* connected. */
723 priv->staging_rxon.rx_chain |=
724 cpu_to_le16(priv->hw_params.valid_rx_ant <<
725 RXON_RX_CHAIN_VALID_POS);
726
727 /* How many receivers should we use? */
728 iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state);
729 priv->staging_rxon.rx_chain |=
730 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
731 priv->staging_rxon.rx_chain |=
732 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
733
734 if (!is_single && (rx_state >= 2) &&
735 !test_bit(STATUS_POWER_PMI, &priv->status))
736 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
737 else
738 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
739
740 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
741}
742EXPORT_SYMBOL(iwl_set_rxon_chain);
180 743
181/** 744/**
182 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON 745 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON
@@ -188,7 +751,7 @@ EXPORT_SYMBOL(iwlcore_reset_qos);
188 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 751 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
189 * in the staging RXON flag structure based on the phymode 752 * in the staging RXON flag structure based on the phymode
190 */ 753 */
191int iwlcore_set_rxon_channel(struct iwl_priv *priv, 754int iwl_set_rxon_channel(struct iwl_priv *priv,
192 enum ieee80211_band band, 755 enum ieee80211_band band,
193 u16 channel) 756 u16 channel)
194{ 757{
@@ -214,41 +777,143 @@ int iwlcore_set_rxon_channel(struct iwl_priv *priv,
214 777
215 return 0; 778 return 0;
216} 779}
217EXPORT_SYMBOL(iwlcore_set_rxon_channel); 780EXPORT_SYMBOL(iwl_set_rxon_channel);
218 781
219static void iwlcore_init_hw(struct iwl_priv *priv) 782int iwl_setup_mac(struct iwl_priv *priv)
220{ 783{
784 int ret;
221 struct ieee80211_hw *hw = priv->hw; 785 struct ieee80211_hw *hw = priv->hw;
222 hw->rate_control_algorithm = "iwl-4965-rs"; 786 hw->rate_control_algorithm = "iwl-4965-rs";
223 787
224 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 788 /* Tell mac80211 our characteristics */
225 * the range of signal quality values that we'll provide. 789 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
226 * Negative values for level/noise indicate that we'll provide dBm. 790 IEEE80211_HW_SIGNAL_DBM |
227 * For WE, at least, non-0 values here *enable* display of values 791 IEEE80211_HW_NOISE_DBM;
228 * in app (iwconfig). */
229 hw->max_rssi = -20; /* signal level, negative indicates dBm */
230 hw->max_noise = -20; /* noise level, negative indicates dBm */
231 hw->max_signal = 100; /* link quality indication (%) */
232
233 /* Tell mac80211 our Tx characteristics */
234 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
235
236 /* Default value; 4 EDCA QOS priorities */ 792 /* Default value; 4 EDCA QOS priorities */
237 hw->queues = 4; 793 hw->queues = 4;
238#ifdef CONFIG_IWL4965_HT 794#ifdef CONFIG_IWL4965_HT
239 /* Enhanced value; more queues, to support 11n aggregation */ 795 /* Enhanced value; more queues, to support 11n aggregation */
240 hw->queues = 16; 796 hw->ampdu_queues = 12;
241#endif /* CONFIG_IWL4965_HT */ 797#endif /* CONFIG_IWL4965_HT */
798
799 hw->conf.beacon_int = 100;
800
801 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
802 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
803 &priv->bands[IEEE80211_BAND_2GHZ];
804 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
805 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
806 &priv->bands[IEEE80211_BAND_5GHZ];
807
808 ret = ieee80211_register_hw(priv->hw);
809 if (ret) {
810 IWL_ERROR("Failed to register hw (error %d)\n", ret);
811 return ret;
812 }
813 priv->mac80211_registered = 1;
814
815 return 0;
242} 816}
817EXPORT_SYMBOL(iwl_setup_mac);
818
243 819
244int iwl_setup(struct iwl_priv *priv) 820int iwl_init_drv(struct iwl_priv *priv)
245{ 821{
246 int ret = 0; 822 int ret;
247 iwlcore_init_hw(priv); 823 int i;
248 ret = priv->cfg->ops->lib->init_drv(priv); 824
825 priv->retry_rate = 1;
826 priv->ibss_beacon = NULL;
827
828 spin_lock_init(&priv->lock);
829 spin_lock_init(&priv->power_data.lock);
830 spin_lock_init(&priv->sta_lock);
831 spin_lock_init(&priv->hcmd_lock);
832 spin_lock_init(&priv->lq_mngr.lock);
833
834 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
835 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
836
837 INIT_LIST_HEAD(&priv->free_frames);
838
839 mutex_init(&priv->mutex);
840
841 /* Clear the driver's (not device's) station table */
842 iwlcore_clear_stations_table(priv);
843
844 priv->data_retry_limit = -1;
845 priv->ieee_channels = NULL;
846 priv->ieee_rates = NULL;
847 priv->band = IEEE80211_BAND_2GHZ;
848
849 priv->iw_mode = IEEE80211_IF_TYPE_STA;
850
851 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
852 priv->ps_mode = IWL_MIMO_PS_NONE;
853
854 /* Choose which receivers/antennas to use */
855 iwl_set_rxon_chain(priv);
856
857 if (priv->cfg->mod_params->enable_qos)
858 priv->qos_data.qos_enable = 1;
859
860 iwl_reset_qos(priv);
861
862 priv->qos_data.qos_active = 0;
863 priv->qos_data.qos_cap.val = 0;
864
865 iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
866
867 priv->rates_mask = IWL_RATES_MASK;
868 /* If power management is turned on, default to AC mode */
869 priv->power_mode = IWL_POWER_AC;
870 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
871
872 ret = iwl_init_channel_map(priv);
873 if (ret) {
874 IWL_ERROR("initializing regulatory failed: %d\n", ret);
875 goto err;
876 }
877
878 ret = iwlcore_init_geos(priv);
879 if (ret) {
880 IWL_ERROR("initializing geos failed: %d\n", ret);
881 goto err_free_channel_map;
882 }
883
884 return 0;
885
886err_free_channel_map:
887 iwl_free_channel_map(priv);
888err:
249 return ret; 889 return ret;
250} 890}
251EXPORT_SYMBOL(iwl_setup); 891EXPORT_SYMBOL(iwl_init_drv);
892
893void iwl_free_calib_results(struct iwl_priv *priv)
894{
895 kfree(priv->calib_results.lo_res);
896 priv->calib_results.lo_res = NULL;
897 priv->calib_results.lo_res_len = 0;
898
899 kfree(priv->calib_results.tx_iq_res);
900 priv->calib_results.tx_iq_res = NULL;
901 priv->calib_results.tx_iq_res_len = 0;
902
903 kfree(priv->calib_results.tx_iq_perd_res);
904 priv->calib_results.tx_iq_perd_res = NULL;
905 priv->calib_results.tx_iq_perd_res_len = 0;
906}
907EXPORT_SYMBOL(iwl_free_calib_results);
908
909void iwl_uninit_drv(struct iwl_priv *priv)
910{
911 iwl_free_calib_results(priv);
912 iwlcore_free_geos(priv);
913 iwl_free_channel_map(priv);
914 kfree(priv->scan);
915}
916EXPORT_SYMBOL(iwl_uninit_drv);
252 917
253/* Low level driver call this function to update iwlcore with 918/* Low level driver call this function to update iwlcore with
254 * driver status. 919 * driver status.
@@ -263,8 +928,10 @@ int iwlcore_low_level_notify(struct iwl_priv *priv,
263 if (ret) 928 if (ret)
264 IWL_ERROR("Unable to initialize RFKILL system. " 929 IWL_ERROR("Unable to initialize RFKILL system. "
265 "Ignoring error: %d\n", ret); 930 "Ignoring error: %d\n", ret);
931 iwl_power_initialize(priv);
266 break; 932 break;
267 case IWLCORE_START_EVT: 933 case IWLCORE_START_EVT:
934 iwl_power_update_mode(priv, 1);
268 break; 935 break;
269 case IWLCORE_STOP_EVT: 936 case IWLCORE_STOP_EVT:
270 break; 937 break;
@@ -290,3 +957,319 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
290} 957}
291EXPORT_SYMBOL(iwl_send_statistics_request); 958EXPORT_SYMBOL(iwl_send_statistics_request);
292 959
960/**
961 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
962 * using sample data 100 bytes apart. If these sample points are good,
963 * it's a pretty good bet that everything between them is good, too.
964 */
965static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
966{
967 u32 val;
968 int ret = 0;
969 u32 errcnt = 0;
970 u32 i;
971
972 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
973
974 ret = iwl_grab_nic_access(priv);
975 if (ret)
976 return ret;
977
978 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
979 /* read data comes through single port, auto-incr addr */
980 /* NOTE: Use the debugless read so we don't flood kernel log
981 * if IWL_DL_IO is set */
982 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
983 i + RTC_INST_LOWER_BOUND);
984 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
985 if (val != le32_to_cpu(*image)) {
986 ret = -EIO;
987 errcnt++;
988 if (errcnt >= 3)
989 break;
990 }
991 }
992
993 iwl_release_nic_access(priv);
994
995 return ret;
996}
997
998/**
999 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
1000 * looking at all data.
1001 */
1002static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1003 u32 len)
1004{
1005 u32 val;
1006 u32 save_len = len;
1007 int ret = 0;
1008 u32 errcnt;
1009
1010 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1011
1012 ret = iwl_grab_nic_access(priv);
1013 if (ret)
1014 return ret;
1015
1016 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
1017
1018 errcnt = 0;
1019 for (; len > 0; len -= sizeof(u32), image++) {
1020 /* read data comes through single port, auto-incr addr */
1021 /* NOTE: Use the debugless read so we don't flood kernel log
1022 * if IWL_DL_IO is set */
1023 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1024 if (val != le32_to_cpu(*image)) {
1025 IWL_ERROR("uCode INST section is invalid at "
1026 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1027 save_len - len, val, le32_to_cpu(*image));
1028 ret = -EIO;
1029 errcnt++;
1030 if (errcnt >= 20)
1031 break;
1032 }
1033 }
1034
1035 iwl_release_nic_access(priv);
1036
1037 if (!errcnt)
1038 IWL_DEBUG_INFO
1039 ("ucode image in INSTRUCTION memory is good\n");
1040
1041 return ret;
1042}
1043
1044/**
1045 * iwl_verify_ucode - determine which instruction image is in SRAM,
1046 * and verify its contents
1047 */
1048int iwl_verify_ucode(struct iwl_priv *priv)
1049{
1050 __le32 *image;
1051 u32 len;
1052 int ret;
1053
1054 /* Try bootstrap */
1055 image = (__le32 *)priv->ucode_boot.v_addr;
1056 len = priv->ucode_boot.len;
1057 ret = iwlcore_verify_inst_sparse(priv, image, len);
1058 if (!ret) {
1059 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
1060 return 0;
1061 }
1062
1063 /* Try initialize */
1064 image = (__le32 *)priv->ucode_init.v_addr;
1065 len = priv->ucode_init.len;
1066 ret = iwlcore_verify_inst_sparse(priv, image, len);
1067 if (!ret) {
1068 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
1069 return 0;
1070 }
1071
1072 /* Try runtime/protocol */
1073 image = (__le32 *)priv->ucode_code.v_addr;
1074 len = priv->ucode_code.len;
1075 ret = iwlcore_verify_inst_sparse(priv, image, len);
1076 if (!ret) {
1077 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
1078 return 0;
1079 }
1080
1081 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1082
1083 /* Since nothing seems to match, show first several data entries in
1084 * instruction SRAM, so maybe visual inspection will give a clue.
1085 * Selection of bootstrap image (vs. other images) is arbitrary. */
1086 image = (__le32 *)priv->ucode_boot.v_addr;
1087 len = priv->ucode_boot.len;
1088 ret = iwl_verify_inst_full(priv, image, len);
1089
1090 return ret;
1091}
1092EXPORT_SYMBOL(iwl_verify_ucode);
1093
1094
1095static const char *desc_lookup(int i)
1096{
1097 switch (i) {
1098 case 1:
1099 return "FAIL";
1100 case 2:
1101 return "BAD_PARAM";
1102 case 3:
1103 return "BAD_CHECKSUM";
1104 case 4:
1105 return "NMI_INTERRUPT";
1106 case 5:
1107 return "SYSASSERT";
1108 case 6:
1109 return "FATAL_ERROR";
1110 }
1111
1112 return "UNKNOWN";
1113}
1114
1115#define ERROR_START_OFFSET (1 * sizeof(u32))
1116#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1117
1118void iwl_dump_nic_error_log(struct iwl_priv *priv)
1119{
1120 u32 data2, line;
1121 u32 desc, time, count, base, data1;
1122 u32 blink1, blink2, ilink1, ilink2;
1123 int ret;
1124
1125 if (priv->ucode_type == UCODE_INIT)
1126 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1127 else
1128 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1129
1130 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1131 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
1132 return;
1133 }
1134
1135 ret = iwl_grab_nic_access(priv);
1136 if (ret) {
1137 IWL_WARNING("Can not read from adapter at this time.\n");
1138 return;
1139 }
1140
1141 count = iwl_read_targ_mem(priv, base);
1142
1143 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1144 IWL_ERROR("Start IWL Error Log Dump:\n");
1145 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
1146 }
1147
1148 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1149 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1150 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1151 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1152 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1153 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1154 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1155 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1156 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1157
1158 IWL_ERROR("Desc Time "
1159 "data1 data2 line\n");
1160 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
1161 desc_lookup(desc), desc, time, data1, data2, line);
1162 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
1163 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1164 ilink1, ilink2);
1165
1166 iwl_release_nic_access(priv);
1167}
1168EXPORT_SYMBOL(iwl_dump_nic_error_log);
1169
1170#define EVENT_START_OFFSET (4 * sizeof(u32))
1171
1172/**
1173 * iwl_print_event_log - Dump error event log to syslog
1174 *
1175 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
1176 */
1177void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1178 u32 num_events, u32 mode)
1179{
1180 u32 i;
1181 u32 base; /* SRAM byte address of event log header */
1182 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1183 u32 ptr; /* SRAM byte address of log data */
1184 u32 ev, time, data; /* event log data */
1185
1186 if (num_events == 0)
1187 return;
1188 if (priv->ucode_type == UCODE_INIT)
1189 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1190 else
1191 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1192
1193 if (mode == 0)
1194 event_size = 2 * sizeof(u32);
1195 else
1196 event_size = 3 * sizeof(u32);
1197
1198 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1199
1200 /* "time" is actually "data" for mode 0 (no timestamp).
1201 * place event id # at far right for easier visual parsing. */
1202 for (i = 0; i < num_events; i++) {
1203 ev = iwl_read_targ_mem(priv, ptr);
1204 ptr += sizeof(u32);
1205 time = iwl_read_targ_mem(priv, ptr);
1206 ptr += sizeof(u32);
1207 if (mode == 0)
1208 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
1209 else {
1210 data = iwl_read_targ_mem(priv, ptr);
1211 ptr += sizeof(u32);
1212 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
1213 }
1214 }
1215}
1216EXPORT_SYMBOL(iwl_print_event_log);
1217
1218
1219void iwl_dump_nic_event_log(struct iwl_priv *priv)
1220{
1221 int ret;
1222 u32 base; /* SRAM byte address of event log header */
1223 u32 capacity; /* event log capacity in # entries */
1224 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1225 u32 num_wraps; /* # times uCode wrapped to top of log */
1226 u32 next_entry; /* index of next entry to be written by uCode */
1227 u32 size; /* # entries that we'll print */
1228
1229 if (priv->ucode_type == UCODE_INIT)
1230 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1231 else
1232 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1233
1234 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1235 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
1236 return;
1237 }
1238
1239 ret = iwl_grab_nic_access(priv);
1240 if (ret) {
1241 IWL_WARNING("Can not read from adapter at this time.\n");
1242 return;
1243 }
1244
1245 /* event log header */
1246 capacity = iwl_read_targ_mem(priv, base);
1247 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1248 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1249 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1250
1251 size = num_wraps ? capacity : next_entry;
1252
1253 /* bail out if nothing in log */
1254 if (size == 0) {
1255 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
1256 iwl_release_nic_access(priv);
1257 return;
1258 }
1259
1260 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
1261 size, num_wraps);
1262
1263 /* if uCode has wrapped back to top of log, start at the oldest entry,
1264 * i.e the next one that uCode would fill. */
1265 if (num_wraps)
1266 iwl_print_event_log(priv, next_entry,
1267 capacity - next_entry, mode);
1268 /* (then/else) start at top of log */
1269 iwl_print_event_log(priv, 0, next_entry, mode);
1270
1271 iwl_release_nic_access(priv);
1272}
1273EXPORT_SYMBOL(iwl_dump_nic_event_log);
1274
1275
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7193d97630dc..6b5af7afbb25 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -86,20 +86,42 @@ struct iwl_hcmd_ops {
86 int (*rxon_assoc)(struct iwl_priv *priv); 86 int (*rxon_assoc)(struct iwl_priv *priv);
87}; 87};
88struct iwl_hcmd_utils_ops { 88struct iwl_hcmd_utils_ops {
89 int (*enqueue_hcmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 89 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
90 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
91#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
92 void (*gain_computation)(struct iwl_priv *priv,
93 u32 *average_noise,
94 u16 min_average_noise_antennat_i,
95 u32 min_average_noise);
96 void (*chain_noise_reset)(struct iwl_priv *priv);
97#endif
90}; 98};
91 99
92struct iwl_lib_ops { 100struct iwl_lib_ops {
93 /* iwlwifi driver (priv) init */
94 int (*init_drv)(struct iwl_priv *priv);
95 /* set hw dependant perameters */ 101 /* set hw dependant perameters */
96 int (*set_hw_params)(struct iwl_priv *priv); 102 int (*set_hw_params)(struct iwl_priv *priv);
97 103 /* ucode shared memory */
104 int (*alloc_shared_mem)(struct iwl_priv *priv);
105 void (*free_shared_mem)(struct iwl_priv *priv);
106 int (*shared_mem_rx_idx)(struct iwl_priv *priv);
107 /* Handling TX */
98 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, 108 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
99 struct iwl4965_tx_queue *txq, 109 struct iwl_tx_queue *txq,
100 u16 byte_cnt); 110 u16 byte_cnt);
101 /* nic init */ 111 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
102 int (*hw_nic_init)(struct iwl_priv *priv); 112 struct iwl_tx_queue *txq);
113 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
114#ifdef CONFIG_IWL4965_HT
115 /* aggregations */
116 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
117 int sta_id, int tid, u16 ssn_idx);
118 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx,
119 u8 tx_fifo);
120#endif /* CONFIG_IWL4965_HT */
121 /* setup Rx handler */
122 void (*rx_handler_setup)(struct iwl_priv *priv);
123 /* alive notification after init uCode load */
124 void (*init_alive_start)(struct iwl_priv *priv);
103 /* alive notification */ 125 /* alive notification */
104 int (*alive_notify)(struct iwl_priv *priv); 126 int (*alive_notify)(struct iwl_priv *priv);
105 /* check validity of rtc data address */ 127 /* check validity of rtc data address */
@@ -108,6 +130,17 @@ struct iwl_lib_ops {
108 int (*load_ucode)(struct iwl_priv *priv); 130 int (*load_ucode)(struct iwl_priv *priv);
109 /* rfkill */ 131 /* rfkill */
110 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio); 132 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio);
133 /* power management */
134 struct {
135 int (*init)(struct iwl_priv *priv);
136 int (*reset)(struct iwl_priv *priv);
137 void (*stop)(struct iwl_priv *priv);
138 void (*config)(struct iwl_priv *priv);
139 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
140 } apm_ops;
141 /* power */
142 int (*set_power)(struct iwl_priv *priv, void *cmd);
143 void (*update_chain_flags)(struct iwl_priv *priv);
111 /* eeprom operations (as defined in iwl-eeprom.h) */ 144 /* eeprom operations (as defined in iwl-eeprom.h) */
112 struct iwl_eeprom_ops eeprom_ops; 145 struct iwl_eeprom_ops eeprom_ops;
113}; 146};
@@ -127,12 +160,14 @@ struct iwl_mod_params {
127 int enable_qos; /* def: 1 = use quality of service */ 160 int enable_qos; /* def: 1 = use quality of service */
128 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 161 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
129 int antenna; /* def: 0 = both antennas (use diversity) */ 162 int antenna; /* def: 0 = both antennas (use diversity) */
163 int restart_fw; /* def: 1 = restart firmware */
130}; 164};
131 165
132struct iwl_cfg { 166struct iwl_cfg {
133 const char *name; 167 const char *name;
134 const char *fw_name; 168 const char *fw_name;
135 unsigned int sku; 169 unsigned int sku;
170 int eeprom_size;
136 const struct iwl_ops *ops; 171 const struct iwl_ops *ops;
137 const struct iwl_mod_params *mod_params; 172 const struct iwl_mod_params *mod_params;
138}; 173};
@@ -143,14 +178,66 @@ struct iwl_cfg {
143 178
144struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 179struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
145 struct ieee80211_ops *hw_ops); 180 struct ieee80211_ops *hw_ops);
181void iwl_hw_detect(struct iwl_priv *priv);
146 182
147void iwlcore_clear_stations_table(struct iwl_priv *priv); 183void iwlcore_clear_stations_table(struct iwl_priv *priv);
148void iwlcore_reset_qos(struct iwl_priv *priv); 184void iwl_free_calib_results(struct iwl_priv *priv);
149int iwlcore_set_rxon_channel(struct iwl_priv *priv, 185void iwl_reset_qos(struct iwl_priv *priv);
186void iwl_set_rxon_chain(struct iwl_priv *priv);
187int iwl_set_rxon_channel(struct iwl_priv *priv,
150 enum ieee80211_band band, 188 enum ieee80211_band band,
151 u16 channel); 189 u16 channel);
190void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
191u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
192 struct ieee80211_ht_info *sta_ht_inf);
193int iwl_hw_nic_init(struct iwl_priv *priv);
194int iwl_setup_mac(struct iwl_priv *priv);
195int iwl_init_drv(struct iwl_priv *priv);
196void iwl_uninit_drv(struct iwl_priv *priv);
197/* "keep warm" functions */
198int iwl_kw_init(struct iwl_priv *priv);
199int iwl_kw_alloc(struct iwl_priv *priv);
200void iwl_kw_free(struct iwl_priv *priv);
201
202/*****************************************************
203* RX
204******************************************************/
205void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
206int iwl_rx_queue_alloc(struct iwl_priv *priv);
207void iwl_rx_handle(struct iwl_priv *priv);
208int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
209 struct iwl_rx_queue *q);
210void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211void iwl_rx_replenish(struct iwl_priv *priv);
212int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
213/* FIXME: remove when TX is moved to iwl core */
214int iwl_rx_queue_restock(struct iwl_priv *priv);
215int iwl_rx_queue_space(const struct iwl_rx_queue *q);
216void iwl_rx_allocate(struct iwl_priv *priv);
217void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
218int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
219/* Handlers */
220void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
221 struct iwl_rx_mem_buffer *rxb);
222
223/* TX helpers */
152 224
153int iwl_setup(struct iwl_priv *priv); 225/*****************************************************
226* TX
227******************************************************/
228int iwl_txq_ctx_reset(struct iwl_priv *priv);
229int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
230/* FIXME: remove when free Tx is fully merged into iwlcore */
231int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
232void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
233int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
234 dma_addr_t addr, u16 len);
235int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
236#ifdef CONFIG_IWL4965_HT
237int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
238int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
239int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
240#endif
154 241
155/***************************************************** 242/*****************************************************
156 * S e n d i n g H o s t C o m m a n d s * 243 * S e n d i n g H o s t C o m m a n d s *
@@ -167,6 +254,17 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
167 int (*callback)(struct iwl_priv *priv, 254 int (*callback)(struct iwl_priv *priv,
168 struct iwl_cmd *cmd, 255 struct iwl_cmd *cmd,
169 struct sk_buff *skb)); 256 struct sk_buff *skb));
257
258int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
259
260/*****************************************************
261* Error Handling Debugging
262******************************************************/
263void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
264 u32 num_events, u32 mode);
265void iwl_dump_nic_error_log(struct iwl_priv *priv);
266void iwl_dump_nic_event_log(struct iwl_priv *priv);
267
170/*************** DRIVER STATUS FUNCTIONS *****/ 268/*************** DRIVER STATUS FUNCTIONS *****/
171 269
172#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 270#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
@@ -235,6 +333,7 @@ enum iwlcore_card_notify {
235int iwlcore_low_level_notify(struct iwl_priv *priv, 333int iwlcore_low_level_notify(struct iwl_priv *priv,
236 enum iwlcore_card_notify notify); 334 enum iwlcore_card_notify notify);
237extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags); 335extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
336extern int iwl_verify_ucode(struct iwl_priv *priv);
238int iwl_send_lq_cmd(struct iwl_priv *priv, 337int iwl_send_lq_cmd(struct iwl_priv *priv,
239 struct iwl_link_quality_cmd *lq, u8 flags); 338 struct iwl_link_quality_cmd *lq, u8 flags);
240 339
@@ -243,4 +342,10 @@ static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
243 return priv->cfg->ops->hcmd->rxon_assoc(priv); 342 return priv->cfg->ops->hcmd->rxon_assoc(priv);
244} 343}
245 344
345static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
346 struct iwl_priv *priv, enum ieee80211_band band)
347{
348 return priv->hw->wiphy->bands[band];
349}
350
246#endif /* __iwl_core_h__ */ 351#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 12725796ea5f..545ed692d889 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -87,16 +87,16 @@
87/* EEPROM reads */ 87/* EEPROM reads */
88#define CSR_EEPROM_REG (CSR_BASE+0x02c) 88#define CSR_EEPROM_REG (CSR_BASE+0x02c)
89#define CSR_EEPROM_GP (CSR_BASE+0x030) 89#define CSR_EEPROM_GP (CSR_BASE+0x030)
90#define CSR_GIO_REG (CSR_BASE+0x03C)
90#define CSR_GP_UCODE (CSR_BASE+0x044) 91#define CSR_GP_UCODE (CSR_BASE+0x044)
91#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) 92#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
92#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) 93#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
93#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) 94#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
94#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) 95#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
95#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
96#define CSR_LED_REG (CSR_BASE+0x094) 96#define CSR_LED_REG (CSR_BASE+0x094)
97#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
97 98
98/* Analog phase-lock-loop configuration (3945 only) 99/* Analog phase-lock-loop configuration */
99 * Set bit 24. */
100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) 100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
101/* 101/*
102 * Indicates hardware rev, to determine CCK backoff for txpower calculation. 102 * Indicates hardware rev, to determine CCK backoff for txpower calculation.
@@ -107,9 +107,9 @@
107 107
108/* Bits for CSR_HW_IF_CONFIG_REG */ 108/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
110#define CSR49_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 110#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
111#define CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 111#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
112#define CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 112#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
113 113
114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) 114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) 115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
@@ -170,6 +170,10 @@
170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ 170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
171 CSR_FH_INT_BIT_TX_CHNL0) 171 CSR_FH_INT_BIT_TX_CHNL0)
172 172
173/* GPIO */
174#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
175#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
176#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
173 177
174/* RESET */ 178/* RESET */
175#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) 179#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
@@ -191,6 +195,16 @@
191#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 195#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
192 196
193 197
198/* HW REV */
199#define CSR_HW_REV_TYPE_MSK (0x00000F0)
200#define CSR_HW_REV_TYPE_3945 (0x00000D0)
201#define CSR_HW_REV_TYPE_4965 (0x0000000)
202#define CSR_HW_REV_TYPE_5300 (0x0000020)
203#define CSR_HW_REV_TYPE_5350 (0x0000030)
204#define CSR_HW_REV_TYPE_5100 (0x0000050)
205#define CSR_HW_REV_TYPE_5150 (0x0000040)
206#define CSR_HW_REV_TYPE_NONE (0x00000F0)
207
194/* EEPROM REG */ 208/* EEPROM REG */
195#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 209#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
196#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 210#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -200,17 +214,15 @@
200#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) 214#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
201#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 215#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
202 216
217/* CSR GIO */
218#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
219
203/* UCODE DRV GP */ 220/* UCODE DRV GP */
204#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) 221#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
205#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) 222#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
206#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) 223#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
207#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) 224#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
208 225
209/* GPIO */
210#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
211#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
212#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
213
214/* GI Chicken Bits */ 226/* GI Chicken Bits */
215#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 227#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
216#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 228#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
@@ -220,6 +232,10 @@
220#define CSR_LED_REG_TRUN_ON (0x78) 232#define CSR_LED_REG_TRUN_ON (0x78)
221#define CSR_LED_REG_TRUN_OFF (0x38) 233#define CSR_LED_REG_TRUN_OFF (0x38)
222 234
235/* ANA_PLL */
236#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
237#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
238
223/*=== HBUS (Host-side Bus) ===*/ 239/*=== HBUS (Host-side Bus) ===*/
224#define HBUS_BASE (0x400) 240#define HBUS_BASE (0x400)
225/* 241/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c60724c21db8..11de561c7bf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -30,37 +30,36 @@
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#ifdef CONFIG_IWLWIFI_DEBUG 32#ifdef CONFIG_IWLWIFI_DEBUG
33extern u32 iwl_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \ 33#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl_debug_level & (level)) \ 34do { if (priv->debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 36 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
38 37
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 38#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl_debug_level & (level)) && net_ratelimit()) \ 39do { if ((priv->debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
43 42
44static inline void iwl_print_hex_dump(int level, void *p, u32 len)
45{
46 if (!(iwl_debug_level & level))
47 return;
48
49 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
50 p, len, 1);
51}
52
53#ifdef CONFIG_IWLWIFI_DEBUGFS 43#ifdef CONFIG_IWLWIFI_DEBUGFS
54struct iwl_debugfs { 44struct iwl_debugfs {
55 const char *name; 45 const char *name;
56 struct dentry *dir_drv; 46 struct dentry *dir_drv;
57 struct dentry *dir_data; 47 struct dentry *dir_data;
58 struct dir_data_files{ 48 struct dentry *dir_rf;
49 struct dir_data_files {
59 struct dentry *file_sram; 50 struct dentry *file_sram;
51 struct dentry *file_eeprom;
60 struct dentry *file_stations; 52 struct dentry *file_stations;
61 struct dentry *file_rx_statistics; 53 struct dentry *file_rx_statistics;
62 struct dentry *file_tx_statistics; 54 struct dentry *file_tx_statistics;
55 struct dentry *file_log_event;
63 } dbgfs_data_files; 56 } dbgfs_data_files;
57 struct dir_rf_files {
58#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
59 struct dentry *file_disable_sensitivity;
60 struct dentry *file_disable_chain_noise;
61#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
62 } dbgfs_rf_files;
64 u32 sram_offset; 63 u32 sram_offset;
65 u32 sram_len; 64 u32 sram_len;
66}; 65};
@@ -76,9 +75,6 @@ static inline void IWL_DEBUG(int level, const char *fmt, ...)
76static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) 75static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
77{ 76{
78} 77}
79static inline void iwl_print_hex_dump(int level, void *p, u32 len)
80{
81}
82#endif /* CONFIG_IWLWIFI_DEBUG */ 78#endif /* CONFIG_IWLWIFI_DEBUG */
83 79
84 80
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9a30e1df311d..29e16ba69cdb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -34,7 +34,7 @@
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35 35
36 36
37#include "iwl-4965.h" 37#include "iwl-dev.h"
38#include "iwl-debug.h" 38#include "iwl-debug.h"
39#include "iwl-core.h" 39#include "iwl-core.h"
40#include "iwl-io.h" 40#include "iwl-io.h"
@@ -55,6 +55,13 @@
55 goto err; \ 55 goto err; \
56} while (0) 56} while (0)
57 57
58#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
59 dbgfs->dbgfs_##parent##_files.file_##name = \
60 debugfs_create_bool(#name, 0644, dbgfs->dir_##parent, ptr); \
61 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name)) \
62 goto err; \
63} while (0)
64
58#define DEBUGFS_REMOVE(name) do { \ 65#define DEBUGFS_REMOVE(name) do { \
59 debugfs_remove(name); \ 66 debugfs_remove(name); \
60 name = NULL; \ 67 name = NULL; \
@@ -85,6 +92,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
85 .open = iwl_dbgfs_open_file_generic, \ 92 .open = iwl_dbgfs_open_file_generic, \
86}; 93};
87 94
95#define DEBUGFS_WRITE_FILE_OPS(name) \
96 DEBUGFS_WRITE_FUNC(name); \
97static const struct file_operations iwl_dbgfs_##name##_ops = { \
98 .write = iwl_dbgfs_##name##_write, \
99 .open = iwl_dbgfs_open_file_generic, \
100};
101
102
88#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 103#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
89 DEBUGFS_READ_FUNC(name); \ 104 DEBUGFS_READ_FUNC(name); \
90 DEBUGFS_WRITE_FUNC(name); \ 105 DEBUGFS_WRITE_FUNC(name); \
@@ -206,7 +221,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
206 size_t count, loff_t *ppos) 221 size_t count, loff_t *ppos)
207{ 222{
208 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 223 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
209 struct iwl4965_station_entry *station; 224 struct iwl_station_entry *station;
210 int max_sta = priv->hw_params.max_stations; 225 int max_sta = priv->hw_params.max_stations;
211 char *buf; 226 char *buf;
212 int i, j, pos = 0; 227 int i, j, pos = 0;
@@ -277,8 +292,70 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
277 return ret; 292 return ret;
278} 293}
279 294
295static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
296 char __user *user_buf,
297 size_t count,
298 loff_t *ppos)
299{
300 ssize_t ret;
301 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
302 int pos = 0, ofs = 0, buf_size = 0;
303 const u8 *ptr;
304 char *buf;
305 size_t eeprom_len = priv->cfg->eeprom_size;
306 buf_size = 4 * eeprom_len + 256;
307
308 if (eeprom_len % 16) {
309 IWL_ERROR("EEPROM size is not multiple of 16.\n");
310 return -ENODATA;
311 }
312
313 /* 4 characters for byte 0xYY */
314 buf = kzalloc(buf_size, GFP_KERNEL);
315 if (!buf) {
316 IWL_ERROR("Can not allocate Buffer\n");
317 return -ENOMEM;
318 }
319
320 ptr = priv->eeprom;
321 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
322 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
323 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
324 buf_size - pos, 0);
325 pos += strlen(buf);
326 if (buf_size - pos > 0)
327 buf[pos++] = '\n';
328 }
329
330 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
331 kfree(buf);
332 return ret;
333}
334
335static ssize_t iwl_dbgfs_log_event_write(struct file *file,
336 const char __user *user_buf,
337 size_t count, loff_t *ppos)
338{
339 struct iwl_priv *priv = file->private_data;
340 u32 event_log_flag;
341 char buf[8];
342 int buf_size;
343
344 memset(buf, 0, sizeof(buf));
345 buf_size = min(count, sizeof(buf) - 1);
346 if (copy_from_user(buf, user_buf, buf_size))
347 return -EFAULT;
348 if (sscanf(buf, "%d", &event_log_flag) != 1)
349 return -EFAULT;
350 if (event_log_flag == 1)
351 iwl_dump_nic_event_log(priv);
352
353 return count;
354}
280 355
281DEBUGFS_READ_WRITE_FILE_OPS(sram); 356DEBUGFS_READ_WRITE_FILE_OPS(sram);
357DEBUGFS_WRITE_FILE_OPS(log_event);
358DEBUGFS_READ_FILE_OPS(eeprom);
282DEBUGFS_READ_FILE_OPS(stations); 359DEBUGFS_READ_FILE_OPS(stations);
283DEBUGFS_READ_FILE_OPS(rx_statistics); 360DEBUGFS_READ_FILE_OPS(rx_statistics);
284DEBUGFS_READ_FILE_OPS(tx_statistics); 361DEBUGFS_READ_FILE_OPS(tx_statistics);
@@ -290,6 +367,7 @@ DEBUGFS_READ_FILE_OPS(tx_statistics);
290int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 367int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
291{ 368{
292 struct iwl_debugfs *dbgfs; 369 struct iwl_debugfs *dbgfs;
370 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
293 371
294 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 372 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
295 if (!dbgfs) { 373 if (!dbgfs) {
@@ -298,17 +376,24 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
298 376
299 priv->dbgfs = dbgfs; 377 priv->dbgfs = dbgfs;
300 dbgfs->name = name; 378 dbgfs->name = name;
301 dbgfs->dir_drv = debugfs_create_dir(name, NULL); 379 dbgfs->dir_drv = debugfs_create_dir(name, phyd);
302 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){ 380 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){
303 goto err; 381 goto err;
304 } 382 }
305 383
306 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv); 384 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
385 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
386 DEBUGFS_ADD_FILE(eeprom, data);
307 DEBUGFS_ADD_FILE(sram, data); 387 DEBUGFS_ADD_FILE(sram, data);
388 DEBUGFS_ADD_FILE(log_event, data);
308 DEBUGFS_ADD_FILE(stations, data); 389 DEBUGFS_ADD_FILE(stations, data);
309 DEBUGFS_ADD_FILE(rx_statistics, data); 390 DEBUGFS_ADD_FILE(rx_statistics, data);
310 DEBUGFS_ADD_FILE(tx_statistics, data); 391 DEBUGFS_ADD_FILE(tx_statistics, data);
311 392#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
393 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
394 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
395 &priv->disable_chain_noise_cal);
396#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
312 return 0; 397 return 0;
313 398
314err: 399err:
@@ -327,11 +412,18 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
327 if (!(priv->dbgfs)) 412 if (!(priv->dbgfs))
328 return; 413 return;
329 414
415 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
330 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics); 416 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics);
331 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics); 417 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics);
332 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram); 418 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
419 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
333 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 420 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
334 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 421 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
422#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
423 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
424 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
425#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
426 DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
335 DEBUGFS_REMOVE(priv->dbgfs->dir_drv); 427 DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
336 kfree(priv->dbgfs); 428 kfree(priv->dbgfs);
337 priv->dbgfs = NULL; 429 priv->dbgfs = NULL;
@@ -339,3 +431,4 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
339EXPORT_SYMBOL(iwl_dbgfs_unregister); 431EXPORT_SYMBOL(iwl_dbgfs_unregister);
340 432
341 433
434
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 581b98556c86..802f1a12b1aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26/* 26/*
27 * Please use this file (iwl-4965.h) for driver implementation definitions. 27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-4965-commands.h for uCode API definitions. 28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions. 29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */ 30 */
31 31
@@ -44,9 +44,13 @@
44#include "iwl-prph.h" 44#include "iwl-prph.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h" 46#include "iwl-led.h"
47#include "iwl-power.h"
47 48
48/* configuration for the iwl4965 */ 49/* configuration for the iwl4965 */
49extern struct iwl_cfg iwl4965_agn_cfg; 50extern struct iwl_cfg iwl4965_agn_cfg;
51extern struct iwl_cfg iwl5300_agn_cfg;
52extern struct iwl_cfg iwl5100_agn_cfg;
53extern struct iwl_cfg iwl5350_agn_cfg;
50 54
51/* Change firmware file name, using "-" and incrementing number, 55/* Change firmware file name, using "-" and incrementing number,
52 * *only* when uCode interface or architecture changes so that it 56 * *only* when uCode interface or architecture changes so that it
@@ -54,6 +58,8 @@ extern struct iwl_cfg iwl4965_agn_cfg;
54 * This number will also appear in << 8 position of 1st dword of uCode file */ 58 * This number will also appear in << 8 position of 1st dword of uCode file */
55#define IWL4965_UCODE_API "-1" 59#define IWL4965_UCODE_API "-1"
56 60
61/* CT-KILL constants */
62#define CT_KILL_THRESHOLD 110 /* in Celsius */
57 63
58/* Default noise level to report when noise measurement is not available. 64/* Default noise level to report when noise measurement is not available.
59 * This may be because we're: 65 * This may be because we're:
@@ -68,12 +74,6 @@ extern struct iwl_cfg iwl4965_agn_cfg;
68 * averages within an s8's (used in some apps) range of negative values. */ 74 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) 75#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
70 76
71enum iwl4965_antenna {
72 IWL_ANTENNA_DIVERSITY,
73 IWL_ANTENNA_MAIN,
74 IWL_ANTENNA_AUX
75};
76
77/* 77/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes 78 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec: 79 * Per spec:
@@ -91,7 +91,7 @@ enum iwl4965_antenna {
91#define DEFAULT_SHORT_RETRY_LIMIT 7U 91#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U 92#define DEFAULT_LONG_RETRY_LIMIT 4U
93 93
94struct iwl4965_rx_mem_buffer { 94struct iwl_rx_mem_buffer {
95 dma_addr_t dma_addr; 95 dma_addr_t dma_addr;
96 struct sk_buff *skb; 96 struct sk_buff *skb;
97 struct list_head list; 97 struct list_head list;
@@ -102,7 +102,7 @@ struct iwl4965_rx_mem_buffer {
102 * 102 *
103 * Contains common data for Rx and Tx queues 103 * Contains common data for Rx and Tx queues
104 */ 104 */
105struct iwl4965_queue { 105struct iwl_queue {
106 int n_bd; /* number of BDs in this queue */ 106 int n_bd; /* number of BDs in this queue */
107 int write_ptr; /* 1-st empty entry (index) host_w*/ 107 int write_ptr; /* 1-st empty entry (index) host_w*/
108 int read_ptr; /* last used entry (index) host_r*/ 108 int read_ptr; /* last used entry (index) host_r*/
@@ -118,13 +118,12 @@ struct iwl4965_queue {
118#define MAX_NUM_OF_TBS (20) 118#define MAX_NUM_OF_TBS (20)
119 119
120/* One for each TFD */ 120/* One for each TFD */
121struct iwl4965_tx_info { 121struct iwl_tx_info {
122 struct ieee80211_tx_status status;
123 struct sk_buff *skb[MAX_NUM_OF_TBS]; 122 struct sk_buff *skb[MAX_NUM_OF_TBS];
124}; 123};
125 124
126/** 125/**
127 * struct iwl4965_tx_queue - Tx Queue for DMA 126 * struct iwl_tx_queue - Tx Queue for DMA
128 * @q: generic Rx/Tx queue descriptor 127 * @q: generic Rx/Tx queue descriptor
129 * @bd: base of circular buffer of TFDs 128 * @bd: base of circular buffer of TFDs
130 * @cmd: array of command/Tx buffers 129 * @cmd: array of command/Tx buffers
@@ -136,12 +135,12 @@ struct iwl4965_tx_info {
136 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 135 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
137 * descriptors) and required locking structures. 136 * descriptors) and required locking structures.
138 */ 137 */
139struct iwl4965_tx_queue { 138struct iwl_tx_queue {
140 struct iwl4965_queue q; 139 struct iwl_queue q;
141 struct iwl4965_tfd_frame *bd; 140 struct iwl_tfd_frame *bd;
142 struct iwl_cmd *cmd; 141 struct iwl_cmd *cmd;
143 dma_addr_t dma_addr_cmd; 142 dma_addr_t dma_addr_cmd;
144 struct iwl4965_tx_info *txb; 143 struct iwl_tx_info *txb;
145 int need_update; 144 int need_update;
146 int sched_retry; 145 int sched_retry;
147 int active; 146 int active;
@@ -199,9 +198,9 @@ enum {
199struct iwl_channel_info { 198struct iwl_channel_info {
200 struct iwl4965_channel_tgd_info tgd; 199 struct iwl4965_channel_tgd_info tgd;
201 struct iwl4965_channel_tgh_info tgh; 200 struct iwl4965_channel_tgh_info tgh;
202 struct iwl4965_eeprom_channel eeprom; /* EEPROM regulatory limit */ 201 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
203 struct iwl4965_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for 202 struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for
204 * FAT channel */ 203 * FAT channel */
205 204
206 u8 channel; /* channel number */ 205 u8 channel; /* channel number */
207 u8 flags; /* flags copied from EEPROM */ 206 u8 flags; /* flags copied from EEPROM */
@@ -252,29 +251,9 @@ struct iwl4965_clip_group {
252 251
253/* Power management (not Tx power) structures */ 252/* Power management (not Tx power) structures */
254 253
255struct iwl4965_power_vec_entry { 254enum iwl_pwr_src {
256 struct iwl4965_powertable_cmd cmd; 255 IWL_PWR_SRC_VMAIN,
257 u8 no_dtim; 256 IWL_PWR_SRC_VAUX,
258};
259#define IWL_POWER_RANGE_0 (0)
260#define IWL_POWER_RANGE_1 (1)
261
262#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
263#define IWL_POWER_INDEX_3 0x03
264#define IWL_POWER_INDEX_5 0x05
265#define IWL_POWER_AC 0x06
266#define IWL_POWER_BATTERY 0x07
267#define IWL_POWER_LIMIT 0x07
268#define IWL_POWER_MASK 0x0F
269#define IWL_POWER_ENABLED 0x10
270#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK)
271
272struct iwl4965_power_mgr {
273 spinlock_t lock;
274 struct iwl4965_power_vec_entry pwr_range_0[IWL_POWER_AC];
275 struct iwl4965_power_vec_entry pwr_range_1[IWL_POWER_AC];
276 u8 active_index;
277 u32 dtim_val;
278}; 257};
279 258
280#define IEEE80211_DATA_LEN 2304 259#define IEEE80211_DATA_LEN 2304
@@ -282,7 +261,7 @@ struct iwl4965_power_mgr {
282#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 261#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
283#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) 262#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
284 263
285struct iwl4965_frame { 264struct iwl_frame {
286 union { 265 union {
287 struct ieee80211_hdr frame; 266 struct ieee80211_hdr frame;
288 struct iwl4965_tx_beacon_cmd beacon; 267 struct iwl4965_tx_beacon_cmd beacon;
@@ -328,6 +307,8 @@ struct iwl_cmd_meta {
328 307
329} __attribute__ ((packed)); 308} __attribute__ ((packed));
330 309
310#define IWL_CMD_MAX_PAYLOAD 320
311
331/** 312/**
332 * struct iwl_cmd 313 * struct iwl_cmd
333 * 314 *
@@ -339,7 +320,7 @@ struct iwl_cmd {
339 struct iwl_cmd_meta meta; /* driver data */ 320 struct iwl_cmd_meta meta; /* driver data */
340 struct iwl_cmd_header hdr; /* uCode API */ 321 struct iwl_cmd_header hdr; /* uCode API */
341 union { 322 union {
342 struct iwl4965_addsta_cmd addsta; 323 struct iwl_addsta_cmd addsta;
343 struct iwl4965_led_cmd led; 324 struct iwl4965_led_cmd led;
344 u32 flags; 325 u32 flags;
345 u8 val8; 326 u8 val8;
@@ -349,11 +330,12 @@ struct iwl_cmd {
349 struct iwl4965_rxon_time_cmd rxon_time; 330 struct iwl4965_rxon_time_cmd rxon_time;
350 struct iwl4965_powertable_cmd powertable; 331 struct iwl4965_powertable_cmd powertable;
351 struct iwl4965_qosparam_cmd qosparam; 332 struct iwl4965_qosparam_cmd qosparam;
352 struct iwl4965_tx_cmd tx; 333 struct iwl_tx_cmd tx;
353 struct iwl4965_tx_beacon_cmd tx_beacon; 334 struct iwl4965_tx_beacon_cmd tx_beacon;
354 struct iwl4965_rxon_assoc_cmd rxon_assoc; 335 struct iwl4965_rxon_assoc_cmd rxon_assoc;
336 struct iwl_rem_sta_cmd rm_sta;
355 u8 *indirect; 337 u8 *indirect;
356 u8 payload[360]; 338 u8 payload[IWL_CMD_MAX_PAYLOAD];
357 } __attribute__ ((packed)) cmd; 339 } __attribute__ ((packed)) cmd;
358} __attribute__ ((packed)); 340} __attribute__ ((packed));
359 341
@@ -378,7 +360,7 @@ struct iwl_host_cmd {
378#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 360#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
379 361
380/** 362/**
381 * struct iwl4965_rx_queue - Rx queue 363 * struct iwl_rx_queue - Rx queue
382 * @processed: Internal index to last handled Rx packet 364 * @processed: Internal index to last handled Rx packet
383 * @read: Shared index to newest available Rx buffer 365 * @read: Shared index to newest available Rx buffer
384 * @write: Shared index to oldest written Rx packet 366 * @write: Shared index to oldest written Rx packet
@@ -387,13 +369,13 @@ struct iwl_host_cmd {
387 * @rx_used: List of Rx buffers with no SKB 369 * @rx_used: List of Rx buffers with no SKB
388 * @need_update: flag to indicate we need to update read/write index 370 * @need_update: flag to indicate we need to update read/write index
389 * 371 *
390 * NOTE: rx_free and rx_used are used as a FIFO for iwl4965_rx_mem_buffers 372 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
391 */ 373 */
392struct iwl4965_rx_queue { 374struct iwl_rx_queue {
393 __le32 *bd; 375 __le32 *bd;
394 dma_addr_t dma_addr; 376 dma_addr_t dma_addr;
395 struct iwl4965_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 377 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
396 struct iwl4965_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 378 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
397 u32 processed; 379 u32 processed;
398 u32 read; 380 u32 read;
399 u32 write; 381 u32 write;
@@ -421,7 +403,7 @@ struct iwl4965_rx_queue {
421 403
422#ifdef CONFIG_IWL4965_HT 404#ifdef CONFIG_IWL4965_HT
423/** 405/**
424 * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack 406 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
425 * @txq_id: Tx queue used for Tx attempt 407 * @txq_id: Tx queue used for Tx attempt
426 * @frame_count: # frames attempted by Tx command 408 * @frame_count: # frames attempted by Tx command
427 * @wait_for_ba: Expect block-ack before next Tx reply 409 * @wait_for_ba: Expect block-ack before next Tx reply
@@ -434,7 +416,7 @@ struct iwl4965_rx_queue {
434 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info 416 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
435 * until block ack arrives. 417 * until block ack arrives.
436 */ 418 */
437struct iwl4965_ht_agg { 419struct iwl_ht_agg {
438 u16 txq_id; 420 u16 txq_id;
439 u16 frame_count; 421 u16 frame_count;
440 u16 wait_for_ba; 422 u16 wait_for_ba;
@@ -450,19 +432,18 @@ struct iwl4965_ht_agg {
450 432
451#endif /* CONFIG_IWL4965_HT */ 433#endif /* CONFIG_IWL4965_HT */
452 434
453struct iwl4965_tid_data { 435struct iwl_tid_data {
454 u16 seq_number; 436 u16 seq_number;
455 u16 tfds_in_queue; 437 u16 tfds_in_queue;
456#ifdef CONFIG_IWL4965_HT 438#ifdef CONFIG_IWL4965_HT
457 struct iwl4965_ht_agg agg; 439 struct iwl_ht_agg agg;
458#endif /* CONFIG_IWL4965_HT */ 440#endif /* CONFIG_IWL4965_HT */
459}; 441};
460 442
461struct iwl4965_hw_key { 443struct iwl_hw_key {
462 enum ieee80211_key_alg alg; 444 enum ieee80211_key_alg alg;
463 int keylen; 445 int keylen;
464 u8 keyidx; 446 u8 keyidx;
465 struct ieee80211_key_conf *conf;
466 u8 key[32]; 447 u8 key[32];
467}; 448};
468 449
@@ -474,7 +455,6 @@ union iwl4965_ht_rate_supp {
474 }; 455 };
475}; 456};
476 457
477#ifdef CONFIG_IWL4965_HT
478#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) 458#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
479#define CFG_HT_MPDU_DENSITY_2USEC (0x5) 459#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
480#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC 460#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
@@ -497,7 +477,6 @@ struct iwl_ht_info {
497 u8 ht_protection; 477 u8 ht_protection;
498 u8 non_GF_STA_present; 478 u8 non_GF_STA_present;
499}; 479};
500#endif /*CONFIG_IWL4965_HT */
501 480
502union iwl4965_qos_capabity { 481union iwl4965_qos_capabity {
503 struct { 482 struct {
@@ -530,12 +509,12 @@ struct iwl4965_qos_info {
530#define STA_PS_STATUS_WAKE 0 509#define STA_PS_STATUS_WAKE 0
531#define STA_PS_STATUS_SLEEP 1 510#define STA_PS_STATUS_SLEEP 1
532 511
533struct iwl4965_station_entry { 512struct iwl_station_entry {
534 struct iwl4965_addsta_cmd sta; 513 struct iwl_addsta_cmd sta;
535 struct iwl4965_tid_data tid[MAX_TID_COUNT]; 514 struct iwl_tid_data tid[MAX_TID_COUNT];
536 u8 used; 515 u8 used;
537 u8 ps_status; 516 u8 ps_status;
538 struct iwl4965_hw_key keyinfo; 517 struct iwl_hw_key keyinfo;
539}; 518};
540 519
541/* one for each uCode image (inst/data, boot/init/runtime) */ 520/* one for each uCode image (inst/data, boot/init/runtime) */
@@ -566,20 +545,51 @@ struct iwl4965_ibss_seq {
566 struct list_head list; 545 struct list_head list;
567}; 546};
568 547
548struct iwl_sensitivity_ranges {
549 u16 min_nrg_cck;
550 u16 max_nrg_cck;
551
552 u16 nrg_th_cck;
553 u16 nrg_th_ofdm;
554
555 u16 auto_corr_min_ofdm;
556 u16 auto_corr_min_ofdm_mrc;
557 u16 auto_corr_min_ofdm_x1;
558 u16 auto_corr_min_ofdm_mrc_x1;
559
560 u16 auto_corr_max_ofdm;
561 u16 auto_corr_max_ofdm_mrc;
562 u16 auto_corr_max_ofdm_x1;
563 u16 auto_corr_max_ofdm_mrc_x1;
564
565 u16 auto_corr_max_cck;
566 u16 auto_corr_max_cck_mrc;
567 u16 auto_corr_min_cck;
568 u16 auto_corr_min_cck_mrc;
569};
570
571
572#define IWL_FAT_CHANNEL_52 BIT(IEEE80211_BAND_5GHZ)
573
569/** 574/**
570 * struct iwl_hw_params 575 * struct iwl_hw_params
571 * @max_txq_num: Max # Tx queues supported 576 * @max_txq_num: Max # Tx queues supported
572 * @tx_cmd_len: Size of Tx command (but not including frame itself) 577 * @tx/rx_chains_num: Number of TX/RX chains
573 * @tx_ant_num: Number of TX antennas 578 * @valid_tx/rx_ant: usable antennas
574 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 579 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
575 * @rx_buffer_size:
576 * @max_rxq_log: Log-base-2 of max_rxq_size 580 * @max_rxq_log: Log-base-2 of max_rxq_size
581 * @rx_buf_size: Rx buffer size
577 * @max_stations: 582 * @max_stations:
578 * @bcast_sta_id: 583 * @bcast_sta_id:
584 * @fat_channel: is 40MHz width possible in band 2.4
585 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
586 * @sw_crypto: 0 for hw, 1 for sw
587 * @max_xxx_size: for ucode uses
588 * @ct_kill_threshold: temperature threshold
589 * @struct iwl_sensitivity_ranges: range of sensitivity values
579 */ 590 */
580struct iwl_hw_params { 591struct iwl_hw_params {
581 u16 max_txq_num; 592 u16 max_txq_num;
582 u16 tx_cmd_len;
583 u8 tx_chains_num; 593 u8 tx_chains_num;
584 u8 rx_chains_num; 594 u8 rx_chains_num;
585 u8 valid_tx_ant; 595 u8 valid_tx_ant;
@@ -590,10 +600,19 @@ struct iwl_hw_params {
590 u32 max_pkt_size; 600 u32 max_pkt_size;
591 u8 max_stations; 601 u8 max_stations;
592 u8 bcast_sta_id; 602 u8 bcast_sta_id;
603 u8 fat_channel;
604 u8 sw_crypto;
605 u32 max_inst_size;
606 u32 max_data_size;
607 u32 max_bsm_size;
608 u32 ct_kill_threshold; /* value in hw-dependent units */
609#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
610 const struct iwl_sensitivity_ranges *sens;
611#endif
593}; 612};
594 613
595#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) 614#define HT_SHORT_GI_20MHZ (1 << 0)
596#define HT_SHORT_GI_40MHZ_ONLY (1 << 1) 615#define HT_SHORT_GI_40MHZ (1 << 1)
597 616
598 617
599#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\ 618#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\
@@ -612,43 +631,33 @@ struct iwl_hw_params {
612 * for use by iwl-*.c 631 * for use by iwl-*.c
613 * 632 *
614 *****************************************************************************/ 633 *****************************************************************************/
615struct iwl4965_addsta_cmd; 634struct iwl_addsta_cmd;
616extern int iwl4965_send_add_station(struct iwl_priv *priv, 635extern int iwl_send_add_sta(struct iwl_priv *priv,
617 struct iwl4965_addsta_cmd *sta, u8 flags); 636 struct iwl_addsta_cmd *sta, u8 flags);
618extern u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr, 637u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
619 int is_ap, u8 flags, void *ht_data); 638 u8 flags, struct ieee80211_ht_info *ht_info);
620extern int iwl4965_is_network_packet(struct iwl_priv *priv, 639extern int iwl4965_is_network_packet(struct iwl_priv *priv,
621 struct ieee80211_hdr *header); 640 struct ieee80211_hdr *header);
622extern int iwl4965_power_init_handle(struct iwl_priv *priv); 641extern int iwl4965_power_init_handle(struct iwl_priv *priv);
623extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv, 642extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv,
624 struct iwl4965_rx_mem_buffer *rxb, 643 struct iwl_rx_mem_buffer *rxb,
625 void *data, short len, 644 void *data, short len,
626 struct ieee80211_rx_status *stats, 645 struct ieee80211_rx_status *stats,
627 u16 phy_flags); 646 u16 phy_flags);
628extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv, 647extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv,
629 struct ieee80211_hdr *header); 648 struct ieee80211_hdr *header);
630extern int iwl4965_rx_queue_alloc(struct iwl_priv *priv);
631extern void iwl4965_rx_queue_reset(struct iwl_priv *priv,
632 struct iwl4965_rx_queue *rxq);
633extern int iwl4965_calc_db_from_ratio(int sig_ratio); 649extern int iwl4965_calc_db_from_ratio(int sig_ratio);
634extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm); 650extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm);
635extern int iwl4965_tx_queue_init(struct iwl_priv *priv,
636 struct iwl4965_tx_queue *txq, int count, u32 id);
637extern void iwl4965_rx_replenish(void *data);
638extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
639extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, 651extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
640 struct ieee80211_hdr *hdr, 652 struct ieee80211_hdr *hdr,
641 const u8 *dest, int left); 653 const u8 *dest, int left);
642extern int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, 654extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
643 struct iwl4965_rx_queue *q); 655int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
644extern void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, 656
645 u32 decrypt_res,
646 struct ieee80211_rx_status *stats);
647extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr);
648int iwl4965_init_geos(struct iwl_priv *priv); 657int iwl4965_init_geos(struct iwl_priv *priv);
649void iwl4965_free_geos(struct iwl_priv *priv); 658void iwl4965_free_geos(struct iwl_priv *priv);
650 659
651extern const u8 iwl4965_broadcast_addr[ETH_ALEN]; 660extern const u8 iwl_bcast_addr[ETH_ALEN];
652int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 661int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
653 662
654/* 663/*
@@ -674,50 +683,59 @@ extern u8 iwl4965_sync_station(struct iwl_priv *priv, int sta_id,
674 * iwl4965_mac_ <-- mac80211 callback 683 * iwl4965_mac_ <-- mac80211 callback
675 * 684 *
676 ****************************************************************************/ 685 ****************************************************************************/
677extern void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv);
678extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv); 686extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv);
679extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv); 687extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv);
680extern int iwl4965_hw_rxq_stop(struct iwl_priv *priv);
681extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv); 688extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv);
682extern int iwl4965_hw_nic_init(struct iwl_priv *priv); 689extern int iwl_rxq_stop(struct iwl_priv *priv);
683extern int iwl4965_hw_nic_stop_master(struct iwl_priv *priv); 690extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
684extern void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
685extern void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv);
686extern int iwl4965_hw_nic_reset(struct iwl_priv *priv);
687extern int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
688 dma_addr_t addr, u16 len);
689extern int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
690extern int iwl4965_hw_get_temperature(struct iwl_priv *priv); 691extern int iwl4965_hw_get_temperature(struct iwl_priv *priv);
691extern int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
692 struct iwl4965_tx_queue *txq);
693extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 692extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
694 struct iwl4965_frame *frame, u8 rate); 693 struct iwl_frame *frame, u8 rate);
695extern int iwl4965_hw_get_rx_read(struct iwl_priv *priv);
696extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv, 694extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
697 struct iwl_cmd *cmd, 695 struct iwl_cmd *cmd,
698 struct ieee80211_tx_control *ctrl, 696 struct ieee80211_tx_info *info,
699 struct ieee80211_hdr *hdr, 697 struct ieee80211_hdr *hdr,
700 int sta_id, int tx_id); 698 int sta_id, int tx_id);
701extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv); 699extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv);
702extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); 700extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
703extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv, 701extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
704 struct iwl4965_rx_mem_buffer *rxb); 702 struct iwl_rx_mem_buffer *rxb);
705extern void iwl4965_disable_events(struct iwl_priv *priv); 703extern void iwl4965_disable_events(struct iwl_priv *priv);
706extern int iwl4965_get_temperature(const struct iwl_priv *priv); 704extern int iwl4965_get_temperature(const struct iwl_priv *priv);
705extern void iwl4965_rx_reply_rx(struct iwl_priv *priv,
706 struct iwl_rx_mem_buffer *rxb);
707 707
708/** 708/**
709 * iwl4965_hw_find_station - Find station id for a given BSSID 709 * iwl_find_station - Find station id for a given BSSID
710 * @bssid: MAC address of station ID to find 710 * @bssid: MAC address of station ID to find
711 * 711 *
712 * NOTE: This should not be hardware specific but the code has 712 * NOTE: This should not be hardware specific but the code has
713 * not yet been merged into a single common layer for managing the 713 * not yet been merged into a single common layer for managing the
714 * station tables. 714 * station tables.
715 */ 715 */
716extern u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 716extern u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
717 717
718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel); 718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
719extern int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); 719extern int iwl_queue_space(const struct iwl_queue *q);
720extern int iwl4965_queue_space(const struct iwl4965_queue *q); 720static inline int iwl_queue_used(const struct iwl_queue *q, int i)
721{
722 return q->write_ptr > q->read_ptr ?
723 (i >= q->read_ptr && i < q->write_ptr) :
724 !(i < q->read_ptr && i >= q->write_ptr);
725}
726
727
728static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
729{
730 /* This is for scan command, the big buffer at end of command array */
731 if (is_huge)
732 return q->n_window; /* must be power of 2 */
733
734 /* Otherwise, use normal size buffers */
735 return index & (q->n_window - 1);
736}
737
738
721struct iwl_priv; 739struct iwl_priv;
722 740
723extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio); 741extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
@@ -725,45 +743,37 @@ extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
725 * Forward declare iwl-4965.c functions for iwl-base.c 743 * Forward declare iwl-4965.c functions for iwl-base.c
726 */ 744 */
727extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, 745extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
728 struct iwl4965_tx_queue *txq, 746 struct iwl_tx_queue *txq,
729 u16 byte_cnt); 747 u16 byte_cnt);
730extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr,
731 int is_ap);
732extern void iwl4965_set_rxon_chain(struct iwl_priv *priv);
733extern int iwl4965_alive_notify(struct iwl_priv *priv); 748extern int iwl4965_alive_notify(struct iwl_priv *priv);
734extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode); 749extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode);
735extern void iwl4965_chain_noise_reset(struct iwl_priv *priv);
736extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags,
737 u8 force);
738extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); 750extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
739extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, 751extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv,
740 u32 rate_n_flags, 752 u32 rate_n_flags,
741 struct ieee80211_tx_control *control); 753 struct ieee80211_tx_info *info);
742 754
743#ifdef CONFIG_IWL4965_HT 755#ifdef CONFIG_IWL4965_HT
744void iwl4965_init_ht_hw_capab(struct iwl_priv *priv, 756extern void iwl4965_init_ht_hw_capab(const struct iwl_priv *priv,
745 struct ieee80211_ht_info *ht_info, 757 struct ieee80211_ht_info *ht_info,
746 enum ieee80211_band band); 758 enum ieee80211_band band);
747void iwl4965_set_rxon_ht(struct iwl_priv *priv, 759void iwl4965_set_rxon_ht(struct iwl_priv *priv,
748 struct iwl_ht_info *ht_info); 760 struct iwl_ht_info *ht_info);
749void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
750 struct ieee80211_ht_info *sta_ht_inf);
751int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 761int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
752 enum ieee80211_ampdu_mlme_action action, 762 enum ieee80211_ampdu_mlme_action action,
753 const u8 *addr, u16 tid, u16 *ssn); 763 const u8 *addr, u16 tid, u16 *ssn);
754int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, 764int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
755 u8 tid, int txq_id); 765 u8 tid, int txq_id);
756#else 766#else
757static inline void iwl4965_init_ht_hw_capab(struct iwl_priv *priv, 767static inline void iwl4965_init_ht_hw_capab(const struct iwl_priv *priv,
758 struct ieee80211_ht_info *ht_info, 768 struct ieee80211_ht_info *ht_info,
759 enum ieee80211_band band) {} 769 enum ieee80211_band band) {}
760 770
761#endif /*CONFIG_IWL4965_HT */ 771#endif /*CONFIG_IWL4965_HT */
762/* Structures, enum, and defines specific to the 4965 */ 772/* Structures, enum, and defines specific to the 4965 */
763 773
764#define IWL4965_KW_SIZE 0x1000 /*4k */ 774#define IWL_KW_SIZE 0x1000 /*4k */
765 775
766struct iwl4965_kw { 776struct iwl_kw {
767 dma_addr_t dma_addr; 777 dma_addr_t dma_addr;
768 void *v_addr; 778 void *v_addr;
769 size_t size; 779 size_t size;
@@ -787,8 +797,8 @@ struct iwl4965_kw {
787#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2 797#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2
788#define IWL_EXT_CHANNEL_OFFSET_BELOW 3 798#define IWL_EXT_CHANNEL_OFFSET_BELOW 3
789 799
790#define NRG_NUM_PREV_STAT_L 20 800#define IWL_TX_CRC_SIZE 4
791#define NUM_RX_CHAINS (3) 801#define IWL_TX_DELIMITER_SIZE 4
792 802
793#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 803#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
794 804
@@ -818,23 +828,8 @@ struct iwl4965_lq_mngr {
818#define MAX_FA_CCK 50 828#define MAX_FA_CCK 50
819#define MIN_FA_CCK 5 829#define MIN_FA_CCK 5
820 830
821#define NRG_MIN_CCK 97
822#define NRG_MAX_CCK 0
823
824#define AUTO_CORR_MIN_OFDM 85
825#define AUTO_CORR_MIN_OFDM_MRC 170
826#define AUTO_CORR_MIN_OFDM_X1 105
827#define AUTO_CORR_MIN_OFDM_MRC_X1 220
828#define AUTO_CORR_MAX_OFDM 120
829#define AUTO_CORR_MAX_OFDM_MRC 210
830#define AUTO_CORR_MAX_OFDM_X1 140
831#define AUTO_CORR_MAX_OFDM_MRC_X1 270
832#define AUTO_CORR_STEP_OFDM 1 831#define AUTO_CORR_STEP_OFDM 1
833 832
834#define AUTO_CORR_MIN_CCK (125)
835#define AUTO_CORR_MAX_CCK (200)
836#define AUTO_CORR_MIN_CCK_MRC 200
837#define AUTO_CORR_MAX_CCK_MRC 400
838#define AUTO_CORR_STEP_CCK 3 833#define AUTO_CORR_STEP_CCK 3
839#define AUTO_CORR_MAX_TH_CCK 160 834#define AUTO_CORR_MAX_TH_CCK 160
840 835
@@ -853,6 +848,9 @@ struct iwl4965_lq_mngr {
853#define IN_BAND_FILTER 0xFF 848#define IN_BAND_FILTER 0xFF
854#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF 849#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
855 850
851#define NRG_NUM_PREV_STAT_L 20
852#define NUM_RX_CHAINS 3
853
856enum iwl4965_false_alarm_state { 854enum iwl4965_false_alarm_state {
857 IWL_FA_TOO_MANY = 0, 855 IWL_FA_TOO_MANY = 0,
858 IWL_FA_TOO_FEW = 1, 856 IWL_FA_TOO_FEW = 1,
@@ -865,11 +863,6 @@ enum iwl4965_chain_noise_state {
865 IWL_CHAIN_NOISE_CALIBRATED = 2, 863 IWL_CHAIN_NOISE_CALIBRATED = 2,
866}; 864};
867 865
868enum iwl4965_sensitivity_state {
869 IWL_SENS_CALIB_ALLOWED = 0,
870 IWL_SENS_CALIB_NEED_REINIT = 1,
871};
872
873enum iwl4965_calib_enabled_state { 866enum iwl4965_calib_enabled_state {
874 IWL_CALIB_DISABLED = 0, /* must be 0 */ 867 IWL_CALIB_DISABLED = 0, /* must be 0 */
875 IWL_CALIB_ENABLED = 1, 868 IWL_CALIB_ENABLED = 1,
@@ -884,8 +877,24 @@ struct statistics_general_data {
884 u32 beacon_energy_c; 877 u32 beacon_energy_c;
885}; 878};
886 879
880struct iwl_calib_results {
881 void *tx_iq_res;
882 void *tx_iq_perd_res;
883 void *lo_res;
884 u32 tx_iq_res_len;
885 u32 tx_iq_perd_res_len;
886 u32 lo_res_len;
887};
888
889enum ucode_type {
890 UCODE_NONE = 0,
891 UCODE_INIT,
892 UCODE_RT
893};
894
895#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
887/* Sensitivity calib data */ 896/* Sensitivity calib data */
888struct iwl4965_sensitivity_data { 897struct iwl_sensitivity_data {
889 u32 auto_corr_ofdm; 898 u32 auto_corr_ofdm;
890 u32 auto_corr_ofdm_mrc; 899 u32 auto_corr_ofdm_mrc;
891 u32 auto_corr_ofdm_x1; 900 u32 auto_corr_ofdm_x1;
@@ -909,12 +918,10 @@ struct iwl4965_sensitivity_data {
909 s32 nrg_auto_corr_silence_diff; 918 s32 nrg_auto_corr_silence_diff;
910 u32 num_in_cck_no_fa; 919 u32 num_in_cck_no_fa;
911 u32 nrg_th_ofdm; 920 u32 nrg_th_ofdm;
912
913 u8 state;
914}; 921};
915 922
916/* Chain noise (differential Rx gain) calib data */ 923/* Chain noise (differential Rx gain) calib data */
917struct iwl4965_chain_noise_data { 924struct iwl_chain_noise_data {
918 u8 state; 925 u8 state;
919 u16 beacon_count; 926 u16 beacon_count;
920 u32 chain_noise_a; 927 u32 chain_noise_a;
@@ -927,6 +934,7 @@ struct iwl4965_chain_noise_data {
927 u8 delta_gain_code[NUM_RX_CHAINS]; 934 u8 delta_gain_code[NUM_RX_CHAINS];
928 u8 radio_write; 935 u8 radio_write;
929}; 936};
937#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
930 938
931#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ 939#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
932#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ 940#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
@@ -960,7 +968,7 @@ struct iwl_priv {
960 bool add_radiotap; 968 bool add_radiotap;
961 969
962 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 970 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
963 struct iwl4965_rx_mem_buffer *rxb); 971 struct iwl_rx_mem_buffer *rxb);
964 972
965 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 973 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
966 974
@@ -985,6 +993,9 @@ struct iwl_priv {
985 s32 temperature; /* degrees Kelvin */ 993 s32 temperature; /* degrees Kelvin */
986 s32 last_temperature; 994 s32 last_temperature;
987 995
996 /* init calibration results */
997 struct iwl_calib_results calib_results;
998
988 /* Scan related variables */ 999 /* Scan related variables */
989 unsigned long last_scan_jiffies; 1000 unsigned long last_scan_jiffies;
990 unsigned long next_scan_jiffies; 1001 unsigned long next_scan_jiffies;
@@ -1007,6 +1018,9 @@ struct iwl_priv {
1007 1018
1008 /* pci hardware address support */ 1019 /* pci hardware address support */
1009 void __iomem *hw_base; 1020 void __iomem *hw_base;
1021 u32 hw_rev;
1022 u32 hw_wa_rev;
1023 u8 rev_id;
1010 1024
1011 /* uCode images, save to reload in case of failure */ 1025 /* uCode images, save to reload in case of failure */
1012 struct fw_desc ucode_code; /* runtime inst */ 1026 struct fw_desc ucode_code; /* runtime inst */
@@ -1015,6 +1029,8 @@ struct iwl_priv {
1015 struct fw_desc ucode_init; /* initialization inst */ 1029 struct fw_desc ucode_init; /* initialization inst */
1016 struct fw_desc ucode_init_data; /* initialization data */ 1030 struct fw_desc ucode_init_data; /* initialization data */
1017 struct fw_desc ucode_boot; /* bootstrap inst */ 1031 struct fw_desc ucode_boot; /* bootstrap inst */
1032 enum ucode_type ucode_type;
1033 u8 ucode_write_complete; /* the image write is complete */
1018 1034
1019 1035
1020 struct iwl4965_rxon_time_cmd rxon_timing; 1036 struct iwl4965_rxon_time_cmd rxon_timing;
@@ -1023,16 +1039,16 @@ struct iwl_priv {
1023 * changed via explicit cast within the 1039 * changed via explicit cast within the
1024 * routines that actually update the physical 1040 * routines that actually update the physical
1025 * hardware */ 1041 * hardware */
1026 const struct iwl4965_rxon_cmd active_rxon; 1042 const struct iwl_rxon_cmd active_rxon;
1027 struct iwl4965_rxon_cmd staging_rxon; 1043 struct iwl_rxon_cmd staging_rxon;
1028 1044
1029 int error_recovering; 1045 int error_recovering;
1030 struct iwl4965_rxon_cmd recovery_rxon; 1046 struct iwl_rxon_cmd recovery_rxon;
1031 1047
1032 /* 1st responses from initialize and runtime uCode images. 1048 /* 1st responses from initialize and runtime uCode images.
1033 * 4965's initialize alive response contains some calibration data. */ 1049 * 4965's initialize alive response contains some calibration data. */
1034 struct iwl4965_init_alive_resp card_alive_init; 1050 struct iwl_init_alive_resp card_alive_init;
1035 struct iwl4965_alive_resp card_alive; 1051 struct iwl_alive_resp card_alive;
1036#ifdef CONFIG_IWLWIFI_RFKILL 1052#ifdef CONFIG_IWLWIFI_RFKILL
1037 struct iwl_rfkill_mngr rfkill_mngr; 1053 struct iwl_rfkill_mngr rfkill_mngr;
1038#endif 1054#endif
@@ -1050,13 +1066,12 @@ struct iwl_priv {
1050 1066
1051 u8 assoc_station_added; 1067 u8 assoc_station_added;
1052 u8 use_ant_b_for_management_frame; /* Tx antenna selection */ 1068 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
1053 u8 valid_antenna; /* Bit mask of antennas actually connected */
1054#ifdef CONFIG_IWL4965_SENSITIVITY
1055 struct iwl4965_sensitivity_data sensitivity_data;
1056 struct iwl4965_chain_noise_data chain_noise_data;
1057 u8 start_calib; 1069 u8 start_calib;
1070#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
1071 struct iwl_sensitivity_data sensitivity_data;
1072 struct iwl_chain_noise_data chain_noise_data;
1058 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1073 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1059#endif /*CONFIG_IWL4965_SENSITIVITY*/ 1074#endif /*CONFIG_IWLWIFI_RUN_TIME_CALIB*/
1060 1075
1061#ifdef CONFIG_IWL4965_HT 1076#ifdef CONFIG_IWL4965_HT
1062 struct iwl_ht_info current_ht_config; 1077 struct iwl_ht_info current_ht_config;
@@ -1075,10 +1090,10 @@ struct iwl_priv {
1075 int activity_timer_active; 1090 int activity_timer_active;
1076 1091
1077 /* Rx and Tx DMA processing queues */ 1092 /* Rx and Tx DMA processing queues */
1078 struct iwl4965_rx_queue rxq; 1093 struct iwl_rx_queue rxq;
1079 struct iwl4965_tx_queue txq[IWL_MAX_NUM_QUEUES]; 1094 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
1080 unsigned long txq_ctx_active_msk; 1095 unsigned long txq_ctx_active_msk;
1081 struct iwl4965_kw kw; /* keep warm address */ 1096 struct iwl_kw kw; /* keep warm address */
1082 u32 scd_base_addr; /* scheduler sram base address */ 1097 u32 scd_base_addr; /* scheduler sram base address */
1083 1098
1084 unsigned long status; 1099 unsigned long status;
@@ -1092,7 +1107,7 @@ struct iwl_priv {
1092 u64 bytes; 1107 u64 bytes;
1093 } tx_stats[3], rx_stats[3]; 1108 } tx_stats[3], rx_stats[3];
1094 1109
1095 struct iwl4965_power_mgr power_data; 1110 struct iwl_power_mgr power_data;
1096 1111
1097 struct iwl4965_notif_statistics statistics; 1112 struct iwl4965_notif_statistics statistics;
1098 unsigned long last_statistics_time; 1113 unsigned long last_statistics_time;
@@ -1111,7 +1126,7 @@ struct iwl_priv {
1111 /*station table variables */ 1126 /*station table variables */
1112 spinlock_t sta_lock; 1127 spinlock_t sta_lock;
1113 int num_stations; 1128 int num_stations;
1114 struct iwl4965_station_entry stations[IWL_STATION_COUNT]; 1129 struct iwl_station_entry stations[IWL_STATION_COUNT];
1115 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; 1130 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1116 u8 default_wep_key; 1131 u8 default_wep_key;
1117 u8 key_mapping_key; 1132 u8 key_mapping_key;
@@ -1122,8 +1137,6 @@ struct iwl_priv {
1122 1137
1123 u8 mac80211_registered; 1138 u8 mac80211_registered;
1124 1139
1125 u32 notif_missed_beacons;
1126
1127 /* Rx'd packet timing information */ 1140 /* Rx'd packet timing information */
1128 u32 last_beacon_time; 1141 u32 last_beacon_time;
1129 u64 last_tsf; 1142 u64 last_tsf;
@@ -1137,7 +1150,8 @@ struct iwl_priv {
1137 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE]; 1150 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE];
1138 1151
1139 /* eeprom */ 1152 /* eeprom */
1140 struct iwl4965_eeprom eeprom; 1153 u8 *eeprom;
1154 struct iwl_eeprom_calib_info *calib_info;
1141 1155
1142 enum ieee80211_if_types iw_mode; 1156 enum ieee80211_if_types iw_mode;
1143 1157
@@ -1151,6 +1165,7 @@ struct iwl_priv {
1151 struct iwl_hw_params hw_params; 1165 struct iwl_hw_params hw_params;
1152 /* driver/uCode shared Tx Byte Counts and Rx status */ 1166 /* driver/uCode shared Tx Byte Counts and Rx status */
1153 void *shared_virt; 1167 void *shared_virt;
1168 int rb_closed_offset;
1154 /* Physical Pointer to Tx Byte Counts and Rx status */ 1169 /* Physical Pointer to Tx Byte Counts and Rx status */
1155 dma_addr_t shared_phys; 1170 dma_addr_t shared_phys;
1156 1171
@@ -1176,6 +1191,7 @@ struct iwl_priv {
1176 struct work_struct report_work; 1191 struct work_struct report_work;
1177 struct work_struct request_scan; 1192 struct work_struct request_scan;
1178 struct work_struct beacon_update; 1193 struct work_struct beacon_update;
1194 struct work_struct set_monitor;
1179 1195
1180 struct tasklet_struct irq_tasklet; 1196 struct tasklet_struct irq_tasklet;
1181 1197
@@ -1197,6 +1213,7 @@ struct iwl_priv {
1197 1213
1198#ifdef CONFIG_IWLWIFI_DEBUG 1214#ifdef CONFIG_IWLWIFI_DEBUG
1199 /* debugging info */ 1215 /* debugging info */
1216 u32 debug_level;
1200 u32 framecnt_to_us; 1217 u32 framecnt_to_us;
1201 atomic_t restrict_refcnt; 1218 atomic_t restrict_refcnt;
1202#ifdef CONFIG_IWLWIFI_DEBUGFS 1219#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1206,12 +1223,56 @@ struct iwl_priv {
1206#endif /* CONFIG_IWLWIFI_DEBUG */ 1223#endif /* CONFIG_IWLWIFI_DEBUG */
1207 1224
1208 struct work_struct txpower_work; 1225 struct work_struct txpower_work;
1209#ifdef CONFIG_IWL4965_SENSITIVITY 1226#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
1227 u32 disable_sens_cal;
1228 u32 disable_chain_noise_cal;
1229#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
1230#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
1210 struct work_struct sensitivity_work; 1231 struct work_struct sensitivity_work;
1211#endif 1232#endif /* CONFIG_IWL4965_RUN_TIME_CALIB */
1212 struct timer_list statistics_periodic; 1233 struct timer_list statistics_periodic;
1213}; /*iwl_priv */ 1234}; /*iwl_priv */
1214 1235
1236static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1237{
1238 set_bit(txq_id, &priv->txq_ctx_active_msk);
1239}
1240
1241static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1242{
1243 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1244}
1245
1246#ifdef CONFIG_IWLWIF_DEBUG
1247const char *iwl_get_tx_fail_reason(u32 status);
1248#else
1249static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
1250#endif
1251
1252
1253#ifdef CONFIG_IWL4965_HT
1254static inline int iwl_get_ra_sta_id(struct iwl_priv *priv,
1255 struct ieee80211_hdr *hdr)
1256{
1257 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
1258 return IWL_AP_ID;
1259 } else {
1260 u8 *da = ieee80211_get_DA(hdr);
1261 return iwl_find_station(priv, da);
1262 }
1263}
1264
1265static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1266 int txq_id, int idx)
1267{
1268 if (priv->txq[txq_id].txb[idx].skb[0])
1269 return (struct ieee80211_hdr *)priv->txq[txq_id].
1270 txb[idx].skb[0]->data;
1271 return NULL;
1272}
1273#endif
1274
1275
1215static inline int iwl_is_associated(struct iwl_priv *priv) 1276static inline int iwl_is_associated(struct iwl_priv *priv)
1216{ 1277{
1217 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1278 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
@@ -1224,11 +1285,6 @@ static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1224 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 1285 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1225} 1286}
1226 1287
1227static inline int is_channel_narrow(const struct iwl_channel_info *ch_info)
1228{
1229 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
1230}
1231
1232static inline int is_channel_radar(const struct iwl_channel_info *ch_info) 1288static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1233{ 1289{
1234 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 1290 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
@@ -1254,6 +1310,23 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1254 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1310 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1255} 1311}
1256 1312
1313#ifdef CONFIG_IWLWIFI_DEBUG
1314static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1315 void *p, u32 len)
1316{
1317 if (!(priv->debug_level & level))
1318 return;
1319
1320 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
1321 p, len, 1);
1322}
1323#else
1324static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1325 void *p, u32 len)
1326{
1327}
1328#endif
1329
1257extern const struct iwl_channel_info *iwl_get_channel_info( 1330extern const struct iwl_channel_info *iwl_get_channel_info(
1258 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); 1331 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
1259 1332
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a07d5dcb7abc..11f9d9557a0e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,8 +68,8 @@
68 68
69#include <net/mac80211.h> 69#include <net/mac80211.h>
70 70
71#include "iwl-4965-commands.h" 71#include "iwl-commands.h"
72#include "iwl-4965.h" 72#include "iwl-dev.h"
73#include "iwl-core.h" 73#include "iwl-core.h"
74#include "iwl-debug.h" 74#include "iwl-debug.h"
75#include "iwl-eeprom.h" 75#include "iwl-eeprom.h"
@@ -193,6 +193,12 @@ void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
193} 193}
194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore); 194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
195 195
196const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
197{
198 BUG_ON(offset >= priv->cfg->eeprom_size);
199 return &priv->eeprom[offset];
200}
201EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
196 202
197/** 203/**
198 * iwl_eeprom_init - read EEPROM contents 204 * iwl_eeprom_init - read EEPROM contents
@@ -203,30 +209,35 @@ EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
203 */ 209 */
204int iwl_eeprom_init(struct iwl_priv *priv) 210int iwl_eeprom_init(struct iwl_priv *priv)
205{ 211{
206 u16 *e = (u16 *)&priv->eeprom; 212 u16 *e;
207 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 213 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
208 u32 r; 214 u32 r;
209 int sz = sizeof(priv->eeprom); 215 int sz = priv->cfg->eeprom_size;
210 int ret; 216 int ret;
211 int i; 217 int i;
212 u16 addr; 218 u16 addr;
213 219
214 /* The EEPROM structure has several padding buffers within it 220 /* allocate eeprom */
215 * and when adding new EEPROM maps is subject to programmer errors 221 priv->eeprom = kzalloc(sz, GFP_KERNEL);
216 * which may be very difficult to identify without explicitly 222 if (!priv->eeprom) {
217 * checking the resulting size of the eeprom map. */ 223 ret = -ENOMEM;
218 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); 224 goto alloc_err;
225 }
226 e = (u16 *)priv->eeprom;
219 227
220 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 228 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
229 if (ret < 0) {
221 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 230 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
222 return -ENOENT; 231 ret = -ENOENT;
232 goto err;
223 } 233 }
224 234
225 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 235 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
226 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); 236 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
227 if (ret < 0) { 237 if (ret < 0) {
228 IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); 238 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
229 return -ENOENT; 239 ret = -ENOENT;
240 goto err;
230 } 241 }
231 242
232 /* eeprom is an array of 16bit values */ 243 /* eeprom is an array of 16bit values */
@@ -250,61 +261,98 @@ int iwl_eeprom_init(struct iwl_priv *priv)
250 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 261 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
251 } 262 }
252 ret = 0; 263 ret = 0;
253
254done: 264done:
255 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); 265 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
266err:
267 if (ret)
268 kfree(priv->eeprom);
269alloc_err:
256 return ret; 270 return ret;
257} 271}
258EXPORT_SYMBOL(iwl_eeprom_init); 272EXPORT_SYMBOL(iwl_eeprom_init);
259 273
274void iwl_eeprom_free(struct iwl_priv *priv)
275{
276 if(priv->eeprom)
277 kfree(priv->eeprom);
278 priv->eeprom = NULL;
279}
280EXPORT_SYMBOL(iwl_eeprom_free);
281
282int iwl_eeprom_check_version(struct iwl_priv *priv)
283{
284 return priv->cfg->ops->lib->eeprom_ops.check_version(priv);
285}
286EXPORT_SYMBOL(iwl_eeprom_check_version);
287
288const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
289{
290 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
291}
292EXPORT_SYMBOL(iwl_eeprom_query_addr);
293
294u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
295{
296 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
297}
298EXPORT_SYMBOL(iwl_eeprom_query16);
260 299
261void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 300void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
262{ 301{
263 memcpy(mac, priv->eeprom.mac_address, 6); 302 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
303 EEPROM_MAC_ADDRESS);
304 memcpy(mac, addr, ETH_ALEN);
264} 305}
265EXPORT_SYMBOL(iwl_eeprom_get_mac); 306EXPORT_SYMBOL(iwl_eeprom_get_mac);
266 307
267static void iwl_init_band_reference(const struct iwl_priv *priv, 308static void iwl_init_band_reference(const struct iwl_priv *priv,
268 int band, 309 int eep_band, int *eeprom_ch_count,
269 int *eeprom_ch_count, 310 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const struct iwl4965_eeprom_channel 311 const u8 **eeprom_ch_index)
271 **eeprom_ch_info,
272 const u8 **eeprom_ch_index)
273{ 312{
274 switch (band) { 313 u32 offset = priv->cfg->ops->lib->
314 eeprom_ops.regulatory_bands[eep_band - 1];
315 switch (eep_band) {
275 case 1: /* 2.4GHz band */ 316 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 317 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
277 *eeprom_ch_info = priv->eeprom.band_1_channels; 318 *eeprom_ch_info = (struct iwl_eeprom_channel *)
319 iwl_eeprom_query_addr(priv, offset);
278 *eeprom_ch_index = iwl_eeprom_band_1; 320 *eeprom_ch_index = iwl_eeprom_band_1;
279 break; 321 break;
280 case 2: /* 4.9GHz band */ 322 case 2: /* 4.9GHz band */
281 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 323 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
282 *eeprom_ch_info = priv->eeprom.band_2_channels; 324 *eeprom_ch_info = (struct iwl_eeprom_channel *)
325 iwl_eeprom_query_addr(priv, offset);
283 *eeprom_ch_index = iwl_eeprom_band_2; 326 *eeprom_ch_index = iwl_eeprom_band_2;
284 break; 327 break;
285 case 3: /* 5.2GHz band */ 328 case 3: /* 5.2GHz band */
286 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 329 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
287 *eeprom_ch_info = priv->eeprom.band_3_channels; 330 *eeprom_ch_info = (struct iwl_eeprom_channel *)
331 iwl_eeprom_query_addr(priv, offset);
288 *eeprom_ch_index = iwl_eeprom_band_3; 332 *eeprom_ch_index = iwl_eeprom_band_3;
289 break; 333 break;
290 case 4: /* 5.5GHz band */ 334 case 4: /* 5.5GHz band */
291 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 335 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
292 *eeprom_ch_info = priv->eeprom.band_4_channels; 336 *eeprom_ch_info = (struct iwl_eeprom_channel *)
337 iwl_eeprom_query_addr(priv, offset);
293 *eeprom_ch_index = iwl_eeprom_band_4; 338 *eeprom_ch_index = iwl_eeprom_band_4;
294 break; 339 break;
295 case 5: /* 5.7GHz band */ 340 case 5: /* 5.7GHz band */
296 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 341 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
297 *eeprom_ch_info = priv->eeprom.band_5_channels; 342 *eeprom_ch_info = (struct iwl_eeprom_channel *)
343 iwl_eeprom_query_addr(priv, offset);
298 *eeprom_ch_index = iwl_eeprom_band_5; 344 *eeprom_ch_index = iwl_eeprom_band_5;
299 break; 345 break;
300 case 6: /* 2.4GHz FAT channels */ 346 case 6: /* 2.4GHz FAT channels */
301 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 347 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
302 *eeprom_ch_info = priv->eeprom.band_24_channels; 348 *eeprom_ch_info = (struct iwl_eeprom_channel *)
349 iwl_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwl_eeprom_band_6; 350 *eeprom_ch_index = iwl_eeprom_band_6;
304 break; 351 break;
305 case 7: /* 5 GHz FAT channels */ 352 case 7: /* 5 GHz FAT channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 353 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
307 *eeprom_ch_info = priv->eeprom.band_52_channels; 354 *eeprom_ch_info = (struct iwl_eeprom_channel *)
355 iwl_eeprom_query_addr(priv, offset);
308 *eeprom_ch_index = iwl_eeprom_band_7; 356 *eeprom_ch_index = iwl_eeprom_band_7;
309 break; 357 break;
310 default: 358 default:
@@ -317,13 +365,13 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
317 ? # x " " : "") 365 ? # x " " : "")
318 366
319/** 367/**
320 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv. 368 * iwl_set_fat_chan_info - Copy fat channel info into driver's priv.
321 * 369 *
322 * Does not set up a command, or touch hardware. 370 * Does not set up a command, or touch hardware.
323 */ 371 */
324static int iwl4965_set_fat_chan_info(struct iwl_priv *priv, 372static int iwl_set_fat_chan_info(struct iwl_priv *priv,
325 enum ieee80211_band band, u16 channel, 373 enum ieee80211_band band, u16 channel,
326 const struct iwl4965_eeprom_channel *eeprom_ch, 374 const struct iwl_eeprom_channel *eeprom_ch,
327 u8 fat_extension_channel) 375 u8 fat_extension_channel)
328{ 376{
329 struct iwl_channel_info *ch_info; 377 struct iwl_channel_info *ch_info;
@@ -334,7 +382,7 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
334 if (!is_channel_valid(ch_info)) 382 if (!is_channel_valid(ch_info))
335 return -1; 383 return -1;
336 384
337 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" 385 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s(0x%02x"
338 " %ddBm): Ad-Hoc %ssupported\n", 386 " %ddBm): Ad-Hoc %ssupported\n",
339 ch_info->channel, 387 ch_info->channel,
340 is_channel_a_band(ch_info) ? 388 is_channel_a_band(ch_info) ?
@@ -343,7 +391,6 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
343 CHECK_AND_PRINT(ACTIVE), 391 CHECK_AND_PRINT(ACTIVE),
344 CHECK_AND_PRINT(RADAR), 392 CHECK_AND_PRINT(RADAR),
345 CHECK_AND_PRINT(WIDE), 393 CHECK_AND_PRINT(WIDE),
346 CHECK_AND_PRINT(NARROW),
347 CHECK_AND_PRINT(DFS), 394 CHECK_AND_PRINT(DFS),
348 eeprom_ch->flags, 395 eeprom_ch->flags,
349 eeprom_ch->max_power_avg, 396 eeprom_ch->max_power_avg,
@@ -372,7 +419,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
372{ 419{
373 int eeprom_ch_count = 0; 420 int eeprom_ch_count = 0;
374 const u8 *eeprom_ch_index = NULL; 421 const u8 *eeprom_ch_index = NULL;
375 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL; 422 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
376 int band, ch; 423 int band, ch;
377 struct iwl_channel_info *ch_info; 424 struct iwl_channel_info *ch_info;
378 425
@@ -381,12 +428,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
381 return 0; 428 return 0;
382 } 429 }
383 430
384 if (priv->eeprom.version < 0x2f) {
385 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
386 priv->eeprom.version);
387 return -EINVAL;
388 }
389
390 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); 431 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
391 432
392 priv->channel_count = 433 priv->channel_count =
@@ -447,7 +488,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
447 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 488 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
448 ch_info->min_power = 0; 489 ch_info->min_power = 0;
449 490
450 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 491 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
451 " %ddBm): Ad-Hoc %ssupported\n", 492 " %ddBm): Ad-Hoc %ssupported\n",
452 ch_info->channel, 493 ch_info->channel,
453 is_channel_a_band(ch_info) ? 494 is_channel_a_band(ch_info) ?
@@ -457,7 +498,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
457 CHECK_AND_PRINT_I(ACTIVE), 498 CHECK_AND_PRINT_I(ACTIVE),
458 CHECK_AND_PRINT_I(RADAR), 499 CHECK_AND_PRINT_I(RADAR),
459 CHECK_AND_PRINT_I(WIDE), 500 CHECK_AND_PRINT_I(WIDE),
460 CHECK_AND_PRINT_I(NARROW),
461 CHECK_AND_PRINT_I(DFS), 501 CHECK_AND_PRINT_I(DFS),
462 eeprom_ch_info[ch].flags, 502 eeprom_ch_info[ch].flags,
463 eeprom_ch_info[ch].max_power_avg, 503 eeprom_ch_info[ch].max_power_avg,
@@ -502,16 +542,16 @@ int iwl_init_channel_map(struct iwl_priv *priv)
502 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; 542 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
503 543
504 /* Set up driver's info for lower half */ 544 /* Set up driver's info for lower half */
505 iwl4965_set_fat_chan_info(priv, ieeeband, 545 iwl_set_fat_chan_info(priv, ieeeband,
506 eeprom_ch_index[ch], 546 eeprom_ch_index[ch],
507 &(eeprom_ch_info[ch]), 547 &(eeprom_ch_info[ch]),
508 fat_extension_chan); 548 fat_extension_chan);
509 549
510 /* Set up driver's info for upper half */ 550 /* Set up driver's info for upper half */
511 iwl4965_set_fat_chan_info(priv, ieeeband, 551 iwl_set_fat_chan_info(priv, ieeeband,
512 (eeprom_ch_index[ch] + 4), 552 (eeprom_ch_index[ch] + 4),
513 &(eeprom_ch_info[ch]), 553 &(eeprom_ch_info[ch]),
514 HT_IE_EXT_CHANNEL_BELOW); 554 HT_IE_EXT_CHANNEL_BELOW);
515 } 555 }
516 } 556 }
517 557
@@ -520,23 +560,21 @@ int iwl_init_channel_map(struct iwl_priv *priv)
520EXPORT_SYMBOL(iwl_init_channel_map); 560EXPORT_SYMBOL(iwl_init_channel_map);
521 561
522/* 562/*
523 * iwl_free_channel_map - undo allocations in iwl4965_init_channel_map 563 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
524 */ 564 */
525void iwl_free_channel_map(struct iwl_priv *priv) 565void iwl_free_channel_map(struct iwl_priv *priv)
526{ 566{
527 kfree(priv->channel_info); 567 kfree(priv->channel_info);
528 priv->channel_count = 0; 568 priv->channel_count = 0;
529} 569}
530EXPORT_SYMBOL(iwl_free_channel_map);
531 570
532/** 571/**
533 * iwl_get_channel_info - Find driver's private channel info 572 * iwl_get_channel_info - Find driver's private channel info
534 * 573 *
535 * Based on band and channel number. 574 * Based on band and channel number.
536 */ 575 */
537const struct iwl_channel_info *iwl_get_channel_info( 576const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
538 const struct iwl_priv *priv, 577 enum ieee80211_band band, u16 channel)
539 enum ieee80211_band band, u16 channel)
540{ 578{
541 int i; 579 int i;
542 580
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index bd0a042ca77f..d3a2a5b4ac56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -106,7 +106,7 @@ enum {
106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
109 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 109 /* Bit 6 Reserved (was Narrow Channel) */
110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
111}; 111};
112 112
@@ -116,7 +116,7 @@ enum {
116 116
117/* *regulatory* channel data format in eeprom, one for each channel. 117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */ 118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl4965_eeprom_channel { 119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ 120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
@@ -131,17 +131,55 @@ struct iwl4965_eeprom_channel {
131 * each of 3 target output levels */ 131 * each of 3 target output levels */
132#define EEPROM_TX_POWER_MEASUREMENTS (3) 132#define EEPROM_TX_POWER_MEASUREMENTS (3)
133 133
134#define EEPROM_4965_TX_POWER_VERSION (2) 134/* 4965 Specific */
135/* 4965 driver does not work with txpower calibration version < 5 */
136#define EEPROM_4965_TX_POWER_VERSION (5)
137#define EEPROM_4965_EEPROM_VERSION (0x2f)
138#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
139#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
140#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
141#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
142
143/* 5000 Specific */
144#define EEPROM_5000_TX_POWER_VERSION (4)
145#define EEPROM_5000_EEPROM_VERSION (0x11A)
146
147/*5000 calibrations */
148#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
149#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
150
151/* 5000 links */
152#define EEPROM_5000_LINK_HOST (2*0x64)
153#define EEPROM_5000_LINK_GENERAL (2*0x65)
154#define EEPROM_5000_LINK_REGULATORY (2*0x66)
155#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
156#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
157#define EEPROM_5000_LINK_OTHERS (2*0x69)
158
159/* 5000 regulatory - indirect access */
160#define EEPROM_5000_REG_SKU_ID ((0x02)\
161 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
162#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
163 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
164#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
165 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
166#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
167 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
168#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
169 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
170#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
171 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
172#define EEPROM_5000_REG_BAND_24_FAT_CHANNELS ((0x82)\
173 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
174#define EEPROM_5000_REG_BAND_52_FAT_CHANNELS ((0x92)\
175 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
135 176
136/* 4965 driver does not work with txpower calibration version < 5.
137 * Look for this in calib_version member of struct iwl4965_eeprom. */
138#define EEPROM_TX_POWER_VERSION_NEW (5)
139 177
140/* 2.4 GHz */ 178/* 2.4 GHz */
141extern const u8 iwl_eeprom_band_1[14]; 179extern const u8 iwl_eeprom_band_1[14];
142 180
143/* 181/*
144 * 4965 factory calibration data for one txpower level, on one channel, 182 * factory calibration data for one txpower level, on one channel,
145 * measured on one of the 2 tx chains (radio transmitter and associated 183 * measured on one of the 2 tx chains (radio transmitter and associated
146 * antenna). EEPROM contains: 184 * antenna). EEPROM contains:
147 * 185 *
@@ -154,7 +192,7 @@ extern const u8 iwl_eeprom_band_1[14];
154 * 192 *
155 * 4) RF power amplifier detector level measurement (not used). 193 * 4) RF power amplifier detector level measurement (not used).
156 */ 194 */
157struct iwl4965_eeprom_calib_measure { 195struct iwl_eeprom_calib_measure {
158 u8 temperature; /* Device temperature (Celsius) */ 196 u8 temperature; /* Device temperature (Celsius) */
159 u8 gain_idx; /* Index into gain table */ 197 u8 gain_idx; /* Index into gain table */
160 u8 actual_pow; /* Measured RF output power, half-dBm */ 198 u8 actual_pow; /* Measured RF output power, half-dBm */
@@ -163,22 +201,22 @@ struct iwl4965_eeprom_calib_measure {
163 201
164 202
165/* 203/*
166 * 4965 measurement set for one channel. EEPROM contains: 204 * measurement set for one channel. EEPROM contains:
167 * 205 *
168 * 1) Channel number measured 206 * 1) Channel number measured
169 * 207 *
170 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters 208 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
171 * (a.k.a. "tx chains") (6 measurements altogether) 209 * (a.k.a. "tx chains") (6 measurements altogether)
172 */ 210 */
173struct iwl4965_eeprom_calib_ch_info { 211struct iwl_eeprom_calib_ch_info {
174 u8 ch_num; 212 u8 ch_num;
175 struct iwl4965_eeprom_calib_measure 213 struct iwl_eeprom_calib_measure
176 measurements[EEPROM_TX_POWER_TX_CHAINS] 214 measurements[EEPROM_TX_POWER_TX_CHAINS]
177 [EEPROM_TX_POWER_MEASUREMENTS]; 215 [EEPROM_TX_POWER_MEASUREMENTS];
178} __attribute__ ((packed)); 216} __attribute__ ((packed));
179 217
180/* 218/*
181 * 4965 txpower subband info. 219 * txpower subband info.
182 * 220 *
183 * For each frequency subband, EEPROM contains the following: 221 * For each frequency subband, EEPROM contains the following:
184 * 222 *
@@ -187,16 +225,16 @@ struct iwl4965_eeprom_calib_ch_info {
187 * 225 *
188 * 2) Sample measurement sets for 2 channels close to the range endpoints. 226 * 2) Sample measurement sets for 2 channels close to the range endpoints.
189 */ 227 */
190struct iwl4965_eeprom_calib_subband_info { 228struct iwl_eeprom_calib_subband_info {
191 u8 ch_from; /* channel number of lowest channel in subband */ 229 u8 ch_from; /* channel number of lowest channel in subband */
192 u8 ch_to; /* channel number of highest channel in subband */ 230 u8 ch_to; /* channel number of highest channel in subband */
193 struct iwl4965_eeprom_calib_ch_info ch1; 231 struct iwl_eeprom_calib_ch_info ch1;
194 struct iwl4965_eeprom_calib_ch_info ch2; 232 struct iwl_eeprom_calib_ch_info ch2;
195} __attribute__ ((packed)); 233} __attribute__ ((packed));
196 234
197 235
198/* 236/*
199 * 4965 txpower calibration info. EEPROM contains: 237 * txpower calibration info. EEPROM contains:
200 * 238 *
201 * 1) Factory-measured saturation power levels (maximum levels at which 239 * 1) Factory-measured saturation power levels (maximum levels at which
202 * tx power amplifier can output a signal without too much distortion). 240 * tx power amplifier can output a signal without too much distortion).
@@ -212,55 +250,58 @@ struct iwl4965_eeprom_calib_subband_info {
212 * characteristics of the analog radio circuitry vary with frequency. 250 * characteristics of the analog radio circuitry vary with frequency.
213 * 251 *
214 * Not all sets need to be filled with data; 252 * Not all sets need to be filled with data;
215 * struct iwl4965_eeprom_calib_subband_info contains range of channels 253 * struct iwl_eeprom_calib_subband_info contains range of channels
216 * (0 if unused) for each set of data. 254 * (0 if unused) for each set of data.
217 */ 255 */
218struct iwl4965_eeprom_calib_info { 256struct iwl_eeprom_calib_info {
219 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ 257 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
220 u8 saturation_power52; /* half-dBm */ 258 u8 saturation_power52; /* half-dBm */
221 s16 voltage; /* signed */ 259 s16 voltage; /* signed */
222 struct iwl4965_eeprom_calib_subband_info 260 struct iwl_eeprom_calib_subband_info
223 band_info[EEPROM_TX_POWER_BANDS]; 261 band_info[EEPROM_TX_POWER_BANDS];
224} __attribute__ ((packed)); 262} __attribute__ ((packed));
225 263
226 264
227 265#define ADDRESS_MSK 0x0000FFFF
228/* 266#define INDIRECT_TYPE_MSK 0x000F0000
229 * 4965 EEPROM map 267#define INDIRECT_HOST 0x00010000
230 */ 268#define INDIRECT_GENERAL 0x00020000
231struct iwl4965_eeprom { 269#define INDIRECT_REGULATORY 0x00030000
232 u8 reserved0[16]; 270#define INDIRECT_CALIBRATION 0x00040000
233 u16 device_id; /* abs.ofs: 16 */ 271#define INDIRECT_PROCESS_ADJST 0x00050000
234 u8 reserved1[2]; 272#define INDIRECT_OTHERS 0x00060000
235 u16 pmc; /* abs.ofs: 20 */ 273#define INDIRECT_ADDRESS 0x00100000
236 u8 reserved2[20]; 274
237 u8 mac_address[6]; /* abs.ofs: 42 */ 275/* General */
238 u8 reserved3[58]; 276#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
239 u16 board_revision; /* abs.ofs: 106 */ 277#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
240 u8 reserved4[11]; 278#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
241 u8 board_pba_number[9]; /* abs.ofs: 119 */ 279#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
242 u8 reserved5[8]; 280#define EEPROM_VERSION (2*0x44) /* 2 bytes */
243 u16 version; /* abs.ofs: 136 */ 281#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
244 u8 sku_cap; /* abs.ofs: 138 */ 282#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
245 u8 leds_mode; /* abs.ofs: 139 */ 283#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
246 u16 oem_mode; 284#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
247 u16 wowlan_mode; /* abs.ofs: 142 */ 285#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
248 u16 leds_time_interval; /* abs.ofs: 144 */ 286#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
249 u8 leds_off_time; /* abs.ofs: 146 */ 287#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
250 u8 leds_on_time; /* abs.ofs: 147 */ 288
251 u8 almgor_m_version; /* abs.ofs: 148 */ 289/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
252 u8 antenna_switch_type; /* abs.ofs: 149 */ 290#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
253 u8 reserved6[8]; 291#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
254 u16 board_revision_4965; /* abs.ofs: 158 */ 292#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
255 u8 reserved7[13]; 293#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
256 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */ 294#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
257 u8 reserved8[10]; 295#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
258 u8 sku_id[4]; /* abs.ofs: 192 */ 296
297#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
298#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
299#define EEPROM_5000_RF_CFG_TYPE_MAX 0x3
259 300
260/* 301/*
261 * Per-channel regulatory data. 302 * Per-channel regulatory data.
262 * 303 *
263 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 304 * Each channel that *might* be supported by iwl has a fixed location
264 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 305 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
265 * txpower (MSB). 306 * txpower (MSB).
266 * 307 *
@@ -269,40 +310,38 @@ struct iwl4965_eeprom {
269 * 310 *
270 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 311 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
271 */ 312 */
272 u16 band_1_count; /* abs.ofs: 196 */ 313#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
273 struct iwl4965_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ 314#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
315#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
274 316
275/* 317/*
276 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, 318 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
277 * 5.0 GHz channels 7, 8, 11, 12, 16 319 * 5.0 GHz channels 7, 8, 11, 12, 16
278 * (4915-5080MHz) (none of these is ever supported) 320 * (4915-5080MHz) (none of these is ever supported)
279 */ 321 */
280 u16 band_2_count; /* abs.ofs: 226 */ 322#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
281 struct iwl4965_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ 323#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
282 324
283/* 325/*
284 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 326 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
285 * (5170-5320MHz) 327 * (5170-5320MHz)
286 */ 328 */
287 u16 band_3_count; /* abs.ofs: 254 */ 329#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
288 struct iwl4965_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ 330#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
289 331
290/* 332/*
291 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 333 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
292 * (5500-5700MHz) 334 * (5500-5700MHz)
293 */ 335 */
294 u16 band_4_count; /* abs.ofs: 280 */ 336#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
295 struct iwl4965_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ 337#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
296 338
297/* 339/*
298 * 5.7 GHz channels 145, 149, 153, 157, 161, 165 340 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
299 * (5725-5825MHz) 341 * (5725-5825MHz)
300 */ 342 */
301 u16 band_5_count; /* abs.ofs: 304 */ 343#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
302 struct iwl4965_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ 344#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
303
304 u8 reserved10[2];
305
306 345
307/* 346/*
308 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) 347 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
@@ -319,52 +358,35 @@ struct iwl4965_eeprom {
319 * 358 *
320 * NOTE: 4965 does not support FAT channels on 2.4 GHz. 359 * NOTE: 4965 does not support FAT channels on 2.4 GHz.
321 */ 360 */
322 struct iwl4965_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */ 361#define EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */
323 u8 reserved11[2];
324 362
325/* 363/*
326 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64), 364 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64),
327 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) 365 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
328 */ 366 */
329 struct iwl4965_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */ 367#define EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
330 u8 reserved12[6];
331
332/*
333 * 4965 driver requires txpower calibration format version 5 or greater.
334 * Driver does not work with txpower calibration version < 5.
335 * This value is simply a 16-bit number, no major/minor versions here.
336 */
337 u16 calib_version; /* abs.ofs: 364 */
338 u8 reserved13[2];
339 u8 reserved14[96]; /* abs.ofs: 368 */
340
341/*
342 * 4965 Txpower calibration data.
343 */
344 struct iwl4965_eeprom_calib_info calib_info; /* abs.ofs: 464 */
345
346 u8 reserved16[140]; /* fill out to full 1024 byte block */
347
348
349} __attribute__ ((packed));
350
351#define IWL_EEPROM_IMAGE_SIZE 1024
352
353/* End of EEPROM */
354 368
355struct iwl_eeprom_ops { 369struct iwl_eeprom_ops {
370 const u32 regulatory_bands[7];
356 int (*verify_signature) (struct iwl_priv *priv); 371 int (*verify_signature) (struct iwl_priv *priv);
357 int (*acquire_semaphore) (struct iwl_priv *priv); 372 int (*acquire_semaphore) (struct iwl_priv *priv);
358 void (*release_semaphore) (struct iwl_priv *priv); 373 void (*release_semaphore) (struct iwl_priv *priv);
374 int (*check_version) (struct iwl_priv *priv);
375 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
359}; 376};
360 377
361 378
362void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 379void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
363int iwl_eeprom_init(struct iwl_priv *priv); 380int iwl_eeprom_init(struct iwl_priv *priv);
381void iwl_eeprom_free(struct iwl_priv *priv);
382int iwl_eeprom_check_version(struct iwl_priv *priv);
383const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
384u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
364 385
365int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 386int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
366int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 387int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
367void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 388void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
389const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
368 390
369int iwl_init_channel_map(struct iwl_priv *priv); 391int iwl_init_channel_map(struct iwl_priv *priv);
370void iwl_free_channel_map(struct iwl_priv *priv); 392void iwl_free_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
new file mode 100644
index 000000000000..944642450d3d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -0,0 +1,391 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64/****************************/
65/* Flow Handler Definitions */
66/****************************/
67
68/**
69 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
70 * Addresses are offsets from device's PCI hardware base address.
71 */
72#define FH_MEM_LOWER_BOUND (0x1000)
73#define FH_MEM_UPPER_BOUND (0x1EF0)
74
75/**
76 * Keep-Warm (KW) buffer base address.
77 *
78 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
79 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
80 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
81 * from going into a power-savings mode that would cause higher DRAM latency,
82 * and possible data over/under-runs, before all Tx/Rx is complete.
83 *
84 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
85 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
86 * automatically invokes keep-warm accesses when normal accesses might not
87 * be sufficient to maintain fast DRAM response.
88 *
89 * Bit fields:
90 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
91 */
92#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
93
94
95/**
96 * TFD Circular Buffers Base (CBBC) addresses
97 *
98 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
99 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
100 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
101 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
102 * aligned (address bits 0-7 must be 0).
103 *
104 * Bit fields in each pointer register:
105 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
106 */
107#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
108#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
109
110/* Find TFD CB base pointer for given queue (range 0-15). */
111#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
112
113
114/**
115 * Rx SRAM Control and Status Registers (RSCSR)
116 *
117 * These registers provide handshake between driver and 4965 for the Rx queue
118 * (this queue handles *all* command responses, notifications, Rx data, etc.
119 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
120 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
121 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
122 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
123 * mapping between RBDs and RBs.
124 *
125 * Driver must allocate host DRAM memory for the following, and set the
126 * physical address of each into 4965 registers:
127 *
128 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
129 * entries (although any power of 2, up to 4096, is selectable by driver).
130 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
131 * (typically 4K, although 8K or 16K are also selectable by driver).
132 * Driver sets up RB size and number of RBDs in the CB via Rx config
133 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
134 *
135 * Bit fields within one RBD:
136 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
137 *
138 * Driver sets physical address [35:8] of base of RBD circular buffer
139 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
140 *
141 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
142 * (RBs) have been filled, via a "write pointer", actually the index of
143 * the RB's corresponding RBD within the circular buffer. Driver sets
144 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
145 *
146 * Bit fields in lower dword of Rx status buffer (upper dword not used
147 * by driver; see struct iwl4965_shared, val0):
148 * 31-12: Not used by driver
149 * 11- 0: Index of last filled Rx buffer descriptor
150 * (4965 writes, driver reads this value)
151 *
152 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
153 * enter pointers to these RBs into contiguous RBD circular buffer entries,
154 * and update the 4965's "write" index register,
155 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
156 *
157 * This "write" index corresponds to the *next* RBD that the driver will make
158 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
159 * the circular buffer. This value should initially be 0 (before preparing any
160 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
161 * wrap back to 0 at the end of the circular buffer (but don't wrap before
162 * "read" index has advanced past 1! See below).
163 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
164 *
165 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
166 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
167 * to tell the driver the index of the latest filled RBD. The driver must
168 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
169 *
170 * The driver must also internally keep track of a third index, which is the
171 * next RBD to process. When receiving an Rx interrupt, driver should process
172 * all filled but unprocessed RBs up to, but not including, the RB
173 * corresponding to the "read" index. For example, if "read" index becomes "1",
174 * driver may process the RB pointed to by RBD 0. Depending on volume of
175 * traffic, there may be many RBs to process.
176 *
177 * If read index == write index, 4965 thinks there is no room to put new data.
178 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
179 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
180 * and "read" indexes; that is, make sure that there are no more than 254
181 * buffers waiting to be filled.
182 */
183#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
184#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
185#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
186
187/**
188 * Physical base address of 8-byte Rx Status buffer.
189 * Bit fields:
190 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
191 */
192#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
193
194/**
195 * Physical base address of Rx Buffer Descriptor Circular Buffer.
196 * Bit fields:
197 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
198 */
199#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
200
201/**
202 * Rx write pointer (index, really!).
203 * Bit fields:
204 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
205 * NOTE: For 256-entry circular buffer, use only bits [7:0].
206 */
207#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
208#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
209
210
211/**
212 * Rx Config/Status Registers (RCSR)
213 * Rx Config Reg for channel 0 (only channel used)
214 *
215 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
216 * normal operation (see bit fields).
217 *
218 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
219 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
220 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
221 *
222 * Bit fields:
223 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
224 * '10' operate normally
225 * 29-24: reserved
226 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
227 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
228 * 19-18: reserved
229 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
230 * '10' 12K, '11' 16K.
231 * 15-14: reserved
232 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
233 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
234 * typical value 0x10 (about 1/2 msec)
235 * 3- 0: reserved
236 */
237#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
238#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
239#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
240
241#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
242
243#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
244#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
245#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
246#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
247#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
248#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
249
250#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
251#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
252#define RX_RB_TIMEOUT (0x10)
253
254#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
255#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
256#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
257
258#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
259#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
262
263#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
265
266
267/**
268 * Rx Shared Status Registers (RSSR)
269 *
270 * After stopping Rx DMA channel (writing 0 to
271 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
272 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
273 *
274 * Bit fields:
275 * 24: 1 = Channel 0 is idle
276 *
277 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
278 * contain default values that should not be altered by the driver.
279 */
280#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
281#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
282
283#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
284#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
285#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
286 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
287
288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
289
290
291/**
292 * Transmit DMA Channel Control/Status Registers (TCSR)
293 *
294 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
295 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
296 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
297 *
298 * To use a Tx DMA channel, driver must initialize its
299 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
300 *
301 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
302 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
303 *
304 * All other bits should be 0.
305 *
306 * Bit fields:
307 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
308 * '10' operate normally
309 * 29- 4: Reserved, set to "0"
310 * 3: Enable internal DMA requests (1, normal operation), disable (0)
311 * 2- 0: Reserved, set to "0"
312 */
313#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
314#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
315
316/* Find Control/Status reg for given Tx DMA/FIFO channel */
317#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
318 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
319
320#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
321#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
322
323#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
324#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
325#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
326
327#define FH_TCSR_CHNL_NUM (7)
328
329#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
330#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
331#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
332
333#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
334#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
335#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
336
337#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
338#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
339#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
340 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
341#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
342 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x4)
343#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
344 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x8)
345
346/**
347 * Tx Shared Status Registers (TSSR)
348 *
349 * After stopping Tx DMA channel (writing 0 to
350 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
351 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
352 * (channel's buffers empty | no pending requests).
353 *
354 * Bit fields:
355 * 31-24: 1 = Channel buffers empty (channel 7:0)
356 * 23-16: 1 = No pending requests (channel 7:0)
357 */
358#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
359#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
360
361#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
362
363#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
364#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
365
366#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
367 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
368 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
369
370
371
372#define FH_REGS_LOWER_BOUND (0x1000)
373#define FH_REGS_UPPER_BOUND (0x2000)
374
375/* Tx service channels */
376#define FH_SRVC_CHNL (9)
377#define FH_SRVC_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x9C8)
378#define FH_SRVC_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x9D0)
379#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
380 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
381
382/* TFDB Area - TFDs buffer table */
383#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
384#define FH_TFDIB_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x900)
385#define FH_TFDIB_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x958)
386#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
387#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
388
389/* TCSR: tx_config register values */
390#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
391
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index fdb27f1cdc08..6c537360820b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -31,7 +31,7 @@
31#include <linux/version.h> 31#include <linux/version.h>
32#include <net/mac80211.h> 32#include <net/mac80211.h>
33 33
34#include "iwl-4965.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
@@ -56,6 +56,7 @@ const char *get_cmd_string(u8 cmd)
56 IWL_CMD(REPLY_RATE_SCALE); 56 IWL_CMD(REPLY_RATE_SCALE);
57 IWL_CMD(REPLY_LEDS_CMD); 57 IWL_CMD(REPLY_LEDS_CMD);
58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
59 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(RADAR_NOTIFICATION); 60 IWL_CMD(RADAR_NOTIFICATION);
60 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
61 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -89,6 +90,9 @@ const char *get_cmd_string(u8 cmd)
89 IWL_CMD(REPLY_RX_MPDU_CMD); 90 IWL_CMD(REPLY_RX_MPDU_CMD);
90 IWL_CMD(REPLY_RX); 91 IWL_CMD(REPLY_RX);
91 IWL_CMD(REPLY_COMPRESSED_BA); 92 IWL_CMD(REPLY_COMPRESSED_BA);
93 IWL_CMD(CALIBRATION_CFG_CMD);
94 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
95 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
92 default: 96 default:
93 return "UNKNOWN"; 97 return "UNKNOWN";
94 98
@@ -101,7 +105,7 @@ EXPORT_SYMBOL(get_cmd_string);
101static int iwl_generic_cmd_callback(struct iwl_priv *priv, 105static int iwl_generic_cmd_callback(struct iwl_priv *priv,
102 struct iwl_cmd *cmd, struct sk_buff *skb) 106 struct iwl_cmd *cmd, struct sk_buff *skb)
103{ 107{
104 struct iwl4965_rx_packet *pkt = NULL; 108 struct iwl_rx_packet *pkt = NULL;
105 109
106 if (!skb) { 110 if (!skb) {
107 IWL_ERROR("Error: Response NULL in %s.\n", 111 IWL_ERROR("Error: Response NULL in %s.\n",
@@ -109,7 +113,7 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
109 return 1; 113 return 1;
110 } 114 }
111 115
112 pkt = (struct iwl4965_rx_packet *)skb->data; 116 pkt = (struct iwl_rx_packet *)skb->data;
113 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 117 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
114 IWL_ERROR("Bad return from %s (0x%08X)\n", 118 IWL_ERROR("Bad return from %s (0x%08X)\n",
115 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 119 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -139,7 +143,7 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
139 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 143 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
140 return -EBUSY; 144 return -EBUSY;
141 145
142 ret = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 146 ret = iwl_enqueue_hcmd(priv, cmd);
143 if (ret < 0) { 147 if (ret < 0) {
144 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 148 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
145 get_cmd_string(cmd->id), ret); 149 get_cmd_string(cmd->id), ret);
@@ -170,7 +174,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
170 if (cmd->meta.flags & CMD_WANT_SKB) 174 if (cmd->meta.flags & CMD_WANT_SKB)
171 cmd->meta.source = &cmd->meta; 175 cmd->meta.source = &cmd->meta;
172 176
173 cmd_idx = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 177 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
174 if (cmd_idx < 0) { 178 if (cmd_idx < 0) {
175 ret = cmd_idx; 179 ret = cmd_idx;
176 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 180 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index a443472bea62..dedefa06ad8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -136,6 +136,8 @@ static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val)
136 136
137#define KELVIN_TO_CELSIUS(x) ((x)-273) 137#define KELVIN_TO_CELSIUS(x) ((x)-273)
138#define CELSIUS_TO_KELVIN(x) ((x)+273) 138#define CELSIUS_TO_KELVIN(x) ((x)+273)
139#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
140
139 141
140#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010 142#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010
141 143
@@ -235,6 +237,25 @@ static inline int ieee80211_is_reassoc_response(u16 fc)
235 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP); 237 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP);
236} 238}
237 239
240static inline int ieee80211_is_qos_data(u16 fc)
241{
242 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
243 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA);
244}
245/**
246 * ieee80211_get_qos_ctrl - get pointer to the QoS control field
247 *
248 * This function returns the pointer to 802.11 header QoS field (2 bytes)
249 * This function doesn't check whether hdr is a QoS hdr, use with care
250 * @hdr: struct ieee80211_hdr *hdr
251 * @hdr_len: header length
252 */
253
254static inline u8 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr, int hdr_len)
255{
256 return ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
257}
258
238static inline int iwl_check_bits(unsigned long field, unsigned long mask) 259static inline int iwl_check_bits(unsigned long field, unsigned long mask)
239{ 260{
240 return ((field & mask) == mask) ? 1 : 0; 261 return ((field & mask) == mask) ? 1 : 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 03fdf5b434a1..aa6ad18494ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -39,7 +39,7 @@
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <asm/unaligned.h> 40#include <asm/unaligned.h>
41 41
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
new file mode 100644
index 000000000000..2e71803e09ba
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-commands.h"
41#include "iwl-debug.h"
42#include "iwl-power.h"
43#include "iwl-helpers.h"
44
45/*
 46 * Setting the power level allows the card to go to sleep when not busy;
 47 * there are three factors that decide the power level to go to. They
 48 * are listed here with their priority:
49 * 1- critical_power_setting this will be set according to card temperature.
50 * 2- system_power_setting this will be set by system PM manager.
51 * 3- user_power_setting this will be set by user either by writing to sys or
52 * mac80211
53 *
 54 * if system_power_setting and user_power_setting are set to auto
55 * the power level will be decided according to association status and battery
56 * status.
57 *
58 */
59
60#define MSEC_TO_USEC 1024
61#define IWL_POWER_RANGE_0_MAX (2)
62#define IWL_POWER_RANGE_1_MAX (10)
63
64
65#define NOSLP __constant_cpu_to_le16(0), 0, 0
66#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
67#define SLP_TOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
68#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
69 __constant_cpu_to_le32(X1), \
70 __constant_cpu_to_le32(X2), \
71 __constant_cpu_to_le32(X3), \
72 __constant_cpu_to_le32(X4)}
73
74#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5
75#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM
76#define IWL_POWER_ON_AC_ASSOC IWL_POWER_MODE_CAM
77
78
79#define IWL_CT_KILL_TEMPERATURE 110
80#define IWL_MIN_POWER_TEMPERATURE 100
81#define IWL_REDUCED_POWER_TEMPERATURE 95
82
83/* default power management (not Tx power) table values */
84/* for tim 0-10 */
85static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
91 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
92};
93
94
95/* for tim = 3-10 */
96static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
101 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
102 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
103};
104
105/* for tim > 11 */
106static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
110 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
111 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113};
114
115/* decide the right power level according to association status
116 * and battery status
117 */
118static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
119{
120 u16 mode = priv->power_data.user_power_setting;
121
122 switch (priv->power_data.user_power_setting) {
123 case IWL_POWER_AUTO:
124 /* if running on battery */
125 if (priv->power_data.is_battery_active)
126 mode = IWL_POWER_ON_BATTERY;
127 else if (iwl_is_associated(priv))
128 mode = IWL_POWER_ON_AC_ASSOC;
129 else
130 mode = IWL_POWER_ON_AC_DISASSOC;
131 break;
132 case IWL_POWER_BATTERY:
133 mode = IWL_POWER_INDEX_3;
134 break;
135 case IWL_POWER_AC:
136 mode = IWL_POWER_MODE_CAM;
137 break;
138 }
139 return mode;
140}
141
142/* initialize to default */
143static int iwl_power_init_handle(struct iwl_priv *priv)
144{
145 int ret = 0, i;
146 struct iwl_power_mgr *pow_data;
147 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
148 u16 pci_pm;
149
150 IWL_DEBUG_POWER("Initialize power \n");
151
152 pow_data = &(priv->power_data);
153
154 memset(pow_data, 0, sizeof(*pow_data));
155
156 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
157 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
158 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
159
160 ret = pci_read_config_word(priv->pci_dev,
161 PCI_LINK_CTRL, &pci_pm);
162 if (ret != 0)
163 return 0;
164 else {
165 struct iwl4965_powertable_cmd *cmd;
166
167 IWL_DEBUG_POWER("adjust power command flags\n");
168
169 for (i = 0; i < IWL_POWER_AC; i++) {
170 cmd = &pow_data->pwr_range_0[i].cmd;
171
172 if (pci_pm & 0x1)
173 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
174 else
175 cmd->flags |= IWL_POWER_PCI_PM_MSK;
176 }
177 }
178 return ret;
179}
180
181/* adjust power command according to dtim period and power level*/
182static int iwl_update_power_command(struct iwl_priv *priv,
183 struct iwl4965_powertable_cmd *cmd,
184 u16 mode)
185{
186 int ret = 0, i;
187 u8 skip;
188 u32 max_sleep = 0;
189 struct iwl_power_vec_entry *range;
190 u8 period = 0;
191 struct iwl_power_mgr *pow_data;
192
193 if (mode > IWL_POWER_INDEX_5) {
194 IWL_DEBUG_POWER("Error invalid power mode \n");
195 return -1;
196 }
197 pow_data = &(priv->power_data);
198
199 if (pow_data->dtim_period <= IWL_POWER_RANGE_0_MAX)
200 range = &pow_data->pwr_range_0[0];
201 else if (pow_data->dtim_period <= IWL_POWER_RANGE_1_MAX)
202 range = &pow_data->pwr_range_1[0];
203 else
204 range = &pow_data->pwr_range_2[0];
205
206 period = pow_data->dtim_period;
207 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
208
209 if (period == 0) {
210 period = 1;
211 skip = 0;
212 } else
213 skip = range[mode].no_dtim;
214
215 if (skip == 0) {
216 max_sleep = period;
217 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
218 } else {
219 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
220 max_sleep = le32_to_cpu(slp_itrvl);
221 if (max_sleep == 0xFF)
222 max_sleep = period * (skip + 1);
223 else if (max_sleep > period)
224 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
225 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
226 }
227
228 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
229 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
230 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
231 }
232
233 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
234 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
235 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
236 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
237 le32_to_cpu(cmd->sleep_interval[0]),
238 le32_to_cpu(cmd->sleep_interval[1]),
239 le32_to_cpu(cmd->sleep_interval[2]),
240 le32_to_cpu(cmd->sleep_interval[3]),
241 le32_to_cpu(cmd->sleep_interval[4]));
242
243 return ret;
244}
245
246
247/*
 248 * calculate the final power mode index
249 */
250int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
251{
252 struct iwl_power_mgr *setting = &(priv->power_data);
253 int ret = 0;
254 u16 uninitialized_var(final_mode);
255
256 /* If on battery, set to 3,
257 * if plugged into AC power, set to CAM ("continuously aware mode"),
258 * else user level */
259
260 switch (setting->system_power_setting) {
261 case IWL_POWER_AUTO:
262 final_mode = iwl_get_auto_power_mode(priv);
263 break;
264 case IWL_POWER_BATTERY:
265 final_mode = IWL_POWER_INDEX_3;
266 break;
267 case IWL_POWER_AC:
268 final_mode = IWL_POWER_MODE_CAM;
269 break;
270 default:
271 final_mode = setting->system_power_setting;
272 }
273
274 if (setting->critical_power_setting > final_mode)
275 final_mode = setting->critical_power_setting;
276
 277	/* driver only supports CAM for non-STA networks */
278 if (priv->iw_mode != IEEE80211_IF_TYPE_STA)
279 final_mode = IWL_POWER_MODE_CAM;
280
281 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
282 ((setting->power_mode != final_mode) || refresh)) {
283 struct iwl4965_powertable_cmd cmd;
284
285 if (final_mode != IWL_POWER_MODE_CAM)
286 set_bit(STATUS_POWER_PMI, &priv->status);
287
288 iwl_update_power_command(priv, &cmd, final_mode);
289 cmd.keep_alive_beacons = 0;
290
291 if (final_mode == IWL_POWER_INDEX_5)
292 cmd.flags |= IWL_POWER_FAST_PD;
293
294 if (priv->cfg->ops->lib->set_power)
295 ret = priv->cfg->ops->lib->set_power(priv, &cmd);
296
297 if (final_mode == IWL_POWER_MODE_CAM)
298 clear_bit(STATUS_POWER_PMI, &priv->status);
299 else
300 set_bit(STATUS_POWER_PMI, &priv->status);
301
302 if (priv->cfg->ops->lib->update_chain_flags)
303 priv->cfg->ops->lib->update_chain_flags(priv);
304
305 if (!ret)
306 setting->power_mode = final_mode;
307 }
308
309 return ret;
310}
311EXPORT_SYMBOL(iwl_power_update_mode);
312
313/* Allow other iwl code to disable/enable power management active
 314 * this will be useful for rate scale to disable PM during heavy
315 * Tx/Rx activities
316 */
317int iwl_power_disable_management(struct iwl_priv *priv)
318{
319 u16 prev_mode;
320 int ret = 0;
321
322 if (priv->power_data.power_disabled)
323 return -EBUSY;
324
325 prev_mode = priv->power_data.user_power_setting;
326 priv->power_data.user_power_setting = IWL_POWER_MODE_CAM;
327 ret = iwl_power_update_mode(priv, 0);
328 priv->power_data.power_disabled = 1;
329 priv->power_data.user_power_setting = prev_mode;
330
331 return ret;
332}
333EXPORT_SYMBOL(iwl_power_disable_management);
334
335/* Allow other iwl code to disable/enable power management active
 336 * this will be useful for rate scale to disable PM during high
 337 * volume activities
338 */
339int iwl_power_enable_management(struct iwl_priv *priv)
340{
341 int ret = 0;
342
343 priv->power_data.power_disabled = 0;
344 ret = iwl_power_update_mode(priv, 0);
345 return ret;
346}
347EXPORT_SYMBOL(iwl_power_enable_management);
348
349/* set user_power_setting */
350int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
351{
352 int ret = 0;
353
354 if (mode > IWL_POWER_LIMIT)
355 return -EINVAL;
356
357 priv->power_data.user_power_setting = mode;
358
359 ret = iwl_power_update_mode(priv, 0);
360
361 return ret;
362}
363EXPORT_SYMBOL(iwl_power_set_user_mode);
364
365
366/* set system_power_setting. This should be set by over all
367 * PM application.
368 */
369int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
370{
371 int ret = 0;
372
373 if (mode > IWL_POWER_LIMIT)
374 return -EINVAL;
375
376 priv->power_data.system_power_setting = mode;
377
378 ret = iwl_power_update_mode(priv, 0);
379
380 return ret;
381}
382EXPORT_SYMBOL(iwl_power_set_system_mode);
383
 384/* initialize to default */
385void iwl_power_initialize(struct iwl_priv *priv)
386{
387
388 iwl_power_init_handle(priv);
389 priv->power_data.user_power_setting = IWL_POWER_AUTO;
390 priv->power_data.power_disabled = 0;
391 priv->power_data.system_power_setting = IWL_POWER_AUTO;
392 priv->power_data.is_battery_active = 0;
393 priv->power_data.power_disabled = 0;
394 priv->power_data.critical_power_setting = 0;
395}
396EXPORT_SYMBOL(iwl_power_initialize);
397
398/* set critical_power_setting according to temperature value */
399int iwl_power_temperature_change(struct iwl_priv *priv)
400{
401 int ret = 0;
402 u16 new_critical = priv->power_data.critical_power_setting;
403 s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature);
404
405 if (temperature > IWL_CT_KILL_TEMPERATURE)
406 return 0;
407 else if (temperature > IWL_MIN_POWER_TEMPERATURE)
408 new_critical = IWL_POWER_INDEX_5;
409 else if (temperature > IWL_REDUCED_POWER_TEMPERATURE)
410 new_critical = IWL_POWER_INDEX_3;
411 else
412 new_critical = IWL_POWER_MODE_CAM;
413
414 if (new_critical != priv->power_data.critical_power_setting)
415 priv->power_data.critical_power_setting = new_critical;
416
417 if (priv->power_data.critical_power_setting >
418 priv->power_data.power_mode)
419 ret = iwl_power_update_mode(priv, 0);
420
421 return ret;
422}
423EXPORT_SYMBOL(iwl_power_temperature_change);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
new file mode 100644
index 000000000000..b066724a1c2b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -0,0 +1,76 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include <net/mac80211.h>
32#include "iwl-commands.h"
33
34struct iwl_priv;
35
36#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
37#define IWL_POWER_INDEX_3 0x03
38#define IWL_POWER_INDEX_5 0x05
39#define IWL_POWER_AC 0x06
40#define IWL_POWER_BATTERY 0x07
41#define IWL_POWER_AUTO 0x08
42#define IWL_POWER_LIMIT 0x08
43#define IWL_POWER_MASK 0x0F
44#define IWL_POWER_ENABLED 0x10
45
46/* Power management (not Tx power) structures */
47
48struct iwl_power_vec_entry {
49 struct iwl4965_powertable_cmd cmd;
50 u8 no_dtim;
51};
52
53struct iwl_power_mgr {
54 spinlock_t lock;
55 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC];
56 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC];
57 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC];
58 u32 dtim_period;
 59	/* final power level that is used to calculate the final power command */
60 u8 power_mode;
61 u8 user_power_setting; /* set by user through mac80211 or sysfs */
 62	u8 system_power_setting; /* set by kernel system tools */
63 u8 critical_power_setting; /* set if driver over heated */
64 u8 is_battery_active; /* DC/AC power */
65 u8 power_disabled; /* flag to disable using power saving level */
66};
67
68int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
69int iwl_power_disable_management(struct iwl_priv *priv);
70int iwl_power_enable_management(struct iwl_priv *priv);
71int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
72int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
73void iwl_power_initialize(struct iwl_priv *priv);
74int iwl_power_temperature_change(struct iwl_priv *priv);
75
76#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c9cf8eef1a90..70d9c7568b98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -239,40 +239,307 @@
239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) 239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) 240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
241 241
242/**
243 * Tx Scheduler
244 *
 245 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
246 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
247 * host DRAM. It steers each frame's Tx command (which contains the frame
248 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
249 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
250 * but one DMA channel may take input from several queues.
251 *
252 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
253 *
254 * 0 -- EDCA BK (background) frames, lowest priority
255 * 1 -- EDCA BE (best effort) frames, normal priority
256 * 2 -- EDCA VI (video) frames, higher priority
257 * 3 -- EDCA VO (voice) and management frames, highest priority
258 * 4 -- Commands (e.g. RXON, etc.)
259 * 5 -- HCCA short frames
260 * 6 -- HCCA long frames
261 * 7 -- not used by driver (device-internal only)
262 *
263 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
264 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
265 * support 11n aggregation via EDCA DMA channels.
266 *
267 * The driver sets up each queue to work in one of two modes:
268 *
269 * 1) Scheduler-Ack, in which the scheduler automatically supports a
270 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
271 * contains TFDs for a unique combination of Recipient Address (RA)
272 * and Traffic Identifier (TID), that is, traffic of a given
273 * Quality-Of-Service (QOS) priority, destined for a single station.
274 *
275 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
276 * each frame within the BA window, including whether it's been transmitted,
277 * and whether it's been acknowledged by the receiving station. The device
278 * automatically processes block-acks received from the receiving STA,
279 * and reschedules un-acked frames to be retransmitted (successful
280 * Tx completion may end up being out-of-order).
281 *
282 * The driver must maintain the queue's Byte Count table in host DRAM
283 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
284 * This mode does not support fragmentation.
285 *
286 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
287 * The device may automatically retry Tx, but will retry only one frame
288 * at a time, until receiving ACK from receiving station, or reaching
289 * retry limit and giving up.
290 *
291 * The command queue (#4) must use this mode!
292 * This mode does not require use of the Byte Count table in host DRAM.
293 *
294 * Driver controls scheduler operation via 3 means:
295 * 1) Scheduler registers
 296 * 2) Shared scheduler data base in internal 4965 SRAM
297 * 3) Shared data in host DRAM
298 *
299 * Initialization:
300 *
301 * When loading, driver should allocate memory for:
302 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
303 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
304 * (1024 bytes for each queue).
305 *
306 * After receiving "Alive" response from uCode, driver must initialize
307 * the scheduler (especially for queue #4, the command queue, otherwise
308 * the driver can't issue commands!):
309 */
310
311/**
312 * Max Tx window size is the max number of contiguous TFDs that the scheduler
313 * can keep track of at one time when creating block-ack chains of frames.
314 * Note that "64" matches the number of ack bits in a block-ack packet.
315 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
316 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
317 */
318#define SCD_WIN_SIZE 64
319#define SCD_FRAME_LIMIT 64
320
321/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
322#define IWL49_SCD_START_OFFSET 0xa02c00
323
324/*
325 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
326 * Value is valid only after "Alive" response from uCode.
327 */
328#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
329
330/*
331 * Driver may need to update queue-empty bits after changing queue's
332 * write and read pointers (indexes) during (re-)initialization (i.e. when
333 * scheduler is not tracking what's happening).
334 * Bit fields:
335 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
336 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
337 * NOTE: This register is not used by Linux driver.
338 */
339#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
340
341/*
342 * Physical base address of array of byte count (BC) circular buffers (CBs).
343 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
344 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
345 * Others are spaced by 1024 bytes.
346 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
347 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
348 * Bit fields:
349 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
350 */
351#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
352
353/*
354 * Enables any/all Tx DMA/FIFO channels.
355 * Scheduler generates requests for only the active channels.
356 * Set this to 0xff to enable all 8 channels (normal usage).
357 * Bit fields:
358 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
359 */
360#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
361/*
362 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
363 * Initialized and updated by driver as new TFDs are added to queue.
364 * NOTE: If using Block Ack, index must correspond to frame's
365 * Start Sequence Number; index = (SSN & 0xff)
366 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
367 */
368#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
369
370/*
371 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
372 * For FIFO mode, index indicates next frame to transmit.
373 * For Scheduler-ACK mode, index indicates first frame in Tx window.
374 * Initialized by driver, updated by scheduler.
375 */
376#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
377
378/*
379 * Select which queues work in chain mode (1) vs. not (0).
380 * Use chain mode to build chains of aggregated frames.
381 * Bit fields:
382 * 31-16: Reserved
383 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
384 * NOTE: If driver sets up queue for chain mode, it should be also set up
385 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
386 */
387#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
388
389/*
390 * Select which queues interrupt driver when scheduler increments
391 * a queue's read pointer (index).
392 * Bit fields:
393 * 31-16: Reserved
394 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
395 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
396 * from Rx queue to read Tx command responses and update Tx queues.
397 */
398#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
399
400/*
401 * Queue search status registers. One for each queue.
402 * Sets up queue mode and assigns queue to Tx DMA channel.
403 * Bit fields:
404 * 19-10: Write mask/enable bits for bits 0-9
405 * 9: Driver should init to "0"
406 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
407 * Driver should init to "1" for aggregation mode, or "0" otherwise.
408 * 7-6: Driver should init to "0"
409 * 5: Window Size Left; indicates whether scheduler can request
410 * another TFD, based on window size, etc. Driver should init
411 * this bit to "1" for aggregation mode, or "0" for non-agg.
412 * 4-1: Tx FIFO to use (range 0-7).
413 * 0: Queue is active (1), not active (0).
414 * Other bits should be written as "0"
415 *
416 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
417 * via SCD_QUEUECHAIN_SEL.
418 */
419#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
420 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
421
422/* Bit field positions */
423#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
424#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
425#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
426#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
427
428/* Write masks */
429#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
430#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
431
432/**
433 * 4965 internal SRAM structures for scheduler, shared with driver ...
434 *
435 * Driver should clear and initialize the following areas after receiving
436 * "Alive" response from 4965 uCode, i.e. after initial
437 * uCode load, or after a uCode load done for error recovery:
438 *
439 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
440 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
441 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
442 *
443 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
444 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
445 * All OFFSET values must be added to this base address.
446 */
447
448/*
449 * Queue context. One 8-byte entry for each of 16 queues.
450 *
451 * Driver should clear this entire area (size 0x80) to 0 after receiving
452 * "Alive" notification from uCode. Additionally, driver should init
453 * each queue's entry as follows:
454 *
455 * LS Dword bit fields:
456 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
457 *
458 * MS Dword bit fields:
459 * 16-22: Frame limit. Driver should init to 10 (0xa).
460 *
461 * Driver should init all other bits to 0.
462 *
463 * Init must be done after driver receives "Alive" response from 4965 uCode,
464 * and when setting up queue for aggregation.
465 */
466#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
467#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
468 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
469
470#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
471#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
472#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
473#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
474
475/*
476 * Tx Status Bitmap
477 *
478 * Driver should clear this entire area (size 0x100) to 0 after receiving
479 * "Alive" notification from uCode. Area is used only by device itself;
480 * no other support (besides clearing) is required from driver.
481 */
482#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
483
242/* 484/*
243 * 4965 Tx Scheduler registers. 485 * RAxTID to queue translation mapping.
244 * Details are documented in iwl-4965-hw.h 486 *
 487 * When queue is in Scheduler-ACK mode, frames placed in that queue must be
488 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
489 * one QOS priority level destined for one station (for this wireless link,
490 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
491 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
492 * mode, the device ignores the mapping value.
493 *
494 * Bit fields, for each 16-bit map:
495 * 15-9: Reserved, set to 0
496 * 8-4: Index into device's station table for recipient station
497 * 3-0: Traffic ID (tid), range 0-15
498 *
499 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
500 * "Alive" notification from uCode. To update a 16-bit map value, driver
501 * must read a dword-aligned value from device SRAM, replace the 16-bit map
502 * value of interest, and write the dword value back into device SRAM.
245 */ 503 */
246#define IWL49_SCD_BASE (PRPH_BASE + 0xa02c00) 504#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
247 505
248#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_BASE + 0x0) 506/* Find translation table dword to read/write for given queue */
249#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_BASE + 0x4) 507#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
250#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_BASE + 0x10) 508 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
251#define IWL49_SCD_AIT (IWL49_SCD_BASE + 0x18) 509
252#define IWL49_SCD_TXFACT (IWL49_SCD_BASE + 0x1c) 510#define IWL_SCD_TXFIFO_POS_TID (0)
253#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_BASE + 0x24 + (x) * 4) 511#define IWL_SCD_TXFIFO_POS_RA (4)
254#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_BASE + 0x64 + (x) * 4) 512#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
255#define IWL49_SCD_SETQUEUENUM (IWL49_SCD_BASE + 0xa4) 513
256#define IWL49_SCD_SET_TXSTAT_TXED (IWL49_SCD_BASE + 0xa8) 514/* 5000 SCD */
257#define IWL49_SCD_SET_TXSTAT_DONE (IWL49_SCD_BASE + 0xac) 515#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
258#define IWL49_SCD_SET_TXSTAT_NOT_SCHD (IWL49_SCD_BASE + 0xb0) 516#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
259#define IWL49_SCD_DECREASE_CREDIT (IWL49_SCD_BASE + 0xb4) 517#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
260#define IWL49_SCD_DECREASE_SCREDIT (IWL49_SCD_BASE + 0xb8) 518#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
261#define IWL49_SCD_LOAD_CREDIT (IWL49_SCD_BASE + 0xbc) 519#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
262#define IWL49_SCD_LOAD_SCREDIT (IWL49_SCD_BASE + 0xc0) 520
263#define IWL49_SCD_BAR (IWL49_SCD_BASE + 0xc4) 521#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
264#define IWL49_SCD_BAR_DW0 (IWL49_SCD_BASE + 0xc8) 522#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
265#define IWL49_SCD_BAR_DW1 (IWL49_SCD_BASE + 0xcc) 523#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
266#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_BASE + 0xd0) 524#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
267#define IWL49_SCD_QUERY_REQ (IWL49_SCD_BASE + 0xd8) 525#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
268#define IWL49_SCD_QUERY_RES (IWL49_SCD_BASE + 0xdc) 526#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
269#define IWL49_SCD_PENDING_FRAMES (IWL49_SCD_BASE + 0xe0) 527#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
270#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_BASE + 0xe4) 528#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
271#define IWL49_SCD_INTERRUPT_THRESHOLD (IWL49_SCD_BASE + 0xe8) 529
272#define IWL49_SCD_QUERY_MIN_FRAME_SIZE (IWL49_SCD_BASE + 0x100) 530#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
273#define IWL49_SCD_QUEUE_STATUS_BITS(x) (IWL49_SCD_BASE + 0x104 + (x) * 4) 531#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
274 532#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
275/* SP SCD */ 533
534#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
535 (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
536
537#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
538 ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
539
540#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
541 (~(1<<IWL_CMD_QUEUE_NUM)))
542
276#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00) 543#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
277 544
278#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0) 545#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
@@ -287,4 +554,6 @@
287#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108) 554#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
288#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4) 555#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
289 556
557/*********************** END TX SCHEDULER *************************************/
558
290#endif /* __iwl_prph_h__ */ 559#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index 5980a5621cb8..59c8a716bd96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -33,7 +33,7 @@
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
36#include "iwl-4965.h" 36#include "iwl-dev.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
38#include "iwl-helpers.h" 38#include "iwl-helpers.h"
39 39
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
new file mode 100644
index 000000000000..cc61c937320f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -0,0 +1,470 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include "iwl-eeprom.h"
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-io.h"
36#include "iwl-calib.h"
37#include "iwl-helpers.h"
38/************************** RX-FUNCTIONS ****************************/
39/*
40 * Rx theory of operation
41 *
42 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
43 * each of which point to Receive Buffers to be filled by the NIC. These get
44 * used not only for Rx frames, but for any command response or notification
45 * from the NIC. The driver and NIC manage the Rx buffers by means
46 * of indexes into the circular buffer.
47 *
48 * Rx Queue Indexes
49 * The host/firmware share two index registers for managing the Rx buffers.
50 *
51 * The READ index maps to the first position that the firmware may be writing
52 * to -- the driver can read up to (but not including) this position and get
53 * good data.
54 * The READ index is managed by the firmware once the card is enabled.
55 *
56 * The WRITE index maps to the last position the driver has read from -- the
57 * position preceding WRITE is the last slot the firmware can place a packet.
58 *
59 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
60 * WRITE = READ.
61 *
62 * During initialization, the host sets up the READ queue position to the first
63 * INDEX position, and WRITE to the last (READ - 1 wrapped)
64 *
65 * When the firmware places a packet in a buffer, it will advance the READ index
66 * and fire the RX interrupt. The driver can then query the READ index and
67 * process as many packets as possible, moving the WRITE index forward as it
68 * resets the Rx queue buffers with new memory.
69 *
70 * The management in the driver is as follows:
71 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
72 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
73 * to replenish the iwl->rxq->rx_free.
74 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
75 * iwl->rxq is replenished and the READ INDEX is updated (updating the
76 * 'processed' and 'read' driver indexes as well)
77 * + A received packet is processed and handed to the kernel network stack,
78 * detached from the iwl->rxq. The driver 'processed' index is updated.
79 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
80 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
81 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
82 * were enough free buffers and RX_STALLED is set it is cleared.
83 *
84 *
85 * Driver sequence:
86 *
87 * iwl_rx_queue_alloc() Allocates rx_free
88 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
89 * iwl_rx_queue_restock
90 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
91 * queue, updates firmware pointers, and updates
92 * the WRITE index. If insufficient rx_free buffers
93 * are available, schedules iwl_rx_replenish
94 *
95 * -- enable interrupts --
96 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
97 * READ INDEX, detaching the SKB from the pool.
98 * Moves the packet buffer from queue to rx_used.
99 * Calls iwl_rx_queue_restock to refill any empty
100 * slots.
101 * ...
102 *
103 */
104
105/**
106 * iwl_rx_queue_space - Return number of free slots available in queue.
107 */
108int iwl_rx_queue_space(const struct iwl_rx_queue *q)
109{
110 int s = q->read - q->write;
111 if (s <= 0)
112 s += RX_QUEUE_SIZE;
113 /* keep some buffer to not confuse full and empty queue */
114 s -= 2;
115 if (s < 0)
116 s = 0;
117 return s;
118}
119EXPORT_SYMBOL(iwl_rx_queue_space);
120
121/**
122 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
123 */
124int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
125{
126 u32 reg = 0;
127 int ret = 0;
128 unsigned long flags;
129
130 spin_lock_irqsave(&q->lock, flags);
131
132 if (q->need_update == 0)
133 goto exit_unlock;
134
135 /* If power-saving is in use, make sure device is awake */
136 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
137 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
138
139 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
140 iwl_set_bit(priv, CSR_GP_CNTRL,
141 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
142 goto exit_unlock;
143 }
144
145 ret = iwl_grab_nic_access(priv);
146 if (ret)
147 goto exit_unlock;
148
149 /* Device expects a multiple of 8 */
150 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
151 q->write & ~0x7);
152 iwl_release_nic_access(priv);
153
154 /* Else device is assumed to be awake */
155 } else
156 /* Device expects a multiple of 8 */
157 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
158
159
160 q->need_update = 0;
161
162 exit_unlock:
163 spin_unlock_irqrestore(&q->lock, flags);
164 return ret;
165}
166EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
167/**
168 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
169 */
170static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
171 dma_addr_t dma_addr)
172{
173 return cpu_to_le32((u32)(dma_addr >> 8));
174}
175
176/**
177 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
178 *
179 * If there are slots in the RX queue that need to be restocked,
180 * and we have free pre-allocated buffers, fill the ranks as much
181 * as we can, pulling from rx_free.
182 *
183 * This moves the 'write' index forward to catch up with 'processed', and
184 * also updates the memory address in the firmware to reference the new
185 * target buffer.
186 */
187int iwl_rx_queue_restock(struct iwl_priv *priv)
188{
189 struct iwl_rx_queue *rxq = &priv->rxq;
190 struct list_head *element;
191 struct iwl_rx_mem_buffer *rxb;
192 unsigned long flags;
193 int write;
194 int ret = 0;
195
196 spin_lock_irqsave(&rxq->lock, flags);
197 write = rxq->write & ~0x7;
198 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
199 /* Get next free Rx buffer, remove from free list */
200 element = rxq->rx_free.next;
201 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
202 list_del(element);
203
204 /* Point to Rx buffer via next RBD in circular buffer */
205 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
206 rxq->queue[rxq->write] = rxb;
207 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
208 rxq->free_count--;
209 }
210 spin_unlock_irqrestore(&rxq->lock, flags);
211 /* If the pre-allocated buffer pool is dropping low, schedule to
212 * refill it */
213 if (rxq->free_count <= RX_LOW_WATERMARK)
214 queue_work(priv->workqueue, &priv->rx_replenish);
215
216
217 /* If we've added more space for the firmware to place data, tell it.
218 * Increment device's write pointer in multiples of 8. */
219 if ((write != (rxq->write & ~0x7))
220 || (abs(rxq->write - rxq->read) > 7)) {
221 spin_lock_irqsave(&rxq->lock, flags);
222 rxq->need_update = 1;
223 spin_unlock_irqrestore(&rxq->lock, flags);
224 ret = iwl_rx_queue_update_write_ptr(priv, rxq);
225 }
226
227 return ret;
228}
229EXPORT_SYMBOL(iwl_rx_queue_restock);
230
231
232/**
233 * iwl_rx_replenish - Move all used packet from rx_used to rx_free
234 *
235 * When moving to rx_free an SKB is allocated for the slot.
236 *
237 * Also restock the Rx queue via iwl_rx_queue_restock.
238 * This is called as a scheduled work item (except for during initialization)
239 */
240void iwl_rx_allocate(struct iwl_priv *priv)
241{
242 struct iwl_rx_queue *rxq = &priv->rxq;
243 struct list_head *element;
244 struct iwl_rx_mem_buffer *rxb;
245 unsigned long flags;
246 spin_lock_irqsave(&rxq->lock, flags);
247 while (!list_empty(&rxq->rx_used)) {
248 element = rxq->rx_used.next;
249 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
250
251 /* Alloc a new receive buffer */
252 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
253 __GFP_NOWARN | GFP_ATOMIC);
254 if (!rxb->skb) {
255 if (net_ratelimit())
256 printk(KERN_CRIT DRV_NAME
257 ": Can not allocate SKB buffers\n");
258 /* We don't reschedule replenish work here -- we will
259 * call the restock method and if it still needs
260 * more buffers it will schedule replenish */
261 break;
262 }
263 priv->alloc_rxb_skb++;
264 list_del(element);
265
266 /* Get physical address of RB/SKB */
267 rxb->dma_addr =
268 pci_map_single(priv->pci_dev, rxb->skb->data,
269 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
270 list_add_tail(&rxb->list, &rxq->rx_free);
271 rxq->free_count++;
272 }
273 spin_unlock_irqrestore(&rxq->lock, flags);
274}
275EXPORT_SYMBOL(iwl_rx_allocate);
276
277void iwl_rx_replenish(struct iwl_priv *priv)
278{
279 unsigned long flags;
280
281 iwl_rx_allocate(priv);
282
283 spin_lock_irqsave(&priv->lock, flags);
284 iwl_rx_queue_restock(priv);
285 spin_unlock_irqrestore(&priv->lock, flags);
286}
287EXPORT_SYMBOL(iwl_rx_replenish);
288
289
290/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
291 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
292 * This free routine walks the list of POOL entries and if SKB is set to
293 * non NULL it is unmapped and freed
294 */
295void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
296{
297 int i;
298 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
299 if (rxq->pool[i].skb != NULL) {
300 pci_unmap_single(priv->pci_dev,
301 rxq->pool[i].dma_addr,
302 priv->hw_params.rx_buf_size,
303 PCI_DMA_FROMDEVICE);
304 dev_kfree_skb(rxq->pool[i].skb);
305 }
306 }
307
308 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
309 rxq->dma_addr);
310 rxq->bd = NULL;
311}
312EXPORT_SYMBOL(iwl_rx_queue_free);
313
314int iwl_rx_queue_alloc(struct iwl_priv *priv)
315{
316 struct iwl_rx_queue *rxq = &priv->rxq;
317 struct pci_dev *dev = priv->pci_dev;
318 int i;
319
320 spin_lock_init(&rxq->lock);
321 INIT_LIST_HEAD(&rxq->rx_free);
322 INIT_LIST_HEAD(&rxq->rx_used);
323
324 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
325 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
326 if (!rxq->bd)
327 return -ENOMEM;
328
329 /* Fill the rx_used queue with _all_ of the Rx buffers */
330 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
331 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
332
333 /* Set us so that we have processed and used all buffers, but have
334 * not restocked the Rx queue with fresh buffers */
335 rxq->read = rxq->write = 0;
336 rxq->free_count = 0;
337 rxq->need_update = 0;
338 return 0;
339}
340EXPORT_SYMBOL(iwl_rx_queue_alloc);
341
342void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
343{
344 unsigned long flags;
345 int i;
346 spin_lock_irqsave(&rxq->lock, flags);
347 INIT_LIST_HEAD(&rxq->rx_free);
348 INIT_LIST_HEAD(&rxq->rx_used);
349 /* Fill the rx_used queue with _all_ of the Rx buffers */
350 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
351 /* In the reset function, these buffers may have been allocated
352 * to an SKB, so we need to unmap and free potential storage */
353 if (rxq->pool[i].skb != NULL) {
354 pci_unmap_single(priv->pci_dev,
355 rxq->pool[i].dma_addr,
356 priv->hw_params.rx_buf_size,
357 PCI_DMA_FROMDEVICE);
358 priv->alloc_rxb_skb--;
359 dev_kfree_skb(rxq->pool[i].skb);
360 rxq->pool[i].skb = NULL;
361 }
362 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
363 }
364
365 /* Set us so that we have processed and used all buffers, but have
366 * not restocked the Rx queue with fresh buffers */
367 rxq->read = rxq->write = 0;
368 rxq->free_count = 0;
369 spin_unlock_irqrestore(&rxq->lock, flags);
370}
371EXPORT_SYMBOL(iwl_rx_queue_reset);
372
373int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
374{
375 int ret;
376 unsigned long flags;
377 unsigned int rb_size;
378
379 spin_lock_irqsave(&priv->lock, flags);
380 ret = iwl_grab_nic_access(priv);
381 if (ret) {
382 spin_unlock_irqrestore(&priv->lock, flags);
383 return ret;
384 }
385
386 if (priv->cfg->mod_params->amsdu_size_8K)
387 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
388 else
389 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
390
391 /* Stop Rx DMA */
392 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
393
394 /* Reset driver's Rx queue write index */
395 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
396
397 /* Tell device where to find RBD circular buffer in DRAM */
398 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
399 rxq->dma_addr >> 8);
400
401 /* Tell device where in DRAM to update its Rx status */
402 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
403 (priv->shared_phys + priv->rb_closed_offset) >> 4);
404
405 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
406 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
407 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
408 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
409 rb_size |
410 /* 0x10 << 4 | */
411 (RX_QUEUE_SIZE_LOG <<
412 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
413
414 /*
415 * iwl_write32(priv,CSR_INT_COAL_REG,0);
416 */
417
418 iwl_release_nic_access(priv);
419 spin_unlock_irqrestore(&priv->lock, flags);
420
421 return 0;
422}
423
424int iwl_rxq_stop(struct iwl_priv *priv)
425{
426 int ret;
427 unsigned long flags;
428
429 spin_lock_irqsave(&priv->lock, flags);
430 ret = iwl_grab_nic_access(priv);
431 if (unlikely(ret)) {
432 spin_unlock_irqrestore(&priv->lock, flags);
433 return ret;
434 }
435
436 /* stop Rx DMA */
437 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
438 ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
439 (1 << 24), 1000);
440 if (ret < 0)
441 IWL_ERROR("Can't stop Rx DMA.\n");
442
443 iwl_release_nic_access(priv);
444 spin_unlock_irqrestore(&priv->lock, flags);
445
446 return 0;
447}
448EXPORT_SYMBOL(iwl_rxq_stop);
449
450void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
451 struct iwl_rx_mem_buffer *rxb)
452
453{
454#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
455 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
456 struct iwl4965_missed_beacon_notif *missed_beacon;
457
458 missed_beacon = &pkt->u.missed_beacon;
459 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
460 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
461 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
462 le32_to_cpu(missed_beacon->total_missed_becons),
463 le32_to_cpu(missed_beacon->num_recvd_beacons),
464 le32_to_cpu(missed_beacon->num_expected_beacons));
465 if (!test_bit(STATUS_SCANNING, &priv->status))
466 iwl_init_sensitivity(priv);
467 }
468#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
469}
470EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index e4fdfaa2b9b2..983f10760fb0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -28,16 +28,404 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
31 32
32#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
33#include "iwl-4965.h" 34#include "iwl-dev.h"
34#include "iwl-core.h" 35#include "iwl-core.h"
35#include "iwl-sta.h" 36#include "iwl-sta.h"
36#include "iwl-io.h" 37#include "iwl-io.h"
37#include "iwl-helpers.h" 38#include "iwl-helpers.h"
38#include "iwl-4965.h"
39#include "iwl-sta.h"
40 39
40
41#define IWL_STA_DRIVER_ACTIVE 0x1 /* ucode entry is active */
42#define IWL_STA_UCODE_ACTIVE 0x2 /* ucode entry is active */
43
44u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
45{
46 int i;
47 int start = 0;
48 int ret = IWL_INVALID_STATION;
49 unsigned long flags;
50 DECLARE_MAC_BUF(mac);
51
52 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
53 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
54 start = IWL_STA_ID;
55
56 if (is_broadcast_ether_addr(addr))
57 return priv->hw_params.bcast_sta_id;
58
59 spin_lock_irqsave(&priv->sta_lock, flags);
60 for (i = start; i < priv->hw_params.max_stations; i++)
61 if (priv->stations[i].used &&
62 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
63 addr))) {
64 ret = i;
65 goto out;
66 }
67
68 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
69 print_mac(mac, addr), priv->num_stations);
70
71 out:
72 spin_unlock_irqrestore(&priv->sta_lock, flags);
73 return ret;
74}
75EXPORT_SYMBOL(iwl_find_station);
76
77static int iwl_add_sta_callback(struct iwl_priv *priv,
78 struct iwl_cmd *cmd, struct sk_buff *skb)
79{
80 struct iwl_rx_packet *res = NULL;
81
82 if (!skb) {
83 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
84 return 1;
85 }
86
87 res = (struct iwl_rx_packet *)skb->data;
88 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
89 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
90 res->hdr.flags);
91 return 1;
92 }
93
94 switch (res->u.add_sta.status) {
95 case ADD_STA_SUCCESS_MSK:
96 /* FIXME: implement iwl_sta_ucode_activate(priv, addr); */
97 /* fail through */
98 default:
99 IWL_DEBUG_HC("Received REPLY_ADD_STA:(0x%08X)\n",
100 res->u.add_sta.status);
101 break;
102 }
103
104 /* We didn't cache the SKB; let the caller free it */
105 return 1;
106}
107
108
109
110int iwl_send_add_sta(struct iwl_priv *priv,
111 struct iwl_addsta_cmd *sta, u8 flags)
112{
113 struct iwl_rx_packet *res = NULL;
114 int ret = 0;
115 u8 data[sizeof(*sta)];
116 struct iwl_host_cmd cmd = {
117 .id = REPLY_ADD_STA,
118 .meta.flags = flags,
119 .data = data,
120 };
121
122 if (flags & CMD_ASYNC)
123 cmd.meta.u.callback = iwl_add_sta_callback;
124 else
125 cmd.meta.flags |= CMD_WANT_SKB;
126
127 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
128 ret = iwl_send_cmd(priv, &cmd);
129
130 if (ret || (flags & CMD_ASYNC))
131 return ret;
132
133 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
134 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
135 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
136 res->hdr.flags);
137 ret = -EIO;
138 }
139
140 if (ret == 0) {
141 switch (res->u.add_sta.status) {
142 case ADD_STA_SUCCESS_MSK:
143 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
144 break;
145 default:
146 ret = -EIO;
147 IWL_WARNING("REPLY_ADD_STA failed\n");
148 break;
149 }
150 }
151
152 priv->alloc_rxb_skb--;
153 dev_kfree_skb_any(cmd.meta.u.skb);
154
155 return ret;
156}
157EXPORT_SYMBOL(iwl_send_add_sta);
158
159#ifdef CONFIG_IWL4965_HT
160
161static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
162 struct ieee80211_ht_info *sta_ht_inf)
163{
164 __le32 sta_flags;
165 u8 mimo_ps_mode;
166
167 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
168 goto done;
169
170 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
171
172 sta_flags = priv->stations[index].sta.station_flags;
173
174 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
175
176 switch (mimo_ps_mode) {
177 case WLAN_HT_CAP_MIMO_PS_STATIC:
178 sta_flags |= STA_FLG_MIMO_DIS_MSK;
179 break;
180 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
181 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
182 break;
183 case WLAN_HT_CAP_MIMO_PS_DISABLED:
184 break;
185 default:
186 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
187 break;
188 }
189
190 sta_flags |= cpu_to_le32(
191 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
192
193 sta_flags |= cpu_to_le32(
194 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
195
196 if (iwl_is_fat_tx_allowed(priv, sta_ht_inf))
197 sta_flags |= STA_FLG_FAT_EN_MSK;
198 else
199 sta_flags &= ~STA_FLG_FAT_EN_MSK;
200
201 priv->stations[index].sta.station_flags = sta_flags;
202 done:
203 return;
204}
205#else
206static inline void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
207 struct ieee80211_ht_info *sta_ht_info)
208{
209}
210#endif
211
212/**
213 * iwl_add_station_flags - Add station to tables in driver and device
214 */
215u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
216 u8 flags, struct ieee80211_ht_info *ht_info)
217{
218 int i;
219 int index = IWL_INVALID_STATION;
220 struct iwl_station_entry *station;
221 unsigned long flags_spin;
222 DECLARE_MAC_BUF(mac);
223
224 spin_lock_irqsave(&priv->sta_lock, flags_spin);
225 if (is_ap)
226 index = IWL_AP_ID;
227 else if (is_broadcast_ether_addr(addr))
228 index = priv->hw_params.bcast_sta_id;
229 else
230 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
231 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
232 addr)) {
233 index = i;
234 break;
235 }
236
237 if (!priv->stations[i].used &&
238 index == IWL_INVALID_STATION)
239 index = i;
240 }
241
242
243 /* These two conditions have the same outcome, but keep them separate
244 since they have different meanings */
245 if (unlikely(index == IWL_INVALID_STATION)) {
246 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
247 return index;
248 }
249
250 if (priv->stations[index].used &&
251 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
252 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
253 return index;
254 }
255
256
257 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
258 station = &priv->stations[index];
259 station->used = 1;
260 priv->num_stations++;
261
262 /* Set up the REPLY_ADD_STA command to send to device */
263 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
264 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
265 station->sta.mode = 0;
266 station->sta.sta.sta_id = index;
267 station->sta.station_flags = 0;
268
269 /* BCAST station and IBSS stations do not work in HT mode */
270 if (index != priv->hw_params.bcast_sta_id &&
271 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
272 iwl_set_ht_add_station(priv, index, ht_info);
273
274 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
275
276 /* Add station to device's station table */
277 iwl_send_add_sta(priv, &station->sta, flags);
278 return index;
279
280}
281EXPORT_SYMBOL(iwl_add_station_flags);
282
283
284static int iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
285{
286 unsigned long flags;
287 u8 sta_id;
288 DECLARE_MAC_BUF(mac);
289
290 sta_id = iwl_find_station(priv, addr);
291 if (sta_id != IWL_INVALID_STATION) {
292 IWL_DEBUG_ASSOC("Removed STA from Ucode: %s\n",
293 print_mac(mac, addr));
294 spin_lock_irqsave(&priv->sta_lock, flags);
295 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
296 memset(&priv->stations[sta_id], 0,
297 sizeof(struct iwl_station_entry));
298 spin_unlock_irqrestore(&priv->sta_lock, flags);
299 return 0;
300 }
301 return -EINVAL;
302}
303
304static int iwl_remove_sta_callback(struct iwl_priv *priv,
305 struct iwl_cmd *cmd, struct sk_buff *skb)
306{
307 struct iwl_rx_packet *res = NULL;
308 const char *addr = cmd->cmd.rm_sta.addr;
309
310 if (!skb) {
311 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n");
312 return 1;
313 }
314
315 res = (struct iwl_rx_packet *)skb->data;
316 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
317 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
318 res->hdr.flags);
319 return 1;
320 }
321
322 switch (res->u.rem_sta.status) {
323 case REM_STA_SUCCESS_MSK:
324 iwl_sta_ucode_deactivate(priv, addr);
325 break;
326 default:
327 break;
328 }
329
330 /* We didn't cache the SKB; let the caller free it */
331 return 1;
332}
333
334static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
335 u8 flags)
336{
337 struct iwl_rx_packet *res = NULL;
338 int ret;
339
340 struct iwl_rem_sta_cmd rm_sta_cmd;
341
342 struct iwl_host_cmd cmd = {
343 .id = REPLY_REMOVE_STA,
344 .len = sizeof(struct iwl_rem_sta_cmd),
345 .meta.flags = flags,
346 .data = &rm_sta_cmd,
347 };
348
349 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
350 rm_sta_cmd.num_sta = 1;
351 memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
352
353 if (flags & CMD_ASYNC)
354 cmd.meta.u.callback = iwl_remove_sta_callback;
355 else
356 cmd.meta.flags |= CMD_WANT_SKB;
357 ret = iwl_send_cmd(priv, &cmd);
358
359 if (ret || (flags & CMD_ASYNC))
360 return ret;
361
362 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
363 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
364 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
365 res->hdr.flags);
366 ret = -EIO;
367 }
368
369 if (!ret) {
370 switch (res->u.rem_sta.status) {
371 case REM_STA_SUCCESS_MSK:
372 iwl_sta_ucode_deactivate(priv, addr);
373 IWL_DEBUG_ASSOC("REPLY_REMOVE_STA PASSED\n");
374 break;
375 default:
376 ret = -EIO;
377 IWL_ERROR("REPLY_REMOVE_STA failed\n");
378 break;
379 }
380 }
381
382 priv->alloc_rxb_skb--;
383 dev_kfree_skb_any(cmd.meta.u.skb);
384
385 return ret;
386}
387/**
388 * iwl_remove_station - Remove driver's knowledge of station.
389 *
390 */
391u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
392{
393 int index = IWL_INVALID_STATION;
394 int i;
395 unsigned long flags;
396
397 spin_lock_irqsave(&priv->sta_lock, flags);
398
399 if (is_ap)
400 index = IWL_AP_ID;
401 else if (is_broadcast_ether_addr(addr))
402 index = priv->hw_params.bcast_sta_id;
403 else
404 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
405 if (priv->stations[i].used &&
406 !compare_ether_addr(priv->stations[i].sta.sta.addr,
407 addr)) {
408 index = i;
409 break;
410 }
411
412 if (unlikely(index == IWL_INVALID_STATION))
413 goto out;
414
415 if (priv->stations[index].used) {
416 priv->stations[index].used = 0;
417 priv->num_stations--;
418 }
419
420 BUG_ON(priv->num_stations < 0);
421 spin_unlock_irqrestore(&priv->sta_lock, flags);
422 iwl_send_remove_station(priv, addr, CMD_ASYNC);
423 return index;
424out:
425 spin_unlock_irqrestore(&priv->sta_lock, flags);
426 return 0;
427}
428EXPORT_SYMBOL(iwl_remove_station);
41int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 429int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
42{ 430{
43 int i; 431 int i;
@@ -91,6 +479,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
91 else 479 else
92 return 0; 480 return 0;
93} 481}
482EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
94 483
95int iwl_remove_default_wep_key(struct iwl_priv *priv, 484int iwl_remove_default_wep_key(struct iwl_priv *priv,
96 struct ieee80211_key_conf *keyconf) 485 struct ieee80211_key_conf *keyconf)
@@ -111,6 +500,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
111 500
112 return ret; 501 return ret;
113} 502}
503EXPORT_SYMBOL(iwl_remove_default_wep_key);
114 504
115int iwl_set_default_wep_key(struct iwl_priv *priv, 505int iwl_set_default_wep_key(struct iwl_priv *priv,
116 struct ieee80211_key_conf *keyconf) 506 struct ieee80211_key_conf *keyconf)
@@ -119,7 +509,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
119 unsigned long flags; 509 unsigned long flags;
120 510
121 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 511 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
122 keyconf->hw_key_idx = keyconf->keyidx; 512 keyconf->hw_key_idx = HW_KEY_DEFAULT;
123 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 513 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
124 514
125 spin_lock_irqsave(&priv->sta_lock, flags); 515 spin_lock_irqsave(&priv->sta_lock, flags);
@@ -138,6 +528,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
138 528
139 return ret; 529 return ret;
140} 530}
531EXPORT_SYMBOL(iwl_set_default_wep_key);
141 532
142static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, 533static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
143 struct ieee80211_key_conf *keyconf, 534 struct ieee80211_key_conf *keyconf,
@@ -148,7 +539,6 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
148 int ret; 539 int ret;
149 540
150 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 541 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152 542
153 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); 543 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
154 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 544 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -172,15 +562,18 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
172 memcpy(&priv->stations[sta_id].sta.key.key[3], 562 memcpy(&priv->stations[sta_id].sta.key.key[3],
173 keyconf->key, keyconf->keylen); 563 keyconf->key, keyconf->keylen);
174 564
175 priv->stations[sta_id].sta.key.key_offset = 565 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
566 == STA_KEY_FLG_NO_ENC)
567 priv->stations[sta_id].sta.key.key_offset =
176 iwl_get_free_ucode_key_index(priv); 568 iwl_get_free_ucode_key_index(priv);
177 priv->stations[sta_id].sta.key.key_flags = key_flags; 569 /* else, we are overriding an existing key => no need to allocated room
570 * in uCode. */
178 571
572 priv->stations[sta_id].sta.key.key_flags = key_flags;
179 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 573 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
180 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 574 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
181 575
182 ret = iwl4965_send_add_station(priv, 576 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
183 &priv->stations[sta_id].sta, CMD_ASYNC);
184 577
185 spin_unlock_irqrestore(&priv->sta_lock, flags); 578 spin_unlock_irqrestore(&priv->sta_lock, flags);
186 579
@@ -202,7 +595,6 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
202 key_flags |= STA_KEY_MULTICAST_MSK; 595 key_flags |= STA_KEY_MULTICAST_MSK;
203 596
204 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 597 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
205 keyconf->hw_key_idx = keyconf->keyidx;
206 598
207 spin_lock_irqsave(&priv->sta_lock, flags); 599 spin_lock_irqsave(&priv->sta_lock, flags);
208 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 600 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
@@ -214,8 +606,13 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
214 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 606 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
215 keyconf->keylen); 607 keyconf->keylen);
216 608
217 priv->stations[sta_id].sta.key.key_offset = 609 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
218 iwl_get_free_ucode_key_index(priv); 610 == STA_KEY_FLG_NO_ENC)
611 priv->stations[sta_id].sta.key.key_offset =
612 iwl_get_free_ucode_key_index(priv);
613 /* else, we are overriding an existing key => no need to allocated room
614 * in uCode. */
615
219 priv->stations[sta_id].sta.key.key_flags = key_flags; 616 priv->stations[sta_id].sta.key.key_flags = key_flags;
220 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 617 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
221 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 618 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -223,8 +620,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
223 spin_unlock_irqrestore(&priv->sta_lock, flags); 620 spin_unlock_irqrestore(&priv->sta_lock, flags);
224 621
225 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 622 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
226 return iwl4965_send_add_station(priv, 623 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
227 &priv->stations[sta_id].sta, CMD_ASYNC);
228} 624}
229 625
230static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 626static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -236,15 +632,18 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
236 632
237 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 633 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
238 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 634 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
239 keyconf->hw_key_idx = keyconf->keyidx;
240 635
241 spin_lock_irqsave(&priv->sta_lock, flags); 636 spin_lock_irqsave(&priv->sta_lock, flags);
242 637
243 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 638 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
244 priv->stations[sta_id].keyinfo.conf = keyconf;
245 priv->stations[sta_id].keyinfo.keylen = 16; 639 priv->stations[sta_id].keyinfo.keylen = 16;
246 priv->stations[sta_id].sta.key.key_offset = 640
641 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
642 == STA_KEY_FLG_NO_ENC)
643 priv->stations[sta_id].sta.key.key_offset =
247 iwl_get_free_ucode_key_index(priv); 644 iwl_get_free_ucode_key_index(priv);
645 /* else, we are overriding an existing key => no need to allocated room
646 * in uCode. */
248 647
249 /* This copy is acutally not needed: we get the key with each TX */ 648 /* This copy is acutally not needed: we get the key with each TX */
250 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 649 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -256,54 +655,78 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
256 return ret; 655 return ret;
257} 656}
258 657
259int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id) 658int iwl_remove_dynamic_key(struct iwl_priv *priv,
659 struct ieee80211_key_conf *keyconf,
660 u8 sta_id)
260{ 661{
261 unsigned long flags; 662 unsigned long flags;
663 int ret = 0;
664 u16 key_flags;
665 u8 keyidx;
262 666
263 priv->key_mapping_key = 0; 667 priv->key_mapping_key--;
264 668
265 spin_lock_irqsave(&priv->sta_lock, flags); 669 spin_lock_irqsave(&priv->sta_lock, flags);
670 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
671 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
672
673 if (keyconf->keyidx != keyidx) {
674 /* We need to remove a key with index different that the one
675 * in the uCode. This means that the key we need to remove has
676 * been replaced by another one with different index.
677 * Don't do anything and return ok
678 */
679 spin_unlock_irqrestore(&priv->sta_lock, flags);
680 return 0;
681 }
682
266 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 683 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
267 &priv->ucode_key_table)) 684 &priv->ucode_key_table))
268 IWL_ERROR("index %d not used in uCode key table.\n", 685 IWL_ERROR("index %d not used in uCode key table.\n",
269 priv->stations[sta_id].sta.key.key_offset); 686 priv->stations[sta_id].sta.key.key_offset);
270 memset(&priv->stations[sta_id].keyinfo, 0, 687 memset(&priv->stations[sta_id].keyinfo, 0,
271 sizeof(struct iwl4965_hw_key)); 688 sizeof(struct iwl_hw_key));
272 memset(&priv->stations[sta_id].sta.key, 0, 689 memset(&priv->stations[sta_id].sta.key, 0,
273 sizeof(struct iwl4965_keyinfo)); 690 sizeof(struct iwl4965_keyinfo));
274 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 691 priv->stations[sta_id].sta.key.key_flags =
692 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
693 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
275 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 694 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
276 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 695 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
277 spin_unlock_irqrestore(&priv->sta_lock, flags);
278 696
279 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); 697 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
280 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0); 698 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
699 spin_unlock_irqrestore(&priv->sta_lock, flags);
700 return ret;
281} 701}
702EXPORT_SYMBOL(iwl_remove_dynamic_key);
282 703
283int iwl_set_dynamic_key(struct iwl_priv *priv, 704int iwl_set_dynamic_key(struct iwl_priv *priv,
284 struct ieee80211_key_conf *key, u8 sta_id) 705 struct ieee80211_key_conf *keyconf, u8 sta_id)
285{ 706{
286 int ret; 707 int ret;
287 708
288 priv->key_mapping_key = 1; 709 priv->key_mapping_key++;
710 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
289 711
290 switch (key->alg) { 712 switch (keyconf->alg) {
291 case ALG_CCMP: 713 case ALG_CCMP:
292 ret = iwl_set_ccmp_dynamic_key_info(priv, key, sta_id); 714 ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
293 break; 715 break;
294 case ALG_TKIP: 716 case ALG_TKIP:
295 ret = iwl_set_tkip_dynamic_key_info(priv, key, sta_id); 717 ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
296 break; 718 break;
297 case ALG_WEP: 719 case ALG_WEP:
298 ret = iwl_set_wep_dynamic_key_info(priv, key, sta_id); 720 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
299 break; 721 break;
300 default: 722 default:
301 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, key->alg); 723 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
302 ret = -EINVAL; 724 ret = -EINVAL;
303 } 725 }
304 726
305 return ret; 727 return ret;
306} 728}
729EXPORT_SYMBOL(iwl_set_dynamic_key);
307 730
308#ifdef CONFIG_IWLWIFI_DEBUG 731#ifdef CONFIG_IWLWIFI_DEBUG
309static void iwl_dump_lq_cmd(struct iwl_priv *priv, 732static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -353,3 +776,168 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
353} 776}
354EXPORT_SYMBOL(iwl_send_lq_cmd); 777EXPORT_SYMBOL(iwl_send_lq_cmd);
355 778
779/**
780 * iwl_sta_init_lq - Initialize a station's hardware rate table
781 *
782 * The uCode's station table contains a table of fallback rates
783 * for automatic fallback during transmission.
784 *
785 * NOTE: This sets up a default set of values. These will be replaced later
786 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
787 * rc80211_simple.
788 *
789 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
790 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
791 * which requires station table entry to exist).
792 */
793static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
794{
795 int i, r;
796 struct iwl_link_quality_cmd link_cmd = {
797 .reserved1 = 0,
798 };
799 u16 rate_flags;
800
801 /* Set up the rate scaling to start at selected rate, fall back
802 * all the way down to 1M in IEEE order, and then spin on 1M */
803 if (is_ap)
804 r = IWL_RATE_54M_INDEX;
805 else if (priv->band == IEEE80211_BAND_5GHZ)
806 r = IWL_RATE_6M_INDEX;
807 else
808 r = IWL_RATE_1M_INDEX;
809
810 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
811 rate_flags = 0;
812 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
813 rate_flags |= RATE_MCS_CCK_MSK;
814
815 /* Use Tx antenna B only */
816 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/
817
818 link_cmd.rs_table[i].rate_n_flags =
819 iwl4965_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
820 r = iwl4965_get_prev_ieee_rate(r);
821 }
822
823 link_cmd.general_params.single_stream_ant_msk = 2;
824 link_cmd.general_params.dual_stream_ant_msk = 3;
825 link_cmd.agg_params.agg_dis_start_th = 3;
826 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
827
828 /* Update the rate scaling for control frame Tx to AP */
829 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
830
831 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
832 sizeof(link_cmd), &link_cmd, NULL);
833}
834/**
835 * iwl_rxon_add_station - add station into station table.
836 *
837 * there is only one AP station with id= IWL_AP_ID
838 * NOTE: mutex must be held before calling this fnction
839 */
840int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
841{
842 u8 sta_id;
843
844 /* Add station to device's station table */
845#ifdef CONFIG_IWL4965_HT
846 struct ieee80211_conf *conf = &priv->hw->conf;
847 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
848
849 if ((is_ap) &&
850 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
851 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
852 sta_id = iwl_add_station_flags(priv, addr, is_ap,
853 0, cur_ht_config);
854 else
855#endif /* CONFIG_IWL4965_HT */
856 sta_id = iwl_add_station_flags(priv, addr, is_ap,
857 0, NULL);
858
859 /* Set up default rate scaling table in device's station table */
860 iwl_sta_init_lq(priv, addr, is_ap);
861
862 return sta_id;
863}
864EXPORT_SYMBOL(iwl_rxon_add_station);
865
866
867/**
868 * iwl_get_sta_id - Find station's index within station table
869 *
870 * If new IBSS station, create new entry in station table
871 */
872int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
873{
874 int sta_id;
875 u16 fc = le16_to_cpu(hdr->frame_control);
876 DECLARE_MAC_BUF(mac);
877
878 /* If this frame is broadcast or management, use broadcast station id */
879 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
880 is_multicast_ether_addr(hdr->addr1))
881 return priv->hw_params.bcast_sta_id;
882
883 switch (priv->iw_mode) {
884
885 /* If we are a client station in a BSS network, use the special
886 * AP station entry (that's the only station we communicate with) */
887 case IEEE80211_IF_TYPE_STA:
888 return IWL_AP_ID;
889
890 /* If we are an AP, then find the station, or use BCAST */
891 case IEEE80211_IF_TYPE_AP:
892 sta_id = iwl_find_station(priv, hdr->addr1);
893 if (sta_id != IWL_INVALID_STATION)
894 return sta_id;
895 return priv->hw_params.bcast_sta_id;
896
897 /* If this frame is going out to an IBSS network, find the station,
898 * or create a new station table entry */
899 case IEEE80211_IF_TYPE_IBSS:
900 sta_id = iwl_find_station(priv, hdr->addr1);
901 if (sta_id != IWL_INVALID_STATION)
902 return sta_id;
903
904 /* Create new station table entry */
905 sta_id = iwl_add_station_flags(priv, hdr->addr1,
906 0, CMD_ASYNC, NULL);
907
908 if (sta_id != IWL_INVALID_STATION)
909 return sta_id;
910
911 IWL_DEBUG_DROP("Station %s not in station map. "
912 "Defaulting to broadcast...\n",
913 print_mac(mac, hdr->addr1));
914 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
915 return priv->hw_params.bcast_sta_id;
916
917 default:
918 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
919 return priv->hw_params.bcast_sta_id;
920 }
921}
922EXPORT_SYMBOL(iwl_get_sta_id);
923
924
925/**
926 * iwl_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
927 */
928void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid)
929{
930 unsigned long flags;
931
932 /* Remove "disable" flag, to enable Tx for this TID */
933 spin_lock_irqsave(&priv->sta_lock, flags);
934 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
935 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
936 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
937 spin_unlock_irqrestore(&priv->sta_lock, flags);
938
939 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
940}
941EXPORT_SYMBOL(iwl_sta_modify_enable_tid_tx);
942
943
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 44f272ecc827..3d55716f5301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,13 +29,8 @@
29#ifndef __iwl_sta_h__ 29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__ 30#define __iwl_sta_h__
31 31
32#include <net/mac80211.h> 32#define HW_KEY_DYNAMIC 0
33 33#define HW_KEY_DEFAULT 1
34#include "iwl-eeprom.h"
35#include "iwl-core.h"
36#include "iwl-4965.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39 34
40int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 35int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty); 36int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
@@ -43,7 +38,12 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key); 38 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv, 39int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key); 40 struct ieee80211_key_conf *key);
46int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id);
47int iwl_set_dynamic_key(struct iwl_priv *priv, 41int iwl_set_dynamic_key(struct iwl_priv *priv,
48 struct ieee80211_key_conf *key, u8 sta_id); 42 struct ieee80211_key_conf *key, u8 sta_id);
43int iwl_remove_dynamic_key(struct iwl_priv *priv,
44 struct ieee80211_key_conf *key, u8 sta_id);
45int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
46u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
47int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
48void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid);
49#endif /* __iwl_sta_h__ */ 49#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
new file mode 100644
index 000000000000..cfe6f4b233dd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -0,0 +1,1393 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32#include "iwl-eeprom.h"
33#include "iwl-dev.h"
34#include "iwl-core.h"
35#include "iwl-sta.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h"
38
39#ifdef CONFIG_IWL4965_HT
40
41static const u16 default_tid_to_tx_fifo[] = {
42 IWL_TX_FIFO_AC1,
43 IWL_TX_FIFO_AC0,
44 IWL_TX_FIFO_AC0,
45 IWL_TX_FIFO_AC1,
46 IWL_TX_FIFO_AC2,
47 IWL_TX_FIFO_AC2,
48 IWL_TX_FIFO_AC3,
49 IWL_TX_FIFO_AC3,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_NONE,
57 IWL_TX_FIFO_NONE,
58 IWL_TX_FIFO_AC3
59};
60
61#endif /*CONFIG_IWL4965_HT */
62
63
64
65/**
66 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
67 *
68 * Does NOT advance any TFD circular buffer read/write indexes
69 * Does NOT free the TFD itself (which is within circular buffer)
70 */
71int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
72{
73 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
74 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
75 struct pci_dev *dev = priv->pci_dev;
76 int i;
77 int counter = 0;
78 int index, is_odd;
79
80 /* Host command buffers stay mapped in memory, nothing to clean */
81 if (txq->q.id == IWL_CMD_QUEUE_NUM)
82 return 0;
83
84 /* Sanity check on number of chunks */
85 counter = IWL_GET_BITS(*bd, num_tbs);
86 if (counter > MAX_NUM_OF_TBS) {
87 IWL_ERROR("Too many chunks: %i\n", counter);
88 /* @todo issue fatal error, it is quite serious situation */
89 return 0;
90 }
91
92 /* Unmap chunks, if any.
93 * TFD info for odd chunks is different format than for even chunks. */
94 for (i = 0; i < counter; i++) {
95 index = i / 2;
96 is_odd = i & 0x1;
97
98 if (is_odd)
99 pci_unmap_single(
100 dev,
101 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
102 (IWL_GET_BITS(bd->pa[index],
103 tb2_addr_hi20) << 16),
104 IWL_GET_BITS(bd->pa[index], tb2_len),
105 PCI_DMA_TODEVICE);
106
107 else if (i > 0)
108 pci_unmap_single(dev,
109 le32_to_cpu(bd->pa[index].tb1_addr),
110 IWL_GET_BITS(bd->pa[index], tb1_len),
111 PCI_DMA_TODEVICE);
112
113 /* Free SKB, if any, for this chunk */
114 if (txq->txb[txq->q.read_ptr].skb[i]) {
115 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
116
117 dev_kfree_skb(skb);
118 txq->txb[txq->q.read_ptr].skb[i] = NULL;
119 }
120 }
121 return 0;
122}
123EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
124
125
126int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
127 dma_addr_t addr, u16 len)
128{
129 int index, is_odd;
130 struct iwl_tfd_frame *tfd = ptr;
131 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
132
133 /* Each TFD can point to a maximum 20 Tx buffers */
134 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
135 IWL_ERROR("Error can not send more than %d chunks\n",
136 MAX_NUM_OF_TBS);
137 return -EINVAL;
138 }
139
140 index = num_tbs / 2;
141 is_odd = num_tbs & 0x1;
142
143 if (!is_odd) {
144 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
145 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
146 iwl_get_dma_hi_address(addr));
147 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
148 } else {
149 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
150 (u32) (addr & 0xffff));
151 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
152 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
153 }
154
155 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
156
157 return 0;
158}
159EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
160
161/**
162 * iwl_txq_update_write_ptr - Send new write index to hardware
163 */
164int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
165{
166 u32 reg = 0;
167 int ret = 0;
168 int txq_id = txq->q.id;
169
170 if (txq->need_update == 0)
171 return ret;
172
173 /* if we're trying to save power */
174 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
175 /* wake up nic if it's powered down ...
176 * uCode will wake up, and interrupt us again, so next
177 * time we'll skip this part. */
178 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
179
180 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
181 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
182 iwl_set_bit(priv, CSR_GP_CNTRL,
183 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
184 return ret;
185 }
186
187 /* restore this queue's parameters in nic hardware. */
188 ret = iwl_grab_nic_access(priv);
189 if (ret)
190 return ret;
191 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
192 txq->q.write_ptr | (txq_id << 8));
193 iwl_release_nic_access(priv);
194
195 /* else not in power-save mode, uCode will never sleep when we're
196 * trying to tx (during RFKILL, we're not trying to tx). */
197 } else
198 iwl_write32(priv, HBUS_TARG_WRPTR,
199 txq->q.write_ptr | (txq_id << 8));
200
201 txq->need_update = 0;
202
203 return ret;
204}
205EXPORT_SYMBOL(iwl_txq_update_write_ptr);
206
207
208/**
209 * iwl_tx_queue_free - Deallocate DMA queue.
210 * @txq: Transmit queue to deallocate.
211 *
212 * Empty queue by removing and destroying all BD's.
213 * Free all buffers.
214 * 0-fill, but do not free "txq" descriptor structure.
215 */
216static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
217{
218 struct iwl_queue *q = &txq->q;
219 struct pci_dev *dev = priv->pci_dev;
220 int len;
221
222 if (q->n_bd == 0)
223 return;
224
225 /* first, empty all BD's */
226 for (; q->write_ptr != q->read_ptr;
227 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
228 iwl_hw_txq_free_tfd(priv, txq);
229
230 len = sizeof(struct iwl_cmd) * q->n_window;
231 if (q->id == IWL_CMD_QUEUE_NUM)
232 len += IWL_MAX_SCAN_SIZE;
233
234 /* De-alloc array of command/tx buffers */
235 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
236
237 /* De-alloc circular buffer of TFDs */
238 if (txq->q.n_bd)
239 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
240 txq->q.n_bd, txq->bd, txq->q.dma_addr);
241
242 /* De-alloc array of per-TFD driver data */
243 kfree(txq->txb);
244 txq->txb = NULL;
245
246 /* 0-fill queue descriptor structure */
247 memset(txq, 0, sizeof(*txq));
248}
249
250/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
251 * DMA services
252 *
253 * Theory of operation
254 *
255 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
256 * of buffer descriptors, each of which points to one or more data buffers for
257 * the device to read from or fill. Driver and device exchange status of each
258 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
259 * entries in each circular buffer, to protect against confusing empty and full
260 * queue states.
261 *
262 * The device reads or writes the data in the queues via the device's several
263 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
264 *
265 * For Tx queue, there are low mark and high mark limits. If, after queuing
266 * the packet for Tx, free space become < low mark, Tx queue stopped. When
267 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
268 * Tx queue resumed.
269 *
270 * See more detailed info in iwl-4965-hw.h.
271 ***************************************************/
272
273int iwl_queue_space(const struct iwl_queue *q)
274{
275 int s = q->read_ptr - q->write_ptr;
276
277 if (q->read_ptr > q->write_ptr)
278 s -= q->n_bd;
279
280 if (s <= 0)
281 s += q->n_window;
282 /* keep some reserve to not confuse empty and full situations */
283 s -= 2;
284 if (s < 0)
285 s = 0;
286 return s;
287}
288EXPORT_SYMBOL(iwl_queue_space);
289
290
291/**
292 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
293 */
294static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
295 int count, int slots_num, u32 id)
296{
297 q->n_bd = count;
298 q->n_window = slots_num;
299 q->id = id;
300
301 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
302 * and iwl_queue_dec_wrap are broken. */
303 BUG_ON(!is_power_of_2(count));
304
305 /* slots_num must be power-of-two size, otherwise
306 * get_cmd_index is broken. */
307 BUG_ON(!is_power_of_2(slots_num));
308
309 q->low_mark = q->n_window / 4;
310 if (q->low_mark < 4)
311 q->low_mark = 4;
312
313 q->high_mark = q->n_window / 8;
314 if (q->high_mark < 2)
315 q->high_mark = 2;
316
317 q->write_ptr = q->read_ptr = 0;
318
319 return 0;
320}
321
322/**
323 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
324 */
325static int iwl_tx_queue_alloc(struct iwl_priv *priv,
326 struct iwl_tx_queue *txq, u32 id)
327{
328 struct pci_dev *dev = priv->pci_dev;
329
330 /* Driver private data, only for Tx (not command) queues,
331 * not shared with device. */
332 if (id != IWL_CMD_QUEUE_NUM) {
333 txq->txb = kmalloc(sizeof(txq->txb[0]) *
334 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
335 if (!txq->txb) {
336 IWL_ERROR("kmalloc for auxiliary BD "
337 "structures failed\n");
338 goto error;
339 }
340 } else
341 txq->txb = NULL;
342
343 /* Circular buffer of transmit frame descriptors (TFDs),
344 * shared with device */
345 txq->bd = pci_alloc_consistent(dev,
346 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
347 &txq->q.dma_addr);
348
349 if (!txq->bd) {
350 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
351 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
352 goto error;
353 }
354 txq->q.id = id;
355
356 return 0;
357
358 error:
359 kfree(txq->txb);
360 txq->txb = NULL;
361
362 return -ENOMEM;
363}
364
365/*
366 * Tell nic where to find circular buffer of Tx Frame Descriptors for
367 * given Tx queue, and enable the DMA channel used for that queue.
368 *
369 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
370 * channels supported in hardware.
371 */
372static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq)
374{
375 int rc;
376 unsigned long flags;
377 int txq_id = txq->q.id;
378
379 spin_lock_irqsave(&priv->lock, flags);
380 rc = iwl_grab_nic_access(priv);
381 if (rc) {
382 spin_unlock_irqrestore(&priv->lock, flags);
383 return rc;
384 }
385
386 /* Circular buffer (TFD queue in DRAM) physical base address */
387 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
388 txq->q.dma_addr >> 8);
389
390 /* Enable DMA channel, using same id as for TFD queue */
391 iwl_write_direct32(
392 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
393 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
394 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
395 iwl_release_nic_access(priv);
396 spin_unlock_irqrestore(&priv->lock, flags);
397
398 return 0;
399}
400
401/**
402 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
403 */
404static int iwl_tx_queue_init(struct iwl_priv *priv,
405 struct iwl_tx_queue *txq,
406 int slots_num, u32 txq_id)
407{
408 struct pci_dev *dev = priv->pci_dev;
409 int len;
410 int rc = 0;
411
412 /*
413 * Alloc buffer array for commands (Tx or other types of commands).
414 * For the command queue (#4), allocate command space + one big
415 * command for scan, since scan command is very huge; the system will
416 * not have two scans at the same time, so only one is needed.
417 * For normal Tx queues (all other queues), no super-size command
418 * space is needed.
419 */
420 len = sizeof(struct iwl_cmd) * slots_num;
421 if (txq_id == IWL_CMD_QUEUE_NUM)
422 len += IWL_MAX_SCAN_SIZE;
423 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
424 if (!txq->cmd)
425 return -ENOMEM;
426
427 /* Alloc driver data array and TFD circular buffer */
428 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
429 if (rc) {
430 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
431
432 return -ENOMEM;
433 }
434 txq->need_update = 0;
435
436 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
437 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
438 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
439
440 /* Initialize queue's high/low-water marks, and head/tail indexes */
441 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
442
443 /* Tell device where to find queue */
444 iwl_hw_tx_queue_init(priv, txq);
445
446 return 0;
447}
448/**
449 * iwl_hw_txq_ctx_free - Free TXQ Context
450 *
451 * Destroy all TX DMA queues and structures
452 */
453void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
454{
455 int txq_id;
456
457 /* Tx queues */
458 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
459 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
460
461 /* Keep-warm buffer */
462 iwl_kw_free(priv);
463}
464EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
465
466
467/**
468 * iwl_txq_ctx_reset - Reset TX queue context
469 * Destroys all DMA structures and initialise them again
470 *
471 * @param priv
472 * @return error code
473 */
474int iwl_txq_ctx_reset(struct iwl_priv *priv)
475{
476 int ret = 0;
477 int txq_id, slots_num;
478 unsigned long flags;
479
480 iwl_kw_free(priv);
481
482 /* Free all tx/cmd queues and keep-warm buffer */
483 iwl_hw_txq_ctx_free(priv);
484
485 /* Alloc keep-warm buffer */
486 ret = iwl_kw_alloc(priv);
487 if (ret) {
488 IWL_ERROR("Keep Warm allocation failed");
489 goto error_kw;
490 }
491 spin_lock_irqsave(&priv->lock, flags);
492 ret = iwl_grab_nic_access(priv);
493 if (unlikely(ret)) {
494 spin_unlock_irqrestore(&priv->lock, flags);
495 goto error_reset;
496 }
497
498 /* Turn off all Tx DMA fifos */
499 priv->cfg->ops->lib->txq_set_sched(priv, 0);
500
501 iwl_release_nic_access(priv);
502 spin_unlock_irqrestore(&priv->lock, flags);
503
504
505 /* Tell nic where to find the keep-warm buffer */
506 ret = iwl_kw_init(priv);
507 if (ret) {
508 IWL_ERROR("kw_init failed\n");
509 goto error_reset;
510 }
511
512 /* Alloc and init all Tx queues, including the command queue (#4) */
513 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
514 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
515 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
516 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
517 txq_id);
518 if (ret) {
519 IWL_ERROR("Tx %d queue init failed\n", txq_id);
520 goto error;
521 }
522 }
523
524 return ret;
525
526 error:
527 iwl_hw_txq_ctx_free(priv);
528 error_reset:
529 iwl_kw_free(priv);
530 error_kw:
531 return ret;
532}
533/**
534 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
535 */
536void iwl_txq_ctx_stop(struct iwl_priv *priv)
537{
538
539 int txq_id;
540 unsigned long flags;
541
542
543 /* Turn off all Tx DMA fifos */
544 spin_lock_irqsave(&priv->lock, flags);
545 if (iwl_grab_nic_access(priv)) {
546 spin_unlock_irqrestore(&priv->lock, flags);
547 return;
548 }
549
550 priv->cfg->ops->lib->txq_set_sched(priv, 0);
551
552 /* Stop each Tx DMA channel, and wait for it to be idle */
553 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
554 iwl_write_direct32(priv,
555 FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
556 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
557 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
558 (txq_id), 200);
559 }
560 iwl_release_nic_access(priv);
561 spin_unlock_irqrestore(&priv->lock, flags);
562
563 /* Deallocate memory for all Tx queues */
564 iwl_hw_txq_ctx_free(priv);
565}
566EXPORT_SYMBOL(iwl_txq_ctx_stop);
567
/*
 * handle build REPLY_TX command notification.
 *
 * Fill the Tx-flag, station id, TID, protection and PM-timeout fields
 * of @tx_cmd from the 802.11 header and mac80211 control info.
 * Note: @is_unicast is currently unused by this function.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  struct ieee80211_hdr *hdr,
				  int is_unicast, u8 std_id)
{
	u16 fc = le16_to_cpu(hdr->frame_control);
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response carries the TSF */
		if (ieee80211_is_probe_response(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		/* No-ACK frame: let uCode manage sequence control */
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* Block-ack request wants an immediate BA response */
	if (ieee80211_is_back_request(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;


	tx_cmd->sta_id = std_id;
	if (ieee80211_get_morefrag(hdr))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_qos_data(fc)) {
		/* QoS data: TID comes from the QoS control field and the
		 * driver maintains the sequence counter for these frames */
		u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
		tx_flags |= TX_CMD_FLG_RTS_MSK;
		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		tx_flags |= TX_CMD_FLG_CTS_MSK;
	}

	/* Either protection mechanism holds the medium for the full TXOP */
	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
		/* (Re)association frames get a slightly longer PM timeout */
		if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
635
/* RTS retry limits: short for HCCA, long for ordinary (EDCA) access */
#define RTS_HCCA_RETRY_LIMIT 3
#define RTS_DFAULT_RETRY_LIMIT 60

/*
 * iwl_tx_cmd_build_rate - fill rate and retry-limit fields of the Tx command
 *
 * Picks the PLCP rate and antenna/modulation flags from mac80211's
 * selected rate; data frames are left to uCode rate scaling, while
 * management frames alternate antennas and may downgrade RTS to CTS.
 */
static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
			      struct iwl_tx_cmd *tx_cmd,
			      struct ieee80211_tx_info *info,
			      u16 fc, int sta_id,
			      int is_hcca)
{
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;
	u16 rate_flags = 0;
	int rate_idx;

	/* Clamp mac80211's chosen rate index into the driver rate table */
	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	/* CCK rates need the CCK modulation flag set */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;


	if (ieee80211_is_probe_response(fc)) {
		/* Probe responses retry only a few times; keep the RTS
		 * retry limit no larger than the data retry limit */
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	/* Driver-level override of the data retry limit, if configured */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;


	if (ieee80211_is_data(fc)) {
		/* Data frames: uCode rate scaling chooses the rate */
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			/* Use CTS-to-self instead of RTS for these frames */
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
706
/*
 * iwl_tx_cmd_build_hwcrypto - fill security fields of the Tx command
 *
 * Copies key material and security-control bits for CCMP, TKIP or WEP
 * into @tx_cmd according to the hw_key mac80211 attached to the frame.
 */
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
		/* Hardware wants the per-packet phase-2 TKIP key */
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		/* WEP key material starts at offset 3 in the key array */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}
749
750static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
751{
752 /* 0 - mgmt, 1 - cnt, 2 - data */
753 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
754 priv->tx_stats[idx].cnt++;
755 priv->tx_stats[idx].bytes += len;
756}
757
/*
 * start REPLY_TX command process
 *
 * Builds a REPLY_TX host command for @skb, attaches the command+MAC
 * header and the frame payload to the chosen Tx queue's TFD, and
 * updates the device write pointer.  Returns 0 on success; -1 when
 * the frame is dropped, or the error from the write-pointer update.
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	int txq_id = skb_get_queue_mapping(skb);
	struct iwl_tx_queue *txq = NULL;
	struct iwl_queue *q = NULL;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	struct iwl_cmd *out_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	u16 len, idx, len_org;
	u16 seq_number = 0;
	u8 id, hdr_len, unicast;
	u8 sta_id;
	u16 fc;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if (!priv->vif) {
		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);
	id = 0;

	fc = le16_to_cpu(hdr->frame_control);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_request(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_request(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frame if we are not associated */
	if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_get_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
			       print_mac(mac, hdr->addr1));
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	if (ieee80211_is_qos_data(fc)) {
		/* QoS frame: pull the TID and stamp the driver-maintained
		 * per-<sta,tid> sequence number into the MAC header */
		qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
		tid = qc[0] & 0xf;
		seq_number = priv->stations[sta_id].tid[tid].seq_number &
				IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = cpu_to_le16(seq_number) |
			(hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
		seq_number += 0x10;
#ifdef CONFIG_IWL4965_HT
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
#endif /* CONFIG_IWL4965_HT */
	}

	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));
	control_flags = (u32 *) tfd;
	idx = get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = &txq->cmd[idx];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	/* len_org is reused as a flag: 1 if padding was added, else 0 */
	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
		     offsetof(struct iwl_cmd, hdr);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, fc, len);

	/* Scratch area for uCode, just past the command header */
	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

	if (!ieee80211_get_morefrag(hdr)) {
		/* Last (or only) fragment: commit the advanced seq number */
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		/* More fragments follow: defer the write-pointer update */
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	/* Queue is nearly full: ask mac80211 to stop feeding this queue */
	if ((iwl_queue_space(q) < q->high_mark)
	    && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		}

		ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
999
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the index of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size;
	dma_addr_t phys_addr;
	int ret;
	unsigned long flags;

	/* Let the per-device hook adjust the command length first */
	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	/* Async commands need one extra free slot */
	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	/* Copy command id, metadata and payload into the queue slot */
	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	/* Physical address of this command's header in the cmd array */
	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
1088
/**
 * iwl_tx_queue_reclaim - Reclaim Tx frames the firmware has processed
 * @priv: driver private data
 * @txq_id: queue to reclaim from
 * @index: index the firmware reports as processed
 *
 * Advances the queue's read pointer up to (and including) @index,
 * handing each completed skb's status back to mac80211 and freeing
 * its TFD.  Returns the number of frames reclaimed, or 0 when @index
 * is out of range for the queue.
 */
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Walk the read pointer forward until it passes @index */
	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		/* Report the frame's Tx status up to mac80211 */
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1119
1120
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed.  Command queue entries carry no skb, so this
 * only advances the read pointer; if more than one entry has to be
 * skipped, queue accounting is broken and a firmware restart is
 * scheduled.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		/* Only a single entry should ever be reclaimed per
		 * completed command; more means something went wrong */
		if (nfreed > 1) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
		nfreed++;
	}
}
1152
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int huge = sequence & SEQ_HUGE_FRAME;
	int cmd_index;
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
			  txq_id, pkt->hdr.cmd);
	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);

	/* Locate the originating command via the sequence field */
	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		/* Hand the response skb over to the waiting issuer */
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		/* Synchronous command: wake up the sleeping caller */
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
1198
1199
1200#ifdef CONFIG_IWL4965_HT
1201/*
1202 * Find first available (lowest unused) Tx Queue, mark it "active".
1203 * Called only when finding queue for aggregation.
1204 * Should never return anything < 7, because they should already
1205 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
1206 */
1207static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1208{
1209 int txq_id;
1210
1211 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1212 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1213 return txq_id;
1214 return -1;
1215}
1216
/**
 * iwl_tx_agg_start - start a Tx aggregation (block-ack) session
 * @ra: receiver address
 * @tid: traffic identifier (must map into default_tid_to_tx_fifo)
 * @ssn: out - starting sequence number for the session
 *
 * Allocates a free aggregation Tx queue for <sta, tid>, enables it in
 * hardware, and either completes the ADDBA handshake immediately (HW
 * queue empty) or defers it until the queue drains.
 */
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;
	DECLARE_MAC_BUF(mac);

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARNING("%s on ra = %s tid = %d\n",
			__func__, print_mac(mac, ra), tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Grab a free Tx queue for the aggregation session */
	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	/* NOTE(review): tid_data is read here after sta_lock was dropped —
	 * presumably safe on current call paths, but confirm. */
	if (tid_data->tfds_in_queue == 0) {
		/* NOTE(review): informational message at KERN_ERR level
		 * looks too severe — this is not an error condition. */
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		/* Frames still queued: finish ADDBA once the queue drains */
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
1271
/**
 * iwl_tx_agg_stop - stop a Tx aggregation (block-ack) session
 * @ra: receiver address
 * @tid: traffic identifier of the session to stop
 *
 * Tears down the aggregation queue for <ra, tid>.  If the HW queue is
 * not yet empty, teardown is deferred (state IWL_EMPTYING_HW_QUEUE_DELBA)
 * and completed later by iwl_txq_check_empty().
 */
int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if (!ra) {
		IWL_ERROR("ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* Defer teardown until the HW queue drains */
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	/* Tell mac80211 the DELBA flow has completed */
	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
1328
/**
 * iwl_txq_check_empty - complete deferred ADDBA/DELBA when a queue drains
 *
 * Called during Tx reclaim.  If an aggregation session for <sta_id, tid>
 * is waiting for its HW queue to empty, finish the pending DELBA
 * (disable the agg queue) or ADDBA (turn aggregation on) flow.
 * States other than the two "emptying" ones are left untouched.
 */
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
1362#endif /* CONFIG_IWL4965_HT */
1363
/* BUGFIX: the guard was misspelled CONFIG_IWLWIF_DEBUG (missing 'I'),
 * a symbol that never exists, so this helper was never compiled even in
 * debug builds — note the matching #endif comment below already names
 * CONFIG_IWLWIFI_DEBUG. */
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

/**
 * iwl_get_tx_fail_reason - map a REPLY_TX status code to a printable name
 * @status: Tx status word from the uCode response
 *
 * Returns a static string naming the failure cause ("SUCCESS" for a
 * successful transmission, "UNKNOWN" for unrecognized codes).
 */
const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 13925b627e3b..72279e07fe32 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -102,16 +102,6 @@ MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT); 102MODULE_AUTHOR(DRV_COPYRIGHT);
103MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
104 104
105static __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
106{
107 u16 fc = le16_to_cpu(hdr->frame_control);
108 int hdr_len = ieee80211_get_hdrlen(fc);
109
110 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
111 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
112 return NULL;
113}
114
115static const struct ieee80211_supported_band *iwl3945_get_band( 105static const struct ieee80211_supported_band *iwl3945_get_band(
116 struct iwl3945_priv *priv, enum ieee80211_band band) 106 struct iwl3945_priv *priv, enum ieee80211_band band)
117{ 107{
@@ -2386,12 +2376,13 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2386} 2376}
2387 2377
2388static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv, 2378static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2389 struct ieee80211_tx_control *ctl, 2379 struct ieee80211_tx_info *info,
2390 struct iwl3945_cmd *cmd, 2380 struct iwl3945_cmd *cmd,
2391 struct sk_buff *skb_frag, 2381 struct sk_buff *skb_frag,
2392 int last_frag) 2382 int last_frag)
2393{ 2383{
2394 struct iwl3945_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; 2384 struct iwl3945_hw_key *keyinfo =
2385 &priv->stations[info->control.hw_key->hw_key_idx].keyinfo;
2395 2386
2396 switch (keyinfo->alg) { 2387 switch (keyinfo->alg) {
2397 case ALG_CCMP: 2388 case ALG_CCMP:
@@ -2414,7 +2405,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2414 2405
2415 case ALG_WEP: 2406 case ALG_WEP:
2416 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | 2407 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2417 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 2408 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2418 2409
2419 if (keyinfo->keylen == 13) 2410 if (keyinfo->keylen == 13)
2420 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; 2411 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
@@ -2422,7 +2413,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2422 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); 2413 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2423 2414
2424 IWL_DEBUG_TX("Configuring packet for WEP encryption " 2415 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2425 "with key %d\n", ctl->key_idx); 2416 "with key %d\n", info->control.hw_key->hw_key_idx);
2426 break; 2417 break;
2427 2418
2428 default: 2419 default:
@@ -2436,16 +2427,15 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2436 */ 2427 */
2437static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv, 2428static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2438 struct iwl3945_cmd *cmd, 2429 struct iwl3945_cmd *cmd,
2439 struct ieee80211_tx_control *ctrl, 2430 struct ieee80211_tx_info *info,
2440 struct ieee80211_hdr *hdr, 2431 struct ieee80211_hdr *hdr,
2441 int is_unicast, u8 std_id) 2432 int is_unicast, u8 std_id)
2442{ 2433{
2443 __le16 *qc;
2444 u16 fc = le16_to_cpu(hdr->frame_control); 2434 u16 fc = le16_to_cpu(hdr->frame_control);
2445 __le32 tx_flags = cmd->cmd.tx.tx_flags; 2435 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2446 2436
2447 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2437 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2448 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { 2438 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
2449 tx_flags |= TX_CMD_FLG_ACK_MSK; 2439 tx_flags |= TX_CMD_FLG_ACK_MSK;
2450 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 2440 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2451 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2441 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
@@ -2461,17 +2451,18 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2461 if (ieee80211_get_morefrag(hdr)) 2451 if (ieee80211_get_morefrag(hdr))
2462 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 2452 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2463 2453
2464 qc = ieee80211_get_qos_ctrl(hdr); 2454 if (ieee80211_is_qos_data(fc)) {
2465 if (qc) { 2455 u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
2466 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); 2456 cmd->cmd.tx.tid_tspec = qc[0] & 0xf;
2467 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 2457 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2468 } else 2458 } else {
2469 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2459 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2460 }
2470 2461
2471 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 2462 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
2472 tx_flags |= TX_CMD_FLG_RTS_MSK; 2463 tx_flags |= TX_CMD_FLG_RTS_MSK;
2473 tx_flags &= ~TX_CMD_FLG_CTS_MSK; 2464 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2474 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 2465 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
2475 tx_flags &= ~TX_CMD_FLG_RTS_MSK; 2466 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2476 tx_flags |= TX_CMD_FLG_CTS_MSK; 2467 tx_flags |= TX_CMD_FLG_CTS_MSK;
2477 } 2468 }
@@ -2555,25 +2546,27 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2555/* 2546/*
2556 * start REPLY_TX command process 2547 * start REPLY_TX command process
2557 */ 2548 */
2558static int iwl3945_tx_skb(struct iwl3945_priv *priv, 2549static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2559 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2560{ 2550{
2561 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2551 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2552 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2562 struct iwl3945_tfd_frame *tfd; 2553 struct iwl3945_tfd_frame *tfd;
2563 u32 *control_flags; 2554 u32 *control_flags;
2564 int txq_id = ctl->queue; 2555 int txq_id = skb_get_queue_mapping(skb);
2565 struct iwl3945_tx_queue *txq = NULL; 2556 struct iwl3945_tx_queue *txq = NULL;
2566 struct iwl3945_queue *q = NULL; 2557 struct iwl3945_queue *q = NULL;
2567 dma_addr_t phys_addr; 2558 dma_addr_t phys_addr;
2568 dma_addr_t txcmd_phys; 2559 dma_addr_t txcmd_phys;
2569 struct iwl3945_cmd *out_cmd = NULL; 2560 struct iwl3945_cmd *out_cmd = NULL;
2570 u16 len, idx, len_org; 2561 u16 len, idx, len_org, hdr_len;
2571 u8 id, hdr_len, unicast; 2562 u8 id;
2563 u8 unicast;
2572 u8 sta_id; 2564 u8 sta_id;
2565 u8 tid = 0;
2573 u16 seq_number = 0; 2566 u16 seq_number = 0;
2574 u16 fc; 2567 u16 fc;
2575 __le16 *qc;
2576 u8 wait_write_ptr = 0; 2568 u8 wait_write_ptr = 0;
2569 u8 *qc = NULL;
2577 unsigned long flags; 2570 unsigned long flags;
2578 int rc; 2571 int rc;
2579 2572
@@ -2588,7 +2581,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2588 goto drop_unlock; 2581 goto drop_unlock;
2589 } 2582 }
2590 2583
2591 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) { 2584 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
2592 IWL_ERROR("ERROR: No TX rate available.\n"); 2585 IWL_ERROR("ERROR: No TX rate available.\n");
2593 goto drop_unlock; 2586 goto drop_unlock;
2594 } 2587 }
@@ -2631,9 +2624,9 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2631 2624
2632 IWL_DEBUG_RATE("station Id %d\n", sta_id); 2625 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2633 2626
2634 qc = ieee80211_get_qos_ctrl(hdr); 2627 if (ieee80211_is_qos_data(fc)) {
2635 if (qc) { 2628 qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
2636 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); 2629 tid = qc[0] & 0xf;
2637 seq_number = priv->stations[sta_id].tid[tid].seq_number & 2630 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2638 IEEE80211_SCTL_SEQ; 2631 IEEE80211_SCTL_SEQ;
2639 hdr->seq_ctrl = cpu_to_le16(seq_number) | 2632 hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2657,8 +2650,6 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2657 /* Set up driver data for this TFD */ 2650 /* Set up driver data for this TFD */
2658 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info)); 2651 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info));
2659 txq->txb[q->write_ptr].skb[0] = skb; 2652 txq->txb[q->write_ptr].skb[0] = skb;
2660 memcpy(&(txq->txb[q->write_ptr].status.control),
2661 ctl, sizeof(struct ieee80211_tx_control));
2662 2653
2663 /* Init first empty entry in queue's array of Tx/cmd buffers */ 2654 /* Init first empty entry in queue's array of Tx/cmd buffers */
2664 out_cmd = &txq->cmd[idx]; 2655 out_cmd = &txq->cmd[idx];
@@ -2707,8 +2698,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2707 * first entry */ 2698 * first entry */
2708 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2699 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2709 2700
2710 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 2701 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
2711 iwl3945_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); 2702 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2712 2703
2713 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2704 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2714 * if any (802.11 null frames have no payload). */ 2705 * if any (802.11 null frames have no payload). */
@@ -2733,10 +2724,10 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2733 out_cmd->cmd.tx.len = cpu_to_le16(len); 2724 out_cmd->cmd.tx.len = cpu_to_le16(len);
2734 2725
2735 /* TODO need this for burst mode later on */ 2726 /* TODO need this for burst mode later on */
2736 iwl3945_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); 2727 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id);
2737 2728
2738 /* set is_hcca to 0; it probably will never be implemented */ 2729 /* set is_hcca to 0; it probably will never be implemented */
2739 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); 2730 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
2740 2731
2741 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 2732 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2742 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 2733 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
@@ -2744,7 +2735,6 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2744 if (!ieee80211_get_morefrag(hdr)) { 2735 if (!ieee80211_get_morefrag(hdr)) {
2745 txq->need_update = 1; 2736 txq->need_update = 1;
2746 if (qc) { 2737 if (qc) {
2747 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2748 priv->stations[sta_id].tid[tid].seq_number = seq_number; 2738 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2749 } 2739 }
2750 } else { 2740 } else {
@@ -2775,7 +2765,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2775 spin_unlock_irqrestore(&priv->lock, flags); 2765 spin_unlock_irqrestore(&priv->lock, flags);
2776 } 2766 }
2777 2767
2778 ieee80211_stop_queue(priv->hw, ctl->queue); 2768 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
2779 } 2769 }
2780 2770
2781 return 0; 2771 return 0;
@@ -3238,7 +3228,7 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
3238 struct sk_buff *beacon; 3228 struct sk_buff *beacon;
3239 3229
3240 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 3230 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3241 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 3231 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3242 3232
3243 if (!beacon) { 3233 if (!beacon) {
3244 IWL_ERROR("update beacon failed\n"); 3234 IWL_ERROR("update beacon failed\n");
@@ -4840,7 +4830,7 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4840 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 4830 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4841 ch_info->min_power = 0; 4831 ch_info->min_power = 0;
4842 4832
4843 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 4833 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
4844 " %ddBm): Ad-Hoc %ssupported\n", 4834 " %ddBm): Ad-Hoc %ssupported\n",
4845 ch_info->channel, 4835 ch_info->channel,
4846 is_channel_a_band(ch_info) ? 4836 is_channel_a_band(ch_info) ?
@@ -4850,7 +4840,6 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4850 CHECK_AND_PRINT(ACTIVE), 4840 CHECK_AND_PRINT(ACTIVE),
4851 CHECK_AND_PRINT(RADAR), 4841 CHECK_AND_PRINT(RADAR),
4852 CHECK_AND_PRINT(WIDE), 4842 CHECK_AND_PRINT(WIDE),
4853 CHECK_AND_PRINT(NARROW),
4854 CHECK_AND_PRINT(DFS), 4843 CHECK_AND_PRINT(DFS),
4855 eeprom_ch_info[ch].flags, 4844 eeprom_ch_info[ch].flags,
4856 eeprom_ch_info[ch].max_power_avg, 4845 eeprom_ch_info[ch].max_power_avg,
@@ -4986,9 +4975,6 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4986 if (scan_ch->type & 1) 4975 if (scan_ch->type & 1)
4987 scan_ch->type |= (direct_mask << 1); 4976 scan_ch->type |= (direct_mask << 1);
4988 4977
4989 if (is_channel_narrow(ch_info))
4990 scan_ch->type |= (1 << 7);
4991
4992 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4978 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4993 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4979 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4994 4980
@@ -5835,7 +5821,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5835 if (iwl3945_is_rfkill(priv)) 5821 if (iwl3945_is_rfkill(priv))
5836 return; 5822 return;
5837 5823
5838 ieee80211_start_queues(priv->hw); 5824 ieee80211_wake_queues(priv->hw);
5839 5825
5840 priv->active_rate = priv->rates_mask; 5826 priv->active_rate = priv->rates_mask;
5841 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 5827 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
@@ -5861,9 +5847,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5861 /* Configure the adapter for unassociated operation */ 5847 /* Configure the adapter for unassociated operation */
5862 iwl3945_commit_rxon(priv); 5848 iwl3945_commit_rxon(priv);
5863 5849
5864 /* At this point, the NIC is initialized and operational */
5865 priv->notif_missed_beacons = 0;
5866
5867 iwl3945_reg_txpower_periodic(priv); 5850 iwl3945_reg_txpower_periodic(priv);
5868 5851
5869 iwl3945_led_register(priv); 5852 iwl3945_led_register(priv);
@@ -6147,6 +6130,24 @@ static void iwl3945_bg_rf_kill(struct work_struct *work)
6147 mutex_unlock(&priv->mutex); 6130 mutex_unlock(&priv->mutex);
6148} 6131}
6149 6132
6133static void iwl3945_bg_set_monitor(struct work_struct *work)
6134{
6135 struct iwl3945_priv *priv = container_of(work,
6136 struct iwl3945_priv, set_monitor);
6137
6138 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
6139
6140 mutex_lock(&priv->mutex);
6141
6142 if (!iwl3945_is_ready(priv))
6143 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6144 else
6145 if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
6146 IWL_ERROR("iwl3945_set_mode() failed\n");
6147
6148 mutex_unlock(&priv->mutex);
6149}
6150
6150#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 6151#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6151 6152
6152static void iwl3945_bg_scan_check(struct work_struct *data) 6153static void iwl3945_bg_scan_check(struct work_struct *data)
@@ -6675,8 +6676,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6675 IWL_DEBUG_MAC80211("leave\n"); 6676 IWL_DEBUG_MAC80211("leave\n");
6676} 6677}
6677 6678
6678static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 6679static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6679 struct ieee80211_tx_control *ctl)
6680{ 6680{
6681 struct iwl3945_priv *priv = hw->priv; 6681 struct iwl3945_priv *priv = hw->priv;
6682 6682
@@ -6688,9 +6688,9 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6688 } 6688 }
6689 6689
6690 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6690 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6691 ctl->tx_rate->bitrate); 6691 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6692 6692
6693 if (iwl3945_tx_skb(priv, skb, ctl)) 6693 if (iwl3945_tx_skb(priv, skb))
6694 dev_kfree_skb_any(skb); 6694 dev_kfree_skb_any(skb);
6695 6695
6696 IWL_DEBUG_MAC80211("leave\n"); 6696 IWL_DEBUG_MAC80211("leave\n");
@@ -6999,7 +6999,22 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6999 * XXX: dummy 6999 * XXX: dummy
7000 * see also iwl3945_connection_init_rx_config 7000 * see also iwl3945_connection_init_rx_config
7001 */ 7001 */
7002 *total_flags = 0; 7002 struct iwl3945_priv *priv = hw->priv;
7003 int new_flags = 0;
7004 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
7005 if (*total_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
7006 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
7007 IEEE80211_IF_TYPE_MNTR,
7008 changed_flags, *total_flags);
7009 /* queue work 'cuz mac80211 is holding a lock which
7010 * prevents us from issuing (synchronous) f/w cmds */
7011 queue_work(priv->workqueue, &priv->set_monitor);
7012 new_flags &= FIF_PROMISC_IN_BSS |
7013 FIF_OTHER_BSS |
7014 FIF_ALLMULTI;
7015 }
7016 }
7017 *total_flags = new_flags;
7003} 7018}
7004 7019
7005static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw, 7020static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
@@ -7057,9 +7072,10 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7057 rc = -EAGAIN; 7072 rc = -EAGAIN;
7058 goto out_unlock; 7073 goto out_unlock;
7059 } 7074 }
7060 /* if we just finished scan ask for delay */ 7075 /* if we just finished scan ask for delay for a broadcast scan */
7061 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + 7076 if ((len == 0) && priv->last_scan_jiffies &&
7062 IWL_DELAY_NEXT_SCAN, jiffies)) { 7077 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
7078 jiffies)) {
7063 rc = -EAGAIN; 7079 rc = -EAGAIN;
7064 goto out_unlock; 7080 goto out_unlock;
7065 } 7081 }
@@ -7146,7 +7162,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7146 return rc; 7162 return rc;
7147} 7163}
7148 7164
7149static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue, 7165static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7150 const struct ieee80211_tx_queue_params *params) 7166 const struct ieee80211_tx_queue_params *params)
7151{ 7167{
7152 struct iwl3945_priv *priv = hw->priv; 7168 struct iwl3945_priv *priv = hw->priv;
@@ -7220,9 +7236,9 @@ static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7220 q = &txq->q; 7236 q = &txq->q;
7221 avail = iwl3945_queue_space(q); 7237 avail = iwl3945_queue_space(q);
7222 7238
7223 stats->data[i].len = q->n_window - avail; 7239 stats[i].len = q->n_window - avail;
7224 stats->data[i].limit = q->n_window - q->high_mark; 7240 stats[i].limit = q->n_window - q->high_mark;
7225 stats->data[i].count = q->n_window; 7241 stats[i].count = q->n_window;
7226 7242
7227 } 7243 }
7228 spin_unlock_irqrestore(&priv->lock, flags); 7244 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7311,8 +7327,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7311 7327
7312} 7328}
7313 7329
7314static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 7330static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7315 struct ieee80211_tx_control *control)
7316{ 7331{
7317 struct iwl3945_priv *priv = hw->priv; 7332 struct iwl3945_priv *priv = hw->priv;
7318 unsigned long flags; 7333 unsigned long flags;
@@ -7875,6 +7890,7 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7875 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan); 7890 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7876 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7891 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7877 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7892 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7893 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7878 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate); 7894 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
7879 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7895 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7880 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7896 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
@@ -7997,17 +8013,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7997 8013
7998 priv->ibss_beacon = NULL; 8014 priv->ibss_beacon = NULL;
7999 8015
8000 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 8016 /* Tell mac80211 our characteristics */
8001 * the range of signal quality values that we'll provide. 8017 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
8002 * Negative values for level/noise indicate that we'll provide dBm. 8018 IEEE80211_HW_SIGNAL_DBM |
8003 * For WE, at least, non-0 values here *enable* display of values 8019 IEEE80211_HW_NOISE_DBM;
8004 * in app (iwconfig). */
8005 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8006 hw->max_noise = -20; /* noise level, negative indicates dBm */
8007 hw->max_signal = 100; /* link quality indication (%) */
8008
8009 /* Tell mac80211 our Tx characteristics */
8010 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8011 8020
8012 /* 4 EDCA QOS priorities */ 8021 /* 4 EDCA QOS priorities */
8013 hw->queues = 4; 8022 hw->queues = 4;
@@ -8248,7 +8257,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8248 8257
8249 iwl3945_free_channel_map(priv); 8258 iwl3945_free_channel_map(priv);
8250 iwl3945_free_geos(priv); 8259 iwl3945_free_geos(priv);
8251 8260 kfree(priv->scan);
8252 if (priv->ibss_beacon) 8261 if (priv->ibss_beacon)
8253 dev_kfree_skb(priv->ibss_beacon); 8262 dev_kfree_skb(priv->ibss_beacon);
8254 8263
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 883b42f7e998..c71daec8c746 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -46,14 +46,13 @@
46#include <asm/div64.h> 46#include <asm/div64.h>
47 47
48#include "iwl-eeprom.h" 48#include "iwl-eeprom.h"
49#include "iwl-4965.h" 49#include "iwl-dev.h"
50#include "iwl-core.h" 50#include "iwl-core.h"
51#include "iwl-io.h" 51#include "iwl-io.h"
52#include "iwl-helpers.h" 52#include "iwl-helpers.h"
53#include "iwl-sta.h" 53#include "iwl-sta.h"
54#include "iwl-calib.h"
54 55
55static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
56 struct iwl4965_tx_queue *txq);
57 56
58/****************************************************************************** 57/******************************************************************************
59 * 58 *
@@ -88,22 +87,6 @@ MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT); 87MODULE_AUTHOR(DRV_COPYRIGHT);
89MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
90 89
91__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
92{
93 u16 fc = le16_to_cpu(hdr->frame_control);
94 int hdr_len = ieee80211_get_hdrlen(fc);
95
96 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
97 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
98 return NULL;
99}
100
101static const struct ieee80211_supported_band *iwl4965_get_hw_mode(
102 struct iwl_priv *priv, enum ieee80211_band band)
103{
104 return priv->hw->wiphy->bands[band];
105}
106
107static int iwl4965_is_empty_essid(const char *essid, int essid_len) 90static int iwl4965_is_empty_essid(const char *essid, int essid_len)
108{ 91{
109 /* Single white space is for Linksys APs */ 92 /* Single white space is for Linksys APs */
@@ -144,236 +127,6 @@ static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
144 return escaped; 127 return escaped;
145} 128}
146 129
147/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
148 * DMA services
149 *
150 * Theory of operation
151 *
152 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
153 * of buffer descriptors, each of which points to one or more data buffers for
154 * the device to read from or fill. Driver and device exchange status of each
155 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
156 * entries in each circular buffer, to protect against confusing empty and full
157 * queue states.
158 *
159 * The device reads or writes the data in the queues via the device's several
160 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
161 *
162 * For Tx queue, there are low mark and high mark limits. If, after queuing
163 * the packet for Tx, free space become < low mark, Tx queue stopped. When
164 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
165 * Tx queue resumed.
166 *
167 * The 4965 operates with up to 17 queues: One receive queue, one transmit
168 * queue (#4) for sending commands to the device firmware, and 15 other
169 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
170 *
171 * See more detailed info in iwl-4965-hw.h.
172 ***************************************************/
173
174int iwl4965_queue_space(const struct iwl4965_queue *q)
175{
176 int s = q->read_ptr - q->write_ptr;
177
178 if (q->read_ptr > q->write_ptr)
179 s -= q->n_bd;
180
181 if (s <= 0)
182 s += q->n_window;
183 /* keep some reserve to not confuse empty and full situations */
184 s -= 2;
185 if (s < 0)
186 s = 0;
187 return s;
188}
189
190
191static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
192{
193 return q->write_ptr > q->read_ptr ?
194 (i >= q->read_ptr && i < q->write_ptr) :
195 !(i < q->read_ptr && i >= q->write_ptr);
196}
197
198static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
199{
200 /* This is for scan command, the big buffer at end of command array */
201 if (is_huge)
202 return q->n_window; /* must be power of 2 */
203
204 /* Otherwise, use normal size buffers */
205 return index & (q->n_window - 1);
206}
207
208/**
209 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
210 */
211static int iwl4965_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
212 int count, int slots_num, u32 id)
213{
214 q->n_bd = count;
215 q->n_window = slots_num;
216 q->id = id;
217
218 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
219 * and iwl_queue_dec_wrap are broken. */
220 BUG_ON(!is_power_of_2(count));
221
222 /* slots_num must be power-of-two size, otherwise
223 * get_cmd_index is broken. */
224 BUG_ON(!is_power_of_2(slots_num));
225
226 q->low_mark = q->n_window / 4;
227 if (q->low_mark < 4)
228 q->low_mark = 4;
229
230 q->high_mark = q->n_window / 8;
231 if (q->high_mark < 2)
232 q->high_mark = 2;
233
234 q->write_ptr = q->read_ptr = 0;
235
236 return 0;
237}
238
239/**
240 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
241 */
242static int iwl4965_tx_queue_alloc(struct iwl_priv *priv,
243 struct iwl4965_tx_queue *txq, u32 id)
244{
245 struct pci_dev *dev = priv->pci_dev;
246
247 /* Driver private data, only for Tx (not command) queues,
248 * not shared with device. */
249 if (id != IWL_CMD_QUEUE_NUM) {
250 txq->txb = kmalloc(sizeof(txq->txb[0]) *
251 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
252 if (!txq->txb) {
253 IWL_ERROR("kmalloc for auxiliary BD "
254 "structures failed\n");
255 goto error;
256 }
257 } else
258 txq->txb = NULL;
259
260 /* Circular buffer of transmit frame descriptors (TFDs),
261 * shared with device */
262 txq->bd = pci_alloc_consistent(dev,
263 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
264 &txq->q.dma_addr);
265
266 if (!txq->bd) {
267 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
268 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
269 goto error;
270 }
271 txq->q.id = id;
272
273 return 0;
274
275 error:
276 if (txq->txb) {
277 kfree(txq->txb);
278 txq->txb = NULL;
279 }
280
281 return -ENOMEM;
282}
283
284/**
285 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
286 */
287int iwl4965_tx_queue_init(struct iwl_priv *priv,
288 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
289{
290 struct pci_dev *dev = priv->pci_dev;
291 int len;
292 int rc = 0;
293
294 /*
295 * Alloc buffer array for commands (Tx or other types of commands).
296 * For the command queue (#4), allocate command space + one big
297 * command for scan, since scan command is very huge; the system will
298 * not have two scans at the same time, so only one is needed.
299 * For normal Tx queues (all other queues), no super-size command
300 * space is needed.
301 */
302 len = sizeof(struct iwl_cmd) * slots_num;
303 if (txq_id == IWL_CMD_QUEUE_NUM)
304 len += IWL_MAX_SCAN_SIZE;
305 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
306 if (!txq->cmd)
307 return -ENOMEM;
308
309 /* Alloc driver data array and TFD circular buffer */
310 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
311 if (rc) {
312 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
313
314 return -ENOMEM;
315 }
316 txq->need_update = 0;
317
318 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
319 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
320 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
321
322 /* Initialize queue's high/low-water marks, and head/tail indexes */
323 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
324
325 /* Tell device where to find queue */
326 iwl4965_hw_tx_queue_init(priv, txq);
327
328 return 0;
329}
330
331/**
332 * iwl4965_tx_queue_free - Deallocate DMA queue.
333 * @txq: Transmit queue to deallocate.
334 *
335 * Empty queue by removing and destroying all BD's.
336 * Free all buffers.
337 * 0-fill, but do not free "txq" descriptor structure.
338 */
339void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
340{
341 struct iwl4965_queue *q = &txq->q;
342 struct pci_dev *dev = priv->pci_dev;
343 int len;
344
345 if (q->n_bd == 0)
346 return;
347
348 /* first, empty all BD's */
349 for (; q->write_ptr != q->read_ptr;
350 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
351 iwl4965_hw_txq_free_tfd(priv, txq);
352
353 len = sizeof(struct iwl_cmd) * q->n_window;
354 if (q->id == IWL_CMD_QUEUE_NUM)
355 len += IWL_MAX_SCAN_SIZE;
356
357 /* De-alloc array of command/tx buffers */
358 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
359
360 /* De-alloc circular buffer of TFDs */
361 if (txq->q.n_bd)
362 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
363 txq->q.n_bd, txq->bd, txq->q.dma_addr);
364
365 /* De-alloc array of per-TFD driver data */
366 if (txq->txb) {
367 kfree(txq->txb);
368 txq->txb = NULL;
369 }
370
371 /* 0-fill queue descriptor structure */
372 memset(txq, 0, sizeof(*txq));
373}
374
375const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
376
377/*************** STATION TABLE MANAGEMENT **** 130/*************** STATION TABLE MANAGEMENT ****
378 * mac80211 should be examined to determine if sta_info is duplicating 131 * mac80211 should be examined to determine if sta_info is duplicating
379 * the functionality provided here 132 * the functionality provided here
@@ -381,213 +134,11 @@ const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
381 134
382/**************************************************************/ 135/**************************************************************/
383 136
384#if 0 /* temporary disable till we add real remove station */
385/**
386 * iwl4965_remove_station - Remove driver's knowledge of station.
387 *
388 * NOTE: This does not remove station from device's station table.
389 */
390static u8 iwl4965_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
391{
392 int index = IWL_INVALID_STATION;
393 int i;
394 unsigned long flags;
395
396 spin_lock_irqsave(&priv->sta_lock, flags);
397
398 if (is_ap)
399 index = IWL_AP_ID;
400 else if (is_broadcast_ether_addr(addr))
401 index = priv->hw_params.bcast_sta_id;
402 else
403 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
404 if (priv->stations[i].used &&
405 !compare_ether_addr(priv->stations[i].sta.sta.addr,
406 addr)) {
407 index = i;
408 break;
409 }
410
411 if (unlikely(index == IWL_INVALID_STATION))
412 goto out;
413
414 if (priv->stations[index].used) {
415 priv->stations[index].used = 0;
416 priv->num_stations--;
417 }
418
419 BUG_ON(priv->num_stations < 0);
420
421out:
422 spin_unlock_irqrestore(&priv->sta_lock, flags);
423 return 0;
424}
425#endif
426
427/**
428 * iwl4965_add_station_flags - Add station to tables in driver and device
429 */
430u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
431 int is_ap, u8 flags, void *ht_data)
432{
433 int i;
434 int index = IWL_INVALID_STATION;
435 struct iwl4965_station_entry *station;
436 unsigned long flags_spin;
437 DECLARE_MAC_BUF(mac);
438
439 spin_lock_irqsave(&priv->sta_lock, flags_spin);
440 if (is_ap)
441 index = IWL_AP_ID;
442 else if (is_broadcast_ether_addr(addr))
443 index = priv->hw_params.bcast_sta_id;
444 else
445 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
446 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
447 addr)) {
448 index = i;
449 break;
450 }
451
452 if (!priv->stations[i].used &&
453 index == IWL_INVALID_STATION)
454 index = i;
455 }
456
457
458 /* These two conditions have the same outcome, but keep them separate
459 since they have different meanings */
460 if (unlikely(index == IWL_INVALID_STATION)) {
461 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
462 return index;
463 }
464
465 if (priv->stations[index].used &&
466 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
467 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
468 return index;
469 }
470 137
471 138
472 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
473 station = &priv->stations[index];
474 station->used = 1;
475 priv->num_stations++;
476
477 /* Set up the REPLY_ADD_STA command to send to device */
478 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
479 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
480 station->sta.mode = 0;
481 station->sta.sta.sta_id = index;
482 station->sta.station_flags = 0;
483
484#ifdef CONFIG_IWL4965_HT
485 /* BCAST station and IBSS stations do not work in HT mode */
486 if (index != priv->hw_params.bcast_sta_id &&
487 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
488 iwl4965_set_ht_add_station(priv, index,
489 (struct ieee80211_ht_info *) ht_data);
490#endif /*CONFIG_IWL4965_HT*/
491
492 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
493
494 /* Add station to device's station table */
495 iwl4965_send_add_station(priv, &station->sta, flags);
496 return index;
497
498}
499
500
501
502/*************** HOST COMMAND QUEUE FUNCTIONS *****/
503
504/**
505 * iwl4965_enqueue_hcmd - enqueue a uCode command
506 * @priv: device private data point
507 * @cmd: a point to the ucode command structure
508 *
509 * The function returns < 0 values to indicate the operation is
510 * failed. On success, it turns the index (> 0) of command in the
511 * command queue.
512 */
513int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
514{
515 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
516 struct iwl4965_queue *q = &txq->q;
517 struct iwl4965_tfd_frame *tfd;
518 u32 *control_flags;
519 struct iwl_cmd *out_cmd;
520 u32 idx;
521 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
522 dma_addr_t phys_addr;
523 int ret;
524 unsigned long flags;
525
526 /* If any of the command structures end up being larger than
527 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
528 * we will need to increase the size of the TFD entries */
529 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
530 !(cmd->meta.flags & CMD_SIZE_HUGE));
531
532 if (iwl_is_rfkill(priv)) {
533 IWL_DEBUG_INFO("Not sending command - RF KILL");
534 return -EIO;
535 }
536
537 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
538 IWL_ERROR("No space for Tx\n");
539 return -ENOSPC;
540 }
541
542 spin_lock_irqsave(&priv->hcmd_lock, flags);
543
544 tfd = &txq->bd[q->write_ptr];
545 memset(tfd, 0, sizeof(*tfd));
546
547 control_flags = (u32 *) tfd;
548
549 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
550 out_cmd = &txq->cmd[idx];
551
552 out_cmd->hdr.cmd = cmd->id;
553 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
554 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
555
556 /* At this point, the out_cmd now has all of the incoming cmd
557 * information */
558
559 out_cmd->hdr.flags = 0;
560 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
561 INDEX_TO_SEQ(q->write_ptr));
562 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
563 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
564
565 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
566 offsetof(struct iwl_cmd, hdr);
567 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
568
569 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
570 "%d bytes at %d[%d]:%d\n",
571 get_cmd_string(out_cmd->hdr.cmd),
572 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
573 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
574
575 txq->need_update = 1;
576
577 /* Set up entry in queue's byte count circular buffer */
578 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
579
580 /* Increment and update queue's write index */
581 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
582 ret = iwl4965_tx_queue_update_write_ptr(priv, txq);
583
584 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
585 return ret ? ret : idx;
586}
587
588static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) 139static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
589{ 140{
590 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 141 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
591 142
592 if (hw_decrypt) 143 if (hw_decrypt)
593 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 144 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@ -597,45 +148,13 @@ static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
597} 148}
598 149
599/** 150/**
600 * iwl4965_rxon_add_station - add station into station table.
601 *
602 * there is only one AP station with id= IWL_AP_ID
603 * NOTE: mutex must be held before calling this fnction
604 */
605static int iwl4965_rxon_add_station(struct iwl_priv *priv,
606 const u8 *addr, int is_ap)
607{
608 u8 sta_id;
609
610 /* Add station to device's station table */
611#ifdef CONFIG_IWL4965_HT
612 struct ieee80211_conf *conf = &priv->hw->conf;
613 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
614
615 if ((is_ap) &&
616 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
617 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
618 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
619 0, cur_ht_config);
620 else
621#endif /* CONFIG_IWL4965_HT */
622 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
623 0, NULL);
624
625 /* Set up default rate scaling table in device's station table */
626 iwl4965_add_station(priv, addr, is_ap);
627
628 return sta_id;
629}
630
631/**
632 * iwl4965_check_rxon_cmd - validate RXON structure is valid 151 * iwl4965_check_rxon_cmd - validate RXON structure is valid
633 * 152 *
634 * NOTE: This is really only useful during development and can eventually 153 * NOTE: This is really only useful during development and can eventually
635 * be #ifdef'd out once the driver is stable and folks aren't actively 154 * be #ifdef'd out once the driver is stable and folks aren't actively
636 * making changes 155 * making changes
637 */ 156 */
638static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon) 157static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
639{ 158{
640 int error = 0; 159 int error = 0;
641 int counter = 1; 160 int counter = 1;
@@ -760,7 +279,7 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
760static int iwl4965_commit_rxon(struct iwl_priv *priv) 279static int iwl4965_commit_rxon(struct iwl_priv *priv)
761{ 280{
762 /* cast away the const for active_rxon in this function */ 281 /* cast away the const for active_rxon in this function */
763 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 282 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
764 DECLARE_MAC_BUF(mac); 283 DECLARE_MAC_BUF(mac);
765 int rc = 0; 284 int rc = 0;
766 285
@@ -795,14 +314,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
795 /* station table will be cleared */ 314 /* station table will be cleared */
796 priv->assoc_station_added = 0; 315 priv->assoc_station_added = 0;
797 316
798#ifdef CONFIG_IWL4965_SENSITIVITY
799 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
800 if (!priv->error_recovering)
801 priv->start_calib = 0;
802
803 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
804#endif /* CONFIG_IWL4965_SENSITIVITY */
805
806 /* If we are currently associated and the new config requires 317 /* If we are currently associated and the new config requires
807 * an RXON_ASSOC and the new config wants the associated mask enabled, 318 * an RXON_ASSOC and the new config wants the associated mask enabled,
808 * we must clear the associated from the active configuration 319 * we must clear the associated from the active configuration
@@ -813,7 +324,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
813 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 324 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
814 325
815 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 326 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
816 sizeof(struct iwl4965_rxon_cmd), 327 sizeof(struct iwl_rxon_cmd),
817 &priv->active_rxon); 328 &priv->active_rxon);
818 329
819 /* If the mask clearing failed then we set 330 /* If the mask clearing failed then we set
@@ -835,24 +346,22 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
835 le16_to_cpu(priv->staging_rxon.channel), 346 le16_to_cpu(priv->staging_rxon.channel),
836 print_mac(mac, priv->staging_rxon.bssid_addr)); 347 print_mac(mac, priv->staging_rxon.bssid_addr));
837 348
838 iwl4965_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 349 iwl4965_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
839 /* Apply the new configuration */ 350 /* Apply the new configuration */
840 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 351 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
841 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon); 352 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
842 if (rc) { 353 if (rc) {
843 IWL_ERROR("Error setting new configuration (%d).\n", rc); 354 IWL_ERROR("Error setting new configuration (%d).\n", rc);
844 return rc; 355 return rc;
845 } 356 }
846 357
358 iwl_remove_station(priv, iwl_bcast_addr, 0);
847 iwlcore_clear_stations_table(priv); 359 iwlcore_clear_stations_table(priv);
848 360
849#ifdef CONFIG_IWL4965_SENSITIVITY
850 if (!priv->error_recovering) 361 if (!priv->error_recovering)
851 priv->start_calib = 0; 362 priv->start_calib = 0;
852 363
853 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; 364 iwl_init_sensitivity(priv);
854 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
855#endif /* CONFIG_IWL4965_SENSITIVITY */
856 365
857 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 366 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
858 367
@@ -865,7 +374,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
865 } 374 }
866 375
867 /* Add the broadcast address so we can send broadcast frames */ 376 /* Add the broadcast address so we can send broadcast frames */
868 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) == 377 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
869 IWL_INVALID_STATION) { 378 IWL_INVALID_STATION) {
870 IWL_ERROR("Error adding BROADCAST address for transmit.\n"); 379 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
871 return -EIO; 380 return -EIO;
@@ -875,7 +384,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
875 * add the IWL_AP_ID to the station rate table */ 384 * add the IWL_AP_ID to the station rate table */
876 if (iwl_is_associated(priv) && 385 if (iwl_is_associated(priv) &&
877 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { 386 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
878 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) 387 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
879 == IWL_INVALID_STATION) { 388 == IWL_INVALID_STATION) {
880 IWL_ERROR("Error adding AP address for transmit.\n"); 389 IWL_ERROR("Error adding AP address for transmit.\n");
881 return -EIO; 390 return -EIO;
@@ -889,6 +398,13 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
889 return 0; 398 return 0;
890} 399}
891 400
401void iwl4965_update_chain_flags(struct iwl_priv *priv)
402{
403
404 iwl_set_rxon_chain(priv);
405 iwl4965_commit_rxon(priv);
406}
407
892static int iwl4965_send_bt_config(struct iwl_priv *priv) 408static int iwl4965_send_bt_config(struct iwl_priv *priv)
893{ 409{
894 struct iwl4965_bt_cmd bt_cmd = { 410 struct iwl4965_bt_cmd bt_cmd = {
@@ -905,8 +421,8 @@ static int iwl4965_send_bt_config(struct iwl_priv *priv)
905 421
906static int iwl4965_send_scan_abort(struct iwl_priv *priv) 422static int iwl4965_send_scan_abort(struct iwl_priv *priv)
907{ 423{
908 int rc = 0; 424 int ret = 0;
909 struct iwl4965_rx_packet *res; 425 struct iwl_rx_packet *res;
910 struct iwl_host_cmd cmd = { 426 struct iwl_host_cmd cmd = {
911 .id = REPLY_SCAN_ABORT_CMD, 427 .id = REPLY_SCAN_ABORT_CMD,
912 .meta.flags = CMD_WANT_SKB, 428 .meta.flags = CMD_WANT_SKB,
@@ -920,13 +436,13 @@ static int iwl4965_send_scan_abort(struct iwl_priv *priv)
920 return 0; 436 return 0;
921 } 437 }
922 438
923 rc = iwl_send_cmd_sync(priv, &cmd); 439 ret = iwl_send_cmd_sync(priv, &cmd);
924 if (rc) { 440 if (ret) {
925 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 441 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
926 return rc; 442 return ret;
927 } 443 }
928 444
929 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data; 445 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
930 if (res->u.status != CAN_ABORT_STATUS) { 446 if (res->u.status != CAN_ABORT_STATUS) {
931 /* The scan abort will return 1 for success or 447 /* The scan abort will return 1 for success or
932 * 2 for "failure". A failure condition can be 448 * 2 for "failure". A failure condition can be
@@ -941,14 +457,7 @@ static int iwl4965_send_scan_abort(struct iwl_priv *priv)
941 457
942 dev_kfree_skb_any(cmd.meta.u.skb); 458 dev_kfree_skb_any(cmd.meta.u.skb);
943 459
944 return rc; 460 return ret;
945}
946
947static int iwl4965_card_state_sync_callback(struct iwl_priv *priv,
948 struct iwl_cmd *cmd,
949 struct sk_buff *skb)
950{
951 return 1;
952} 461}
953 462
954/* 463/*
@@ -970,88 +479,10 @@ static int iwl4965_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_fla
970 .meta.flags = meta_flag, 479 .meta.flags = meta_flag,
971 }; 480 };
972 481
973 if (meta_flag & CMD_ASYNC)
974 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
975
976 return iwl_send_cmd(priv, &cmd); 482 return iwl_send_cmd(priv, &cmd);
977} 483}
978 484
979static int iwl4965_add_sta_sync_callback(struct iwl_priv *priv, 485static void iwl_clear_free_frames(struct iwl_priv *priv)
980 struct iwl_cmd *cmd, struct sk_buff *skb)
981{
982 struct iwl4965_rx_packet *res = NULL;
983
984 if (!skb) {
985 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
986 return 1;
987 }
988
989 res = (struct iwl4965_rx_packet *)skb->data;
990 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
991 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
992 res->hdr.flags);
993 return 1;
994 }
995
996 switch (res->u.add_sta.status) {
997 case ADD_STA_SUCCESS_MSK:
998 break;
999 default:
1000 break;
1001 }
1002
1003 /* We didn't cache the SKB; let the caller free it */
1004 return 1;
1005}
1006
1007int iwl4965_send_add_station(struct iwl_priv *priv,
1008 struct iwl4965_addsta_cmd *sta, u8 flags)
1009{
1010 struct iwl4965_rx_packet *res = NULL;
1011 int rc = 0;
1012 struct iwl_host_cmd cmd = {
1013 .id = REPLY_ADD_STA,
1014 .len = sizeof(struct iwl4965_addsta_cmd),
1015 .meta.flags = flags,
1016 .data = sta,
1017 };
1018
1019 if (flags & CMD_ASYNC)
1020 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1021 else
1022 cmd.meta.flags |= CMD_WANT_SKB;
1023
1024 rc = iwl_send_cmd(priv, &cmd);
1025
1026 if (rc || (flags & CMD_ASYNC))
1027 return rc;
1028
1029 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1030 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1031 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1032 res->hdr.flags);
1033 rc = -EIO;
1034 }
1035
1036 if (rc == 0) {
1037 switch (res->u.add_sta.status) {
1038 case ADD_STA_SUCCESS_MSK:
1039 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1040 break;
1041 default:
1042 rc = -EIO;
1043 IWL_WARNING("REPLY_ADD_STA failed\n");
1044 break;
1045 }
1046 }
1047
1048 priv->alloc_rxb_skb--;
1049 dev_kfree_skb_any(cmd.meta.u.skb);
1050
1051 return rc;
1052}
1053
1054static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1055{ 486{
1056 struct list_head *element; 487 struct list_head *element;
1057 488
@@ -1061,7 +492,7 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1061 while (!list_empty(&priv->free_frames)) { 492 while (!list_empty(&priv->free_frames)) {
1062 element = priv->free_frames.next; 493 element = priv->free_frames.next;
1063 list_del(element); 494 list_del(element);
1064 kfree(list_entry(element, struct iwl4965_frame, list)); 495 kfree(list_entry(element, struct iwl_frame, list));
1065 priv->frames_count--; 496 priv->frames_count--;
1066 } 497 }
1067 498
@@ -1072,9 +503,9 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1072 } 503 }
1073} 504}
1074 505
1075static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv) 506static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1076{ 507{
1077 struct iwl4965_frame *frame; 508 struct iwl_frame *frame;
1078 struct list_head *element; 509 struct list_head *element;
1079 if (list_empty(&priv->free_frames)) { 510 if (list_empty(&priv->free_frames)) {
1080 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 511 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
@@ -1089,10 +520,10 @@ static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
1089 520
1090 element = priv->free_frames.next; 521 element = priv->free_frames.next;
1091 list_del(element); 522 list_del(element);
1092 return list_entry(element, struct iwl4965_frame, list); 523 return list_entry(element, struct iwl_frame, list);
1093} 524}
1094 525
1095static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl4965_frame *frame) 526static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1096{ 527{
1097 memset(frame, 0, sizeof(*frame)); 528 memset(frame, 0, sizeof(*frame));
1098 list_add(&frame->list, &priv->free_frames); 529 list_add(&frame->list, &priv->free_frames);
@@ -1116,27 +547,39 @@ unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
1116 return priv->ibss_beacon->len; 547 return priv->ibss_beacon->len;
1117} 548}
1118 549
1119static u8 iwl4965_rate_get_lowest_plcp(int rate_mask) 550static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
1120{ 551{
1121 u8 i; 552 int i;
553 int rate_mask;
1122 554
555 /* Set rate mask*/
556 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
557 rate_mask = priv->active_rate_basic & 0xF;
558 else
559 rate_mask = priv->active_rate_basic & 0xFF0;
560
561 /* Find lowest valid rate */
1123 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; 562 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1124 i = iwl4965_rates[i].next_ieee) { 563 i = iwl_rates[i].next_ieee) {
1125 if (rate_mask & (1 << i)) 564 if (rate_mask & (1 << i))
1126 return iwl4965_rates[i].plcp; 565 return iwl_rates[i].plcp;
1127 } 566 }
1128 567
1129 return IWL_RATE_INVALID; 568 /* No valid rate was found. Assign the lowest one */
569 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
570 return IWL_RATE_1M_PLCP;
571 else
572 return IWL_RATE_6M_PLCP;
1130} 573}
1131 574
1132static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 575static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1133{ 576{
1134 struct iwl4965_frame *frame; 577 struct iwl_frame *frame;
1135 unsigned int frame_size; 578 unsigned int frame_size;
1136 int rc; 579 int rc;
1137 u8 rate; 580 u8 rate;
1138 581
1139 frame = iwl4965_get_free_frame(priv); 582 frame = iwl_get_free_frame(priv);
1140 583
1141 if (!frame) { 584 if (!frame) {
1142 IWL_ERROR("Could not obtain free frame buffer for beacon " 585 IWL_ERROR("Could not obtain free frame buffer for beacon "
@@ -1144,23 +587,14 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1144 return -ENOMEM; 587 return -ENOMEM;
1145 } 588 }
1146 589
1147 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { 590 rate = iwl4965_rate_get_lowest_plcp(priv);
1148 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1149 0xFF0);
1150 if (rate == IWL_INVALID_RATE)
1151 rate = IWL_RATE_6M_PLCP;
1152 } else {
1153 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1154 if (rate == IWL_INVALID_RATE)
1155 rate = IWL_RATE_1M_PLCP;
1156 }
1157 591
1158 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate); 592 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1159 593
1160 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 594 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1161 &frame->u.cmd[0]); 595 &frame->u.cmd[0]);
1162 596
1163 iwl4965_free_frame(priv, frame); 597 iwl_free_frame(priv, frame);
1164 598
1165 return rc; 599 return rc;
1166} 600}
@@ -1171,15 +605,6 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1171 * 605 *
1172 ******************************************************************************/ 606 ******************************************************************************/
1173 607
1174static void iwl4965_unset_hw_params(struct iwl_priv *priv)
1175{
1176 if (priv->shared_virt)
1177 pci_free_consistent(priv->pci_dev,
1178 sizeof(struct iwl4965_shared),
1179 priv->shared_virt,
1180 priv->shared_phys);
1181}
1182
1183/** 608/**
1184 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field 609 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
1185 * 610 *
@@ -1196,7 +621,7 @@ static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1196 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { 621 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1197 if (bit & supported_rate) { 622 if (bit & supported_rate) {
1198 ret_rates |= bit; 623 ret_rates |= bit;
1199 rates[*cnt] = iwl4965_rates[i].ieee | 624 rates[*cnt] = iwl_rates[i].ieee |
1200 ((bit & basic_rate) ? 0x80 : 0x00); 625 ((bit & basic_rate) ? 0x80 : 0x00);
1201 (*cnt)++; 626 (*cnt)++;
1202 (*left)--; 627 (*left)--;
@@ -1209,6 +634,91 @@ static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1209 return ret_rates; 634 return ret_rates;
1210} 635}
1211 636
637#ifdef CONFIG_IWL4965_HT
638static void iwl4965_ht_conf(struct iwl_priv *priv,
639 struct ieee80211_bss_conf *bss_conf)
640{
641 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
642 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
643 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
644
645 IWL_DEBUG_MAC80211("enter: \n");
646
647 iwl_conf->is_ht = bss_conf->assoc_ht;
648
649 if (!iwl_conf->is_ht)
650 return;
651
652 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
653
654 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
655 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
656 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
657 iwl_conf->sgf |= HT_SHORT_GI_40MHZ;
658
659 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
660 iwl_conf->max_amsdu_size =
661 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
662
663 iwl_conf->supported_chan_width =
664 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
665 iwl_conf->extension_chan_offset =
666 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
667 /* If no above or below channel supplied disable FAT channel */
668 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
669 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
670 iwl_conf->supported_chan_width = 0;
671
672 iwl_conf->tx_mimo_ps_mode =
673 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
674 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
675
676 iwl_conf->control_channel = ht_bss_conf->primary_channel;
677 iwl_conf->tx_chan_width =
678 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
679 iwl_conf->ht_protection =
680 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
681 iwl_conf->non_GF_STA_present =
682 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
683
684 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
685 IWL_DEBUG_MAC80211("leave\n");
686}
687
688static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
689 u8 *pos, int *left)
690{
691 struct ieee80211_ht_cap *ht_cap;
692
693 if (!sband || !sband->ht_info.ht_supported)
694 return;
695
696 if (*left < sizeof(struct ieee80211_ht_cap))
697 return;
698
699 *pos++ = sizeof(struct ieee80211_ht_cap);
700 ht_cap = (struct ieee80211_ht_cap *) pos;
701
702 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
703 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
704 ht_cap->ampdu_params_info =
705 (sband->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
706 ((sband->ht_info.ampdu_density << 2) &
707 IEEE80211_HT_CAP_AMPDU_DENSITY);
708 *left -= sizeof(struct ieee80211_ht_cap);
709}
710#else
711static inline void iwl4965_ht_conf(struct iwl_priv *priv,
712 struct ieee80211_bss_conf *bss_conf)
713{
714}
715static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
716 u8 *pos, int *left)
717{
718}
719#endif
720
721
1212/** 722/**
1213 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request 723 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
1214 */ 724 */
@@ -1220,10 +730,8 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1220 int len = 0; 730 int len = 0;
1221 u8 *pos = NULL; 731 u8 *pos = NULL;
1222 u16 active_rates, ret_rates, cck_rates, active_rate_basic; 732 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
1223#ifdef CONFIG_IWL4965_HT
1224 const struct ieee80211_supported_band *sband = 733 const struct ieee80211_supported_band *sband =
1225 iwl4965_get_hw_mode(priv, band); 734 iwl_get_hw_mode(priv, band);
1226#endif /* CONFIG_IWL4965_HT */
1227 735
1228 /* Make sure there is enough space for the probe request, 736 /* Make sure there is enough space for the probe request,
1229 * two mandatory IEs and the data */ 737 * two mandatory IEs and the data */
@@ -1233,9 +741,9 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1233 len += 24; 741 len += 24;
1234 742
1235 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 743 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1236 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN); 744 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
1237 memcpy(frame->sa, priv->mac_addr, ETH_ALEN); 745 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1238 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN); 746 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
1239 frame->seq_ctrl = 0; 747 frame->seq_ctrl = 0;
1240 748
1241 /* fill in our indirect SSID IE */ 749 /* fill in our indirect SSID IE */
@@ -1306,24 +814,19 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1306 if (*pos > 0) 814 if (*pos > 0)
1307 len += 2 + *pos; 815 len += 2 + *pos;
1308 816
1309#ifdef CONFIG_IWL4965_HT
1310 if (sband && sband->ht_info.ht_supported) {
1311 struct ieee80211_ht_cap *ht_cap;
1312 pos += (*pos) + 1;
1313 *pos++ = WLAN_EID_HT_CAPABILITY;
1314 *pos++ = sizeof(struct ieee80211_ht_cap);
1315 ht_cap = (struct ieee80211_ht_cap *)pos;
1316 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
1317 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
1318 ht_cap->ampdu_params_info =(sband->ht_info.ampdu_factor &
1319 IEEE80211_HT_CAP_AMPDU_FACTOR) |
1320 ((sband->ht_info.ampdu_density << 2) &
1321 IEEE80211_HT_CAP_AMPDU_DENSITY);
1322 len += 2 + sizeof(struct ieee80211_ht_cap);
1323 }
1324#endif /*CONFIG_IWL4965_HT */
1325
1326 fill_end: 817 fill_end:
818 /* fill in HT IE */
819 left -= 2;
820 if (left < 0)
821 return 0;
822
823 *pos++ = WLAN_EID_HT_CAPABILITY;
824 *pos = 0;
825
826 iwl_ht_cap_to_ie(sband, pos, &left);
827
828 if (*pos > 0)
829 len += 2 + *pos;
1327 return (u16)len; 830 return (u16)len;
1328} 831}
1329 832
@@ -1376,184 +879,6 @@ static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
1376 } 879 }
1377} 880}
1378 881
1379/*
1380 * Power management (not Tx power!) functions
1381 */
1382#define MSEC_TO_USEC 1024
1383
1384#define NOSLP __constant_cpu_to_le16(0), 0, 0
1385#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
1386#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1387#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1388 __constant_cpu_to_le32(X1), \
1389 __constant_cpu_to_le32(X2), \
1390 __constant_cpu_to_le32(X3), \
1391 __constant_cpu_to_le32(X4)}
1392
1393
1394/* default power management (not Tx power) table values */
1395/* for tim 0-10 */
1396static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
1397 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1398 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1399 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1400 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1401 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1402 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1403};
1404
1405/* for tim > 10 */
1406static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
1407 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1408 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1409 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1410 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1411 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1412 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1413 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1414 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1415 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1416 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1417};
1418
1419int iwl4965_power_init_handle(struct iwl_priv *priv)
1420{
1421 int rc = 0, i;
1422 struct iwl4965_power_mgr *pow_data;
1423 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
1424 u16 pci_pm;
1425
1426 IWL_DEBUG_POWER("Initialize power \n");
1427
1428 pow_data = &(priv->power_data);
1429
1430 memset(pow_data, 0, sizeof(*pow_data));
1431
1432 pow_data->active_index = IWL_POWER_RANGE_0;
1433 pow_data->dtim_val = 0xffff;
1434
1435 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1436 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1437
1438 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1439 if (rc != 0)
1440 return 0;
1441 else {
1442 struct iwl4965_powertable_cmd *cmd;
1443
1444 IWL_DEBUG_POWER("adjust power command flags\n");
1445
1446 for (i = 0; i < IWL_POWER_AC; i++) {
1447 cmd = &pow_data->pwr_range_0[i].cmd;
1448
1449 if (pci_pm & 0x1)
1450 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1451 else
1452 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1453 }
1454 }
1455 return rc;
1456}
1457
1458static int iwl4965_update_power_cmd(struct iwl_priv *priv,
1459 struct iwl4965_powertable_cmd *cmd, u32 mode)
1460{
1461 int rc = 0, i;
1462 u8 skip;
1463 u32 max_sleep = 0;
1464 struct iwl4965_power_vec_entry *range;
1465 u8 period = 0;
1466 struct iwl4965_power_mgr *pow_data;
1467
1468 if (mode > IWL_POWER_INDEX_5) {
1469 IWL_DEBUG_POWER("Error invalid power mode \n");
1470 return -1;
1471 }
1472 pow_data = &(priv->power_data);
1473
1474 if (pow_data->active_index == IWL_POWER_RANGE_0)
1475 range = &pow_data->pwr_range_0[0];
1476 else
1477 range = &pow_data->pwr_range_1[1];
1478
1479 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
1480
1481#ifdef IWL_MAC80211_DISABLE
1482 if (priv->assoc_network != NULL) {
1483 unsigned long flags;
1484
1485 period = priv->assoc_network->tim.tim_period;
1486 }
1487#endif /*IWL_MAC80211_DISABLE */
1488 skip = range[mode].no_dtim;
1489
1490 if (period == 0) {
1491 period = 1;
1492 skip = 0;
1493 }
1494
1495 if (skip == 0) {
1496 max_sleep = period;
1497 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1498 } else {
1499 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1500 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1501 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1502 }
1503
1504 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1505 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1506 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1507 }
1508
1509 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1510 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1511 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1512 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1513 le32_to_cpu(cmd->sleep_interval[0]),
1514 le32_to_cpu(cmd->sleep_interval[1]),
1515 le32_to_cpu(cmd->sleep_interval[2]),
1516 le32_to_cpu(cmd->sleep_interval[3]),
1517 le32_to_cpu(cmd->sleep_interval[4]));
1518
1519 return rc;
1520}
1521
1522static int iwl4965_send_power_mode(struct iwl_priv *priv, u32 mode)
1523{
1524 u32 uninitialized_var(final_mode);
1525 int rc;
1526 struct iwl4965_powertable_cmd cmd;
1527
1528 /* If on battery, set to 3,
1529 * if plugged into AC power, set to CAM ("continuously aware mode"),
1530 * else user level */
1531 switch (mode) {
1532 case IWL_POWER_BATTERY:
1533 final_mode = IWL_POWER_INDEX_3;
1534 break;
1535 case IWL_POWER_AC:
1536 final_mode = IWL_POWER_MODE_CAM;
1537 break;
1538 default:
1539 final_mode = mode;
1540 break;
1541 }
1542
1543 cmd.keep_alive_beacons = 0;
1544
1545 iwl4965_update_power_cmd(priv, &cmd, final_mode);
1546
1547 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
1548
1549 if (final_mode == IWL_POWER_MODE_CAM)
1550 clear_bit(STATUS_POWER_PMI, &priv->status);
1551 else
1552 set_bit(STATUS_POWER_PMI, &priv->status);
1553
1554 return rc;
1555}
1556
1557int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) 882int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
1558{ 883{
1559 /* Filter incoming packets to determine if they are targeted toward 884 /* Filter incoming packets to determine if they are targeted toward
@@ -1584,33 +909,7 @@ int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *heade
1584 return 1; 909 return 1;
1585} 910}
1586 911
1587#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1588
1589static const char *iwl4965_get_tx_fail_reason(u32 status)
1590{
1591 switch (status & TX_STATUS_MSK) {
1592 case TX_STATUS_SUCCESS:
1593 return "SUCCESS";
1594 TX_STATUS_ENTRY(SHORT_LIMIT);
1595 TX_STATUS_ENTRY(LONG_LIMIT);
1596 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1597 TX_STATUS_ENTRY(MGMNT_ABORT);
1598 TX_STATUS_ENTRY(NEXT_FRAG);
1599 TX_STATUS_ENTRY(LIFE_EXPIRE);
1600 TX_STATUS_ENTRY(DEST_PS);
1601 TX_STATUS_ENTRY(ABORTED);
1602 TX_STATUS_ENTRY(BT_RETRY);
1603 TX_STATUS_ENTRY(STA_INVALID);
1604 TX_STATUS_ENTRY(FRAG_DROPPED);
1605 TX_STATUS_ENTRY(TID_DISABLE);
1606 TX_STATUS_ENTRY(FRAME_FLUSHED);
1607 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1608 TX_STATUS_ENTRY(TX_LOCKED);
1609 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1610 }
1611 912
1612 return "UNKNOWN";
1613}
1614 913
1615/** 914/**
1616 * iwl4965_scan_cancel - Cancel any currently executing HW scan 915 * iwl4965_scan_cancel - Cancel any currently executing HW scan
@@ -1785,8 +1084,8 @@ static int iwl4965_scan_initiate(struct iwl_priv *priv)
1785} 1084}
1786 1085
1787 1086
1788static void iwl4965_set_flags_for_phymode(struct iwl_priv *priv, 1087static void iwl_set_flags_for_band(struct iwl_priv *priv,
1789 enum ieee80211_band band) 1088 enum ieee80211_band band)
1790{ 1089{
1791 if (band == IEEE80211_BAND_5GHZ) { 1090 if (band == IEEE80211_BAND_5GHZ) {
1792 priv->staging_rxon.flags &= 1091 priv->staging_rxon.flags &=
@@ -1871,7 +1170,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1871 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 1170 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1872 priv->band = ch_info->band; 1171 priv->band = ch_info->band;
1873 1172
1874 iwl4965_set_flags_for_phymode(priv, priv->band); 1173 iwl_set_flags_for_band(priv, priv->band);
1875 1174
1876 priv->staging_rxon.ofdm_basic_rates = 1175 priv->staging_rxon.ofdm_basic_rates =
1877 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 1176 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1884,7 +1183,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1884 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); 1183 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1885 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; 1184 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1886 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; 1185 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1887 iwl4965_set_rxon_chain(priv); 1186 iwl_set_rxon_chain(priv);
1888} 1187}
1889 1188
1890static int iwl4965_set_mode(struct iwl_priv *priv, int mode) 1189static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
@@ -1926,448 +1225,13 @@ static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
1926 return 0; 1225 return 0;
1927} 1226}
1928 1227
/*
 * iwl4965_build_tx_cmd_hwcrypto - fill the Tx command's security fields
 *
 * Copies the destination station's key material (CCMP/TKIP/WEP) from the
 * driver's station table into the REPLY_TX command so the uCode can encrypt
 * the frame in hardware.
 *
 * NOTE(review): priv->stations[sta_id].keyinfo is read here without an
 * explicit lock -- presumably the caller serializes against station-table
 * updates; confirm.
 */
1929static void iwl4965_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
1930				      struct ieee80211_tx_control *ctl,
1931				      struct iwl_cmd *cmd,
1932				      struct sk_buff *skb_frag,
1933				      int sta_id)
1934{
1935	struct iwl4965_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
1936	struct iwl_wep_key *wepkey;
1937	int keyidx = 0;
1938
	/* Hardware only supports key indices 0-3 */
1939	BUG_ON(ctl->key_idx > 3);
1940
1941	switch (keyinfo->alg) {
1942	case ALG_CCMP:
1943		cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
1944		memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
1945		if (ctl->flags & IEEE80211_TXCTL_AMPDU)
1946			cmd->cmd.tx.tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1947		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
1948		break;
1949
1950	case ALG_TKIP:
		/* mac80211 derives the per-packet phase-2 key for us */
1951		cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
1952		ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
1953			IEEE80211_TKIP_P2_KEY, cmd->cmd.tx.key);
1954		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
1955		break;
1956
1957	case ALG_WEP:
1958		wepkey = &priv->wep_keys[ctl->key_idx];
1959		cmd->cmd.tx.sec_ctl = 0;
1960		if (priv->default_wep_key) {
1961			/* the WEP key was sent as static */
1962			keyidx = ctl->key_idx;
			/* key bytes start at offset 3 of tx.key -- presumably
			 * room for the IV; confirm against the uCode API */
1963			memcpy(&cmd->cmd.tx.key[3], wepkey->key,
1964					wepkey->key_size);
1965			if (wepkey->key_size == WEP_KEY_LEN_128)
1966				cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1967		} else {
1968			/* the WEP key was sent as dynamic */
1969			keyidx = keyinfo->keyidx;
1970			memcpy(&cmd->cmd.tx.key[3], keyinfo->key,
1971					keyinfo->keylen);
1972			if (keyinfo->keylen == WEP_KEY_LEN_128)
1973				cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1974		}
1975
		/* Encode the chosen key index into the security control byte */
1976		cmd->cmd.tx.sec_ctl |= (TX_CMD_SEC_WEP |
1977			(keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
1978
1979		IWL_DEBUG_TX("Configuring packet for WEP encryption "
1980			     "with key %d\n", keyidx);
1981		break;
1982
1983	default:
		/* Unknown algorithm: leave sec_ctl untouched, just log */
1984		printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
1985		break;
1986	}
1987}
1988
1989/*
1990 * handle build REPLY_TX command notification.
 *
 * Fills the non-rate, non-security fields of the Tx command from the frame
 * header and mac80211's control flags: ACK policy, sequence-control handling,
 * RTS/CTS protection, TID, and the power-management frame timeout.
 *
 * @is_unicast is accepted but not referenced in this function body.
1991 */
1992static void iwl4965_build_tx_cmd_basic(struct iwl_priv *priv,
1993				  struct iwl_cmd *cmd,
1994				  struct ieee80211_tx_control *ctrl,
1995				  struct ieee80211_hdr *hdr,
1996				  int is_unicast, u8 std_id)
1997{
1998	__le16 *qc;
1999	u16 fc = le16_to_cpu(hdr->frame_control);
	/* NOTE(review): tx_flags is declared __le32 but manipulated with
	 * host-order masks throughout -- presumably the mask constants are
	 * already little-endian encoded; confirm against iwl-commands.h. */
2000	__le32 tx_flags = cmd->cmd.tx.tx_flags;
2001
2002	cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2003	if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2004		tx_flags |= TX_CMD_FLG_ACK_MSK;
2005		if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2006			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response carries the TSF */
2007		if (ieee80211_is_probe_response(fc) &&
2008		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2009			tx_flags |= TX_CMD_FLG_TSF_MSK;
2010	} else {
2011		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2012		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2013	}
2014
	/* BAR frames need an ACK and an immediate block-ack response */
2015	if (ieee80211_is_back_request(fc))
2016		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2017
2018
2019	cmd->cmd.tx.sta_id = std_id;
2020	if (ieee80211_get_morefrag(hdr))
2021		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2022
	/* QoS frames: uCode manages the sequence number per TID */
2023	qc = ieee80211_get_qos_ctrl(hdr);
2024	if (qc) {
2025		cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2026		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2027	} else
2028		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2029
	/* RTS/CTS and CTS-to-self are mutually exclusive */
2030	if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2031		tx_flags |= TX_CMD_FLG_RTS_MSK;
2032		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2033	} else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2034		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2035		tx_flags |= TX_CMD_FLG_CTS_MSK;
2036	}
2037
2038	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2039		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2040
2041	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	/* Management frames get a short PM frame timeout (in units the
	 * uCode defines); (re)assoc requests get a slightly longer one */
2042	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2043		if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2044		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2045			cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2046		else
2047			cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2048	} else {
2049		cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2050	}
2051
2052	cmd->cmd.tx.driver_txop = 0;
2053	cmd->cmd.tx.tx_flags = tx_flags;
2054	cmd->cmd.tx.next_frame_len = 0;
2055}
2056static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2057{
2058 /* 0 - mgmt, 1 - cnt, 2 - data */
2059 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2060 priv->tx_stats[idx].cnt++;
2061 priv->tx_stats[idx].bytes += len;
2062}
2063/**
2064 * iwl4965_get_sta_id - Find station's index within station table
2065 *
2066 * If new IBSS station, create new entry in station table
 *
 * Returns a valid station index, falling back to the broadcast station id
 * whenever the destination cannot be resolved (never returns
 * IWL_INVALID_STATION from the visible paths below).
2067 */
2068static int iwl4965_get_sta_id(struct iwl_priv *priv,
2069				struct ieee80211_hdr *hdr)
2070{
2071	int sta_id;
2072	u16 fc = le16_to_cpu(hdr->frame_control);
2073	DECLARE_MAC_BUF(mac);
2074
2075	/* If this frame is broadcast or management, use broadcast station id */
2076	if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2077	    is_multicast_ether_addr(hdr->addr1))
2078		return priv->hw_params.bcast_sta_id;
2079
2080	switch (priv->iw_mode) {
2081
2082	/* If we are a client station in a BSS network, use the special
2083	 * AP station entry (that's the only station we communicate with) */
2084	case IEEE80211_IF_TYPE_STA:
2085		return IWL_AP_ID;
2086
2087	/* If we are an AP, then find the station, or use BCAST */
2088	case IEEE80211_IF_TYPE_AP:
2089		sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2090		if (sta_id != IWL_INVALID_STATION)
2091			return sta_id;
2092		return priv->hw_params.bcast_sta_id;
2093
2094	/* If this frame is going out to an IBSS network, find the station,
2095	 * or create a new station table entry */
2096	case IEEE80211_IF_TYPE_IBSS:
2097		sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2098		if (sta_id != IWL_INVALID_STATION)
2099			return sta_id;
2100
2101		/* Create new station table entry */
		/* NOTE(review): the add is CMD_ASYNC, so the entry may not be
		 * committed to the uCode yet when we return its index */
2102		sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2103						   0, CMD_ASYNC, NULL);
2104
2105		if (sta_id != IWL_INVALID_STATION)
2106			return sta_id;
2107
2108		IWL_DEBUG_DROP("Station %s not in station map. "
2109			       "Defaulting to broadcast...\n",
2110			       print_mac(mac, hdr->addr1));
2111		iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2112		return priv->hw_params.bcast_sta_id;
2113
2114	default:
2115		IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2116		return priv->hw_params.bcast_sta_id;
2117	}
2118}
2119
2120/*
2121 * start REPLY_TX command process
 *
 * Main Tx entry: validates driver state, resolves the destination station,
 * claims the next TFD slot of the chosen Tx queue, builds the Tx command
 * (header copy, crypto, flags, rate), DMA-maps the payload, and advances the
 * queue write pointer. Returns 0 on success, -1 when the frame is dropped,
 * or the error from updating the write pointer.
2122 */
2123static int iwl4965_tx_skb(struct iwl_priv *priv,
2124		      struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2125{
2126	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2127	struct iwl4965_tfd_frame *tfd;
2128	u32 *control_flags;
2129	int txq_id = ctl->queue;
2130	struct iwl4965_tx_queue *txq = NULL;
2131	struct iwl4965_queue *q = NULL;
2132	dma_addr_t phys_addr;
2133	dma_addr_t txcmd_phys;
2134	dma_addr_t scratch_phys;
2135	struct iwl_cmd *out_cmd = NULL;
2136	u16 len, idx, len_org;
2137	u8 id, hdr_len, unicast;
2138	u8 sta_id;
2139	u16 seq_number = 0;
2140	u16 fc;
2141	__le16 *qc;
2142	u8 wait_write_ptr = 0;
2143	unsigned long flags;
2144	int rc;
2145
	/* Early sanity checks under priv->lock: rfkill, interface, rate */
2146	spin_lock_irqsave(&priv->lock, flags);
2147	if (iwl_is_rfkill(priv)) {
2148		IWL_DEBUG_DROP("Dropping - RF KILL\n");
2149		goto drop_unlock;
2150	}
2151
2152	if (!priv->vif) {
2153		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
2154		goto drop_unlock;
2155	}
2156
2157	if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2158		IWL_ERROR("ERROR: No TX rate available.\n");
2159		goto drop_unlock;
2160	}
2161
2162	unicast = !is_multicast_ether_addr(hdr->addr1);
2163	id = 0;
2164
2165	fc = le16_to_cpu(hdr->frame_control);
2166
2167#ifdef CONFIG_IWLWIFI_DEBUG
2168	if (ieee80211_is_auth(fc))
2169		IWL_DEBUG_TX("Sending AUTH frame\n");
2170	else if (ieee80211_is_assoc_request(fc))
2171		IWL_DEBUG_TX("Sending ASSOC frame\n");
2172	else if (ieee80211_is_reassoc_request(fc))
2173		IWL_DEBUG_TX("Sending REASSOC frame\n");
2174#endif
2175
2176	/* drop all data frame if we are not associated */
2177	if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2178	    (!iwl_is_associated(priv) ||
2179	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
2180	     !priv->assoc_station_added)) {
2181		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2182		goto drop_unlock;
2183	}
2184
2185	spin_unlock_irqrestore(&priv->lock, flags);
2186
2187	hdr_len = ieee80211_get_hdrlen(fc);
2188
2189	/* Find (or create) index into station table for destination station */
2190	sta_id = iwl4965_get_sta_id(priv, hdr);
2191	if (sta_id == IWL_INVALID_STATION) {
2192		DECLARE_MAC_BUF(mac);
2193
2194		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2195			       print_mac(mac, hdr->addr1));
2196		goto drop;
2197	}
2198
2199	IWL_DEBUG_RATE("station Id %d\n", sta_id);
2200
	/* QoS frames: stamp the driver-maintained per-TID sequence number
	 * into the header. NOTE(review): stations[sta_id].tid[] is read and
	 * modified here outside priv->lock -- presumably serialized by the
	 * caller/mac80211 Tx path; confirm. */
2201	qc = ieee80211_get_qos_ctrl(hdr);
2202	if (qc) {
2203		u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2204		seq_number = priv->stations[sta_id].tid[tid].seq_number &
2205				IEEE80211_SCTL_SEQ;
2206		hdr->seq_ctrl = cpu_to_le16(seq_number) |
2207			(hdr->seq_ctrl &
2208				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2209		seq_number += 0x10;
2210#ifdef CONFIG_IWL4965_HT
2211		/* aggregation is on for this <sta,tid> */
2212		if (ctl->flags & IEEE80211_TXCTL_AMPDU)
2213			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2214		priv->stations[sta_id].tid[tid].tfds_in_queue++;
2215#endif /* CONFIG_IWL4965_HT */
2216	}
2217
2218	/* Descriptor for chosen Tx queue */
2219	txq = &priv->txq[txq_id];
2220	q = &txq->q;
2221
2222	spin_lock_irqsave(&priv->lock, flags);
2223
2224	/* Set up first empty TFD within this queue's circular TFD buffer */
2225	tfd = &txq->bd[q->write_ptr];
2226	memset(tfd, 0, sizeof(*tfd));
2227	control_flags = (u32 *) tfd;
2228	idx = get_cmd_index(q, q->write_ptr, 0);
2229
2230	/* Set up driver data for this TFD */
2231	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
2232	txq->txb[q->write_ptr].skb[0] = skb;
2233	memcpy(&(txq->txb[q->write_ptr].status.control),
2234	       ctl, sizeof(struct ieee80211_tx_control));
2235
2236	/* Set up first empty entry in queue's array of Tx/cmd buffers */
2237	out_cmd = &txq->cmd[idx];
2238	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2239	memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2240
2241	/*
2242	 * Set up the Tx-command (not MAC!) header.
2243	 * Store the chosen Tx queue and TFD index within the sequence field;
2244	 * after Tx, uCode's Tx response will return this value so driver can
2245	 * locate the frame within the tx queue and do post-tx processing.
2246	 */
2247	out_cmd->hdr.cmd = REPLY_TX;
2248	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2249				INDEX_TO_SEQ(q->write_ptr)));
2250
2251	/* Copy MAC header from skb into command buffer */
2252	memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2253
2254	/*
2255	 * Use the first empty entry in this queue's command buffer array
2256	 * to contain the Tx command and MAC header concatenated together
2257	 * (payload data will be in another buffer).
2258	 * Size of this varies, due to varying MAC header length.
2259	 * If end is not dword aligned, we'll have 2 extra bytes at the end
2260	 * of the MAC header (device reads on dword boundaries).
2261	 * We'll tell device about this padding later.
2262	 */
2263	len = priv->hw_params.tx_cmd_len +
2264		sizeof(struct iwl_cmd_header) + hdr_len;
2265
	/* len_org becomes a flag: 1 if padding was added, else 0 */
2266	len_org = len;
2267	len = (len + 3) & ~3;
2268
2269	if (len_org != len)
2270		len_org = 1;
2271	else
2272		len_org = 0;
2273
2274	/* Physical address of this Tx command's header (not MAC header!),
2275	 * within command buffer array. */
2276	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2277		     offsetof(struct iwl_cmd, hdr);
2278
2279	/* Add buffer containing Tx command and MAC(!) header to TFD's
2280	 * first entry */
2281	iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2282
2283	if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2284		iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, sta_id);
2285
2286	/* Set up TFD's 2nd entry to point directly to remainder of skb,
2287	 * if any (802.11 null frames have no payload). */
2288	len = skb->len - hdr_len;
2289	if (len) {
		/* NOTE(review): pci_map_single return value is not checked
		 * for mapping failure before being handed to the device */
2290		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2291					   len, PCI_DMA_TODEVICE);
2292		iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2293	}
2294
2295	/* Tell 4965 about any 2-byte padding after MAC header */
2296	if (len_org)
2297		out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2298
2299	/* Total # bytes to be transmitted */
2300	len = (u16)skb->len;
2301	out_cmd->cmd.tx.len = cpu_to_le16(len);
2302
2303	/* TODO need this for burst mode later on */
2304	iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2305
2306	/* set is_hcca to 0; it probably will never be implemented */
2307	iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2308
2309	iwl_update_tx_stats(priv, fc, len);
2310
	/* Point the uCode at the scratch area inside the Tx command */
2311	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
2312		offsetof(struct iwl4965_tx_cmd, scratch);
2313	out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
2314	out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
2315
	/* Commit the advanced sequence number only on the last fragment;
	 * for fragmented frames, defer the doorbell (wait_write_ptr) */
2316	if (!ieee80211_get_morefrag(hdr)) {
2317		txq->need_update = 1;
2318		if (qc) {
2319			u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2320			priv->stations[sta_id].tid[tid].seq_number = seq_number;
2321		}
2322	} else {
2323		wait_write_ptr = 1;
2324		txq->need_update = 0;
2325	}
2326
2327	iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2328			   sizeof(out_cmd->cmd.tx));
2329
2330	iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2331			   ieee80211_get_hdrlen(fc));
2332
2333	/* Set up entry for this TFD in Tx byte-count array */
2334	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
2335
2336	/* Tell device the write index *just past* this latest filled TFD */
2337	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2338	rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
2339	spin_unlock_irqrestore(&priv->lock, flags);
2340
2341	if (rc)
2342		return rc;
2343
	/* Queue nearly full: flush any deferred doorbell and stop the
	 * mac80211 queue until reclaim frees space */
2344	if ((iwl4965_queue_space(q) < q->high_mark)
2345	    && priv->mac80211_registered) {
2346		if (wait_write_ptr) {
2347			spin_lock_irqsave(&priv->lock, flags);
2348			txq->need_update = 1;
2349			iwl4965_tx_queue_update_write_ptr(priv, txq);
2350			spin_unlock_irqrestore(&priv->lock, flags);
2351		}
2352
2353		ieee80211_stop_queue(priv->hw, ctl->queue);
2354	}
2355
2356	return 0;
2357
2358drop_unlock:
2359	spin_unlock_irqrestore(&priv->lock, flags);
2360drop:
2361	return -1;
2362}
2363
2364static void iwl4965_set_rate(struct iwl_priv *priv) 1228static void iwl4965_set_rate(struct iwl_priv *priv)
2365{ 1229{
2366 const struct ieee80211_supported_band *hw = NULL; 1230 const struct ieee80211_supported_band *hw = NULL;
2367 struct ieee80211_rate *rate; 1231 struct ieee80211_rate *rate;
2368 int i; 1232 int i;
2369 1233
2370 hw = iwl4965_get_hw_mode(priv, priv->band); 1234 hw = iwl_get_hw_mode(priv, priv->band);
2371 if (!hw) { 1235 if (!hw) {
2372 IWL_ERROR("Failed to set rate: unable to get hw mode\n"); 1236 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2373 return; 1237 return;
@@ -2466,45 +1330,6 @@ void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2466 return; 1330 return;
2467} 1331}
2468 1332
/*
 * iwl4965_set_decrypted_flag - translate uCode decrypt status to mac80211 flags
 *
 * Inspects the hardware decryption result for a received, protected frame and
 * sets RX_FLAG_DECRYPTED / RX_FLAG_MMIC_ERROR in @stats accordingly. Does
 * nothing when decryption is disabled in the active RXON filter or the frame
 * is not protected.
 */
2469void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
2470			u32 decrypt_res, struct ieee80211_rx_status *stats)
2471{
2472	u16 fc =
2473		le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2474
2475	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2476		return;
2477
2478	if (!(fc & IEEE80211_FCTL_PROTECTED))
2479		return;
2480
2481	IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2482	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2483	case RX_RES_STATUS_SEC_TYPE_TKIP:
2484		/* The uCode has got a bad phase 1 Key, pushes the packet.
2485		 * Decryption will be done in SW. */
2486		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2487		    RX_RES_STATUS_BAD_KEY_TTAK)
2488			break;
2489
2490		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2491		    RX_RES_STATUS_BAD_ICV_MIC)
2492			stats->flag |= RX_FLAG_MMIC_ERROR;
		/* fall through -- TKIP also checks for DECRYPT_OK below */
2493	case RX_RES_STATUS_SEC_TYPE_WEP:
2494	case RX_RES_STATUS_SEC_TYPE_CCMP:
2495		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2496		    RX_RES_STATUS_DECRYPT_OK) {
2497			IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2498			stats->flag |= RX_FLAG_DECRYPTED;
2499		}
2500		break;
2501
2502	default:
2503		break;
2504	}
2505}
2506
2507
2508#define IWL_PACKET_RETRY_TIME HZ 1333#define IWL_PACKET_RETRY_TIME HZ
2509 1334
2510int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) 1335int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
@@ -2629,7 +1454,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2629 u8 type) 1454 u8 type)
2630{ 1455{
2631 struct iwl4965_spectrum_cmd spectrum; 1456 struct iwl4965_spectrum_cmd spectrum;
2632 struct iwl4965_rx_packet *res; 1457 struct iwl_rx_packet *res;
2633 struct iwl_host_cmd cmd = { 1458 struct iwl_host_cmd cmd = {
2634 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 1459 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2635 .data = (void *)&spectrum, 1460 .data = (void *)&spectrum,
@@ -2674,7 +1499,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2674 if (rc) 1499 if (rc)
2675 return rc; 1500 return rc;
2676 1501
2677 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data; 1502 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2678 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1503 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2679 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); 1504 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
2680 rc = -EIO; 1505 rc = -EIO;
@@ -2704,351 +1529,16 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2704} 1529}
2705#endif 1530#endif
2706 1531
2707static void iwl4965_txstatus_to_ieee(struct iwl_priv *priv,
2708 struct iwl4965_tx_info *tx_sta)
2709{
2710
2711 tx_sta->status.ack_signal = 0;
2712 tx_sta->status.excessive_retries = 0;
2713 tx_sta->status.queue_length = 0;
2714 tx_sta->status.queue_number = 0;
2715
2716 if (in_interrupt())
2717 ieee80211_tx_status_irqsafe(priv->hw,
2718 tx_sta->skb[0], &(tx_sta->status));
2719 else
2720 ieee80211_tx_status(priv->hw,
2721 tx_sta->skb[0], &(tx_sta->status));
2722
2723 tx_sta->skb[0] = NULL;
2724}
2725
2726/**
2727 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
2728 *
2729 * When FW advances 'R' index, all entries between old and new 'R' index
2730 * need to be reclaimed. As result, some free space forms. If there is
2731 * enough free space (> low mark), wake the stack that feeds us.
 *
 * Returns the number of entries reclaimed (0 when @index is invalid).
2732 */
2733int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
2734{
2735	struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2736	struct iwl4965_queue *q = &txq->q;
2737	int nfreed = 0;
2738
	/* Reject indices outside the ring or pointing at unused slots */
2739	if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
2740		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
2741			  "is out of range [0-%d] %d %d.\n", txq_id,
2742			  index, q->n_bd, q->write_ptr, q->read_ptr);
2743		return 0;
2744	}
2745
	/* Walk read_ptr forward (with wrap) until just past @index */
2746	for (index = iwl_queue_inc_wrap(index, q->n_bd);
2747		q->read_ptr != index;
2748		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2749		if (txq_id != IWL_CMD_QUEUE_NUM) {
2750			iwl4965_txstatus_to_ieee(priv,
2751					&(txq->txb[txq->q.read_ptr]));
2752			iwl4965_hw_txq_free_tfd(priv, txq);
2753		} else if (nfreed > 1) {
			/* Command queue should complete one entry at a time;
			 * more than one skipped HCMD means uCode trouble, so
			 * schedule a restart */
2754			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
2755					q->write_ptr, q->read_ptr);
2756			queue_work(priv->workqueue, &priv->restart);
2757		}
2758		nfreed++;
2759	}
2760
	/* NOTE(review): queue waking is disabled here -- presumably the
	 * callers decide when to wake based on aggregation state; confirm
	 * before deleting this commented-out block */
2761/*	if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
2762			(txq_id != IWL_CMD_QUEUE_NUM) &&
2763			priv->mac80211_registered)
2764		ieee80211_wake_queue(priv->hw, txq_id); */
2765
2766
2767	return nfreed;
2768}
2769
2770static int iwl4965_is_tx_success(u32 status)
2771{
2772 status &= TX_STATUS_MSK;
2773 return (status == TX_STATUS_SUCCESS)
2774 || (status == TX_STATUS_DIRECT_DONE);
2775}
2776
2777/****************************************************************************** 1532/******************************************************************************
2778 * 1533 *
2779 * Generic RX handler implementations 1534 * Generic RX handler implementations
2780 * 1535 *
2781 ******************************************************************************/ 1536 ******************************************************************************/
2782#ifdef CONFIG_IWL4965_HT 1537static void iwl_rx_reply_alive(struct iwl_priv *priv,
2783 1538 struct iwl_rx_mem_buffer *rxb)
2784static inline int iwl4965_get_ra_sta_id(struct iwl_priv *priv,
2785 struct ieee80211_hdr *hdr)
2786{ 1539{
2787 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) 1540 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2788 return IWL_AP_ID; 1541 struct iwl_alive_resp *palive;
2789 else {
2790 u8 *da = ieee80211_get_DA(hdr);
2791 return iwl4965_hw_find_station(priv, da);
2792 }
2793}
2794
2795static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
2796 struct iwl_priv *priv, int txq_id, int idx)
2797{
2798 if (priv->txq[txq_id].txb[idx].skb[0])
2799 return (struct ieee80211_hdr *)priv->txq[txq_id].
2800 txb[idx].skb[0]->data;
2801 return NULL;
2802}
2803
2804static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2805{
2806 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
2807 tx_resp->frame_count);
2808 return le32_to_cpu(*scd_ssn) & MAX_SN;
2809
2810}
2811
2812/**
2813 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue
2814 */
2815static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2816 struct iwl4965_ht_agg *agg,
2817 struct iwl4965_tx_resp_agg *tx_resp,
2818 u16 start_idx)
2819{
2820 u16 status;
2821 struct agg_tx_status *frame_status = &tx_resp->status;
2822 struct ieee80211_tx_status *tx_status = NULL;
2823 struct ieee80211_hdr *hdr = NULL;
2824 int i, sh;
2825 int txq_id, idx;
2826 u16 seq;
2827
2828 if (agg->wait_for_ba)
2829 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
2830
2831 agg->frame_count = tx_resp->frame_count;
2832 agg->start_idx = start_idx;
2833 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2834 agg->bitmap = 0;
2835
2836 /* # frames attempted by Tx command */
2837 if (agg->frame_count == 1) {
2838 /* Only one frame was attempted; no block-ack will arrive */
2839 status = le16_to_cpu(frame_status[0].status);
2840 seq = le16_to_cpu(frame_status[0].sequence);
2841 idx = SEQ_TO_INDEX(seq);
2842 txq_id = SEQ_TO_QUEUE(seq);
2843
2844 /* FIXME: code repetition */
2845 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2846 agg->frame_count, agg->start_idx, idx);
2847
2848 tx_status = &(priv->txq[txq_id].txb[idx].status);
2849 tx_status->retry_count = tx_resp->failure_frame;
2850 tx_status->queue_number = status & 0xff;
2851 tx_status->queue_length = tx_resp->failure_rts;
2852 tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
2853 tx_status->flags = iwl4965_is_tx_success(status)?
2854 IEEE80211_TX_STATUS_ACK : 0;
2855 iwl4965_hwrate_to_tx_control(priv,
2856 le32_to_cpu(tx_resp->rate_n_flags),
2857 &tx_status->control);
2858 /* FIXME: code repetition end */
2859
2860 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2861 status & 0xff, tx_resp->failure_frame);
2862 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
2863 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
2864
2865 agg->wait_for_ba = 0;
2866 } else {
2867 /* Two or more frames were attempted; expect block-ack */
2868 u64 bitmap = 0;
2869 int start = agg->start_idx;
2870
2871 /* Construct bit-map of pending frames within Tx window */
2872 for (i = 0; i < agg->frame_count; i++) {
2873 u16 sc;
2874 status = le16_to_cpu(frame_status[i].status);
2875 seq = le16_to_cpu(frame_status[i].sequence);
2876 idx = SEQ_TO_INDEX(seq);
2877 txq_id = SEQ_TO_QUEUE(seq);
2878
2879 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2880 AGG_TX_STATE_ABORT_MSK))
2881 continue;
2882
2883 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2884 agg->frame_count, txq_id, idx);
2885
2886 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
2887
2888 sc = le16_to_cpu(hdr->seq_ctrl);
2889 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2890 IWL_ERROR("BUG_ON idx doesn't match seq control"
2891 " idx=%d, seq_idx=%d, seq=%d\n",
2892 idx, SEQ_TO_SN(sc),
2893 hdr->seq_ctrl);
2894 return -1;
2895 }
2896
2897 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
2898 i, idx, SEQ_TO_SN(sc));
2899
2900 sh = idx - start;
2901 if (sh > 64) {
2902 sh = (start - idx) + 0xff;
2903 bitmap = bitmap << sh;
2904 sh = 0;
2905 start = idx;
2906 } else if (sh < -64)
2907 sh = 0xff - (start - idx);
2908 else if (sh < 0) {
2909 sh = start - idx;
2910 start = idx;
2911 bitmap = bitmap << sh;
2912 sh = 0;
2913 }
2914 bitmap |= (1 << sh);
2915 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
2916 start, (u32)(bitmap & 0xFFFFFFFF));
2917 }
2918
2919 agg->bitmap = bitmap;
2920 agg->start_idx = start;
2921 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2922 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2923 agg->frame_count, agg->start_idx,
2924 (unsigned long long)agg->bitmap);
2925
2926 if (bitmap)
2927 agg->wait_for_ba = 1;
2928 }
2929 return 0;
2930}
2931#endif
2932
2933/**
2934 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
 *
 * Decodes the REPLY_TX notification, reports frame status to mac80211,
 * reclaims completed TFDs, and (under CONFIG_IWL4965_HT) updates the
 * per-<sta,tid> aggregation bookkeeping and wakes queues when space frees up.
2935 */
2936static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2937				struct iwl4965_rx_mem_buffer *rxb)
2938{
2939	struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	/* The uCode echoes back the queue/index we encoded at Tx time */
2940	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2941	int txq_id = SEQ_TO_QUEUE(sequence);
2942	int index = SEQ_TO_INDEX(sequence);
2943	struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2944	struct ieee80211_tx_status *tx_status;
2945	struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2946	u32 status = le32_to_cpu(tx_resp->status);
2947#ifdef CONFIG_IWL4965_HT
2948	int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
2949	struct ieee80211_hdr *hdr;
2950	__le16 *qc;
2951#endif
2952
2953	if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
2954		IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
2955			  "is out of range [0-%d] %d %d\n", txq_id,
2956			  index, txq->q.n_bd, txq->q.write_ptr,
2957			  txq->q.read_ptr);
2958		return;
2959	}
2960
2961#ifdef CONFIG_IWL4965_HT
	/* NOTE(review): iwl4965_tx_queue_get_hdr() can return NULL for an
	 * empty slot; hdr is then passed to ieee80211_get_qos_ctrl() and,
	 * on the sched_retry path, dereferenced -- confirm this cannot
	 * happen for a valid, in-use index */
2962	hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
2963	qc = ieee80211_get_qos_ctrl(hdr);
2964
2965	if (qc)
2966		tid = le16_to_cpu(*qc) & 0xf;
2967
2968	sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2969	if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2970		IWL_ERROR("Station not known\n");
2971		return;
2972	}
2973
	/* Scheduler-retried (aggregation) queue: reclaim up to the
	 * scheduler's reported SSN rather than this frame's index */
2974	if (txq->sched_retry) {
2975		const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2976		struct iwl4965_ht_agg *agg = NULL;
2977
2978		if (!qc)
2979			return;
2980
2981		agg = &priv->stations[sta_id].tid[tid].agg;
2982
2983		iwl4965_tx_status_reply_tx(priv, agg,
2984				(struct iwl4965_tx_resp_agg *)tx_resp, index);
2985
2986		if ((tx_resp->frame_count == 1) &&
2987		    !iwl4965_is_tx_success(status)) {
2988			/* TODO: send BAR */
2989		}
2990
2991		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2992			int freed;
2993			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2994			IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2995					   "%d index %d\n", scd_ssn , index);
2996			freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2997			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2998
2999			if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3000			    txq_id >= 0 && priv->mac80211_registered &&
3001			    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3002				ieee80211_wake_queue(priv->hw, txq_id);
3003
3004			iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3005		}
3006	} else {
3007#endif /* CONFIG_IWL4965_HT */
	/* Non-aggregation path: report this single frame's status */
3008	tx_status = &(txq->txb[txq->q.read_ptr].status);
3009
3010	tx_status->retry_count = tx_resp->failure_frame;
3011	tx_status->queue_number = status;
3012	tx_status->queue_length = tx_resp->bt_kill_count;
3013	tx_status->queue_length |= tx_resp->failure_rts;
3014	tx_status->flags =
3015	    iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3016	iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
3017				     &tx_status->control);
3018
3019	IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3020		     "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
3021		     status, le32_to_cpu(tx_resp->rate_n_flags),
3022		     tx_resp->failure_frame);
3023
3024	IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3025	if (index != -1) {
3026		int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3027#ifdef CONFIG_IWL4965_HT
3028		if (tid != MAX_TID_COUNT)
3029			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3030		if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3031			(txq_id >= 0) &&
3032			priv->mac80211_registered)
3033			ieee80211_wake_queue(priv->hw, txq_id);
3034		if (tid != MAX_TID_COUNT)
3035			iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3036#endif
3037	}
3038#ifdef CONFIG_IWL4965_HT
3039	}
3040#endif /* CONFIG_IWL4965_HT */
3041
3042	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3043		IWL_ERROR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
3044}
3045
3046
3047static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3048 struct iwl4965_rx_mem_buffer *rxb)
3049{
3050 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3051 struct iwl4965_alive_resp *palive;
3052 struct delayed_work *pwork; 1542 struct delayed_work *pwork;
3053 1543
3054 palive = &pkt->u.alive_frame; 1544 palive = &pkt->u.alive_frame;
@@ -3062,12 +1552,12 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3062 IWL_DEBUG_INFO("Initialization Alive received.\n"); 1552 IWL_DEBUG_INFO("Initialization Alive received.\n");
3063 memcpy(&priv->card_alive_init, 1553 memcpy(&priv->card_alive_init,
3064 &pkt->u.alive_frame, 1554 &pkt->u.alive_frame,
3065 sizeof(struct iwl4965_init_alive_resp)); 1555 sizeof(struct iwl_init_alive_resp));
3066 pwork = &priv->init_alive_start; 1556 pwork = &priv->init_alive_start;
3067 } else { 1557 } else {
3068 IWL_DEBUG_INFO("Runtime Alive received.\n"); 1558 IWL_DEBUG_INFO("Runtime Alive received.\n");
3069 memcpy(&priv->card_alive, &pkt->u.alive_frame, 1559 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3070 sizeof(struct iwl4965_alive_resp)); 1560 sizeof(struct iwl_alive_resp));
3071 pwork = &priv->alive_start; 1561 pwork = &priv->alive_start;
3072 } 1562 }
3073 1563
@@ -3080,19 +1570,10 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3080 IWL_WARNING("uCode did not respond OK.\n"); 1570 IWL_WARNING("uCode did not respond OK.\n");
3081} 1571}
3082 1572
3083static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
3084 struct iwl4965_rx_mem_buffer *rxb)
3085{
3086 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3087
3088 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3089 return;
3090}
3091
3092static void iwl4965_rx_reply_error(struct iwl_priv *priv, 1573static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3093 struct iwl4965_rx_mem_buffer *rxb) 1574 struct iwl_rx_mem_buffer *rxb)
3094{ 1575{
3095 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1576 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3096 1577
3097 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " 1578 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3098 "seq 0x%04X ser 0x%08X\n", 1579 "seq 0x%04X ser 0x%08X\n",
@@ -3105,10 +1586,10 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3105 1586
3106#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 1587#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3107 1588
3108static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 1589static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3109{ 1590{
3110 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1591 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3111 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; 1592 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3112 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif); 1593 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
3113 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", 1594 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3114 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1595 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
@@ -3117,15 +1598,15 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *
3117} 1598}
3118 1599
3119static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 1600static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3120 struct iwl4965_rx_mem_buffer *rxb) 1601 struct iwl_rx_mem_buffer *rxb)
3121{ 1602{
3122#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 1603#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
3123 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1604 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3124 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif); 1605 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
3125 1606
3126 if (!report->state) { 1607 if (!report->state) {
3127 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, 1608 IWL_DEBUG(IWL_DL_11H,
3128 "Spectrum Measure Notification: Start\n"); 1609 "Spectrum Measure Notification: Start\n");
3129 return; 1610 return;
3130 } 1611 }
3131 1612
@@ -3135,10 +1616,10 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3135} 1616}
3136 1617
3137static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, 1618static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3138 struct iwl4965_rx_mem_buffer *rxb) 1619 struct iwl_rx_mem_buffer *rxb)
3139{ 1620{
3140#ifdef CONFIG_IWLWIFI_DEBUG 1621#ifdef CONFIG_IWLWIFI_DEBUG
3141 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1622 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3142 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif); 1623 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
3143 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 1624 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3144 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1625 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -3146,13 +1627,13 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3146} 1627}
3147 1628
3148static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1629static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3149 struct iwl4965_rx_mem_buffer *rxb) 1630 struct iwl_rx_mem_buffer *rxb)
3150{ 1631{
3151 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1632 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3152 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 1633 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3153 "notification for %s:\n", 1634 "notification for %s:\n",
3154 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 1635 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3155 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 1636 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3156} 1637}
3157 1638
3158static void iwl4965_bg_beacon_update(struct work_struct *work) 1639static void iwl4965_bg_beacon_update(struct work_struct *work)
@@ -3162,7 +1643,7 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3162 struct sk_buff *beacon; 1643 struct sk_buff *beacon;
3163 1644
3164 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 1645 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3165 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 1646 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3166 1647
3167 if (!beacon) { 1648 if (!beacon) {
3168 IWL_ERROR("update beacon failed\n"); 1649 IWL_ERROR("update beacon failed\n");
@@ -3181,10 +1662,10 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3181} 1662}
3182 1663
3183static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, 1664static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3184 struct iwl4965_rx_mem_buffer *rxb) 1665 struct iwl_rx_mem_buffer *rxb)
3185{ 1666{
3186#ifdef CONFIG_IWLWIFI_DEBUG 1667#ifdef CONFIG_IWLWIFI_DEBUG
3187 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1668 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3188 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status); 1669 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3189 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 1670 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
3190 1671
@@ -3204,10 +1685,10 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3204 1685
3205/* Service response to REPLY_SCAN_CMD (0x80) */ 1686/* Service response to REPLY_SCAN_CMD (0x80) */
3206static void iwl4965_rx_reply_scan(struct iwl_priv *priv, 1687static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3207 struct iwl4965_rx_mem_buffer *rxb) 1688 struct iwl_rx_mem_buffer *rxb)
3208{ 1689{
3209#ifdef CONFIG_IWLWIFI_DEBUG 1690#ifdef CONFIG_IWLWIFI_DEBUG
3210 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1691 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3211 struct iwl4965_scanreq_notification *notif = 1692 struct iwl4965_scanreq_notification *notif =
3212 (struct iwl4965_scanreq_notification *)pkt->u.raw; 1693 (struct iwl4965_scanreq_notification *)pkt->u.raw;
3213 1694
@@ -3217,9 +1698,9 @@ static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3217 1698
3218/* Service SCAN_START_NOTIFICATION (0x82) */ 1699/* Service SCAN_START_NOTIFICATION (0x82) */
3219static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv, 1700static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3220 struct iwl4965_rx_mem_buffer *rxb) 1701 struct iwl_rx_mem_buffer *rxb)
3221{ 1702{
3222 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1703 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3223 struct iwl4965_scanstart_notification *notif = 1704 struct iwl4965_scanstart_notification *notif =
3224 (struct iwl4965_scanstart_notification *)pkt->u.raw; 1705 (struct iwl4965_scanstart_notification *)pkt->u.raw;
3225 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 1706 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -3234,9 +1715,9 @@ static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3234 1715
3235/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ 1716/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3236static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv, 1717static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3237 struct iwl4965_rx_mem_buffer *rxb) 1718 struct iwl_rx_mem_buffer *rxb)
3238{ 1719{
3239 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1720 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3240 struct iwl4965_scanresults_notification *notif = 1721 struct iwl4965_scanresults_notification *notif =
3241 (struct iwl4965_scanresults_notification *)pkt->u.raw; 1722 (struct iwl4965_scanresults_notification *)pkt->u.raw;
3242 1723
@@ -3259,9 +1740,9 @@ static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3259 1740
3260/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 1741/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3261static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, 1742static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
3262 struct iwl4965_rx_mem_buffer *rxb) 1743 struct iwl_rx_mem_buffer *rxb)
3263{ 1744{
3264 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1745 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3265 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1746 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3266 1747
3267 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1748 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
@@ -3317,9 +1798,9 @@ reschedule:
3317/* Handle notification from uCode that card's power state is changing 1798/* Handle notification from uCode that card's power state is changing
3318 * due to software, hardware, or critical temperature RFKILL */ 1799 * due to software, hardware, or critical temperature RFKILL */
3319static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, 1800static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3320 struct iwl4965_rx_mem_buffer *rxb) 1801 struct iwl_rx_mem_buffer *rxb)
3321{ 1802{
3322 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1803 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3323 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 1804 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3324 unsigned long status = priv->status; 1805 unsigned long status = priv->status;
3325 1806
@@ -3385,6 +1866,17 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3385 wake_up_interruptible(&priv->wait_command_queue); 1866 wake_up_interruptible(&priv->wait_command_queue);
3386} 1867}
3387 1868
1869/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1870 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1871static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
1872 struct iwl_rx_mem_buffer *rxb)
1873{
1874 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1875 priv->last_phy_res[0] = 1;
1876 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1877 sizeof(struct iwl4965_rx_phy_res));
1878}
1879
3388/** 1880/**
3389 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks 1881 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
3390 * 1882 *
@@ -3396,8 +1888,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3396 */ 1888 */
3397static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) 1889static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
3398{ 1890{
3399 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; 1891 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3400 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
3401 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error; 1892 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
3402 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa; 1893 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
3403 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 1894 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
@@ -3414,498 +1905,47 @@ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
3414 */ 1905 */
3415 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics; 1906 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
3416 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics; 1907 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
3417 1908 /* scan handlers */
3418 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan; 1909 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
3419 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif; 1910 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
3420 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = 1911 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3421 iwl4965_rx_scan_results_notif; 1912 iwl4965_rx_scan_results_notif;
3422 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = 1913 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3423 iwl4965_rx_scan_complete_notif; 1914 iwl4965_rx_scan_complete_notif;
1915 /* status change handler */
3424 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif; 1916 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
3425 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
3426 1917
1918 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
1919 iwl_rx_missed_beacon_notif;
1920 /* Rx handlers */
1921 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
1922 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
3427 /* Set up hardware specific Rx handlers */ 1923 /* Set up hardware specific Rx handlers */
3428 iwl4965_hw_rx_handler_setup(priv); 1924 priv->cfg->ops->lib->rx_handler_setup(priv);
3429}
3430
3431/**
3432 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3433 * @rxb: Rx buffer to reclaim
3434 *
3435 * If an Rx buffer has an async callback associated with it the callback
3436 * will be executed. The attached skb (if present) will only be freed
3437 * if the callback returns 1
3438 */
3439static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
3440 struct iwl4965_rx_mem_buffer *rxb)
3441{
3442 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3443 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3444 int txq_id = SEQ_TO_QUEUE(sequence);
3445 int index = SEQ_TO_INDEX(sequence);
3446 int huge = sequence & SEQ_HUGE_FRAME;
3447 int cmd_index;
3448 struct iwl_cmd *cmd;
3449
3450 /* If a Tx command is being handled and it isn't in the actual
3451 * command queue then there a command routing bug has been introduced
3452 * in the queue management code. */
3453 if (txq_id != IWL_CMD_QUEUE_NUM)
3454 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3455 txq_id, pkt->hdr.cmd);
3456 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3457
3458 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3459 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3460
3461 /* Input error checking is done when commands are added to queue. */
3462 if (cmd->meta.flags & CMD_WANT_SKB) {
3463 cmd->meta.source->u.skb = rxb->skb;
3464 rxb->skb = NULL;
3465 } else if (cmd->meta.u.callback &&
3466 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3467 rxb->skb = NULL;
3468
3469 iwl4965_tx_queue_reclaim(priv, txq_id, index);
3470
3471 if (!(cmd->meta.flags & CMD_ASYNC)) {
3472 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3473 wake_up_interruptible(&priv->wait_command_queue);
3474 }
3475}
3476
3477/************************** RX-FUNCTIONS ****************************/
3478/*
3479 * Rx theory of operation
3480 *
3481 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
3482 * each of which point to Receive Buffers to be filled by 4965. These get
3483 * used not only for Rx frames, but for any command response or notification
3484 * from the 4965. The driver and 4965 manage the Rx buffers by means
3485 * of indexes into the circular buffer.
3486 *
3487 * Rx Queue Indexes
3488 * The host/firmware share two index registers for managing the Rx buffers.
3489 *
3490 * The READ index maps to the first position that the firmware may be writing
3491 * to -- the driver can read up to (but not including) this position and get
3492 * good data.
3493 * The READ index is managed by the firmware once the card is enabled.
3494 *
3495 * The WRITE index maps to the last position the driver has read from -- the
3496 * position preceding WRITE is the last slot the firmware can place a packet.
3497 *
3498 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3499 * WRITE = READ.
3500 *
3501 * During initialization, the host sets up the READ queue position to the first
3502 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3503 *
3504 * When the firmware places a packet in a buffer, it will advance the READ index
3505 * and fire the RX interrupt. The driver can then query the READ index and
3506 * process as many packets as possible, moving the WRITE index forward as it
3507 * resets the Rx queue buffers with new memory.
3508 *
3509 * The management in the driver is as follows:
3510 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3511 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3512 * to replenish the iwl->rxq->rx_free.
3513 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
3514 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3515 * 'processed' and 'read' driver indexes as well)
3516 * + A received packet is processed and handed to the kernel network stack,
3517 * detached from the iwl->rxq. The driver 'processed' index is updated.
3518 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3519 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3520 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3521 * were enough free buffers and RX_STALLED is set it is cleared.
3522 *
3523 *
3524 * Driver sequence:
3525 *
3526 * iwl4965_rx_queue_alloc() Allocates rx_free
3527 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
3528 * iwl4965_rx_queue_restock
3529 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
3530 * queue, updates firmware pointers, and updates
3531 * the WRITE index. If insufficient rx_free buffers
3532 * are available, schedules iwl4965_rx_replenish
3533 *
3534 * -- enable interrupts --
3535 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
3536 * READ INDEX, detaching the SKB from the pool.
3537 * Moves the packet buffer from queue to rx_used.
3538 * Calls iwl4965_rx_queue_restock to refill any empty
3539 * slots.
3540 * ...
3541 *
3542 */
3543
3544/**
3545 * iwl4965_rx_queue_space - Return number of free slots available in queue.
3546 */
3547static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
3548{
3549 int s = q->read - q->write;
3550 if (s <= 0)
3551 s += RX_QUEUE_SIZE;
3552 /* keep some buffer to not confuse full and empty queue */
3553 s -= 2;
3554 if (s < 0)
3555 s = 0;
3556 return s;
3557}
3558
3559/**
3560 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3561 */
3562int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
3563{
3564 u32 reg = 0;
3565 int rc = 0;
3566 unsigned long flags;
3567
3568 spin_lock_irqsave(&q->lock, flags);
3569
3570 if (q->need_update == 0)
3571 goto exit_unlock;
3572
3573 /* If power-saving is in use, make sure device is awake */
3574 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3575 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3576
3577 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3578 iwl_set_bit(priv, CSR_GP_CNTRL,
3579 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3580 goto exit_unlock;
3581 }
3582
3583 rc = iwl_grab_nic_access(priv);
3584 if (rc)
3585 goto exit_unlock;
3586
3587 /* Device expects a multiple of 8 */
3588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
3589 q->write & ~0x7);
3590 iwl_release_nic_access(priv);
3591
3592 /* Else device is assumed to be awake */
3593 } else
3594 /* Device expects a multiple of 8 */
3595 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3596
3597
3598 q->need_update = 0;
3599
3600 exit_unlock:
3601 spin_unlock_irqrestore(&q->lock, flags);
3602 return rc;
3603}
3604
3605/**
3606 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
3607 */
3608static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
3609 dma_addr_t dma_addr)
3610{
3611 return cpu_to_le32((u32)(dma_addr >> 8));
3612}
3613
3614
3615/**
3616 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
3617 *
3618 * If there are slots in the RX queue that need to be restocked,
3619 * and we have free pre-allocated buffers, fill the ranks as much
3620 * as we can, pulling from rx_free.
3621 *
3622 * This moves the 'write' index forward to catch up with 'processed', and
3623 * also updates the memory address in the firmware to reference the new
3624 * target buffer.
3625 */
3626static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
3627{
3628 struct iwl4965_rx_queue *rxq = &priv->rxq;
3629 struct list_head *element;
3630 struct iwl4965_rx_mem_buffer *rxb;
3631 unsigned long flags;
3632 int write, rc;
3633
3634 spin_lock_irqsave(&rxq->lock, flags);
3635 write = rxq->write & ~0x7;
3636 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
3637 /* Get next free Rx buffer, remove from free list */
3638 element = rxq->rx_free.next;
3639 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3640 list_del(element);
3641
3642 /* Point to Rx buffer via next RBD in circular buffer */
3643 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
3644 rxq->queue[rxq->write] = rxb;
3645 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3646 rxq->free_count--;
3647 }
3648 spin_unlock_irqrestore(&rxq->lock, flags);
3649 /* If the pre-allocated buffer pool is dropping low, schedule to
3650 * refill it */
3651 if (rxq->free_count <= RX_LOW_WATERMARK)
3652 queue_work(priv->workqueue, &priv->rx_replenish);
3653
3654
3655 /* If we've added more space for the firmware to place data, tell it.
3656 * Increment device's write pointer in multiples of 8. */
3657 if ((write != (rxq->write & ~0x7))
3658 || (abs(rxq->write - rxq->read) > 7)) {
3659 spin_lock_irqsave(&rxq->lock, flags);
3660 rxq->need_update = 1;
3661 spin_unlock_irqrestore(&rxq->lock, flags);
3662 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
3663 if (rc)
3664 return rc;
3665 }
3666
3667 return 0;
3668}
3669
3670/**
3671 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
3672 *
3673 * When moving to rx_free an SKB is allocated for the slot.
3674 *
3675 * Also restock the Rx queue via iwl4965_rx_queue_restock.
3676 * This is called as a scheduled work item (except for during initialization)
3677 */
3678static void iwl4965_rx_allocate(struct iwl_priv *priv)
3679{
3680 struct iwl4965_rx_queue *rxq = &priv->rxq;
3681 struct list_head *element;
3682 struct iwl4965_rx_mem_buffer *rxb;
3683 unsigned long flags;
3684 spin_lock_irqsave(&rxq->lock, flags);
3685 while (!list_empty(&rxq->rx_used)) {
3686 element = rxq->rx_used.next;
3687 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3688
3689 /* Alloc a new receive buffer */
3690 rxb->skb =
3691 alloc_skb(priv->hw_params.rx_buf_size,
3692 __GFP_NOWARN | GFP_ATOMIC);
3693 if (!rxb->skb) {
3694 if (net_ratelimit())
3695 printk(KERN_CRIT DRV_NAME
3696 ": Can not allocate SKB buffers\n");
3697 /* We don't reschedule replenish work here -- we will
3698 * call the restock method and if it still needs
3699 * more buffers it will schedule replenish */
3700 break;
3701 }
3702 priv->alloc_rxb_skb++;
3703 list_del(element);
3704
3705 /* Get physical address of RB/SKB */
3706 rxb->dma_addr =
3707 pci_map_single(priv->pci_dev, rxb->skb->data,
3708 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
3709 list_add_tail(&rxb->list, &rxq->rx_free);
3710 rxq->free_count++;
3711 }
3712 spin_unlock_irqrestore(&rxq->lock, flags);
3713} 1925}
3714 1926
3715/* 1927/*
3716 * this should be called while priv->lock is locked 1928 * this should be called while priv->lock is locked
3717*/ 1929*/
3718static void __iwl4965_rx_replenish(void *data) 1930static void __iwl_rx_replenish(struct iwl_priv *priv)
3719{
3720 struct iwl_priv *priv = data;
3721
3722 iwl4965_rx_allocate(priv);
3723 iwl4965_rx_queue_restock(priv);
3724}
3725
3726
3727void iwl4965_rx_replenish(void *data)
3728{
3729 struct iwl_priv *priv = data;
3730 unsigned long flags;
3731
3732 iwl4965_rx_allocate(priv);
3733
3734 spin_lock_irqsave(&priv->lock, flags);
3735 iwl4965_rx_queue_restock(priv);
3736 spin_unlock_irqrestore(&priv->lock, flags);
3737}
3738
3739/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3740 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
3741 * This free routine walks the list of POOL entries and if SKB is set to
3742 * non NULL it is unmapped and freed
3743 */
3744static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3745{
3746 int i;
3747 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3748 if (rxq->pool[i].skb != NULL) {
3749 pci_unmap_single(priv->pci_dev,
3750 rxq->pool[i].dma_addr,
3751 priv->hw_params.rx_buf_size,
3752 PCI_DMA_FROMDEVICE);
3753 dev_kfree_skb(rxq->pool[i].skb);
3754 }
3755 }
3756
3757 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3758 rxq->dma_addr);
3759 rxq->bd = NULL;
3760}
3761
3762int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
3763{
3764 struct iwl4965_rx_queue *rxq = &priv->rxq;
3765 struct pci_dev *dev = priv->pci_dev;
3766 int i;
3767
3768 spin_lock_init(&rxq->lock);
3769 INIT_LIST_HEAD(&rxq->rx_free);
3770 INIT_LIST_HEAD(&rxq->rx_used);
3771
3772 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
3773 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3774 if (!rxq->bd)
3775 return -ENOMEM;
3776
3777 /* Fill the rx_used queue with _all_ of the Rx buffers */
3778 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3779 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3780
3781 /* Set us so that we have processed and used all buffers, but have
3782 * not restocked the Rx queue with fresh buffers */
3783 rxq->read = rxq->write = 0;
3784 rxq->free_count = 0;
3785 rxq->need_update = 0;
3786 return 0;
3787}
3788
3789void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3790{ 1931{
3791 unsigned long flags; 1932 iwl_rx_allocate(priv);
3792 int i; 1933 iwl_rx_queue_restock(priv);
3793 spin_lock_irqsave(&rxq->lock, flags);
3794 INIT_LIST_HEAD(&rxq->rx_free);
3795 INIT_LIST_HEAD(&rxq->rx_used);
3796 /* Fill the rx_used queue with _all_ of the Rx buffers */
3797 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3798 /* In the reset function, these buffers may have been allocated
3799 * to an SKB, so we need to unmap and free potential storage */
3800 if (rxq->pool[i].skb != NULL) {
3801 pci_unmap_single(priv->pci_dev,
3802 rxq->pool[i].dma_addr,
3803 priv->hw_params.rx_buf_size,
3804 PCI_DMA_FROMDEVICE);
3805 priv->alloc_rxb_skb--;
3806 dev_kfree_skb(rxq->pool[i].skb);
3807 rxq->pool[i].skb = NULL;
3808 }
3809 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3810 }
3811
3812 /* Set us so that we have processed and used all buffers, but have
3813 * not restocked the Rx queue with fresh buffers */
3814 rxq->read = rxq->write = 0;
3815 rxq->free_count = 0;
3816 spin_unlock_irqrestore(&rxq->lock, flags);
3817} 1934}
3818 1935
3819/* Convert linear signal-to-noise ratio into dB */
3820static u8 ratio2dB[100] = {
3821/* 0 1 2 3 4 5 6 7 8 9 */
3822 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3823 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3824 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3825 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3826 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3827 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3828 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3829 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3830 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3831 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3832};
3833
3834/* Calculates a relative dB value from a ratio of linear
3835 * (i.e. not dB) signal levels.
3836 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
3837int iwl4965_calc_db_from_ratio(int sig_ratio)
3838{
3839 /* 1000:1 or higher just report as 60 dB */
3840 if (sig_ratio >= 1000)
3841 return 60;
3842
3843 /* 100:1 or higher, divide by 10 and use table,
3844 * add 20 dB to make up for divide by 10 */
3845 if (sig_ratio >= 100)
3846 return (20 + (int)ratio2dB[sig_ratio/10]);
3847
3848 /* We shouldn't see this */
3849 if (sig_ratio < 1)
3850 return 0;
3851
3852 /* Use table for ratios 1:1 - 99:1 */
3853 return (int)ratio2dB[sig_ratio];
3854}
3855
3856#define PERFECT_RSSI (-20) /* dBm */
3857#define WORST_RSSI (-95) /* dBm */
3858#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3859
3860/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3861 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3862 * about formulas used below. */
3863int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
3864{
3865 int sig_qual;
3866 int degradation = PERFECT_RSSI - rssi_dbm;
3867
3868 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3869 * as indicator; formula is (signal dbm - noise dbm).
3870 * SNR at or above 40 is a great signal (100%).
3871 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3872 * Weakest usable signal is usually 10 - 15 dB SNR. */
3873 if (noise_dbm) {
3874 if (rssi_dbm - noise_dbm >= 40)
3875 return 100;
3876 else if (rssi_dbm < noise_dbm)
3877 return 0;
3878 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3879
3880 /* Else use just the signal level.
3881 * This formula is a least squares fit of data points collected and
3882 * compared with a reference system that had a percentage (%) display
3883 * for signal quality. */
3884 } else
3885 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3886 (15 * RSSI_RANGE + 62 * degradation)) /
3887 (RSSI_RANGE * RSSI_RANGE);
3888
3889 if (sig_qual > 100)
3890 sig_qual = 100;
3891 else if (sig_qual < 1)
3892 sig_qual = 0;
3893
3894 return sig_qual;
3895}
3896 1936
3897/** 1937/**
3898 * iwl4965_rx_handle - Main entry function for receiving responses from uCode 1938 * iwl_rx_handle - Main entry function for receiving responses from uCode
3899 * 1939 *
3900 * Uses the priv->rx_handlers callback function array to invoke 1940 * Uses the priv->rx_handlers callback function array to invoke
3901 * the appropriate handlers, including command responses, 1941 * the appropriate handlers, including command responses,
3902 * frame-received notifications, and other notifications. 1942 * frame-received notifications, and other notifications.
3903 */ 1943 */
3904static void iwl4965_rx_handle(struct iwl_priv *priv) 1944void iwl_rx_handle(struct iwl_priv *priv)
3905{ 1945{
3906 struct iwl4965_rx_mem_buffer *rxb; 1946 struct iwl_rx_mem_buffer *rxb;
3907 struct iwl4965_rx_packet *pkt; 1947 struct iwl_rx_packet *pkt;
3908 struct iwl4965_rx_queue *rxq = &priv->rxq; 1948 struct iwl_rx_queue *rxq = &priv->rxq;
3909 u32 r, i; 1949 u32 r, i;
3910 int reclaim; 1950 int reclaim;
3911 unsigned long flags; 1951 unsigned long flags;
@@ -3914,14 +1954,14 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3914 1954
3915 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1955 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3916 * buffer that the driver may process (last buffer filled by ucode). */ 1956 * buffer that the driver may process (last buffer filled by ucode). */
3917 r = iwl4965_hw_get_rx_read(priv); 1957 r = priv->cfg->ops->lib->shared_mem_rx_idx(priv);
3918 i = rxq->read; 1958 i = rxq->read;
3919 1959
3920 /* Rx interrupt, but nothing sent from uCode */ 1960 /* Rx interrupt, but nothing sent from uCode */
3921 if (i == r) 1961 if (i == r)
3922 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); 1962 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d\n", r, i);
3923 1963
3924 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) 1964 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3925 fill_rx = 1; 1965 fill_rx = 1;
3926 1966
3927 while (i != r) { 1967 while (i != r) {
@@ -3937,7 +1977,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3937 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1977 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
3938 priv->hw_params.rx_buf_size, 1978 priv->hw_params.rx_buf_size,
3939 PCI_DMA_FROMDEVICE); 1979 PCI_DMA_FROMDEVICE);
3940 pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 1980 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3941 1981
3942 /* Reclaim a command buffer only if this packet is a response 1982 /* Reclaim a command buffer only if this packet is a response
3943 * to a (driver-originated) command. 1983 * to a (driver-originated) command.
@@ -3956,13 +1996,12 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3956 * handle those that need handling via function in 1996 * handle those that need handling via function in
3957 * rx_handlers table. See iwl4965_setup_rx_handlers() */ 1997 * rx_handlers table. See iwl4965_setup_rx_handlers() */
3958 if (priv->rx_handlers[pkt->hdr.cmd]) { 1998 if (priv->rx_handlers[pkt->hdr.cmd]) {
3959 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 1999 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r,
3960 "r = %d, i = %d, %s, 0x%02x\n", r, i, 2000 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3961 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3962 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 2001 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3963 } else { 2002 } else {
3964 /* No handling needed */ 2003 /* No handling needed */
3965 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 2004 IWL_DEBUG(IWL_DL_RX,
3966 "r %d i %d No handler needed for %s, 0x%02x\n", 2005 "r %d i %d No handler needed for %s, 0x%02x\n",
3967 r, i, get_cmd_string(pkt->hdr.cmd), 2006 r, i, get_cmd_string(pkt->hdr.cmd),
3968 pkt->hdr.cmd); 2007 pkt->hdr.cmd);
@@ -3973,7 +2012,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3973 * fire off the (possibly) blocking iwl_send_cmd() 2012 * fire off the (possibly) blocking iwl_send_cmd()
3974 * as we reclaim the driver command queue */ 2013 * as we reclaim the driver command queue */
3975 if (rxb && rxb->skb) 2014 if (rxb && rxb->skb)
3976 iwl4965_tx_cmd_complete(priv, rxb); 2015 iwl_tx_cmd_complete(priv, rxb);
3977 else 2016 else
3978 IWL_WARNING("Claim null rxb?\n"); 2017 IWL_WARNING("Claim null rxb?\n");
3979 } 2018 }
@@ -4000,7 +2039,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4000 count++; 2039 count++;
4001 if (count >= 8) { 2040 if (count >= 8) {
4002 priv->rxq.read = i; 2041 priv->rxq.read = i;
4003 __iwl4965_rx_replenish(priv); 2042 __iwl_rx_replenish(priv);
4004 count = 0; 2043 count = 0;
4005 } 2044 }
4006 } 2045 }
@@ -4008,62 +2047,94 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4008 2047
4009 /* Backtrack one entry */ 2048 /* Backtrack one entry */
4010 priv->rxq.read = i; 2049 priv->rxq.read = i;
4011 iwl4965_rx_queue_restock(priv); 2050 iwl_rx_queue_restock(priv);
4012} 2051}
2052/* Convert linear signal-to-noise ratio into dB */
2053static u8 ratio2dB[100] = {
2054/* 0 1 2 3 4 5 6 7 8 9 */
2055 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
2056 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
2057 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
2058 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
2059 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
2060 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
2061 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
2062 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
2063 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
2064 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
2065};
4013 2066
4014/** 2067/* Calculates a relative dB value from a ratio of linear
4015 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware 2068 * (i.e. not dB) signal levels.
4016 */ 2069 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4017static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv, 2070int iwl4965_calc_db_from_ratio(int sig_ratio)
4018 struct iwl4965_tx_queue *txq)
4019{ 2071{
4020 u32 reg = 0; 2072 /* 1000:1 or higher just report as 60 dB */
4021 int rc = 0; 2073 if (sig_ratio >= 1000)
4022 int txq_id = txq->q.id; 2074 return 60;
4023 2075
4024 if (txq->need_update == 0) 2076 /* 100:1 or higher, divide by 10 and use table,
4025 return rc; 2077 * add 20 dB to make up for divide by 10 */
2078 if (sig_ratio >= 100)
2079 return (20 + (int)ratio2dB[sig_ratio/10]);
4026 2080
4027 /* if we're trying to save power */ 2081 /* We shouldn't see this */
4028 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 2082 if (sig_ratio < 1)
4029 /* wake up nic if it's powered down ... 2083 return 0;
4030 * uCode will wake up, and interrupt us again, so next
4031 * time we'll skip this part. */
4032 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4033
4034 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4035 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4036 iwl_set_bit(priv, CSR_GP_CNTRL,
4037 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4038 return rc;
4039 }
4040 2084
4041 /* restore this queue's parameters in nic hardware. */ 2085 /* Use table for ratios 1:1 - 99:1 */
4042 rc = iwl_grab_nic_access(priv); 2086 return (int)ratio2dB[sig_ratio];
4043 if (rc) 2087}
4044 return rc;
4045 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
4046 txq->q.write_ptr | (txq_id << 8));
4047 iwl_release_nic_access(priv);
4048 2088
4049 /* else not in power-save mode, uCode will never sleep when we're 2089#define PERFECT_RSSI (-20) /* dBm */
4050 * trying to tx (during RFKILL, we're not trying to tx). */ 2090#define WORST_RSSI (-95) /* dBm */
2091#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
2092
2093/* Calculate an indication of rx signal quality (a percentage, not dBm!).
2094 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
2095 * about formulas used below. */
2096int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
2097{
2098 int sig_qual;
2099 int degradation = PERFECT_RSSI - rssi_dbm;
2100
2101 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
2102 * as indicator; formula is (signal dbm - noise dbm).
2103 * SNR at or above 40 is a great signal (100%).
2104 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
2105 * Weakest usable signal is usually 10 - 15 dB SNR. */
2106 if (noise_dbm) {
2107 if (rssi_dbm - noise_dbm >= 40)
2108 return 100;
2109 else if (rssi_dbm < noise_dbm)
2110 return 0;
2111 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
2112
2113 /* Else use just the signal level.
2114 * This formula is a least squares fit of data points collected and
2115 * compared with a reference system that had a percentage (%) display
2116 * for signal quality. */
4051 } else 2117 } else
4052 iwl_write32(priv, HBUS_TARG_WRPTR, 2118 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4053 txq->q.write_ptr | (txq_id << 8)); 2119 (15 * RSSI_RANGE + 62 * degradation)) /
2120 (RSSI_RANGE * RSSI_RANGE);
4054 2121
4055 txq->need_update = 0; 2122 if (sig_qual > 100)
2123 sig_qual = 100;
2124 else if (sig_qual < 1)
2125 sig_qual = 0;
4056 2126
4057 return rc; 2127 return sig_qual;
4058} 2128}
4059 2129
4060#ifdef CONFIG_IWLWIFI_DEBUG 2130#ifdef CONFIG_IWLWIFI_DEBUG
4061static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon) 2131static void iwl4965_print_rx_config_cmd(struct iwl_priv *priv)
4062{ 2132{
2133 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
4063 DECLARE_MAC_BUF(mac); 2134 DECLARE_MAC_BUF(mac);
4064 2135
4065 IWL_DEBUG_RADIO("RX CONFIG:\n"); 2136 IWL_DEBUG_RADIO("RX CONFIG:\n");
4066 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 2137 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4067 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 2138 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4068 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 2139 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4069 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", 2140 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
@@ -4109,173 +2180,6 @@ static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
4109 IWL_DEBUG_ISR("Disabled interrupts\n"); 2180 IWL_DEBUG_ISR("Disabled interrupts\n");
4110} 2181}
4111 2182
4112static const char *desc_lookup(int i)
4113{
4114 switch (i) {
4115 case 1:
4116 return "FAIL";
4117 case 2:
4118 return "BAD_PARAM";
4119 case 3:
4120 return "BAD_CHECKSUM";
4121 case 4:
4122 return "NMI_INTERRUPT";
4123 case 5:
4124 return "SYSASSERT";
4125 case 6:
4126 return "FATAL_ERROR";
4127 }
4128
4129 return "UNKNOWN";
4130}
4131
4132#define ERROR_START_OFFSET (1 * sizeof(u32))
4133#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4134
4135static void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
4136{
4137 u32 data2, line;
4138 u32 desc, time, count, base, data1;
4139 u32 blink1, blink2, ilink1, ilink2;
4140 int rc;
4141
4142 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4143
4144 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4145 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4146 return;
4147 }
4148
4149 rc = iwl_grab_nic_access(priv);
4150 if (rc) {
4151 IWL_WARNING("Can not read from adapter at this time.\n");
4152 return;
4153 }
4154
4155 count = iwl_read_targ_mem(priv, base);
4156
4157 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4158 IWL_ERROR("Start IWL Error Log Dump:\n");
4159 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
4160 }
4161
4162 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
4163 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
4164 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
4165 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
4166 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
4167 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
4168 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
4169 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
4170 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
4171
4172 IWL_ERROR("Desc Time "
4173 "data1 data2 line\n");
4174 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4175 desc_lookup(desc), desc, time, data1, data2, line);
4176 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4177 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4178 ilink1, ilink2);
4179
4180 iwl_release_nic_access(priv);
4181}
4182
4183#define EVENT_START_OFFSET (4 * sizeof(u32))
4184
4185/**
4186 * iwl4965_print_event_log - Dump error event log to syslog
4187 *
4188 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4189 */
4190static void iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
4191 u32 num_events, u32 mode)
4192{
4193 u32 i;
4194 u32 base; /* SRAM byte address of event log header */
4195 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4196 u32 ptr; /* SRAM byte address of log data */
4197 u32 ev, time, data; /* event log data */
4198
4199 if (num_events == 0)
4200 return;
4201
4202 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4203
4204 if (mode == 0)
4205 event_size = 2 * sizeof(u32);
4206 else
4207 event_size = 3 * sizeof(u32);
4208
4209 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4210
4211 /* "time" is actually "data" for mode 0 (no timestamp).
4212 * place event id # at far right for easier visual parsing. */
4213 for (i = 0; i < num_events; i++) {
4214 ev = iwl_read_targ_mem(priv, ptr);
4215 ptr += sizeof(u32);
4216 time = iwl_read_targ_mem(priv, ptr);
4217 ptr += sizeof(u32);
4218 if (mode == 0)
4219 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4220 else {
4221 data = iwl_read_targ_mem(priv, ptr);
4222 ptr += sizeof(u32);
4223 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4224 }
4225 }
4226}
4227
4228static void iwl4965_dump_nic_event_log(struct iwl_priv *priv)
4229{
4230 int rc;
4231 u32 base; /* SRAM byte address of event log header */
4232 u32 capacity; /* event log capacity in # entries */
4233 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4234 u32 num_wraps; /* # times uCode wrapped to top of log */
4235 u32 next_entry; /* index of next entry to be written by uCode */
4236 u32 size; /* # entries that we'll print */
4237
4238 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4239 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4240 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4241 return;
4242 }
4243
4244 rc = iwl_grab_nic_access(priv);
4245 if (rc) {
4246 IWL_WARNING("Can not read from adapter at this time.\n");
4247 return;
4248 }
4249
4250 /* event log header */
4251 capacity = iwl_read_targ_mem(priv, base);
4252 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4253 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4254 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
4255
4256 size = num_wraps ? capacity : next_entry;
4257
4258 /* bail out if nothing in log */
4259 if (size == 0) {
4260 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
4261 iwl_release_nic_access(priv);
4262 return;
4263 }
4264
4265 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
4266 size, num_wraps);
4267
4268 /* if uCode has wrapped back to top of log, start at the oldest entry,
4269 * i.e the next one that uCode would fill. */
4270 if (num_wraps)
4271 iwl4965_print_event_log(priv, next_entry,
4272 capacity - next_entry, mode);
4273
4274 /* (then/else) start at top of log */
4275 iwl4965_print_event_log(priv, 0, next_entry, mode);
4276
4277 iwl_release_nic_access(priv);
4278}
4279 2183
4280/** 2184/**
4281 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card 2185 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
@@ -4289,10 +2193,10 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4289 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 2193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4290 2194
4291#ifdef CONFIG_IWLWIFI_DEBUG 2195#ifdef CONFIG_IWLWIFI_DEBUG
4292 if (iwl_debug_level & IWL_DL_FW_ERRORS) { 2196 if (priv->debug_level & IWL_DL_FW_ERRORS) {
4293 iwl4965_dump_nic_error_log(priv); 2197 iwl_dump_nic_error_log(priv);
4294 iwl4965_dump_nic_event_log(priv); 2198 iwl_dump_nic_event_log(priv);
4295 iwl4965_print_rx_config_cmd(&priv->staging_rxon); 2199 iwl4965_print_rx_config_cmd(priv);
4296 } 2200 }
4297#endif 2201#endif
4298 2202
@@ -4303,7 +2207,7 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4303 clear_bit(STATUS_READY, &priv->status); 2207 clear_bit(STATUS_READY, &priv->status);
4304 2208
4305 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2209 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4306 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, 2210 IWL_DEBUG(IWL_DL_FW_ERRORS,
4307 "Restarting adapter due to uCode error.\n"); 2211 "Restarting adapter due to uCode error.\n");
4308 2212
4309 if (iwl_is_associated(priv)) { 2213 if (iwl_is_associated(priv)) {
@@ -4311,7 +2215,8 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4311 sizeof(priv->recovery_rxon)); 2215 sizeof(priv->recovery_rxon));
4312 priv->error_recovering = 1; 2216 priv->error_recovering = 1;
4313 } 2217 }
4314 queue_work(priv->workqueue, &priv->restart); 2218 if (priv->cfg->mod_params->restart_fw)
2219 queue_work(priv->workqueue, &priv->restart);
4315 } 2220 }
4316} 2221}
4317 2222
@@ -4324,7 +2229,7 @@ static void iwl4965_error_recovery(struct iwl_priv *priv)
4324 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2229 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4325 iwl4965_commit_rxon(priv); 2230 iwl4965_commit_rxon(priv);
4326 2231
4327 iwl4965_rxon_add_station(priv, priv->bssid, 1); 2232 iwl_rxon_add_station(priv, priv->bssid, 1);
4328 2233
4329 spin_lock_irqsave(&priv->lock, flags); 2234 spin_lock_irqsave(&priv->lock, flags);
4330 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); 2235 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
@@ -4356,7 +2261,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4356 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 2261 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4357 2262
4358#ifdef CONFIG_IWLWIFI_DEBUG 2263#ifdef CONFIG_IWLWIFI_DEBUG
4359 if (iwl_debug_level & IWL_DL_ISR) { 2264 if (priv->debug_level & IWL_DL_ISR) {
4360 /* just for debug */ 2265 /* just for debug */
4361 inta_mask = iwl_read32(priv, CSR_INT_MASK); 2266 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4362 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 2267 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -4390,7 +2295,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4390 } 2295 }
4391 2296
4392#ifdef CONFIG_IWLWIFI_DEBUG 2297#ifdef CONFIG_IWLWIFI_DEBUG
4393 if (iwl_debug_level & (IWL_DL_ISR)) { 2298 if (priv->debug_level & (IWL_DL_ISR)) {
4394 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 2299 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4395 if (inta & CSR_INT_BIT_SCD) 2300 if (inta & CSR_INT_BIT_SCD)
4396 IWL_DEBUG_ISR("Scheduler finished to transmit " 2301 IWL_DEBUG_ISR("Scheduler finished to transmit "
@@ -4411,8 +2316,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4411 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 2316 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4412 hw_rf_kill = 1; 2317 hw_rf_kill = 1;
4413 2318
4414 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, 2319 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n",
4415 "RF_KILL bit toggled to %s.\n",
4416 hw_rf_kill ? "disable radio":"enable radio"); 2320 hw_rf_kill ? "disable radio":"enable radio");
4417 2321
4418 /* Queue restart only if RF_KILL switch was set to "kill" 2322 /* Queue restart only if RF_KILL switch was set to "kill"
@@ -4444,13 +2348,13 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4444 /* uCode wakes up after power-down sleep */ 2348 /* uCode wakes up after power-down sleep */
4445 if (inta & CSR_INT_BIT_WAKEUP) { 2349 if (inta & CSR_INT_BIT_WAKEUP) {
4446 IWL_DEBUG_ISR("Wakeup interrupt\n"); 2350 IWL_DEBUG_ISR("Wakeup interrupt\n");
4447 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq); 2351 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4448 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]); 2352 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
4449 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]); 2353 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
4450 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]); 2354 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
4451 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]); 2355 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
4452 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]); 2356 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
4453 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]); 2357 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
4454 2358
4455 handled |= CSR_INT_BIT_WAKEUP; 2359 handled |= CSR_INT_BIT_WAKEUP;
4456 } 2360 }
@@ -4459,13 +2363,16 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4459 * Rx "responses" (frame-received notification), and other 2363 * Rx "responses" (frame-received notification), and other
4460 * notifications from uCode come through here*/ 2364 * notifications from uCode come through here*/
4461 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 2365 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4462 iwl4965_rx_handle(priv); 2366 iwl_rx_handle(priv);
4463 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 2367 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4464 } 2368 }
4465 2369
4466 if (inta & CSR_INT_BIT_FH_TX) { 2370 if (inta & CSR_INT_BIT_FH_TX) {
4467 IWL_DEBUG_ISR("Tx interrupt\n"); 2371 IWL_DEBUG_ISR("Tx interrupt\n");
4468 handled |= CSR_INT_BIT_FH_TX; 2372 handled |= CSR_INT_BIT_FH_TX;
2373 /* FH finished to write, send event */
2374 priv->ucode_write_complete = 1;
2375 wake_up_interruptible(&priv->wait_command_queue);
4469 } 2376 }
4470 2377
4471 if (inta & ~handled) 2378 if (inta & ~handled)
@@ -4483,7 +2390,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4483 iwl4965_enable_interrupts(priv); 2390 iwl4965_enable_interrupts(priv);
4484 2391
4485#ifdef CONFIG_IWLWIFI_DEBUG 2392#ifdef CONFIG_IWLWIFI_DEBUG
4486 if (iwl_debug_level & (IWL_DL_ISR)) { 2393 if (priv->debug_level & (IWL_DL_ISR)) {
4487 inta = iwl_read32(priv, CSR_INT); 2394 inta = iwl_read32(priv, CSR_INT);
4488 inta_mask = iwl_read32(priv, CSR_INT_MASK); 2395 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4489 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 2396 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -4620,7 +2527,7 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4620 u16 active_dwell = 0; 2527 u16 active_dwell = 0;
4621 int added, i; 2528 int added, i;
4622 2529
4623 sband = iwl4965_get_hw_mode(priv, band); 2530 sband = iwl_get_hw_mode(priv, band);
4624 if (!sband) 2531 if (!sband)
4625 return 0; 2532 return 0;
4626 2533
@@ -4652,9 +2559,6 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4652 if (scan_ch->type & 1) 2559 if (scan_ch->type & 1)
4653 scan_ch->type |= (direct_mask << 1); 2560 scan_ch->type |= (direct_mask << 1);
4654 2561
4655 if (is_channel_narrow(ch_info))
4656 scan_ch->type |= (1 << 7);
4657
4658 scan_ch->active_dwell = cpu_to_le16(active_dwell); 2562 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4659 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 2563 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4660 2564
@@ -4687,163 +2591,6 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4687 return added; 2591 return added;
4688} 2592}
4689 2593
4690static void iwl4965_init_hw_rates(struct iwl_priv *priv,
4691 struct ieee80211_rate *rates)
4692{
4693 int i;
4694
4695 for (i = 0; i < IWL_RATE_COUNT; i++) {
4696 rates[i].bitrate = iwl4965_rates[i].ieee * 5;
4697 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4698 rates[i].hw_value_short = i;
4699 rates[i].flags = 0;
4700 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
4701 /*
4702 * If CCK != 1M then set short preamble rate flag.
4703 */
4704 rates[i].flags |=
4705 (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ?
4706 0 : IEEE80211_RATE_SHORT_PREAMBLE;
4707 }
4708 }
4709}
4710
4711/**
4712 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
4713 */
4714int iwl4965_init_geos(struct iwl_priv *priv)
4715{
4716 struct iwl_channel_info *ch;
4717 struct ieee80211_supported_band *sband;
4718 struct ieee80211_channel *channels;
4719 struct ieee80211_channel *geo_ch;
4720 struct ieee80211_rate *rates;
4721 int i = 0;
4722
4723 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4724 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
4725 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4726 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4727 return 0;
4728 }
4729
4730 channels = kzalloc(sizeof(struct ieee80211_channel) *
4731 priv->channel_count, GFP_KERNEL);
4732 if (!channels)
4733 return -ENOMEM;
4734
4735 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
4736 GFP_KERNEL);
4737 if (!rates) {
4738 kfree(channels);
4739 return -ENOMEM;
4740 }
4741
4742 /* 5.2GHz channels start after the 2.4GHz channels */
4743 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4744 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
4745 /* just OFDM */
4746 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4747 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4748
4749 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
4750
4751 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4752 sband->channels = channels;
4753 /* OFDM & CCK */
4754 sband->bitrates = rates;
4755 sband->n_bitrates = IWL_RATE_COUNT;
4756
4757 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
4758
4759 priv->ieee_channels = channels;
4760 priv->ieee_rates = rates;
4761
4762 iwl4965_init_hw_rates(priv, rates);
4763
4764 for (i = 0; i < priv->channel_count; i++) {
4765 ch = &priv->channel_info[i];
4766
4767 /* FIXME: might be removed if scan is OK */
4768 if (!is_channel_valid(ch))
4769 continue;
4770
4771 if (is_channel_a_band(ch))
4772 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4773 else
4774 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4775
4776 geo_ch = &sband->channels[sband->n_channels++];
4777
4778 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
4779 geo_ch->max_power = ch->max_power_avg;
4780 geo_ch->max_antenna_gain = 0xff;
4781 geo_ch->hw_value = ch->channel;
4782
4783 if (is_channel_valid(ch)) {
4784 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4785 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
4786
4787 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4788 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
4789
4790 if (ch->flags & EEPROM_CHANNEL_RADAR)
4791 geo_ch->flags |= IEEE80211_CHAN_RADAR;
4792
4793 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4794 priv->max_channel_txpower_limit =
4795 ch->max_power_avg;
4796 } else {
4797 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4798 }
4799
4800 /* Save flags for reg domain usage */
4801 geo_ch->orig_flags = geo_ch->flags;
4802
4803 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4804 ch->channel, geo_ch->center_freq,
4805 is_channel_a_band(ch) ? "5.2" : "2.4",
4806 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4807 "restricted" : "valid",
4808 geo_ch->flags);
4809 }
4810
4811 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4812 priv->cfg->sku & IWL_SKU_A) {
4813 printk(KERN_INFO DRV_NAME
4814 ": Incorrectly detected BG card as ABG. Please send "
4815 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4816 priv->pci_dev->device, priv->pci_dev->subsystem_device);
4817 priv->cfg->sku &= ~IWL_SKU_A;
4818 }
4819
4820 printk(KERN_INFO DRV_NAME
4821 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4822 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4823 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4824
4825 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4826 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4827 &priv->bands[IEEE80211_BAND_2GHZ];
4828 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4829 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4830 &priv->bands[IEEE80211_BAND_5GHZ];
4831
4832 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4833
4834 return 0;
4835}
4836
4837/*
4838 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
4839 */
4840void iwl4965_free_geos(struct iwl_priv *priv)
4841{
4842 kfree(priv->ieee_channels);
4843 kfree(priv->ieee_rates);
4844 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4845}
4846
4847/****************************************************************************** 2594/******************************************************************************
4848 * 2595 *
4849 * uCode download functions 2596 * uCode download functions
@@ -4860,146 +2607,6 @@ static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
4860 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 2607 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
4861} 2608}
4862 2609
4863/**
4864 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
4865 * looking at all data.
4866 */
4867static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
4868 u32 len)
4869{
4870 u32 val;
4871 u32 save_len = len;
4872 int rc = 0;
4873 u32 errcnt;
4874
4875 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4876
4877 rc = iwl_grab_nic_access(priv);
4878 if (rc)
4879 return rc;
4880
4881 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
4882
4883 errcnt = 0;
4884 for (; len > 0; len -= sizeof(u32), image++) {
4885 /* read data comes through single port, auto-incr addr */
4886 /* NOTE: Use the debugless read so we don't flood kernel log
4887 * if IWL_DL_IO is set */
4888 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4889 if (val != le32_to_cpu(*image)) {
4890 IWL_ERROR("uCode INST section is invalid at "
4891 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4892 save_len - len, val, le32_to_cpu(*image));
4893 rc = -EIO;
4894 errcnt++;
4895 if (errcnt >= 20)
4896 break;
4897 }
4898 }
4899
4900 iwl_release_nic_access(priv);
4901
4902 if (!errcnt)
4903 IWL_DEBUG_INFO
4904 ("ucode image in INSTRUCTION memory is good\n");
4905
4906 return rc;
4907}
4908
4909
4910/**
4911 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
4912 * using sample data 100 bytes apart. If these sample points are good,
4913 * it's a pretty good bet that everything between them is good, too.
4914 */
4915static int iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
4916{
4917 u32 val;
4918 int rc = 0;
4919 u32 errcnt = 0;
4920 u32 i;
4921
4922 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4923
4924 rc = iwl_grab_nic_access(priv);
4925 if (rc)
4926 return rc;
4927
4928 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4929 /* read data comes through single port, auto-incr addr */
4930 /* NOTE: Use the debugless read so we don't flood kernel log
4931 * if IWL_DL_IO is set */
4932 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4933 i + RTC_INST_LOWER_BOUND);
4934 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4935 if (val != le32_to_cpu(*image)) {
4936#if 0 /* Enable this if you want to see details */
4937 IWL_ERROR("uCode INST section is invalid at "
4938 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4939 i, val, *image);
4940#endif
4941 rc = -EIO;
4942 errcnt++;
4943 if (errcnt >= 3)
4944 break;
4945 }
4946 }
4947
4948 iwl_release_nic_access(priv);
4949
4950 return rc;
4951}
4952
4953
4954/**
4955 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
4956 * and verify its contents
4957 */
4958static int iwl4965_verify_ucode(struct iwl_priv *priv)
4959{
4960 __le32 *image;
4961 u32 len;
4962 int rc = 0;
4963
4964 /* Try bootstrap */
4965 image = (__le32 *)priv->ucode_boot.v_addr;
4966 len = priv->ucode_boot.len;
4967 rc = iwl4965_verify_inst_sparse(priv, image, len);
4968 if (rc == 0) {
4969 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4970 return 0;
4971 }
4972
4973 /* Try initialize */
4974 image = (__le32 *)priv->ucode_init.v_addr;
4975 len = priv->ucode_init.len;
4976 rc = iwl4965_verify_inst_sparse(priv, image, len);
4977 if (rc == 0) {
4978 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4979 return 0;
4980 }
4981
4982 /* Try runtime/protocol */
4983 image = (__le32 *)priv->ucode_code.v_addr;
4984 len = priv->ucode_code.len;
4985 rc = iwl4965_verify_inst_sparse(priv, image, len);
4986 if (rc == 0) {
4987 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4988 return 0;
4989 }
4990
4991 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
4992
4993 /* Since nothing seems to match, show first several data entries in
4994 * instruction SRAM, so maybe visual inspection will give a clue.
4995 * Selection of bootstrap image (vs. other images) is arbitrary. */
4996 image = (__le32 *)priv->ucode_boot.v_addr;
4997 len = priv->ucode_boot.len;
4998 rc = iwl4965_verify_inst_full(priv, image, len);
4999
5000 return rc;
5001}
5002
5003static void iwl4965_nic_start(struct iwl_priv *priv) 2610static void iwl4965_nic_start(struct iwl_priv *priv)
5004{ 2611{
5005 /* Remove all resets to allow NIC to operate */ 2612 /* Remove all resets to allow NIC to operate */
@@ -5075,34 +2682,34 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5075 } 2682 }
5076 2683
5077 /* Verify that uCode images will fit in card's SRAM */ 2684 /* Verify that uCode images will fit in card's SRAM */
5078 if (inst_size > IWL_MAX_INST_SIZE) { 2685 if (inst_size > priv->hw_params.max_inst_size) {
5079 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n", 2686 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
5080 inst_size); 2687 inst_size);
5081 ret = -EINVAL; 2688 ret = -EINVAL;
5082 goto err_release; 2689 goto err_release;
5083 } 2690 }
5084 2691
5085 if (data_size > IWL_MAX_DATA_SIZE) { 2692 if (data_size > priv->hw_params.max_data_size) {
5086 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n", 2693 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
5087 data_size); 2694 data_size);
5088 ret = -EINVAL; 2695 ret = -EINVAL;
5089 goto err_release; 2696 goto err_release;
5090 } 2697 }
5091 if (init_size > IWL_MAX_INST_SIZE) { 2698 if (init_size > priv->hw_params.max_inst_size) {
5092 IWL_DEBUG_INFO 2699 IWL_DEBUG_INFO
5093 ("uCode init instr len %d too large to fit in\n", 2700 ("uCode init instr len %d too large to fit in\n",
5094 init_size); 2701 init_size);
5095 ret = -EINVAL; 2702 ret = -EINVAL;
5096 goto err_release; 2703 goto err_release;
5097 } 2704 }
5098 if (init_data_size > IWL_MAX_DATA_SIZE) { 2705 if (init_data_size > priv->hw_params.max_data_size) {
5099 IWL_DEBUG_INFO 2706 IWL_DEBUG_INFO
5100 ("uCode init data len %d too large to fit in\n", 2707 ("uCode init data len %d too large to fit in\n",
5101 init_data_size); 2708 init_data_size);
5102 ret = -EINVAL; 2709 ret = -EINVAL;
5103 goto err_release; 2710 goto err_release;
5104 } 2711 }
5105 if (boot_size > IWL_MAX_BSM_SIZE) { 2712 if (boot_size > priv->hw_params.max_bsm_size) {
5106 IWL_DEBUG_INFO 2713 IWL_DEBUG_INFO
5107 ("uCode boot instr len %d too large to fit in\n", 2714 ("uCode boot instr len %d too large to fit in\n",
5108 boot_size); 2715 boot_size);
@@ -5203,111 +2810,12 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5203 return ret; 2810 return ret;
5204} 2811}
5205 2812
5206
5207/** 2813/**
5208 * iwl4965_set_ucode_ptrs - Set uCode address location 2814 * iwl_alive_start - called after REPLY_ALIVE notification received
5209 *
5210 * Tell initialization uCode where to find runtime uCode.
5211 *
5212 * BSM registers initially contain pointers to initialization uCode.
5213 * We need to replace them to load runtime uCode inst and data,
5214 * and to save runtime data when powering down.
5215 */
5216static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
5217{
5218 dma_addr_t pinst;
5219 dma_addr_t pdata;
5220 int rc = 0;
5221 unsigned long flags;
5222
5223 /* bits 35:4 for 4965 */
5224 pinst = priv->ucode_code.p_addr >> 4;
5225 pdata = priv->ucode_data_backup.p_addr >> 4;
5226
5227 spin_lock_irqsave(&priv->lock, flags);
5228 rc = iwl_grab_nic_access(priv);
5229 if (rc) {
5230 spin_unlock_irqrestore(&priv->lock, flags);
5231 return rc;
5232 }
5233
5234 /* Tell bootstrap uCode where to find image to load */
5235 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5236 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5237 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5238 priv->ucode_data.len);
5239
5240 /* Inst bytecount must be last to set up, bit 31 signals uCode
5241 * that all new ptr/size info is in place */
5242 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5243 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5244
5245 iwl_release_nic_access(priv);
5246
5247 spin_unlock_irqrestore(&priv->lock, flags);
5248
5249 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5250
5251 return rc;
5252}
5253
5254/**
5255 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
5256 *
5257 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5258 *
5259 * The 4965 "initialize" ALIVE reply contains calibration data for:
5260 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
5261 * (3945 does not contain this data).
5262 *
5263 * Tell "initialize" uCode to go ahead and load the runtime uCode.
5264*/
5265static void iwl4965_init_alive_start(struct iwl_priv *priv)
5266{
5267 /* Check alive response for "valid" sign from uCode */
5268 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5269 /* We had an error bringing up the hardware, so take it
5270 * all the way back down so we can try again */
5271 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5272 goto restart;
5273 }
5274
5275 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5276 * This is a paranoid check, because we would not have gotten the
5277 * "initialize" alive if code weren't properly loaded. */
5278 if (iwl4965_verify_ucode(priv)) {
5279 /* Runtime instruction load was bad;
5280 * take it all the way back down so we can try again */
5281 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5282 goto restart;
5283 }
5284
5285 /* Calculate temperature */
5286 priv->temperature = iwl4965_get_temperature(priv);
5287
5288 /* Send pointers to protocol/runtime uCode image ... init code will
5289 * load and launch runtime uCode, which will send us another "Alive"
5290 * notification. */
5291 IWL_DEBUG_INFO("Initialization Alive received.\n");
5292 if (iwl4965_set_ucode_ptrs(priv)) {
5293 /* Runtime instruction load won't happen;
5294 * take it all the way back down so we can try again */
5295 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5296 goto restart;
5297 }
5298 return;
5299
5300 restart:
5301 queue_work(priv->workqueue, &priv->restart);
5302}
5303
5304
5305/**
5306 * iwl4965_alive_start - called after REPLY_ALIVE notification received
5307 * from protocol/runtime uCode (initialization uCode's 2815 * from protocol/runtime uCode (initialization uCode's
5308 * Alive gets handled by iwl4965_init_alive_start()). 2816 * Alive gets handled by iwl_init_alive_start()).
5309 */ 2817 */
5310static void iwl4965_alive_start(struct iwl_priv *priv) 2818static void iwl_alive_start(struct iwl_priv *priv)
5311{ 2819{
5312 int ret = 0; 2820 int ret = 0;
5313 2821
@@ -5323,7 +2831,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5323 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2831 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5324 * This is a paranoid check, because we would not have gotten the 2832 * This is a paranoid check, because we would not have gotten the
5325 * "runtime" alive if code weren't properly loaded. */ 2833 * "runtime" alive if code weren't properly loaded. */
5326 if (iwl4965_verify_ucode(priv)) { 2834 if (iwl_verify_ucode(priv)) {
5327 /* Runtime instruction load was bad; 2835 /* Runtime instruction load was bad;
5328 * take it all the way back down so we can try again */ 2836 * take it all the way back down so we can try again */
5329 IWL_DEBUG_INFO("Bad runtime uCode load.\n"); 2837 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
@@ -5331,7 +2839,6 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5331 } 2839 }
5332 2840
5333 iwlcore_clear_stations_table(priv); 2841 iwlcore_clear_stations_table(priv);
5334
5335 ret = priv->cfg->ops->lib->alive_notify(priv); 2842 ret = priv->cfg->ops->lib->alive_notify(priv);
5336 if (ret) { 2843 if (ret) {
5337 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", 2844 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
@@ -5348,16 +2855,14 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5348 if (iwl_is_rfkill(priv)) 2855 if (iwl_is_rfkill(priv))
5349 return; 2856 return;
5350 2857
5351 ieee80211_start_queues(priv->hw); 2858 ieee80211_wake_queues(priv->hw);
5352 2859
5353 priv->active_rate = priv->rates_mask; 2860 priv->active_rate = priv->rates_mask;
5354 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 2861 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5355 2862
5356 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
5357
5358 if (iwl_is_associated(priv)) { 2863 if (iwl_is_associated(priv)) {
5359 struct iwl4965_rxon_cmd *active_rxon = 2864 struct iwl_rxon_cmd *active_rxon =
5360 (struct iwl4965_rxon_cmd *)(&priv->active_rxon); 2865 (struct iwl_rxon_cmd *)&priv->active_rxon;
5361 2866
5362 memcpy(&priv->staging_rxon, &priv->active_rxon, 2867 memcpy(&priv->staging_rxon, &priv->active_rxon,
5363 sizeof(priv->staging_rxon)); 2868 sizeof(priv->staging_rxon));
@@ -5371,12 +2876,12 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5371 /* Configure Bluetooth device coexistence support */ 2876 /* Configure Bluetooth device coexistence support */
5372 iwl4965_send_bt_config(priv); 2877 iwl4965_send_bt_config(priv);
5373 2878
2879 iwl_reset_run_time_calib(priv);
2880
5374 /* Configure the adapter for unassociated operation */ 2881 /* Configure the adapter for unassociated operation */
5375 iwl4965_commit_rxon(priv); 2882 iwl4965_commit_rxon(priv);
5376 2883
5377 /* At this point, the NIC is initialized and operational */ 2884 /* At this point, the NIC is initialized and operational */
5378 priv->notif_missed_beacons = 0;
5379
5380 iwl4965_rf_kill_ct_config(priv); 2885 iwl4965_rf_kill_ct_config(priv);
5381 2886
5382 iwl_leds_register(priv); 2887 iwl_leds_register(priv);
@@ -5402,12 +2907,9 @@ static void __iwl4965_down(struct iwl_priv *priv)
5402{ 2907{
5403 unsigned long flags; 2908 unsigned long flags;
5404 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2909 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5405 struct ieee80211_conf *conf = NULL;
5406 2910
5407 IWL_DEBUG_INFO(DRV_NAME " is going down\n"); 2911 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5408 2912
5409 conf = ieee80211_get_hw_conf(priv->hw);
5410
5411 if (!exit_pending) 2913 if (!exit_pending)
5412 set_bit(STATUS_EXIT_PENDING, &priv->status); 2914 set_bit(STATUS_EXIT_PENDING, &priv->status);
5413 2915
@@ -5469,8 +2971,8 @@ static void __iwl4965_down(struct iwl_priv *priv)
5469 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2971 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5470 spin_unlock_irqrestore(&priv->lock, flags); 2972 spin_unlock_irqrestore(&priv->lock, flags);
5471 2973
5472 iwl4965_hw_txq_ctx_stop(priv); 2974 iwl_txq_ctx_stop(priv);
5473 iwl4965_hw_rxq_stop(priv); 2975 iwl_rxq_stop(priv);
5474 2976
5475 spin_lock_irqsave(&priv->lock, flags); 2977 spin_lock_irqsave(&priv->lock, flags);
5476 if (!iwl_grab_nic_access(priv)) { 2978 if (!iwl_grab_nic_access(priv)) {
@@ -5482,19 +2984,19 @@ static void __iwl4965_down(struct iwl_priv *priv)
5482 2984
5483 udelay(5); 2985 udelay(5);
5484 2986
5485 iwl4965_hw_nic_stop_master(priv); 2987 /* FIXME: apm_ops.suspend(priv) */
5486 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 2988 priv->cfg->ops->lib->apm_ops.reset(priv);
5487 iwl4965_hw_nic_reset(priv); 2989 priv->cfg->ops->lib->free_shared_mem(priv);
5488 2990
5489 exit: 2991 exit:
5490 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp)); 2992 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
5491 2993
5492 if (priv->ibss_beacon) 2994 if (priv->ibss_beacon)
5493 dev_kfree_skb(priv->ibss_beacon); 2995 dev_kfree_skb(priv->ibss_beacon);
5494 priv->ibss_beacon = NULL; 2996 priv->ibss_beacon = NULL;
5495 2997
5496 /* clear out any free frames */ 2998 /* clear out any free frames */
5497 iwl4965_clear_free_frames(priv); 2999 iwl_clear_free_frames(priv);
5498} 3000}
5499 3001
5500static void iwl4965_down(struct iwl_priv *priv) 3002static void iwl4965_down(struct iwl_priv *priv)
@@ -5546,7 +3048,13 @@ static int __iwl4965_up(struct iwl_priv *priv)
5546 iwl_rfkill_set_hw_state(priv); 3048 iwl_rfkill_set_hw_state(priv);
5547 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 3049 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5548 3050
5549 ret = priv->cfg->ops->lib->hw_nic_init(priv); 3051 ret = priv->cfg->ops->lib->alloc_shared_mem(priv);
3052 if (ret) {
3053 IWL_ERROR("Unable to allocate shared memory\n");
3054 return ret;
3055 }
3056
3057 ret = iwl_hw_nic_init(priv);
5550 if (ret) { 3058 if (ret) {
5551 IWL_ERROR("Unable to init nic\n"); 3059 IWL_ERROR("Unable to init nic\n");
5552 return ret; 3060 return ret;
@@ -5613,7 +3121,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
5613 * 3121 *
5614 *****************************************************************************/ 3122 *****************************************************************************/
5615 3123
5616static void iwl4965_bg_init_alive_start(struct work_struct *data) 3124static void iwl_bg_init_alive_start(struct work_struct *data)
5617{ 3125{
5618 struct iwl_priv *priv = 3126 struct iwl_priv *priv =
5619 container_of(data, struct iwl_priv, init_alive_start.work); 3127 container_of(data, struct iwl_priv, init_alive_start.work);
@@ -5622,11 +3130,11 @@ static void iwl4965_bg_init_alive_start(struct work_struct *data)
5622 return; 3130 return;
5623 3131
5624 mutex_lock(&priv->mutex); 3132 mutex_lock(&priv->mutex);
5625 iwl4965_init_alive_start(priv); 3133 priv->cfg->ops->lib->init_alive_start(priv);
5626 mutex_unlock(&priv->mutex); 3134 mutex_unlock(&priv->mutex);
5627} 3135}
5628 3136
5629static void iwl4965_bg_alive_start(struct work_struct *data) 3137static void iwl_bg_alive_start(struct work_struct *data)
5630{ 3138{
5631 struct iwl_priv *priv = 3139 struct iwl_priv *priv =
5632 container_of(data, struct iwl_priv, alive_start.work); 3140 container_of(data, struct iwl_priv, alive_start.work);
@@ -5635,7 +3143,7 @@ static void iwl4965_bg_alive_start(struct work_struct *data)
5635 return; 3143 return;
5636 3144
5637 mutex_lock(&priv->mutex); 3145 mutex_lock(&priv->mutex);
5638 iwl4965_alive_start(priv); 3146 iwl_alive_start(priv);
5639 mutex_unlock(&priv->mutex); 3147 mutex_unlock(&priv->mutex);
5640} 3148}
5641 3149
@@ -5651,7 +3159,7 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5651 mutex_lock(&priv->mutex); 3159 mutex_lock(&priv->mutex);
5652 3160
5653 if (!iwl_is_rfkill(priv)) { 3161 if (!iwl_is_rfkill(priv)) {
5654 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, 3162 IWL_DEBUG(IWL_DL_RF_KILL,
5655 "HW and/or SW RF Kill no longer active, restarting " 3163 "HW and/or SW RF Kill no longer active, restarting "
5656 "device\n"); 3164 "device\n");
5657 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 3165 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -5674,6 +3182,24 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5674 mutex_unlock(&priv->mutex); 3182 mutex_unlock(&priv->mutex);
5675} 3183}
5676 3184
3185static void iwl4965_bg_set_monitor(struct work_struct *work)
3186{
3187 struct iwl_priv *priv = container_of(work,
3188 struct iwl_priv, set_monitor);
3189
3190 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
3191
3192 mutex_lock(&priv->mutex);
3193
3194 if (!iwl_is_ready(priv))
3195 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
3196 else
3197 if (iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
3198 IWL_ERROR("iwl4965_set_mode() failed\n");
3199
3200 mutex_unlock(&priv->mutex);
3201}
3202
5677#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 3203#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5678 3204
5679static void iwl4965_bg_scan_check(struct work_struct *data) 3205static void iwl4965_bg_scan_check(struct work_struct *data)
@@ -5687,9 +3213,9 @@ static void iwl4965_bg_scan_check(struct work_struct *data)
5687 mutex_lock(&priv->mutex); 3213 mutex_lock(&priv->mutex);
5688 if (test_bit(STATUS_SCANNING, &priv->status) || 3214 if (test_bit(STATUS_SCANNING, &priv->status) ||
5689 test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 3215 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5690 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, 3216 IWL_DEBUG(IWL_DL_SCAN, "Scan completion watchdog resetting "
5691 "Scan completion watchdog resetting adapter (%dms)\n", 3217 "adapter (%dms)\n",
5692 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); 3218 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
5693 3219
5694 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 3220 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
5695 iwl4965_send_scan_abort(priv); 3221 iwl4965_send_scan_abort(priv);
@@ -5887,6 +3413,8 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
5887 direct_mask, 3413 direct_mask,
5888 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 3414 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5889 3415
3416 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
3417 RXON_FILTER_BCON_AWARE_MSK);
5890 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 3418 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
5891 scan->channel_count * sizeof(struct iwl4965_scan_channel); 3419 scan->channel_count * sizeof(struct iwl4965_scan_channel);
5892 cmd.data = scan; 3420 cmd.data = scan;
@@ -5941,7 +3469,7 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
5941 return; 3469 return;
5942 3470
5943 mutex_lock(&priv->mutex); 3471 mutex_lock(&priv->mutex);
5944 iwl4965_rx_replenish(priv); 3472 iwl_rx_replenish(priv);
5945 mutex_unlock(&priv->mutex); 3473 mutex_unlock(&priv->mutex);
5946} 3474}
5947 3475
@@ -5989,9 +3517,9 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
5989 3517
5990#ifdef CONFIG_IWL4965_HT 3518#ifdef CONFIG_IWL4965_HT
5991 if (priv->current_ht_config.is_ht) 3519 if (priv->current_ht_config.is_ht)
5992 iwl4965_set_rxon_ht(priv, &priv->current_ht_config); 3520 iwl_set_rxon_ht(priv, &priv->current_ht_config);
5993#endif /* CONFIG_IWL4965_HT*/ 3521#endif /* CONFIG_IWL4965_HT*/
5994 iwl4965_set_rxon_chain(priv); 3522 iwl_set_rxon_chain(priv);
5995 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3523 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
5996 3524
5997 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", 3525 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
@@ -6025,8 +3553,8 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6025 /* clear out the station table */ 3553 /* clear out the station table */
6026 iwlcore_clear_stations_table(priv); 3554 iwlcore_clear_stations_table(priv);
6027 3555
6028 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 3556 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
6029 iwl4965_rxon_add_station(priv, priv->bssid, 0); 3557 iwl_rxon_add_station(priv, priv->bssid, 0);
6030 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID); 3558 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
6031 iwl4965_send_beacon_cmd(priv); 3559 iwl4965_send_beacon_cmd(priv);
6032 3560
@@ -6040,17 +3568,16 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6040 3568
6041 iwl4965_sequence_reset(priv); 3569 iwl4965_sequence_reset(priv);
6042 3570
6043#ifdef CONFIG_IWL4965_SENSITIVITY
6044 /* Enable Rx differential gain and sensitivity calibrations */ 3571 /* Enable Rx differential gain and sensitivity calibrations */
6045 iwl4965_chain_noise_reset(priv); 3572 iwl_chain_noise_reset(priv);
6046 priv->start_calib = 1; 3573 priv->start_calib = 1;
6047#endif /* CONFIG_IWL4965_SENSITIVITY */
6048 3574
6049 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 3575 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6050 priv->assoc_station_added = 1; 3576 priv->assoc_station_added = 1;
6051 3577
6052 iwl4965_activate_qos(priv, 0); 3578 iwl4965_activate_qos(priv, 0);
6053 3579
3580 iwl_power_update_mode(priv, 0);
6054 /* we have just associated, don't start scan too early */ 3581 /* we have just associated, don't start scan too early */
6055 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 3582 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6056} 3583}
@@ -6089,7 +3616,7 @@ static void iwl4965_bg_scan_completed(struct work_struct *work)
6089 struct iwl_priv *priv = 3616 struct iwl_priv *priv =
6090 container_of(work, struct iwl_priv, scan_completed); 3617 container_of(work, struct iwl_priv, scan_completed);
6091 3618
6092 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); 3619 IWL_DEBUG(IWL_DL_SCAN, "SCAN complete scan\n");
6093 3620
6094 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3621 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6095 return; 3622 return;
@@ -6138,7 +3665,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6138 /* we should be verifying the device is ready to be opened */ 3665 /* we should be verifying the device is ready to be opened */
6139 mutex_lock(&priv->mutex); 3666 mutex_lock(&priv->mutex);
6140 3667
6141 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd)); 3668 memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd));
6142 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 3669 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6143 * ucode filename and max sizes are card-specific. */ 3670 * ucode filename and max sizes are card-specific. */
6144 3671
@@ -6163,21 +3690,23 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6163 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 3690 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6164 return 0; 3691 return 0;
6165 3692
6166 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 3693 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
6167 * mac80211 will not be run successfully. */ 3694 * mac80211 will not be run successfully. */
6168 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 3695 if (priv->ucode_type == UCODE_RT) {
6169 test_bit(STATUS_READY, &priv->status), 3696 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
6170 UCODE_READY_TIMEOUT); 3697 test_bit(STATUS_READY, &priv->status),
6171 if (!ret) { 3698 UCODE_READY_TIMEOUT);
6172 if (!test_bit(STATUS_READY, &priv->status)) { 3699 if (!ret) {
6173 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n", 3700 if (!test_bit(STATUS_READY, &priv->status)) {
6174 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 3701 IWL_ERROR("START_ALIVE timeout after %dms.\n",
6175 ret = -ETIMEDOUT; 3702 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6176 goto out_release_irq; 3703 ret = -ETIMEDOUT;
3704 goto out_release_irq;
3705 }
6177 } 3706 }
6178 }
6179 3707
6180 priv->is_open = 1; 3708 priv->is_open = 1;
3709 }
6181 IWL_DEBUG_MAC80211("leave\n"); 3710 IWL_DEBUG_MAC80211("leave\n");
6182 return 0; 3711 return 0;
6183 3712
@@ -6225,8 +3754,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
6225 IWL_DEBUG_MAC80211("leave\n"); 3754 IWL_DEBUG_MAC80211("leave\n");
6226} 3755}
6227 3756
6228static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 3757static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6229 struct ieee80211_tx_control *ctl)
6230{ 3758{
6231 struct iwl_priv *priv = hw->priv; 3759 struct iwl_priv *priv = hw->priv;
6232 3760
@@ -6238,9 +3766,9 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6238 } 3766 }
6239 3767
6240 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 3768 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6241 ctl->tx_rate->bitrate); 3769 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6242 3770
6243 if (iwl4965_tx_skb(priv, skb, ctl)) 3771 if (iwl_tx_skb(priv, skb))
6244 dev_kfree_skb_any(skb); 3772 dev_kfree_skb_any(skb);
6245 3773
6246 IWL_DEBUG_MAC80211("leave\n"); 3774 IWL_DEBUG_MAC80211("leave\n");
@@ -6295,6 +3823,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6295 const struct iwl_channel_info *ch_info; 3823 const struct iwl_channel_info *ch_info;
6296 unsigned long flags; 3824 unsigned long flags;
6297 int ret = 0; 3825 int ret = 0;
3826 u16 channel;
6298 3827
6299 mutex_lock(&priv->mutex); 3828 mutex_lock(&priv->mutex);
6300 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 3829 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
@@ -6315,22 +3844,21 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6315 return 0; 3844 return 0;
6316 } 3845 }
6317 3846
6318 spin_lock_irqsave(&priv->lock, flags); 3847 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
6319 3848 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
6320 ch_info = iwl_get_channel_info(priv, conf->channel->band,
6321 ieee80211_frequency_to_channel(conf->channel->center_freq));
6322 if (!is_channel_valid(ch_info)) { 3849 if (!is_channel_valid(ch_info)) {
6323 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 3850 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6324 spin_unlock_irqrestore(&priv->lock, flags);
6325 ret = -EINVAL; 3851 ret = -EINVAL;
6326 goto out; 3852 goto out;
6327 } 3853 }
6328 3854
3855 spin_lock_irqsave(&priv->lock, flags);
3856
6329#ifdef CONFIG_IWL4965_HT 3857#ifdef CONFIG_IWL4965_HT
6330 /* if we are switching from ht to 2.4 clear flags 3858 /* if we are switching from ht to 2.4 clear flags
6331 * from any ht related info since 2.4 does not 3859 * from any ht related info since 2.4 does not
6332 * support ht */ 3860 * support ht */
6333 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel->hw_value) 3861 if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
6334#ifdef IEEE80211_CONF_CHANNEL_SWITCH 3862#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6335 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) 3863 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
6336#endif 3864#endif
@@ -6338,10 +3866,9 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6338 priv->staging_rxon.flags = 0; 3866 priv->staging_rxon.flags = 0;
6339#endif /* CONFIG_IWL4965_HT */ 3867#endif /* CONFIG_IWL4965_HT */
6340 3868
6341 iwlcore_set_rxon_channel(priv, conf->channel->band, 3869 iwl_set_rxon_channel(priv, conf->channel->band, channel);
6342 ieee80211_frequency_to_channel(conf->channel->center_freq));
6343 3870
6344 iwl4965_set_flags_for_phymode(priv, conf->channel->band); 3871 iwl_set_flags_for_band(priv, conf->channel->band);
6345 3872
6346 /* The list of supported rates and rate mask can be different 3873 /* The list of supported rates and rate mask can be different
6347 * for each band; since the band may have changed, reset 3874 * for each band; since the band may have changed, reset
@@ -6410,7 +3937,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6410 IWL_WARNING("REPLY_RXON_TIMING failed - " 3937 IWL_WARNING("REPLY_RXON_TIMING failed - "
6411 "Attempting to continue.\n"); 3938 "Attempting to continue.\n");
6412 3939
6413 iwl4965_set_rxon_chain(priv); 3940 iwl_set_rxon_chain(priv);
6414 3941
6415 /* FIXME: what should be the assoc_id for AP? */ 3942 /* FIXME: what should be the assoc_id for AP? */
6416 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3943 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -6438,7 +3965,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6438 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3965 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6439 iwl4965_commit_rxon(priv); 3966 iwl4965_commit_rxon(priv);
6440 iwl4965_activate_qos(priv, 1); 3967 iwl4965_activate_qos(priv, 1);
6441 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 3968 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
6442 } 3969 }
6443 iwl4965_send_beacon_cmd(priv); 3970 iwl4965_send_beacon_cmd(priv);
6444 3971
@@ -6527,7 +4054,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6527 else { 4054 else {
6528 rc = iwl4965_commit_rxon(priv); 4055 rc = iwl4965_commit_rxon(priv);
6529 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 4056 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
6530 iwl4965_rxon_add_station( 4057 iwl_rxon_add_station(
6531 priv, priv->active_rxon.bssid_addr, 1); 4058 priv, priv->active_rxon.bssid_addr, 1);
6532 } 4059 }
6533 4060
@@ -6562,7 +4089,22 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
6562 * XXX: dummy 4089 * XXX: dummy
6563 * see also iwl4965_connection_init_rx_config 4090 * see also iwl4965_connection_init_rx_config
6564 */ 4091 */
6565 *total_flags = 0; 4092 struct iwl_priv *priv = hw->priv;
4093 int new_flags = 0;
4094 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
4095 if (*total_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
4096 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
4097 IEEE80211_IF_TYPE_MNTR,
4098 changed_flags, *total_flags);
4099 /* queue work 'cuz mac80211 is holding a lock which
4100 * prevents us from issuing (synchronous) f/w cmds */
4101 queue_work(priv->workqueue, &priv->set_monitor);
4102 new_flags &= FIF_PROMISC_IN_BSS |
4103 FIF_OTHER_BSS |
4104 FIF_ALLMULTI;
4105 }
4106 }
4107 *total_flags = new_flags;
6566} 4108}
6567 4109
6568static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw, 4110static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
@@ -6592,64 +4134,6 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
6592 4134
6593} 4135}
6594 4136
6595
6596#ifdef CONFIG_IWL4965_HT
6597static void iwl4965_ht_conf(struct iwl_priv *priv,
6598 struct ieee80211_bss_conf *bss_conf)
6599{
6600 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
6601 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
6602 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
6603
6604 IWL_DEBUG_MAC80211("enter: \n");
6605
6606 iwl_conf->is_ht = bss_conf->assoc_ht;
6607
6608 if (!iwl_conf->is_ht)
6609 return;
6610
6611 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6612
6613 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
6614 iwl_conf->sgf |= 0x1;
6615 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
6616 iwl_conf->sgf |= 0x2;
6617
6618 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
6619 iwl_conf->max_amsdu_size =
6620 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
6621
6622 iwl_conf->supported_chan_width =
6623 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
6624 iwl_conf->extension_chan_offset =
6625 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
6626 /* If no above or below channel supplied disable FAT channel */
6627 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
6628 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
6629 iwl_conf->supported_chan_width = 0;
6630
6631 iwl_conf->tx_mimo_ps_mode =
6632 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6633 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
6634
6635 iwl_conf->control_channel = ht_bss_conf->primary_channel;
6636 iwl_conf->tx_chan_width =
6637 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
6638 iwl_conf->ht_protection =
6639 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
6640 iwl_conf->non_GF_STA_present =
6641 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
6642
6643 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
6644 IWL_DEBUG_MAC80211("leave\n");
6645}
6646#else
6647static inline void iwl4965_ht_conf(struct iwl_priv *priv,
6648 struct ieee80211_bss_conf *bss_conf)
6649{
6650}
6651#endif
6652
6653#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 4137#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6654static void iwl4965_bss_info_changed(struct ieee80211_hw *hw, 4138static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6655 struct ieee80211_vif *vif, 4139 struct ieee80211_vif *vif,
@@ -6680,7 +4164,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6680 if (changes & BSS_CHANGED_HT) { 4164 if (changes & BSS_CHANGED_HT) {
6681 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht); 4165 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht);
6682 iwl4965_ht_conf(priv, bss_conf); 4166 iwl4965_ht_conf(priv, bss_conf);
6683 iwl4965_set_rxon_chain(priv); 4167 iwl_set_rxon_chain(priv);
6684 } 4168 }
6685 4169
6686 if (changes & BSS_CHANGED_ASSOC) { 4170 if (changes & BSS_CHANGED_ASSOC) {
@@ -6780,7 +4264,7 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6780 4264
6781 IWL_DEBUG_MAC80211("enter\n"); 4265 IWL_DEBUG_MAC80211("enter\n");
6782 4266
6783 sta_id = iwl4965_hw_find_station(priv, addr); 4267 sta_id = iwl_find_station(priv, addr);
6784 if (sta_id == IWL_INVALID_STATION) { 4268 if (sta_id == IWL_INVALID_STATION) {
6785 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 4269 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6786 print_mac(mac, addr)); 4270 print_mac(mac, addr));
@@ -6808,7 +4292,7 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6808 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 4292 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
6809 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 4293 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
6810 4294
6811 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4295 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
6812 4296
6813 spin_unlock_irqrestore(&priv->sta_lock, flags); 4297 spin_unlock_irqrestore(&priv->sta_lock, flags);
6814 4298
@@ -6827,7 +4311,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6827 4311
6828 IWL_DEBUG_MAC80211("enter\n"); 4312 IWL_DEBUG_MAC80211("enter\n");
6829 4313
6830 if (priv->cfg->mod_params->sw_crypto) { 4314 if (priv->hw_params.sw_crypto) {
6831 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); 4315 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
6832 return -EOPNOTSUPP; 4316 return -EOPNOTSUPP;
6833 } 4317 }
@@ -6836,7 +4320,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6836 /* only support pairwise keys */ 4320 /* only support pairwise keys */
6837 return -EOPNOTSUPP; 4321 return -EOPNOTSUPP;
6838 4322
6839 sta_id = iwl4965_hw_find_station(priv, addr); 4323 sta_id = iwl_find_station(priv, addr);
6840 if (sta_id == IWL_INVALID_STATION) { 4324 if (sta_id == IWL_INVALID_STATION) {
6841 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 4325 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6842 print_mac(mac, addr)); 4326 print_mac(mac, addr));
@@ -6857,7 +4341,8 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6857 if (cmd == SET_KEY) 4341 if (cmd == SET_KEY)
6858 is_default_wep_key = !priv->key_mapping_key; 4342 is_default_wep_key = !priv->key_mapping_key;
6859 else 4343 else
6860 is_default_wep_key = priv->default_wep_key; 4344 is_default_wep_key =
4345 (key->hw_key_idx == HW_KEY_DEFAULT);
6861 } 4346 }
6862 4347
6863 switch (cmd) { 4348 switch (cmd) {
@@ -6873,7 +4358,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6873 if (is_default_wep_key) 4358 if (is_default_wep_key)
6874 ret = iwl_remove_default_wep_key(priv, key); 4359 ret = iwl_remove_default_wep_key(priv, key);
6875 else 4360 else
6876 ret = iwl_remove_dynamic_key(priv, sta_id); 4361 ret = iwl_remove_dynamic_key(priv, key, sta_id);
6877 4362
6878 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 4363 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
6879 break; 4364 break;
@@ -6886,7 +4371,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6886 return ret; 4371 return ret;
6887} 4372}
6888 4373
6889static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue, 4374static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
6890 const struct ieee80211_tx_queue_params *params) 4375 const struct ieee80211_tx_queue_params *params)
6891{ 4376{
6892 struct iwl_priv *priv = hw->priv; 4377 struct iwl_priv *priv = hw->priv;
@@ -6942,8 +4427,8 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6942{ 4427{
6943 struct iwl_priv *priv = hw->priv; 4428 struct iwl_priv *priv = hw->priv;
6944 int i, avail; 4429 int i, avail;
6945 struct iwl4965_tx_queue *txq; 4430 struct iwl_tx_queue *txq;
6946 struct iwl4965_queue *q; 4431 struct iwl_queue *q;
6947 unsigned long flags; 4432 unsigned long flags;
6948 4433
6949 IWL_DEBUG_MAC80211("enter\n"); 4434 IWL_DEBUG_MAC80211("enter\n");
@@ -6958,11 +4443,11 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6958 for (i = 0; i < AC_NUM; i++) { 4443 for (i = 0; i < AC_NUM; i++) {
6959 txq = &priv->txq[i]; 4444 txq = &priv->txq[i];
6960 q = &txq->q; 4445 q = &txq->q;
6961 avail = iwl4965_queue_space(q); 4446 avail = iwl_queue_space(q);
6962 4447
6963 stats->data[i].len = q->n_window - avail; 4448 stats[i].len = q->n_window - avail;
6964 stats->data[i].limit = q->n_window - q->high_mark; 4449 stats[i].limit = q->n_window - q->high_mark;
6965 stats->data[i].count = q->n_window; 4450 stats[i].count = q->n_window;
6966 4451
6967 } 4452 }
6968 spin_unlock_irqrestore(&priv->lock, flags); 4453 spin_unlock_irqrestore(&priv->lock, flags);
@@ -6975,6 +4460,9 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6975static int iwl4965_mac_get_stats(struct ieee80211_hw *hw, 4460static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
6976 struct ieee80211_low_level_stats *stats) 4461 struct ieee80211_low_level_stats *stats)
6977{ 4462{
4463 struct iwl_priv *priv = hw->priv;
4464
4465 priv = hw->priv;
6978 IWL_DEBUG_MAC80211("enter\n"); 4466 IWL_DEBUG_MAC80211("enter\n");
6979 IWL_DEBUG_MAC80211("leave\n"); 4467 IWL_DEBUG_MAC80211("leave\n");
6980 4468
@@ -6983,6 +4471,9 @@ static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
6983 4471
6984static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw) 4472static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
6985{ 4473{
4474 struct iwl_priv *priv;
4475
4476 priv = hw->priv;
6986 IWL_DEBUG_MAC80211("enter\n"); 4477 IWL_DEBUG_MAC80211("enter\n");
6987 IWL_DEBUG_MAC80211("leave\n"); 4478 IWL_DEBUG_MAC80211("leave\n");
6988 4479
@@ -7004,7 +4495,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7004 spin_unlock_irqrestore(&priv->lock, flags); 4495 spin_unlock_irqrestore(&priv->lock, flags);
7005#endif /* CONFIG_IWL4965_HT */ 4496#endif /* CONFIG_IWL4965_HT */
7006 4497
7007 iwlcore_reset_qos(priv); 4498 iwl_reset_qos(priv);
7008 4499
7009 cancel_delayed_work(&priv->post_associate); 4500 cancel_delayed_work(&priv->post_associate);
7010 4501
@@ -7041,6 +4532,8 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7041 iwl4965_commit_rxon(priv); 4532 iwl4965_commit_rxon(priv);
7042 } 4533 }
7043 4534
4535 iwl_power_update_mode(priv, 0);
4536
7044 /* Per mac80211.h: This is only used in IBSS mode... */ 4537 /* Per mac80211.h: This is only used in IBSS mode... */
7045 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 4538 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7046 4539
@@ -7056,8 +4549,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7056 IWL_DEBUG_MAC80211("leave\n"); 4549 IWL_DEBUG_MAC80211("leave\n");
7057} 4550}
7058 4551
7059static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 4552static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7060 struct ieee80211_tx_control *control)
7061{ 4553{
7062 struct iwl_priv *priv = hw->priv; 4554 struct iwl_priv *priv = hw->priv;
7063 unsigned long flags; 4555 unsigned long flags;
@@ -7089,7 +4581,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7089 IWL_DEBUG_MAC80211("leave\n"); 4581 IWL_DEBUG_MAC80211("leave\n");
7090 spin_unlock_irqrestore(&priv->lock, flags); 4582 spin_unlock_irqrestore(&priv->lock, flags);
7091 4583
7092 iwlcore_reset_qos(priv); 4584 iwl_reset_qos(priv);
7093 4585
7094 queue_work(priv->workqueue, &priv->post_associate.work); 4586 queue_work(priv->workqueue, &priv->post_associate.work);
7095 4587
@@ -7114,13 +4606,18 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7114 * See the level definitions in iwl for details. 4606 * See the level definitions in iwl for details.
7115 */ 4607 */
7116 4608
7117static ssize_t show_debug_level(struct device_driver *d, char *buf) 4609static ssize_t show_debug_level(struct device *d,
4610 struct device_attribute *attr, char *buf)
7118{ 4611{
7119 return sprintf(buf, "0x%08X\n", iwl_debug_level); 4612 struct iwl_priv *priv = d->driver_data;
4613
4614 return sprintf(buf, "0x%08X\n", priv->debug_level);
7120} 4615}
7121static ssize_t store_debug_level(struct device_driver *d, 4616static ssize_t store_debug_level(struct device *d,
4617 struct device_attribute *attr,
7122 const char *buf, size_t count) 4618 const char *buf, size_t count)
7123{ 4619{
4620 struct iwl_priv *priv = d->driver_data;
7124 char *p = (char *)buf; 4621 char *p = (char *)buf;
7125 u32 val; 4622 u32 val;
7126 4623
@@ -7129,17 +4626,37 @@ static ssize_t store_debug_level(struct device_driver *d,
7129 printk(KERN_INFO DRV_NAME 4626 printk(KERN_INFO DRV_NAME
7130 ": %s is not in hex or decimal form.\n", buf); 4627 ": %s is not in hex or decimal form.\n", buf);
7131 else 4628 else
7132 iwl_debug_level = val; 4629 priv->debug_level = val;
7133 4630
7134 return strnlen(buf, count); 4631 return strnlen(buf, count);
7135} 4632}
7136 4633
7137static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 4634static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
7138 show_debug_level, store_debug_level); 4635 show_debug_level, store_debug_level);
4636
7139 4637
7140#endif /* CONFIG_IWLWIFI_DEBUG */ 4638#endif /* CONFIG_IWLWIFI_DEBUG */
7141 4639
7142 4640
4641static ssize_t show_version(struct device *d,
4642 struct device_attribute *attr, char *buf)
4643{
4644 struct iwl_priv *priv = d->driver_data;
4645 struct iwl_alive_resp *palive = &priv->card_alive;
4646
4647 if (palive->is_valid)
4648 return sprintf(buf, "fw version: 0x%01X.0x%01X.0x%01X.0x%01X\n"
4649 "fw type: 0x%01X 0x%01X\n",
4650 palive->ucode_major, palive->ucode_minor,
4651 palive->sw_rev[0], palive->sw_rev[1],
4652 palive->ver_type, palive->ver_subtype);
4653
4654 else
4655 return sprintf(buf, "fw not loaded\n");
4656}
4657
4658static DEVICE_ATTR(version, S_IWUSR | S_IRUGO, show_version, NULL);
4659
7143static ssize_t show_temperature(struct device *d, 4660static ssize_t show_temperature(struct device *d,
7144 struct device_attribute *attr, char *buf) 4661 struct device_attribute *attr, char *buf)
7145{ 4662{
@@ -7372,20 +4889,11 @@ static ssize_t store_power_level(struct device *d,
7372 goto out; 4889 goto out;
7373 } 4890 }
7374 4891
7375 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) 4892 rc = iwl_power_set_user_mode(priv, mode);
7376 mode = IWL_POWER_AC; 4893 if (rc) {
7377 else 4894 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7378 mode |= IWL_POWER_ENABLED; 4895 goto out;
7379
7380 if (mode != priv->power_mode) {
7381 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
7382 if (rc) {
7383 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7384 goto out;
7385 }
7386 priv->power_mode = mode;
7387 } 4896 }
7388
7389 rc = count; 4897 rc = count;
7390 4898
7391 out: 4899 out:
@@ -7415,7 +4923,7 @@ static ssize_t show_power_level(struct device *d,
7415 struct device_attribute *attr, char *buf) 4923 struct device_attribute *attr, char *buf)
7416{ 4924{
7417 struct iwl_priv *priv = dev_get_drvdata(d); 4925 struct iwl_priv *priv = dev_get_drvdata(d);
7418 int level = IWL_POWER_LEVEL(priv->power_mode); 4926 int level = priv->power_data.power_mode;
7419 char *p = buf; 4927 char *p = buf;
7420 4928
7421 p += sprintf(p, "%d ", level); 4929 p += sprintf(p, "%d ", level);
@@ -7433,14 +4941,14 @@ static ssize_t show_power_level(struct device *d,
7433 timeout_duration[level - 1] / 1000, 4941 timeout_duration[level - 1] / 1000,
7434 period_duration[level - 1] / 1000); 4942 period_duration[level - 1] / 1000);
7435 } 4943 }
7436 4944/*
7437 if (!(priv->power_mode & IWL_POWER_ENABLED)) 4945 if (!(priv->power_mode & IWL_POWER_ENABLED))
7438 p += sprintf(p, " OFF\n"); 4946 p += sprintf(p, " OFF\n");
7439 else 4947 else
7440 p += sprintf(p, " \n"); 4948 p += sprintf(p, " \n");
7441 4949*/
4950 p += sprintf(p, " \n");
7442 return (p - buf + 1); 4951 return (p - buf + 1);
7443
7444} 4952}
7445 4953
7446static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 4954static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -7493,44 +5001,6 @@ static ssize_t show_statistics(struct device *d,
7493 5001
7494static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 5002static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7495 5003
7496static ssize_t show_antenna(struct device *d,
7497 struct device_attribute *attr, char *buf)
7498{
7499 struct iwl_priv *priv = dev_get_drvdata(d);
7500
7501 if (!iwl_is_alive(priv))
7502 return -EAGAIN;
7503
7504 return sprintf(buf, "%d\n", priv->antenna);
7505}
7506
7507static ssize_t store_antenna(struct device *d,
7508 struct device_attribute *attr,
7509 const char *buf, size_t count)
7510{
7511 int ant;
7512 struct iwl_priv *priv = dev_get_drvdata(d);
7513
7514 if (count == 0)
7515 return 0;
7516
7517 if (sscanf(buf, "%1i", &ant) != 1) {
7518 IWL_DEBUG_INFO("not in hex or decimal form.\n");
7519 return count;
7520 }
7521
7522 if ((ant >= 0) && (ant <= 2)) {
7523 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
7524 priv->antenna = (enum iwl4965_antenna)ant;
7525 } else
7526 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
7527
7528
7529 return count;
7530}
7531
7532static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7533
7534static ssize_t show_status(struct device *d, 5004static ssize_t show_status(struct device *d,
7535 struct device_attribute *attr, char *buf) 5005 struct device_attribute *attr, char *buf)
7536{ 5006{
@@ -7542,34 +5012,6 @@ static ssize_t show_status(struct device *d,
7542 5012
7543static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 5013static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
7544 5014
7545static ssize_t dump_error_log(struct device *d,
7546 struct device_attribute *attr,
7547 const char *buf, size_t count)
7548{
7549 char *p = (char *)buf;
7550
7551 if (p[0] == '1')
7552 iwl4965_dump_nic_error_log((struct iwl_priv *)d->driver_data);
7553
7554 return strnlen(buf, count);
7555}
7556
7557static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
7558
7559static ssize_t dump_event_log(struct device *d,
7560 struct device_attribute *attr,
7561 const char *buf, size_t count)
7562{
7563 char *p = (char *)buf;
7564
7565 if (p[0] == '1')
7566 iwl4965_dump_nic_event_log((struct iwl_priv *)d->driver_data);
7567
7568 return strnlen(buf, count);
7569}
7570
7571static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7572
7573/***************************************************************************** 5015/*****************************************************************************
7574 * 5016 *
7575 * driver setup and teardown 5017 * driver setup and teardown
@@ -7590,9 +5032,10 @@ static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
7590 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan); 5032 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
7591 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill); 5033 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
7592 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update); 5034 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
5035 INIT_WORK(&priv->set_monitor, iwl4965_bg_set_monitor);
7593 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate); 5036 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
7594 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); 5037 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
7595 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); 5038 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
7596 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check); 5039 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
7597 5040
7598 iwl4965_hw_setup_deferred_work(priv); 5041 iwl4965_hw_setup_deferred_work(priv);
@@ -7613,10 +5056,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
7613} 5056}
7614 5057
7615static struct attribute *iwl4965_sysfs_entries[] = { 5058static struct attribute *iwl4965_sysfs_entries[] = {
7616 &dev_attr_antenna.attr,
7617 &dev_attr_channels.attr, 5059 &dev_attr_channels.attr,
7618 &dev_attr_dump_errors.attr,
7619 &dev_attr_dump_events.attr,
7620 &dev_attr_flags.attr, 5060 &dev_attr_flags.attr,
7621 &dev_attr_filter_flags.attr, 5061 &dev_attr_filter_flags.attr,
7622#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 5062#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
@@ -7629,6 +5069,10 @@ static struct attribute *iwl4965_sysfs_entries[] = {
7629 &dev_attr_status.attr, 5069 &dev_attr_status.attr,
7630 &dev_attr_temperature.attr, 5070 &dev_attr_temperature.attr,
7631 &dev_attr_tx_power.attr, 5071 &dev_attr_tx_power.attr,
5072#ifdef CONFIG_IWLWIFI_DEBUG
5073 &dev_attr_debug_level.attr,
5074#endif
5075 &dev_attr_version.attr,
7632 5076
7633 NULL 5077 NULL
7634}; 5078};
@@ -7678,7 +5122,9 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7678 /* Disabling hardware scan means that mac80211 will perform scans 5122 /* Disabling hardware scan means that mac80211 will perform scans
7679 * "the hard way", rather than using device's scan. */ 5123 * "the hard way", rather than using device's scan. */
7680 if (cfg->mod_params->disable_hw_scan) { 5124 if (cfg->mod_params->disable_hw_scan) {
7681 IWL_DEBUG_INFO("Disabling hw_scan\n"); 5125 if (cfg->mod_params->debug & IWL_DL_INFO)
5126 dev_printk(KERN_DEBUG, &(pdev->dev),
5127 "Disabling hw_scan\n");
7682 iwl4965_hw_ops.hw_scan = NULL; 5128 iwl4965_hw_ops.hw_scan = NULL;
7683 } 5129 }
7684 5130
@@ -7697,7 +5143,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7697 priv->pci_dev = pdev; 5143 priv->pci_dev = pdev;
7698 5144
7699#ifdef CONFIG_IWLWIFI_DEBUG 5145#ifdef CONFIG_IWLWIFI_DEBUG
7700 iwl_debug_level = priv->cfg->mod_params->debug; 5146 priv->debug_level = priv->cfg->mod_params->debug;
7701 atomic_set(&priv->restrict_refcnt, 0); 5147 atomic_set(&priv->restrict_refcnt, 0);
7702#endif 5148#endif
7703 5149
@@ -7711,13 +5157,19 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7711 5157
7712 pci_set_master(pdev); 5158 pci_set_master(pdev);
7713 5159
7714 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 5160 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
7715 if (!err) 5161 if (!err)
7716 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 5162 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
5163 if (err) {
5164 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5165 if (!err)
5166 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
5167 /* both attempts failed: */
7717 if (err) { 5168 if (err) {
7718 printk(KERN_WARNING DRV_NAME 5169 printk(KERN_WARNING "%s: No suitable DMA available.\n",
7719 ": No suitable DMA available.\n"); 5170 DRV_NAME);
7720 goto out_pci_disable_device; 5171 goto out_pci_disable_device;
5172 }
7721 } 5173 }
7722 5174
7723 err = pci_request_regions(pdev, DRV_NAME); 5175 err = pci_request_regions(pdev, DRV_NAME);
@@ -7743,31 +5195,31 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7743 (unsigned long long) pci_resource_len(pdev, 0)); 5195 (unsigned long long) pci_resource_len(pdev, 0));
7744 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 5196 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7745 5197
5198 iwl_hw_detect(priv);
7746 printk(KERN_INFO DRV_NAME 5199 printk(KERN_INFO DRV_NAME
7747 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 5200 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
5201 priv->cfg->name, priv->hw_rev);
7748 5202
7749 /***************** 5203 /* amp init */
7750 * 4. Read EEPROM 5204 err = priv->cfg->ops->lib->apm_ops.init(priv);
7751 *****************/
7752 /* nic init */
7753 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
7754 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7755
7756 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7757 err = iwl_poll_bit(priv, CSR_GP_CNTRL,
7758 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7759 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7760 if (err < 0) { 5205 if (err < 0) {
7761 IWL_DEBUG_INFO("Failed to init the card\n"); 5206 IWL_DEBUG_INFO("Failed to init APMG\n");
7762 goto out_iounmap; 5207 goto out_iounmap;
7763 } 5208 }
5209 /*****************
5210 * 4. Read EEPROM
5211 *****************/
7764 /* Read the EEPROM */ 5212 /* Read the EEPROM */
7765 err = iwl_eeprom_init(priv); 5213 err = iwl_eeprom_init(priv);
7766 if (err) { 5214 if (err) {
7767 IWL_ERROR("Unable to init EEPROM\n"); 5215 IWL_ERROR("Unable to init EEPROM\n");
7768 goto out_iounmap; 5216 goto out_iounmap;
7769 } 5217 }
7770 /* MAC Address location in EEPROM same for 3945/4965 */ 5218 err = iwl_eeprom_check_version(priv);
5219 if (err)
5220 goto out_iounmap;
5221
5222 /* extract MAC Address */
7771 iwl_eeprom_get_mac(priv, priv->mac_addr); 5223 iwl_eeprom_get_mac(priv, priv->mac_addr);
7772 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr)); 5224 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
7773 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 5225 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
@@ -7778,16 +5230,16 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7778 /* Device-specific setup */ 5230 /* Device-specific setup */
7779 if (priv->cfg->ops->lib->set_hw_params(priv)) { 5231 if (priv->cfg->ops->lib->set_hw_params(priv)) {
7780 IWL_ERROR("failed to set hw parameters\n"); 5232 IWL_ERROR("failed to set hw parameters\n");
7781 goto out_iounmap; 5233 goto out_free_eeprom;
7782 } 5234 }
7783 5235
7784 /******************* 5236 /*******************
7785 * 6. Setup hw/priv 5237 * 6. Setup priv
7786 *******************/ 5238 *******************/
7787 5239
7788 err = iwl_setup(priv); 5240 err = iwl_init_drv(priv);
7789 if (err) 5241 if (err)
7790 goto out_unset_hw_params; 5242 goto out_free_eeprom;
7791 /* At this point both hw and priv are initialized. */ 5243 /* At this point both hw and priv are initialized. */
7792 5244
7793 /********************************** 5245 /**********************************
@@ -7800,9 +5252,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7800 IWL_DEBUG_INFO("Radio disabled.\n"); 5252 IWL_DEBUG_INFO("Radio disabled.\n");
7801 } 5253 }
7802 5254
7803 if (priv->cfg->mod_params->enable_qos)
7804 priv->qos_data.qos_enable = 1;
7805
7806 /******************** 5255 /********************
7807 * 8. Setup services 5256 * 8. Setup services
7808 ********************/ 5257 ********************/
@@ -7813,14 +5262,9 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7813 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group); 5262 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7814 if (err) { 5263 if (err) {
7815 IWL_ERROR("failed to create sysfs device attributes\n"); 5264 IWL_ERROR("failed to create sysfs device attributes\n");
7816 goto out_unset_hw_params; 5265 goto out_uninit_drv;
7817 } 5266 }
7818 5267
7819 err = iwl_dbgfs_register(priv, DRV_NAME);
7820 if (err) {
7821 IWL_ERROR("failed to create debugfs files\n");
7822 goto out_remove_sysfs;
7823 }
7824 5268
7825 iwl4965_setup_deferred_work(priv); 5269 iwl4965_setup_deferred_work(priv);
7826 iwl4965_setup_rx_handlers(priv); 5270 iwl4965_setup_rx_handlers(priv);
@@ -7831,14 +5275,28 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7831 pci_save_state(pdev); 5275 pci_save_state(pdev);
7832 pci_disable_device(pdev); 5276 pci_disable_device(pdev);
7833 5277
5278 /**********************************
5279 * 10. Setup and register mac80211
5280 **********************************/
5281
5282 err = iwl_setup_mac(priv);
5283 if (err)
5284 goto out_remove_sysfs;
5285
5286 err = iwl_dbgfs_register(priv, DRV_NAME);
5287 if (err)
5288 IWL_ERROR("failed to create debugfs files\n");
5289
7834 /* notify iwlcore to init */ 5290 /* notify iwlcore to init */
7835 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT); 5291 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT);
7836 return 0; 5292 return 0;
7837 5293
7838 out_remove_sysfs: 5294 out_remove_sysfs:
7839 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 5295 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7840 out_unset_hw_params: 5296 out_uninit_drv:
7841 iwl4965_unset_hw_params(priv); 5297 iwl_uninit_drv(priv);
5298 out_free_eeprom:
5299 iwl_eeprom_free(priv);
7842 out_iounmap: 5300 out_iounmap:
7843 pci_iounmap(pdev, priv->hw_base); 5301 pci_iounmap(pdev, priv->hw_base);
7844 out_pci_release_regions: 5302 out_pci_release_regions:
@@ -7864,6 +5322,9 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7864 5322
7865 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 5323 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
7866 5324
5325 iwl_dbgfs_unregister(priv);
5326 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
5327
7867 if (priv->mac80211_registered) { 5328 if (priv->mac80211_registered) {
7868 ieee80211_unregister_hw(priv->hw); 5329 ieee80211_unregister_hw(priv->hw);
7869 priv->mac80211_registered = 0; 5330 priv->mac80211_registered = 0;
@@ -7891,17 +5352,15 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7891 } 5352 }
7892 5353
7893 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT); 5354 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT);
7894 iwl_dbgfs_unregister(priv);
7895 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7896 5355
7897 iwl4965_dealloc_ucode_pci(priv); 5356 iwl4965_dealloc_ucode_pci(priv);
7898 5357
7899 if (priv->rxq.bd) 5358 if (priv->rxq.bd)
7900 iwl4965_rx_queue_free(priv, &priv->rxq); 5359 iwl_rx_queue_free(priv, &priv->rxq);
7901 iwl4965_hw_txq_ctx_free(priv); 5360 iwl_hw_txq_ctx_free(priv);
7902 5361
7903 iwl4965_unset_hw_params(priv);
7904 iwlcore_clear_stations_table(priv); 5362 iwlcore_clear_stations_table(priv);
5363 iwl_eeprom_free(priv);
7905 5364
7906 5365
7907 /*netif_stop_queue(dev); */ 5366 /*netif_stop_queue(dev); */
@@ -7918,8 +5377,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7918 pci_disable_device(pdev); 5377 pci_disable_device(pdev);
7919 pci_set_drvdata(pdev, NULL); 5378 pci_set_drvdata(pdev, NULL);
7920 5379
7921 iwl_free_channel_map(priv); 5380 iwl_uninit_drv(priv);
7922 iwl4965_free_geos(priv);
7923 5381
7924 if (priv->ibss_beacon) 5382 if (priv->ibss_beacon)
7925 dev_kfree_skb(priv->ibss_beacon); 5383 dev_kfree_skb(priv->ibss_beacon);
@@ -7969,6 +5427,11 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
7969static struct pci_device_id iwl_hw_card_ids[] = { 5427static struct pci_device_id iwl_hw_card_ids[] = {
7970 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 5428 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
7971 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 5429 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
5430#ifdef CONFIG_IWL5000
5431 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
5432 {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)},
5433 {IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)},
5434#endif /* CONFIG_IWL5000 */
7972 {0} 5435 {0}
7973}; 5436};
7974MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 5437MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
@@ -8002,20 +5465,9 @@ static int __init iwl4965_init(void)
8002 IWL_ERROR("Unable to initialize PCI module\n"); 5465 IWL_ERROR("Unable to initialize PCI module\n");
8003 goto error_register; 5466 goto error_register;
8004 } 5467 }
8005#ifdef CONFIG_IWLWIFI_DEBUG
8006 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
8007 if (ret) {
8008 IWL_ERROR("Unable to create driver sysfs file\n");
8009 goto error_debug;
8010 }
8011#endif
8012 5468
8013 return ret; 5469 return ret;
8014 5470
8015#ifdef CONFIG_IWLWIFI_DEBUG
8016error_debug:
8017 pci_unregister_driver(&iwl_driver);
8018#endif
8019error_register: 5471error_register:
8020 iwl4965_rate_control_unregister(); 5472 iwl4965_rate_control_unregister();
8021 return ret; 5473 return ret;
@@ -8023,9 +5475,6 @@ error_register:
8023 5475
8024static void __exit iwl4965_exit(void) 5476static void __exit iwl4965_exit(void)
8025{ 5477{
8026#ifdef CONFIG_IWLWIFI_DEBUG
8027 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
8028#endif
8029 pci_unregister_driver(&iwl_driver); 5478 pci_unregister_driver(&iwl_driver);
8030 iwl4965_rate_control_unregister(); 5479 iwl4965_rate_control_unregister();
8031} 5480}
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index f0724e31adfd..02080a3682a9 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,9 +1,5 @@
1libertas-objs := main.o wext.o \ 1libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o \
2 rx.o tx.o cmd.o \ 2 debugfs.o persistcfg.o ethtool.o assoc.o
3 cmdresp.o scan.o \
4 11d.o \
5 debugfs.o \
6 ethtool.o assoc.o
7 3
8usb8xxx-objs += if_usb.o 4usb8xxx-objs += if_usb.o
9libertas_cs-objs += if_cs.o 5libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index c9c3640ce9fb..a267d6e65f03 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -603,7 +603,8 @@ static int assoc_helper_channel(struct lbs_private *priv,
603 /* Change mesh channel first; 21.p21 firmware won't let 603 /* Change mesh channel first; 21.p21 firmware won't let
604 you change channel otherwise (even though it'll return 604 you change channel otherwise (even though it'll return
605 an error to this */ 605 an error to this */
606 lbs_mesh_config(priv, 0, assoc_req->channel); 606 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
607 assoc_req->channel);
607 } 608 }
608 609
609 lbs_deb_assoc("ASSOC: channel: %d -> %d\n", 610 lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
@@ -642,7 +643,8 @@ static int assoc_helper_channel(struct lbs_private *priv,
642 643
643 restore_mesh: 644 restore_mesh:
644 if (priv->mesh_dev) 645 if (priv->mesh_dev)
645 lbs_mesh_config(priv, 1, priv->curbssparams.channel); 646 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
647 priv->curbssparams.channel);
646 648
647 done: 649 done:
648 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 650 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1248,7 +1250,7 @@ static int get_common_rates(struct lbs_private *priv,
1248 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 1250 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
1249 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 1251 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
1250 1252
1251 if (!priv->auto_rate) { 1253 if (!priv->enablehwauto) {
1252 for (i = 0; i < tmp_size; i++) { 1254 for (i = 0; i < tmp_size; i++) {
1253 if (tmp[i] == priv->cur_rate) 1255 if (tmp[i] == priv->cur_rate)
1254 goto done; 1256 goto done;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 8124fd9b1353..75427e61898d 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <net/ieee80211.h>
7#include <linux/kfifo.h> 8#include <linux/kfifo.h>
8#include "host.h" 9#include "host.h"
9#include "hostcmd.h" 10#include "hostcmd.h"
@@ -109,7 +110,7 @@ int lbs_update_hw_spec(struct lbs_private *priv)
109 * CF card firmware 5.0.16p0: cap 0x00000303 110 * CF card firmware 5.0.16p0: cap 0x00000303
110 * USB dongle firmware 5.110.17p2: cap 0x00000303 111 * USB dongle firmware 5.110.17p2: cap 0x00000303
111 */ 112 */
112 printk("libertas: %s, fw %u.%u.%up%u, cap 0x%08x\n", 113 lbs_pr_info("%s, fw %u.%u.%up%u, cap 0x%08x\n",
113 print_mac(mac, cmd.permanentaddr), 114 print_mac(mac, cmd.permanentaddr),
114 priv->fwrelease >> 24 & 0xff, 115 priv->fwrelease >> 24 & 0xff,
115 priv->fwrelease >> 16 & 0xff, 116 priv->fwrelease >> 16 & 0xff,
@@ -675,58 +676,60 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
675 return 0; 676 return 0;
676} 677}
677 678
678static int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, 679static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
679 struct cmd_ds_command *cmd,
680 u16 cmd_action)
681{ 680{
682 struct cmd_ds_802_11_rate_adapt_rateset 681/* Bit Rate
683 *rateadapt = &cmd->params.rateset; 682* 15:13 Reserved
684 683* 12 54 Mbps
685 lbs_deb_enter(LBS_DEB_CMD); 684* 11 48 Mbps
686 cmd->size = 685* 10 36 Mbps
687 cpu_to_le16(sizeof(struct cmd_ds_802_11_rate_adapt_rateset) 686* 9 24 Mbps
688 + S_DS_GEN); 687* 8 18 Mbps
689 cmd->command = cpu_to_le16(CMD_802_11_RATE_ADAPT_RATESET); 688* 7 12 Mbps
690 689* 6 9 Mbps
691 rateadapt->action = cpu_to_le16(cmd_action); 690* 5 6 Mbps
692 rateadapt->enablehwauto = cpu_to_le16(priv->enablehwauto); 691* 4 Reserved
693 rateadapt->bitmap = cpu_to_le16(priv->ratebitmap); 692* 3 11 Mbps
694 693* 2 5.5 Mbps
695 lbs_deb_leave(LBS_DEB_CMD); 694* 1 2 Mbps
696 return 0; 695* 0 1 Mbps
696**/
697
698 uint16_t ratemask;
699 int i = lbs_data_rate_to_fw_index(rate);
700 if (lower_rates_ok)
701 ratemask = (0x1fef >> (12 - i));
702 else
703 ratemask = (1 << i);
704 return cpu_to_le16(ratemask);
697} 705}
698 706
699/** 707int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
700 * @brief Get the current data rate 708 uint16_t cmd_action)
701 *
702 * @param priv A pointer to struct lbs_private structure
703 *
704 * @return The data rate on success, error on failure
705 */
706int lbs_get_data_rate(struct lbs_private *priv)
707{ 709{
708 struct cmd_ds_802_11_data_rate cmd; 710 struct cmd_ds_802_11_rate_adapt_rateset cmd;
709 int ret = -1; 711 int ret;
710 712
711 lbs_deb_enter(LBS_DEB_CMD); 713 lbs_deb_enter(LBS_DEB_CMD);
712 714
713 memset(&cmd, 0, sizeof(cmd)); 715 if (!priv->cur_rate && !priv->enablehwauto)
714 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 716 return -EINVAL;
715 cmd.action = cpu_to_le16(CMD_ACT_GET_TX_RATE);
716
717 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
718 if (ret)
719 goto out;
720 717
721 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd)); 718 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
722 719
723 ret = (int) lbs_fw_index_to_data_rate(cmd.rates[0]); 720 cmd.action = cpu_to_le16(cmd_action);
724 lbs_deb_cmd("DATA_RATE: current rate 0x%02x\n", ret); 721 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
722 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
723 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
724 if (!ret && cmd_action == CMD_ACT_GET) {
725 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
726 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
727 }
725 728
726out:
727 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 729 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
728 return ret; 730 return ret;
729} 731}
732EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
730 733
731/** 734/**
732 * @brief Set the data rate 735 * @brief Set the data rate
@@ -778,28 +781,6 @@ out:
778 return ret; 781 return ret;
779} 782}
780 783
781static int lbs_cmd_mac_multicast_adr(struct lbs_private *priv,
782 struct cmd_ds_command *cmd,
783 u16 cmd_action)
784{
785 struct cmd_ds_mac_multicast_adr *pMCastAdr = &cmd->params.madr;
786
787 lbs_deb_enter(LBS_DEB_CMD);
788 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_multicast_adr) +
789 S_DS_GEN);
790 cmd->command = cpu_to_le16(CMD_MAC_MULTICAST_ADR);
791
792 lbs_deb_cmd("MULTICAST_ADR: setting %d addresses\n", pMCastAdr->nr_of_adrs);
793 pMCastAdr->action = cpu_to_le16(cmd_action);
794 pMCastAdr->nr_of_adrs =
795 cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
796 memcpy(pMCastAdr->maclist, priv->multicastlist,
797 priv->nr_of_multicastmacaddr * ETH_ALEN);
798
799 lbs_deb_leave(LBS_DEB_CMD);
800 return 0;
801}
802
803/** 784/**
804 * @brief Get the radio channel 785 * @brief Get the radio channel
805 * 786 *
@@ -1052,24 +1033,69 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
1052 return ret; 1033 return ret;
1053} 1034}
1054 1035
1055int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan) 1036int lbs_mesh_config_send(struct lbs_private *priv,
1037 struct cmd_ds_mesh_config *cmd,
1038 uint16_t action, uint16_t type)
1039{
1040 int ret;
1041
1042 lbs_deb_enter(LBS_DEB_CMD);
1043
1044 cmd->hdr.command = cpu_to_le16(CMD_MESH_CONFIG);
1045 cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
1046 cmd->hdr.result = 0;
1047
1048 cmd->type = cpu_to_le16(type);
1049 cmd->action = cpu_to_le16(action);
1050
1051 ret = lbs_cmd_with_response(priv, CMD_MESH_CONFIG, cmd);
1052
1053 lbs_deb_leave(LBS_DEB_CMD);
1054 return ret;
1055}
1056
1057/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
1058 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
1059 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
1060 * lbs_mesh_config_send.
1061 */
1062int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1056{ 1063{
1057 struct cmd_ds_mesh_config cmd; 1064 struct cmd_ds_mesh_config cmd;
1065 struct mrvl_meshie *ie;
1058 1066
1059 memset(&cmd, 0, sizeof(cmd)); 1067 memset(&cmd, 0, sizeof(cmd));
1060 cmd.action = cpu_to_le16(enable);
1061 cmd.channel = cpu_to_le16(chan); 1068 cmd.channel = cpu_to_le16(chan);
1062 cmd.type = cpu_to_le16(priv->mesh_tlv); 1069 ie = (struct mrvl_meshie *)cmd.data;
1063 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 1070
1064 1071 switch (action) {
1065 if (enable) { 1072 case CMD_ACT_MESH_CONFIG_START:
1066 cmd.length = cpu_to_le16(priv->mesh_ssid_len); 1073 ie->hdr.id = MFIE_TYPE_GENERIC;
1067 memcpy(cmd.data, priv->mesh_ssid, priv->mesh_ssid_len); 1074 ie->val.oui[0] = 0x00;
1075 ie->val.oui[1] = 0x50;
1076 ie->val.oui[2] = 0x43;
1077 ie->val.type = MARVELL_MESH_IE_TYPE;
1078 ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
1079 ie->val.version = MARVELL_MESH_IE_VERSION;
1080 ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
1081 ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
1082 ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
1083 ie->val.mesh_id_len = priv->mesh_ssid_len;
1084 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1085 ie->hdr.len = sizeof(struct mrvl_meshie_val) -
1086 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len;
1087 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1088 break;
1089 case CMD_ACT_MESH_CONFIG_STOP:
1090 break;
1091 default:
1092 return -1;
1068 } 1093 }
1069 lbs_deb_cmd("mesh config enable %d TLV %x channel %d SSID %s\n", 1094 lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
1070 enable, priv->mesh_tlv, chan, 1095 action, priv->mesh_tlv, chan,
1071 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len)); 1096 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len));
1072 return lbs_cmd_with_response(priv, CMD_MESH_CONFIG, &cmd); 1097
1098 return lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1073} 1099}
1074 1100
1075static int lbs_cmd_bcn_ctrl(struct lbs_private * priv, 1101static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
@@ -1144,7 +1170,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1144 struct cmd_header *cmd; 1170 struct cmd_header *cmd;
1145 uint16_t cmdsize; 1171 uint16_t cmdsize;
1146 uint16_t command; 1172 uint16_t command;
1147 int timeo = 5 * HZ; 1173 int timeo = 3 * HZ;
1148 int ret; 1174 int ret;
1149 1175
1150 lbs_deb_enter(LBS_DEB_HOST); 1176 lbs_deb_enter(LBS_DEB_HOST);
@@ -1162,7 +1188,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1162 /* These commands take longer */ 1188 /* These commands take longer */
1163 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE || 1189 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE ||
1164 command == CMD_802_11_AUTHENTICATE) 1190 command == CMD_802_11_AUTHENTICATE)
1165 timeo = 10 * HZ; 1191 timeo = 5 * HZ;
1166 1192
1167 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n", 1193 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
1168 command, le16_to_cpu(cmd->seqnum), cmdsize); 1194 command, le16_to_cpu(cmd->seqnum), cmdsize);
@@ -1174,7 +1200,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1174 lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret); 1200 lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
1175 /* Let the timer kick in and retry, and potentially reset 1201 /* Let the timer kick in and retry, and potentially reset
1176 the whole thing if the condition persists */ 1202 the whole thing if the condition persists */
1177 timeo = HZ; 1203 timeo = HZ/4;
1178 } 1204 }
1179 1205
1180 /* Setup the timer after transmit command */ 1206 /* Setup the timer after transmit command */
@@ -1279,8 +1305,7 @@ void lbs_set_mac_control(struct lbs_private *priv)
1279 cmd.action = cpu_to_le16(priv->mac_control); 1305 cmd.action = cpu_to_le16(priv->mac_control);
1280 cmd.reserved = 0; 1306 cmd.reserved = 0;
1281 1307
1282 lbs_cmd_async(priv, CMD_MAC_CONTROL, 1308 lbs_cmd_async(priv, CMD_MAC_CONTROL, &cmd.hdr, sizeof(cmd));
1283 &cmd.hdr, sizeof(cmd));
1284 1309
1285 lbs_deb_leave(LBS_DEB_CMD); 1310 lbs_deb_leave(LBS_DEB_CMD);
1286} 1311}
@@ -1387,15 +1412,6 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1387 cmd_action, pdata_buf); 1412 cmd_action, pdata_buf);
1388 break; 1413 break;
1389 1414
1390 case CMD_802_11_RATE_ADAPT_RATESET:
1391 ret = lbs_cmd_802_11_rate_adapt_rateset(priv,
1392 cmdptr, cmd_action);
1393 break;
1394
1395 case CMD_MAC_MULTICAST_ADR:
1396 ret = lbs_cmd_mac_multicast_adr(priv, cmdptr, cmd_action);
1397 break;
1398
1399 case CMD_802_11_MONITOR_MODE: 1415 case CMD_802_11_MONITOR_MODE:
1400 ret = lbs_cmd_802_11_monitor_mode(cmdptr, 1416 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
1401 cmd_action, pdata_buf); 1417 cmd_action, pdata_buf);
@@ -1484,7 +1500,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1484 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1500 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1485 break; 1501 break;
1486 default: 1502 default:
1487 lbs_deb_host("PREP_CMD: unknown command 0x%04x\n", cmd_no); 1503 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
1488 ret = -1; 1504 ret = -1;
1489 break; 1505 break;
1490 } 1506 }
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 3dfc2d43c224..a53b51f8bdb4 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -34,18 +34,22 @@ int lbs_update_hw_spec(struct lbs_private *priv);
34int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 34int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
35 struct cmd_ds_mesh_access *cmd); 35 struct cmd_ds_mesh_access *cmd);
36 36
37int lbs_get_data_rate(struct lbs_private *priv);
38int lbs_set_data_rate(struct lbs_private *priv, u8 rate); 37int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
39 38
40int lbs_get_channel(struct lbs_private *priv); 39int lbs_get_channel(struct lbs_private *priv);
41int lbs_set_channel(struct lbs_private *priv, u8 channel); 40int lbs_set_channel(struct lbs_private *priv, u8 channel);
42 41
42int lbs_mesh_config_send(struct lbs_private *priv,
43 struct cmd_ds_mesh_config *cmd,
44 uint16_t action, uint16_t type);
43int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan); 45int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
44 46
45int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria); 47int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria);
46int lbs_suspend(struct lbs_private *priv); 48int lbs_suspend(struct lbs_private *priv);
47int lbs_resume(struct lbs_private *priv); 49void lbs_resume(struct lbs_private *priv);
48 50
51int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
52 uint16_t cmd_action);
49int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv, 53int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
50 uint16_t cmd_action, uint16_t *timeout); 54 uint16_t cmd_action, uint16_t *timeout);
51int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 55int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 5abecb7673e6..24de3c3cf877 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -203,22 +203,6 @@ static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
203 return 0; 203 return 0;
204} 204}
205 205
206static int lbs_ret_802_11_rate_adapt_rateset(struct lbs_private *priv,
207 struct cmd_ds_command *resp)
208{
209 struct cmd_ds_802_11_rate_adapt_rateset *rates = &resp->params.rateset;
210
211 lbs_deb_enter(LBS_DEB_CMD);
212
213 if (rates->action == CMD_ACT_GET) {
214 priv->enablehwauto = le16_to_cpu(rates->enablehwauto);
215 priv->ratebitmap = le16_to_cpu(rates->bitmap);
216 }
217
218 lbs_deb_leave(LBS_DEB_CMD);
219 return 0;
220}
221
222static int lbs_ret_802_11_rssi(struct lbs_private *priv, 206static int lbs_ret_802_11_rssi(struct lbs_private *priv,
223 struct cmd_ds_command *resp) 207 struct cmd_ds_command *resp)
224{ 208{
@@ -316,16 +300,11 @@ static inline int handle_cmd_response(struct lbs_private *priv,
316 300
317 break; 301 break;
318 302
319 case CMD_RET(CMD_MAC_MULTICAST_ADR):
320 case CMD_RET(CMD_802_11_RESET): 303 case CMD_RET(CMD_802_11_RESET):
321 case CMD_RET(CMD_802_11_AUTHENTICATE): 304 case CMD_RET(CMD_802_11_AUTHENTICATE):
322 case CMD_RET(CMD_802_11_BEACON_STOP): 305 case CMD_RET(CMD_802_11_BEACON_STOP):
323 break; 306 break;
324 307
325 case CMD_RET(CMD_802_11_RATE_ADAPT_RATESET):
326 ret = lbs_ret_802_11_rate_adapt_rateset(priv, resp);
327 break;
328
329 case CMD_RET(CMD_802_11_RSSI): 308 case CMD_RET(CMD_802_11_RSSI):
330 ret = lbs_ret_802_11_rssi(priv, resp); 309 ret = lbs_ret_802_11_rssi(priv, resp);
331 break; 310 break;
@@ -376,8 +355,8 @@ static inline int handle_cmd_response(struct lbs_private *priv,
376 break; 355 break;
377 356
378 default: 357 default:
379 lbs_deb_host("CMD_RESP: unknown cmd response 0x%04x\n", 358 lbs_pr_err("CMD_RESP: unknown cmd response 0x%04x\n",
380 le16_to_cpu(resp->command)); 359 le16_to_cpu(resp->command));
381 break; 360 break;
382 } 361 }
383 lbs_deb_leave(LBS_DEB_HOST); 362 lbs_deb_leave(LBS_DEB_HOST);
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index b652fa301e19..a8ac974dacac 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -60,13 +60,17 @@ void lbs_mac_event_disconnected(struct lbs_private *priv);
60 60
61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str); 61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
62 62
63/* persistcfg.c */
64void lbs_persist_config_init(struct net_device *net);
65void lbs_persist_config_remove(struct net_device *net);
66
63/* main.c */ 67/* main.c */
64struct chan_freq_power *lbs_get_region_cfp_table(u8 region, 68struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
65 int *cfp_no); 69 int *cfp_no);
66struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 70struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
67int lbs_remove_card(struct lbs_private *priv); 71void lbs_remove_card(struct lbs_private *priv);
68int lbs_start_card(struct lbs_private *priv); 72int lbs_start_card(struct lbs_private *priv);
69int lbs_stop_card(struct lbs_private *priv); 73void lbs_stop_card(struct lbs_private *priv);
70void lbs_host_to_card_done(struct lbs_private *priv); 74void lbs_host_to_card_done(struct lbs_private *priv);
71 75
72int lbs_update_channel(struct lbs_private *priv); 76int lbs_update_channel(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index d39520111062..12e687550bce 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -40,6 +40,7 @@
40#define LBS_DEB_THREAD 0x00100000 40#define LBS_DEB_THREAD 0x00100000
41#define LBS_DEB_HEX 0x00200000 41#define LBS_DEB_HEX 0x00200000
42#define LBS_DEB_SDIO 0x00400000 42#define LBS_DEB_SDIO 0x00400000
43#define LBS_DEB_SYSFS 0x00800000
43 44
44extern unsigned int lbs_debug; 45extern unsigned int lbs_debug;
45 46
@@ -81,7 +82,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
81#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) 82#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args)
82#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) 83#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args)
83#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) 84#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args)
84#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " thread", fmt, ##args) 85#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
86#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
85 87
86#define lbs_pr_info(format, args...) \ 88#define lbs_pr_info(format, args...) \
87 printk(KERN_INFO DRV_NAME": " format, ## args) 89 printk(KERN_INFO DRV_NAME": " format, ## args)
@@ -170,6 +172,16 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
170 172
171#define MARVELL_MESH_IE_LENGTH 9 173#define MARVELL_MESH_IE_LENGTH 9
172 174
175/* Values used to populate the struct mrvl_mesh_ie. The only time you need this
176 * is when enabling the mesh using CMD_MESH_CONFIG.
177 */
178#define MARVELL_MESH_IE_TYPE 4
179#define MARVELL_MESH_IE_SUBTYPE 0
180#define MARVELL_MESH_IE_VERSION 0
181#define MARVELL_MESH_PROTO_ID_HWMP 0
182#define MARVELL_MESH_METRIC_ID 0
183#define MARVELL_MESH_CAPABILITY 0
184
173/** INT status Bit Definition*/ 185/** INT status Bit Definition*/
174#define MRVDRV_TX_DNLD_RDY 0x0001 186#define MRVDRV_TX_DNLD_RDY 0x0001
175#define MRVDRV_RX_UPLD_RDY 0x0002 187#define MRVDRV_RX_UPLD_RDY 0x0002
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 0d9edb9b11f5..f5bb40c54d85 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -140,6 +140,8 @@ struct lbs_private {
140 wait_queue_head_t waitq; 140 wait_queue_head_t waitq;
141 struct workqueue_struct *work_thread; 141 struct workqueue_struct *work_thread;
142 142
143 struct work_struct mcast_work;
144
143 /** Scanning */ 145 /** Scanning */
144 struct delayed_work scan_work; 146 struct delayed_work scan_work;
145 struct delayed_work assoc_work; 147 struct delayed_work assoc_work;
@@ -151,6 +153,7 @@ struct lbs_private {
151 153
152 /** Hardware access */ 154 /** Hardware access */
153 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 155 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
156 void (*reset_card) (struct lbs_private *priv);
154 157
155 /* Wake On LAN */ 158 /* Wake On LAN */
156 uint32_t wol_criteria; 159 uint32_t wol_criteria;
@@ -234,8 +237,8 @@ struct lbs_private {
234 /** 802.11 statistics */ 237 /** 802.11 statistics */
235// struct cmd_DS_802_11_GET_STAT wlan802_11Stat; 238// struct cmd_DS_802_11_GET_STAT wlan802_11Stat;
236 239
237 u16 enablehwauto; 240 uint16_t enablehwauto;
238 u16 ratebitmap; 241 uint16_t ratebitmap;
239 242
240 u32 fragthsd; 243 u32 fragthsd;
241 u32 rtsthsd; 244 u32 rtsthsd;
@@ -293,7 +296,6 @@ struct lbs_private {
293 296
294 /** data rate stuff */ 297 /** data rate stuff */
295 u8 cur_rate; 298 u8 cur_rate;
296 u8 auto_rate;
297 299
298 /** RF calibration data */ 300 /** RF calibration data */
299 301
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3915c3144fad..c92e41b4faf4 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -256,6 +256,23 @@ enum cmd_mesh_access_opts {
256 CMD_ACT_MESH_GET_AUTOSTART_ENABLED, 256 CMD_ACT_MESH_GET_AUTOSTART_ENABLED,
257}; 257};
258 258
259/* Define actions and types for CMD_MESH_CONFIG */
260enum cmd_mesh_config_actions {
261 CMD_ACT_MESH_CONFIG_STOP = 0,
262 CMD_ACT_MESH_CONFIG_START,
263 CMD_ACT_MESH_CONFIG_SET,
264 CMD_ACT_MESH_CONFIG_GET,
265};
266
267enum cmd_mesh_config_types {
268 CMD_TYPE_MESH_SET_BOOTFLAG = 1,
269 CMD_TYPE_MESH_SET_BOOTTIME,
270 CMD_TYPE_MESH_SET_DEF_CHANNEL,
271 CMD_TYPE_MESH_SET_MESH_IE,
272 CMD_TYPE_MESH_GET_DEFAULTS,
273 CMD_TYPE_MESH_GET_MESH_IE, /* GET_DEFAULTS is superset of GET_MESHIE */
274};
275
259/** Card Event definition */ 276/** Card Event definition */
260#define MACREG_INT_CODE_TX_PPA_FREE 0 277#define MACREG_INT_CODE_TX_PPA_FREE 0
261#define MACREG_INT_CODE_TX_DMA_DONE 1 278#define MACREG_INT_CODE_TX_DMA_DONE 1
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index f29bc5bbda3e..913b480211a9 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -219,6 +219,7 @@ struct cmd_ds_mac_control {
219}; 219};
220 220
221struct cmd_ds_mac_multicast_adr { 221struct cmd_ds_mac_multicast_adr {
222 struct cmd_header hdr;
222 __le16 action; 223 __le16 action;
223 __le16 nr_of_adrs; 224 __le16 nr_of_adrs;
224 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 225 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
@@ -499,6 +500,7 @@ struct cmd_ds_802_11_data_rate {
499}; 500};
500 501
501struct cmd_ds_802_11_rate_adapt_rateset { 502struct cmd_ds_802_11_rate_adapt_rateset {
503 struct cmd_header hdr;
502 __le16 action; 504 __le16 action;
503 __le16 enablehwauto; 505 __le16 enablehwauto;
504 __le16 bitmap; 506 __le16 bitmap;
@@ -702,8 +704,6 @@ struct cmd_ds_command {
702 struct cmd_ds_802_11_rf_tx_power txp; 704 struct cmd_ds_802_11_rf_tx_power txp;
703 struct cmd_ds_802_11_rf_antenna rant; 705 struct cmd_ds_802_11_rf_antenna rant;
704 struct cmd_ds_802_11_monitor_mode monitor; 706 struct cmd_ds_802_11_monitor_mode monitor;
705 struct cmd_ds_802_11_rate_adapt_rateset rateset;
706 struct cmd_ds_mac_multicast_adr madr;
707 struct cmd_ds_802_11_ad_hoc_join adj; 707 struct cmd_ds_802_11_ad_hoc_join adj;
708 struct cmd_ds_802_11_rssi rssi; 708 struct cmd_ds_802_11_rssi rssi;
709 struct cmd_ds_802_11_rssi_rsp rssirsp; 709 struct cmd_ds_802_11_rssi_rsp rssirsp;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 54280e292ea5..873ab10a0786 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -148,76 +148,72 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
148{ 148{
149 int i; 149 int i;
150 150
151 for (i = 0; i < 1000; i++) { 151 for (i = 0; i < 100000; i++) {
152 u8 val = if_cs_read8(card, addr); 152 u8 val = if_cs_read8(card, addr);
153 if (val == reg) 153 if (val == reg)
154 return i; 154 return i;
155 udelay(500); 155 udelay(5);
156 } 156 }
157 return -ETIME; 157 return -ETIME;
158} 158}
159 159
160 160
161 161
162/* Host control registers and their bit definitions */ 162/* First the bitmasks for the host/card interrupt/status registers: */
163#define IF_CS_BIT_TX 0x0001
164#define IF_CS_BIT_RX 0x0002
165#define IF_CS_BIT_COMMAND 0x0004
166#define IF_CS_BIT_RESP 0x0008
167#define IF_CS_BIT_EVENT 0x0010
168#define IF_CS_BIT_MASK 0x001f
163 169
164#define IF_CS_H_STATUS 0x00000000 170/* And now the individual registers and assorted masks */
165#define IF_CS_H_STATUS_TX_OVER 0x0001 171#define IF_CS_HOST_STATUS 0x00000000
166#define IF_CS_H_STATUS_RX_OVER 0x0002
167#define IF_CS_H_STATUS_DNLD_OVER 0x0004
168 172
169#define IF_CS_H_INT_CAUSE 0x00000002 173#define IF_CS_HOST_INT_CAUSE 0x00000002
170#define IF_CS_H_IC_TX_OVER 0x0001
171#define IF_CS_H_IC_RX_OVER 0x0002
172#define IF_CS_H_IC_DNLD_OVER 0x0004
173#define IF_CS_H_IC_POWER_DOWN 0x0008
174#define IF_CS_H_IC_HOST_EVENT 0x0010
175#define IF_CS_H_IC_MASK 0x001f
176 174
177#define IF_CS_H_INT_MASK 0x00000004 175#define IF_CS_HOST_INT_MASK 0x00000004
178#define IF_CS_H_IM_MASK 0x001f
179 176
180#define IF_CS_H_WRITE_LEN 0x00000014 177#define IF_CS_HOST_WRITE 0x00000016
178#define IF_CS_HOST_WRITE_LEN 0x00000014
181 179
182#define IF_CS_H_WRITE 0x00000016 180#define IF_CS_HOST_CMD 0x0000001A
181#define IF_CS_HOST_CMD_LEN 0x00000018
183 182
184#define IF_CS_H_CMD_LEN 0x00000018 183#define IF_CS_READ 0x00000010
184#define IF_CS_READ_LEN 0x00000024
185 185
186#define IF_CS_H_CMD 0x0000001A 186#define IF_CS_CARD_CMD 0x00000012
187#define IF_CS_CARD_CMD_LEN 0x00000030
187 188
188#define IF_CS_C_READ_LEN 0x00000024 189#define IF_CS_CARD_STATUS 0x00000020
190#define IF_CS_CARD_STATUS_MASK 0x7f00
189 191
190#define IF_CS_H_READ 0x00000010 192#define IF_CS_CARD_INT_CAUSE 0x00000022
191 193
192/* Card control registers and their bit definitions */ 194#define IF_CS_CARD_SQ_READ_LOW 0x00000028
193 195#define IF_CS_CARD_SQ_HELPER_OK 0x10
194#define IF_CS_C_STATUS 0x00000020
195#define IF_CS_C_S_TX_DNLD_RDY 0x0001
196#define IF_CS_C_S_RX_UPLD_RDY 0x0002
197#define IF_CS_C_S_CMD_DNLD_RDY 0x0004
198#define IF_CS_C_S_CMD_UPLD_RDY 0x0008
199#define IF_CS_C_S_CARDEVENT 0x0010
200#define IF_CS_C_S_MASK 0x001f
201#define IF_CS_C_S_STATUS_MASK 0x7f00
202
203#define IF_CS_C_INT_CAUSE 0x00000022
204#define IF_CS_C_IC_MASK 0x001f
205
206#define IF_CS_C_SQ_READ_LOW 0x00000028
207#define IF_CS_C_SQ_HELPER_OK 0x10
208
209#define IF_CS_C_CMD_LEN 0x00000030
210
211#define IF_CS_C_CMD 0x00000012
212 196
213#define IF_CS_SCRATCH 0x0000003F 197#define IF_CS_SCRATCH 0x0000003F
214 198
215 199
216 200
217/********************************************************************/ 201/********************************************************************/
218/* I/O */ 202/* I/O and interrupt handling */
219/********************************************************************/ 203/********************************************************************/
220 204
205static inline void if_cs_enable_ints(struct if_cs_card *card)
206{
207 lbs_deb_enter(LBS_DEB_CS);
208 if_cs_write16(card, IF_CS_HOST_INT_MASK, 0);
209}
210
211static inline void if_cs_disable_ints(struct if_cs_card *card)
212{
213 lbs_deb_enter(LBS_DEB_CS);
214 if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK);
215}
216
221/* 217/*
222 * Called from if_cs_host_to_card to send a command to the hardware 218 * Called from if_cs_host_to_card to send a command to the hardware
223 */ 219 */
@@ -228,11 +224,12 @@ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
228 int loops = 0; 224 int loops = 0;
229 225
230 lbs_deb_enter(LBS_DEB_CS); 226 lbs_deb_enter(LBS_DEB_CS);
227 if_cs_disable_ints(card);
231 228
232 /* Is hardware ready? */ 229 /* Is hardware ready? */
233 while (1) { 230 while (1) {
234 u16 val = if_cs_read16(card, IF_CS_C_STATUS); 231 u16 val = if_cs_read16(card, IF_CS_CARD_STATUS);
235 if (val & IF_CS_C_S_CMD_DNLD_RDY) 232 if (val & IF_CS_BIT_COMMAND)
236 break; 233 break;
237 if (++loops > 100) { 234 if (++loops > 100) {
238 lbs_pr_err("card not ready for commands\n"); 235 lbs_pr_err("card not ready for commands\n");
@@ -241,51 +238,56 @@ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
241 mdelay(1); 238 mdelay(1);
242 } 239 }
243 240
244 if_cs_write16(card, IF_CS_H_CMD_LEN, nb); 241 if_cs_write16(card, IF_CS_HOST_CMD_LEN, nb);
245 242
246 if_cs_write16_rep(card, IF_CS_H_CMD, buf, nb / 2); 243 if_cs_write16_rep(card, IF_CS_HOST_CMD, buf, nb / 2);
247 /* Are we supposed to transfer an odd amount of bytes? */ 244 /* Are we supposed to transfer an odd amount of bytes? */
248 if (nb & 1) 245 if (nb & 1)
249 if_cs_write8(card, IF_CS_H_CMD, buf[nb-1]); 246 if_cs_write8(card, IF_CS_HOST_CMD, buf[nb-1]);
250 247
251 /* "Assert the download over interrupt command in the Host 248 /* "Assert the download over interrupt command in the Host
252 * status register" */ 249 * status register" */
253 if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 250 if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
254 251
255 /* "Assert the download over interrupt command in the Card 252 /* "Assert the download over interrupt command in the Card
256 * interrupt case register" */ 253 * interrupt case register" */
257 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 254 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
258 ret = 0; 255 ret = 0;
259 256
260done: 257done:
258 if_cs_enable_ints(card);
261 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 259 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
262 return ret; 260 return ret;
263} 261}
264 262
265
266/* 263/*
267 * Called from if_cs_host_to_card to send a data to the hardware 264 * Called from if_cs_host_to_card to send a data to the hardware
268 */ 265 */
269static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb) 266static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb)
270{ 267{
271 struct if_cs_card *card = (struct if_cs_card *)priv->card; 268 struct if_cs_card *card = (struct if_cs_card *)priv->card;
269 u16 status;
272 270
273 lbs_deb_enter(LBS_DEB_CS); 271 lbs_deb_enter(LBS_DEB_CS);
272 if_cs_disable_ints(card);
273
274 status = if_cs_read16(card, IF_CS_CARD_STATUS);
275 BUG_ON((status & IF_CS_BIT_TX) == 0);
274 276
275 if_cs_write16(card, IF_CS_H_WRITE_LEN, nb); 277 if_cs_write16(card, IF_CS_HOST_WRITE_LEN, nb);
276 278
277 /* write even number of bytes, then odd byte if necessary */ 279 /* write even number of bytes, then odd byte if necessary */
278 if_cs_write16_rep(card, IF_CS_H_WRITE, buf, nb / 2); 280 if_cs_write16_rep(card, IF_CS_HOST_WRITE, buf, nb / 2);
279 if (nb & 1) 281 if (nb & 1)
280 if_cs_write8(card, IF_CS_H_WRITE, buf[nb-1]); 282 if_cs_write8(card, IF_CS_HOST_WRITE, buf[nb-1]);
281 283
282 if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_TX_OVER); 284 if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX);
283 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_STATUS_TX_OVER); 285 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX);
286 if_cs_enable_ints(card);
284 287
285 lbs_deb_leave(LBS_DEB_CS); 288 lbs_deb_leave(LBS_DEB_CS);
286} 289}
287 290
288
289/* 291/*
290 * Get the command result out of the card. 292 * Get the command result out of the card.
291 */ 293 */
@@ -293,27 +295,28 @@ static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
293{ 295{
294 unsigned long flags; 296 unsigned long flags;
295 int ret = -1; 297 int ret = -1;
296 u16 val; 298 u16 status;
297 299
298 lbs_deb_enter(LBS_DEB_CS); 300 lbs_deb_enter(LBS_DEB_CS);
299 301
300 /* is hardware ready? */ 302 /* is hardware ready? */
301 val = if_cs_read16(priv->card, IF_CS_C_STATUS); 303 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
302 if ((val & IF_CS_C_S_CMD_UPLD_RDY) == 0) { 304 if ((status & IF_CS_BIT_RESP) == 0) {
303 lbs_pr_err("card not ready for CMD\n"); 305 lbs_pr_err("no cmd response in card\n");
306 *len = 0;
304 goto out; 307 goto out;
305 } 308 }
306 309
307 *len = if_cs_read16(priv->card, IF_CS_C_CMD_LEN); 310 *len = if_cs_read16(priv->card, IF_CS_CARD_CMD_LEN);
308 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) { 311 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) {
309 lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); 312 lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len);
310 goto out; 313 goto out;
311 } 314 }
312 315
313 /* read even number of bytes, then odd byte if necessary */ 316 /* read even number of bytes, then odd byte if necessary */
314 if_cs_read16_rep(priv->card, IF_CS_C_CMD, data, *len/sizeof(u16)); 317 if_cs_read16_rep(priv->card, IF_CS_CARD_CMD, data, *len/sizeof(u16));
315 if (*len & 1) 318 if (*len & 1)
316 data[*len-1] = if_cs_read8(priv->card, IF_CS_C_CMD); 319 data[*len-1] = if_cs_read8(priv->card, IF_CS_CARD_CMD);
317 320
318 /* This is a workaround for a firmware that reports too much 321 /* This is a workaround for a firmware that reports too much
319 * bytes */ 322 * bytes */
@@ -330,7 +333,6 @@ out:
330 return ret; 333 return ret;
331} 334}
332 335
333
334static struct sk_buff *if_cs_receive_data(struct lbs_private *priv) 336static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
335{ 337{
336 struct sk_buff *skb = NULL; 338 struct sk_buff *skb = NULL;
@@ -339,7 +341,7 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
339 341
340 lbs_deb_enter(LBS_DEB_CS); 342 lbs_deb_enter(LBS_DEB_CS);
341 343
342 len = if_cs_read16(priv->card, IF_CS_C_READ_LEN); 344 len = if_cs_read16(priv->card, IF_CS_READ_LEN);
343 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { 345 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
344 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); 346 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len);
345 priv->stats.rx_dropped++; 347 priv->stats.rx_dropped++;
@@ -354,38 +356,19 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
354 data = skb->data; 356 data = skb->data;
355 357
356 /* read even number of bytes, then odd byte if necessary */ 358 /* read even number of bytes, then odd byte if necessary */
357 if_cs_read16_rep(priv->card, IF_CS_H_READ, data, len/sizeof(u16)); 359 if_cs_read16_rep(priv->card, IF_CS_READ, data, len/sizeof(u16));
358 if (len & 1) 360 if (len & 1)
359 data[len-1] = if_cs_read8(priv->card, IF_CS_H_READ); 361 data[len-1] = if_cs_read8(priv->card, IF_CS_READ);
360 362
361dat_err: 363dat_err:
362 if_cs_write16(priv->card, IF_CS_H_STATUS, IF_CS_H_STATUS_RX_OVER); 364 if_cs_write16(priv->card, IF_CS_HOST_STATUS, IF_CS_BIT_RX);
363 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_RX_OVER); 365 if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX);
364 366
365out: 367out:
366 lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); 368 lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb);
367 return skb; 369 return skb;
368} 370}
369 371
370
371
372/********************************************************************/
373/* Interrupts */
374/********************************************************************/
375
376static inline void if_cs_enable_ints(struct if_cs_card *card)
377{
378 lbs_deb_enter(LBS_DEB_CS);
379 if_cs_write16(card, IF_CS_H_INT_MASK, 0);
380}
381
382static inline void if_cs_disable_ints(struct if_cs_card *card)
383{
384 lbs_deb_enter(LBS_DEB_CS);
385 if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK);
386}
387
388
389static irqreturn_t if_cs_interrupt(int irq, void *data) 372static irqreturn_t if_cs_interrupt(int irq, void *data)
390{ 373{
391 struct if_cs_card *card = data; 374 struct if_cs_card *card = data;
@@ -394,10 +377,8 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
394 377
395 lbs_deb_enter(LBS_DEB_CS); 378 lbs_deb_enter(LBS_DEB_CS);
396 379
397 cause = if_cs_read16(card, IF_CS_C_INT_CAUSE); 380 /* Ask card interrupt cause register if there is something for us */
398 if_cs_write16(card, IF_CS_C_INT_CAUSE, cause & IF_CS_C_IC_MASK); 381 cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE);
399
400 lbs_deb_cs("cause 0x%04x\n", cause);
401 if (cause == 0) { 382 if (cause == 0) {
402 /* Not for us */ 383 /* Not for us */
403 return IRQ_NONE; 384 return IRQ_NONE;
@@ -409,11 +390,11 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
409 return IRQ_HANDLED; 390 return IRQ_HANDLED;
410 } 391 }
411 392
412 /* TODO: I'm not sure what the best ordering is */ 393 /* Clear interrupt cause */
413 394 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK);
414 cause = if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK; 395 lbs_deb_cs("cause 0x%04x\n", cause);
415 396
416 if (cause & IF_CS_C_S_RX_UPLD_RDY) { 397 if (cause & IF_CS_BIT_RX) {
417 struct sk_buff *skb; 398 struct sk_buff *skb;
418 lbs_deb_cs("rx packet\n"); 399 lbs_deb_cs("rx packet\n");
419 skb = if_cs_receive_data(priv); 400 skb = if_cs_receive_data(priv);
@@ -421,16 +402,16 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
421 lbs_process_rxed_packet(priv, skb); 402 lbs_process_rxed_packet(priv, skb);
422 } 403 }
423 404
424 if (cause & IF_CS_H_IC_TX_OVER) { 405 if (cause & IF_CS_BIT_TX) {
425 lbs_deb_cs("tx over\n"); 406 lbs_deb_cs("tx done\n");
426 lbs_host_to_card_done(priv); 407 lbs_host_to_card_done(priv);
427 } 408 }
428 409
429 if (cause & IF_CS_C_S_CMD_UPLD_RDY) { 410 if (cause & IF_CS_BIT_RESP) {
430 unsigned long flags; 411 unsigned long flags;
431 u8 i; 412 u8 i;
432 413
433 lbs_deb_cs("cmd upload ready\n"); 414 lbs_deb_cs("cmd resp\n");
434 spin_lock_irqsave(&priv->driver_lock, flags); 415 spin_lock_irqsave(&priv->driver_lock, flags);
435 i = (priv->resp_idx == 0) ? 1 : 0; 416 i = (priv->resp_idx == 0) ? 1 : 0;
436 spin_unlock_irqrestore(&priv->driver_lock, flags); 417 spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -444,15 +425,16 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
444 spin_unlock_irqrestore(&priv->driver_lock, flags); 425 spin_unlock_irqrestore(&priv->driver_lock, flags);
445 } 426 }
446 427
447 if (cause & IF_CS_H_IC_HOST_EVENT) { 428 if (cause & IF_CS_BIT_EVENT) {
448 u16 event = if_cs_read16(priv->card, IF_CS_C_STATUS) 429 u16 event = if_cs_read16(priv->card, IF_CS_CARD_STATUS)
449 & IF_CS_C_S_STATUS_MASK; 430 & IF_CS_CARD_STATUS_MASK;
450 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, 431 if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE,
451 IF_CS_H_IC_HOST_EVENT); 432 IF_CS_BIT_EVENT);
452 lbs_deb_cs("eventcause 0x%04x\n", event); 433 lbs_deb_cs("host event 0x%04x\n", event);
453 lbs_queue_event(priv, event >> 8 & 0xff); 434 lbs_queue_event(priv, event >> 8 & 0xff);
454 } 435 }
455 436
437 lbs_deb_leave(LBS_DEB_CS);
456 return IRQ_HANDLED; 438 return IRQ_HANDLED;
457} 439}
458 440
@@ -514,26 +496,26 @@ static int if_cs_prog_helper(struct if_cs_card *card)
514 496
515 /* "write the number of bytes to be sent to the I/O Command 497 /* "write the number of bytes to be sent to the I/O Command
516 * write length register" */ 498 * write length register" */
517 if_cs_write16(card, IF_CS_H_CMD_LEN, count); 499 if_cs_write16(card, IF_CS_HOST_CMD_LEN, count);
518 500
519 /* "write this to I/O Command port register as 16 bit writes */ 501 /* "write this to I/O Command port register as 16 bit writes */
520 if (count) 502 if (count)
521 if_cs_write16_rep(card, IF_CS_H_CMD, 503 if_cs_write16_rep(card, IF_CS_HOST_CMD,
522 &fw->data[sent], 504 &fw->data[sent],
523 count >> 1); 505 count >> 1);
524 506
525 /* "Assert the download over interrupt command in the Host 507 /* "Assert the download over interrupt command in the Host
526 * status register" */ 508 * status register" */
527 if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 509 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
528 510
529 /* "Assert the download over interrupt command in the Card 511 /* "Assert the download over interrupt command in the Card
530 * interrupt case register" */ 512 * interrupt case register" */
531 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 513 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
532 514
533 /* "The host polls the Card Status register ... for 50 ms before 515 /* "The host polls the Card Status register ... for 50 ms before
534 declaring a failure */ 516 declaring a failure */
535 ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, 517 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
536 IF_CS_C_S_CMD_DNLD_RDY); 518 IF_CS_BIT_COMMAND);
537 if (ret < 0) { 519 if (ret < 0) {
538 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 520 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
539 sent, ret); 521 sent, ret);
@@ -575,14 +557,15 @@ static int if_cs_prog_real(struct if_cs_card *card)
575 } 557 }
576 lbs_deb_cs("fw size %td\n", fw->size); 558 lbs_deb_cs("fw size %td\n", fw->size);
577 559
578 ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK); 560 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_SQ_READ_LOW,
561 IF_CS_CARD_SQ_HELPER_OK);
579 if (ret < 0) { 562 if (ret < 0) {
580 lbs_pr_err("helper firmware doesn't answer\n"); 563 lbs_pr_err("helper firmware doesn't answer\n");
581 goto err_release; 564 goto err_release;
582 } 565 }
583 566
584 for (sent = 0; sent < fw->size; sent += len) { 567 for (sent = 0; sent < fw->size; sent += len) {
585 len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW); 568 len = if_cs_read16(card, IF_CS_CARD_SQ_READ_LOW);
586 if (len & 1) { 569 if (len & 1) {
587 retry++; 570 retry++;
588 lbs_pr_info("odd, need to retry this firmware block\n"); 571 lbs_pr_info("odd, need to retry this firmware block\n");
@@ -600,16 +583,16 @@ static int if_cs_prog_real(struct if_cs_card *card)
600 } 583 }
601 584
602 585
603 if_cs_write16(card, IF_CS_H_CMD_LEN, len); 586 if_cs_write16(card, IF_CS_HOST_CMD_LEN, len);
604 587
605 if_cs_write16_rep(card, IF_CS_H_CMD, 588 if_cs_write16_rep(card, IF_CS_HOST_CMD,
606 &fw->data[sent], 589 &fw->data[sent],
607 (len+1) >> 1); 590 (len+1) >> 1);
608 if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 591 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
609 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 592 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
610 593
611 ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, 594 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
612 IF_CS_C_S_CMD_DNLD_RDY); 595 IF_CS_BIT_COMMAND);
613 if (ret < 0) { 596 if (ret < 0) {
614 lbs_pr_err("can't download firmware at 0x%x\n", sent); 597 lbs_pr_err("can't download firmware at 0x%x\n", sent);
615 goto err_release; 598 goto err_release;
@@ -837,7 +820,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
837 820
838 /* Clear any interrupt cause that happend while sending 821 /* Clear any interrupt cause that happend while sending
839 * firmware/initializing card */ 822 * firmware/initializing card */
840 if_cs_write16(card, IF_CS_C_INT_CAUSE, IF_CS_C_IC_MASK); 823 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
841 if_cs_enable_ints(card); 824 if_cs_enable_ints(card);
842 825
843 /* And finally bring the card up */ 826 /* And finally bring the card up */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 8032df72aaab..24783103a7dd 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -7,6 +7,10 @@
7#include <linux/netdevice.h> 7#include <linux/netdevice.h>
8#include <linux/usb.h> 8#include <linux/usb.h>
9 9
10#ifdef CONFIG_OLPC
11#include <asm/olpc.h>
12#endif
13
10#define DRV_NAME "usb8xxx" 14#define DRV_NAME "usb8xxx"
11 15
12#include "host.h" 16#include "host.h"
@@ -146,6 +150,14 @@ static void if_usb_fw_timeo(unsigned long priv)
146 wake_up(&cardp->fw_wq); 150 wake_up(&cardp->fw_wq);
147} 151}
148 152
153#ifdef CONFIG_OLPC
154static void if_usb_reset_olpc_card(struct lbs_private *priv)
155{
156 printk(KERN_CRIT "Resetting OLPC wireless via EC...\n");
157 olpc_ec_cmd(0x25, NULL, 0, NULL, 0);
158}
159#endif
160
149/** 161/**
150 * @brief sets the configuration values 162 * @brief sets the configuration values
151 * @param ifnum interface number 163 * @param ifnum interface number
@@ -231,6 +243,11 @@ static int if_usb_probe(struct usb_interface *intf,
231 cardp->priv->fw_ready = 1; 243 cardp->priv->fw_ready = 1;
232 244
233 priv->hw_host_to_card = if_usb_host_to_card; 245 priv->hw_host_to_card = if_usb_host_to_card;
246#ifdef CONFIG_OLPC
247 if (machine_is_olpc())
248 priv->reset_card = if_usb_reset_olpc_card;
249#endif
250
234 cardp->boot2_version = udev->descriptor.bcdDevice; 251 cardp->boot2_version = udev->descriptor.bcdDevice;
235 252
236 if_usb_submit_rx_urb(cardp); 253 if_usb_submit_rx_urb(cardp);
@@ -364,6 +381,11 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
364 ret = usb_reset_device(cardp->udev); 381 ret = usb_reset_device(cardp->udev);
365 msleep(100); 382 msleep(100);
366 383
384#ifdef CONFIG_OLPC
385 if (ret && machine_is_olpc())
386 if_usb_reset_olpc_card(NULL);
387#endif
388
367 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); 389 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret);
368 390
369 return ret; 391 return ret;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index acfc4bfcc262..abd6d9ed8f4b 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -11,6 +11,7 @@
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14#include <linux/stddef.h>
14 15
15#include <net/iw_handler.h> 16#include <net/iw_handler.h>
16#include <net/ieee80211.h> 17#include <net/ieee80211.h>
@@ -343,14 +344,15 @@ static ssize_t lbs_mesh_set(struct device *dev,
343{ 344{
344 struct lbs_private *priv = to_net_dev(dev)->priv; 345 struct lbs_private *priv = to_net_dev(dev)->priv;
345 int enable; 346 int enable;
346 int ret; 347 int ret, action = CMD_ACT_MESH_CONFIG_STOP;
347 348
348 sscanf(buf, "%x", &enable); 349 sscanf(buf, "%x", &enable);
349 enable = !!enable; 350 enable = !!enable;
350 if (enable == !!priv->mesh_dev) 351 if (enable == !!priv->mesh_dev)
351 return count; 352 return count;
352 353 if (enable)
353 ret = lbs_mesh_config(priv, enable, priv->curbssparams.channel); 354 action = CMD_ACT_MESH_CONFIG_START;
355 ret = lbs_mesh_config(priv, action, priv->curbssparams.channel);
354 if (ret) 356 if (ret)
355 return ret; 357 return ret;
356 358
@@ -446,6 +448,8 @@ static int lbs_mesh_stop(struct net_device *dev)
446 448
447 spin_unlock_irq(&priv->driver_lock); 449 spin_unlock_irq(&priv->driver_lock);
448 450
451 schedule_work(&priv->mcast_work);
452
449 lbs_deb_leave(LBS_DEB_MESH); 453 lbs_deb_leave(LBS_DEB_MESH);
450 return 0; 454 return 0;
451} 455}
@@ -467,6 +471,8 @@ static int lbs_eth_stop(struct net_device *dev)
467 netif_stop_queue(dev); 471 netif_stop_queue(dev);
468 spin_unlock_irq(&priv->driver_lock); 472 spin_unlock_irq(&priv->driver_lock);
469 473
474 schedule_work(&priv->mcast_work);
475
470 lbs_deb_leave(LBS_DEB_NET); 476 lbs_deb_leave(LBS_DEB_NET);
471 return 0; 477 return 0;
472} 478}
@@ -563,89 +569,116 @@ done:
563 return ret; 569 return ret;
564} 570}
565 571
566static int lbs_copy_multicast_address(struct lbs_private *priv, 572
567 struct net_device *dev) 573static inline int mac_in_list(unsigned char *list, int list_len,
574 unsigned char *mac)
568{ 575{
569 int i = 0; 576 while (list_len) {
570 struct dev_mc_list *mcptr = dev->mc_list; 577 if (!memcmp(list, mac, ETH_ALEN))
578 return 1;
579 list += ETH_ALEN;
580 list_len--;
581 }
582 return 0;
583}
584
585
586static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
587 struct net_device *dev, int nr_addrs)
588{
589 int i = nr_addrs;
590 struct dev_mc_list *mc_list;
591 DECLARE_MAC_BUF(mac);
592
593 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
594 return nr_addrs;
595
596 netif_tx_lock_bh(dev);
597 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
598 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
599 lbs_deb_net("mcast address %s:%s skipped\n", dev->name,
600 print_mac(mac, mc_list->dmi_addr));
601 continue;
602 }
571 603
572 for (i = 0; i < dev->mc_count; i++) { 604 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
573 memcpy(&priv->multicastlist[i], mcptr->dmi_addr, ETH_ALEN); 605 break;
574 mcptr = mcptr->next; 606 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
607 lbs_deb_net("mcast address %s:%s added to filter\n", dev->name,
608 print_mac(mac, mc_list->dmi_addr));
609 i++;
575 } 610 }
611 netif_tx_unlock_bh(dev);
612 if (mc_list)
613 return -EOVERFLOW;
614
576 return i; 615 return i;
577} 616}
578 617
579static void lbs_set_multicast_list(struct net_device *dev) 618static void lbs_set_mcast_worker(struct work_struct *work)
580{ 619{
581 struct lbs_private *priv = dev->priv; 620 struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work);
582 int old_mac_control; 621 struct cmd_ds_mac_multicast_adr mcast_cmd;
583 DECLARE_MAC_BUF(mac); 622 int dev_flags;
623 int nr_addrs;
624 int old_mac_control = priv->mac_control;
584 625
585 lbs_deb_enter(LBS_DEB_NET); 626 lbs_deb_enter(LBS_DEB_NET);
586 627
587 old_mac_control = priv->mac_control; 628 dev_flags = priv->dev->flags;
588 629 if (priv->mesh_dev)
589 if (dev->flags & IFF_PROMISC) { 630 dev_flags |= priv->mesh_dev->flags;
590 lbs_deb_net("enable promiscuous mode\n"); 631
591 priv->mac_control |= 632 if (dev_flags & IFF_PROMISC) {
592 CMD_ACT_MAC_PROMISCUOUS_ENABLE; 633 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
593 priv->mac_control &= 634 priv->mac_control &= ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE |
594 ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | 635 CMD_ACT_MAC_MULTICAST_ENABLE);
595 CMD_ACT_MAC_MULTICAST_ENABLE); 636 goto out_set_mac_control;
596 } else { 637 } else if (dev_flags & IFF_ALLMULTI) {
597 /* Multicast */ 638 do_allmulti:
598 priv->mac_control &= 639 priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
599 ~CMD_ACT_MAC_PROMISCUOUS_ENABLE; 640 priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE |
600 641 CMD_ACT_MAC_MULTICAST_ENABLE);
601 if (dev->flags & IFF_ALLMULTI || dev->mc_count > 642 goto out_set_mac_control;
602 MRVDRV_MAX_MULTICAST_LIST_SIZE) {
603 lbs_deb_net( "enabling all multicast\n");
604 priv->mac_control |=
605 CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
606 priv->mac_control &=
607 ~CMD_ACT_MAC_MULTICAST_ENABLE;
608 } else {
609 priv->mac_control &=
610 ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
611
612 if (!dev->mc_count) {
613 lbs_deb_net("no multicast addresses, "
614 "disabling multicast\n");
615 priv->mac_control &=
616 ~CMD_ACT_MAC_MULTICAST_ENABLE;
617 } else {
618 int i;
619
620 priv->mac_control |=
621 CMD_ACT_MAC_MULTICAST_ENABLE;
622
623 priv->nr_of_multicastmacaddr =
624 lbs_copy_multicast_address(priv, dev);
625
626 lbs_deb_net("multicast addresses: %d\n",
627 dev->mc_count);
628
629 for (i = 0; i < dev->mc_count; i++) {
630 lbs_deb_net("Multicast address %d: %s\n",
631 i, print_mac(mac,
632 priv->multicastlist[i]));
633 }
634 /* send multicast addresses to firmware */
635 lbs_prepare_and_send_command(priv,
636 CMD_MAC_MULTICAST_ADR,
637 CMD_ACT_SET, 0, 0,
638 NULL);
639 }
640 }
641 } 643 }
642 644
645 /* Once for priv->dev, again for priv->mesh_dev if it exists */
646 nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->dev, 0);
647 if (nr_addrs >= 0 && priv->mesh_dev)
648 nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->mesh_dev, nr_addrs);
649 if (nr_addrs < 0)
650 goto do_allmulti;
651
652 if (nr_addrs) {
653 int size = offsetof(struct cmd_ds_mac_multicast_adr,
654 maclist[6*nr_addrs]);
655
656 mcast_cmd.action = cpu_to_le16(CMD_ACT_SET);
657 mcast_cmd.hdr.size = cpu_to_le16(size);
658 mcast_cmd.nr_of_adrs = cpu_to_le16(nr_addrs);
659
660 lbs_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &mcast_cmd.hdr, size);
661
662 priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
663 } else
664 priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
665
666 priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE |
667 CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
668 out_set_mac_control:
643 if (priv->mac_control != old_mac_control) 669 if (priv->mac_control != old_mac_control)
644 lbs_set_mac_control(priv); 670 lbs_set_mac_control(priv);
645 671
646 lbs_deb_leave(LBS_DEB_NET); 672 lbs_deb_leave(LBS_DEB_NET);
647} 673}
648 674
675static void lbs_set_multicast_list(struct net_device *dev)
676{
677 struct lbs_private *priv = dev->priv;
678
679 schedule_work(&priv->mcast_work);
680}
681
649/** 682/**
650 * @brief This function handles the major jobs in the LBS driver. 683 * @brief This function handles the major jobs in the LBS driver.
651 * It handles all events generated by firmware, RX data received 684 * It handles all events generated by firmware, RX data received
@@ -689,20 +722,20 @@ static int lbs_thread(void *data)
689 shouldsleep = 1; /* Something is en route to the device already */ 722 shouldsleep = 1; /* Something is en route to the device already */
690 else if (priv->tx_pending_len > 0) 723 else if (priv->tx_pending_len > 0)
691 shouldsleep = 0; /* We've a packet to send */ 724 shouldsleep = 0; /* We've a packet to send */
725 else if (priv->resp_len[priv->resp_idx])
726 shouldsleep = 0; /* We have a command response */
692 else if (priv->cur_cmd) 727 else if (priv->cur_cmd)
693 shouldsleep = 1; /* Can't send a command; one already running */ 728 shouldsleep = 1; /* Can't send a command; one already running */
694 else if (!list_empty(&priv->cmdpendingq)) 729 else if (!list_empty(&priv->cmdpendingq))
695 shouldsleep = 0; /* We have a command to send */ 730 shouldsleep = 0; /* We have a command to send */
696 else if (__kfifo_len(priv->event_fifo)) 731 else if (__kfifo_len(priv->event_fifo))
697 shouldsleep = 0; /* We have an event to process */ 732 shouldsleep = 0; /* We have an event to process */
698 else if (priv->resp_len[priv->resp_idx])
699 shouldsleep = 0; /* We have a command response */
700 else 733 else
701 shouldsleep = 1; /* No command */ 734 shouldsleep = 1; /* No command */
702 735
703 if (shouldsleep) { 736 if (shouldsleep) {
704 lbs_deb_thread("sleeping, connect_status %d, " 737 lbs_deb_thread("sleeping, connect_status %d, "
705 "ps_mode %d, ps_state %d\n", 738 "psmode %d, psstate %d\n",
706 priv->connect_status, 739 priv->connect_status,
707 priv->psmode, priv->psstate); 740 priv->psmode, priv->psstate);
708 spin_unlock_irq(&priv->driver_lock); 741 spin_unlock_irq(&priv->driver_lock);
@@ -749,16 +782,21 @@ static int lbs_thread(void *data)
749 if (priv->cmd_timed_out && priv->cur_cmd) { 782 if (priv->cmd_timed_out && priv->cur_cmd) {
750 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 783 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
751 784
752 if (++priv->nr_retries > 10) { 785 if (++priv->nr_retries > 3) {
753 lbs_pr_info("Excessive timeouts submitting command %x\n", 786 lbs_pr_info("Excessive timeouts submitting "
754 le16_to_cpu(cmdnode->cmdbuf->command)); 787 "command 0x%04x\n",
788 le16_to_cpu(cmdnode->cmdbuf->command));
755 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 789 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
756 priv->nr_retries = 0; 790 priv->nr_retries = 0;
791 if (priv->reset_card)
792 priv->reset_card(priv);
757 } else { 793 } else {
758 priv->cur_cmd = NULL; 794 priv->cur_cmd = NULL;
759 priv->dnld_sent = DNLD_RES_RECEIVED; 795 priv->dnld_sent = DNLD_RES_RECEIVED;
760 lbs_pr_info("requeueing command %x due to timeout (#%d)\n", 796 lbs_pr_info("requeueing command 0x%04x due "
761 le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); 797 "to timeout (#%d)\n",
798 le16_to_cpu(cmdnode->cmdbuf->command),
799 priv->nr_retries);
762 800
763 /* Stick it back at the _top_ of the pending queue 801 /* Stick it back at the _top_ of the pending queue
764 for immediate resubmission */ 802 for immediate resubmission */
@@ -890,7 +928,7 @@ int lbs_suspend(struct lbs_private *priv)
890} 928}
891EXPORT_SYMBOL_GPL(lbs_suspend); 929EXPORT_SYMBOL_GPL(lbs_suspend);
892 930
893int lbs_resume(struct lbs_private *priv) 931void lbs_resume(struct lbs_private *priv)
894{ 932{
895 lbs_deb_enter(LBS_DEB_FW); 933 lbs_deb_enter(LBS_DEB_FW);
896 934
@@ -906,7 +944,6 @@ int lbs_resume(struct lbs_private *priv)
906 netif_device_attach(priv->mesh_dev); 944 netif_device_attach(priv->mesh_dev);
907 945
908 lbs_deb_leave(LBS_DEB_FW); 946 lbs_deb_leave(LBS_DEB_FW);
909 return 0;
910} 947}
911EXPORT_SYMBOL_GPL(lbs_resume); 948EXPORT_SYMBOL_GPL(lbs_resume);
912 949
@@ -929,20 +966,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
929 */ 966 */
930 memset(priv->current_addr, 0xff, ETH_ALEN); 967 memset(priv->current_addr, 0xff, ETH_ALEN);
931 ret = lbs_update_hw_spec(priv); 968 ret = lbs_update_hw_spec(priv);
932 if (ret) { 969 if (ret)
933 ret = -1;
934 goto done; 970 goto done;
935 }
936 971
937 lbs_set_mac_control(priv); 972 lbs_set_mac_control(priv);
938
939 ret = lbs_get_data_rate(priv);
940 if (ret < 0) {
941 ret = -1;
942 goto done;
943 }
944
945 ret = 0;
946done: 973done:
947 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 974 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
948 return ret; 975 return ret;
@@ -960,12 +987,11 @@ static void command_timer_fn(unsigned long data)
960 lbs_deb_enter(LBS_DEB_CMD); 987 lbs_deb_enter(LBS_DEB_CMD);
961 spin_lock_irqsave(&priv->driver_lock, flags); 988 spin_lock_irqsave(&priv->driver_lock, flags);
962 989
963 if (!priv->cur_cmd) { 990 if (!priv->cur_cmd)
964 lbs_pr_info("Command timer expired; no pending command\n");
965 goto out; 991 goto out;
966 }
967 992
968 lbs_pr_info("Command %x timed out\n", le16_to_cpu(priv->cur_cmd->cmdbuf->command)); 993 lbs_pr_info("command 0x%04x timed out\n",
994 le16_to_cpu(priv->cur_cmd->cmdbuf->command));
969 995
970 priv->cmd_timed_out = 1; 996 priv->cmd_timed_out = 1;
971 wake_up_interruptible(&priv->waitq); 997 wake_up_interruptible(&priv->waitq);
@@ -1019,7 +1045,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
1019 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1045 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
1020 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1046 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1021 priv->radioon = RADIO_ON; 1047 priv->radioon = RADIO_ON;
1022 priv->auto_rate = 1; 1048 priv->enablehwauto = 1;
1023 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1049 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1024 priv->psmode = LBS802_11POWERMODECAM; 1050 priv->psmode = LBS802_11POWERMODECAM;
1025 priv->psstate = PS_STATE_FULL_POWER; 1051 priv->psstate = PS_STATE_FULL_POWER;
@@ -1134,6 +1160,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1134 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 1160 priv->work_thread = create_singlethread_workqueue("lbs_worker");
1135 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); 1161 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
1136 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 1162 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1163 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
1137 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker); 1164 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
1138 1165
1139 sprintf(priv->mesh_ssid, "mesh"); 1166 sprintf(priv->mesh_ssid, "mesh");
@@ -1156,7 +1183,7 @@ done:
1156EXPORT_SYMBOL_GPL(lbs_add_card); 1183EXPORT_SYMBOL_GPL(lbs_add_card);
1157 1184
1158 1185
1159int lbs_remove_card(struct lbs_private *priv) 1186void lbs_remove_card(struct lbs_private *priv)
1160{ 1187{
1161 struct net_device *dev = priv->dev; 1188 struct net_device *dev = priv->dev;
1162 union iwreq_data wrqu; 1189 union iwreq_data wrqu;
@@ -1168,8 +1195,9 @@ int lbs_remove_card(struct lbs_private *priv)
1168 1195
1169 dev = priv->dev; 1196 dev = priv->dev;
1170 1197
1171 cancel_delayed_work(&priv->scan_work); 1198 cancel_delayed_work_sync(&priv->scan_work);
1172 cancel_delayed_work(&priv->assoc_work); 1199 cancel_delayed_work_sync(&priv->assoc_work);
1200 cancel_work_sync(&priv->mcast_work);
1173 destroy_workqueue(priv->work_thread); 1201 destroy_workqueue(priv->work_thread);
1174 1202
1175 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 1203 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
@@ -1191,7 +1219,6 @@ int lbs_remove_card(struct lbs_private *priv)
1191 free_netdev(dev); 1219 free_netdev(dev);
1192 1220
1193 lbs_deb_leave(LBS_DEB_MAIN); 1221 lbs_deb_leave(LBS_DEB_MAIN);
1194 return 0;
1195} 1222}
1196EXPORT_SYMBOL_GPL(lbs_remove_card); 1223EXPORT_SYMBOL_GPL(lbs_remove_card);
1197 1224
@@ -1236,9 +1263,11 @@ int lbs_start_card(struct lbs_private *priv)
1236 useful */ 1263 useful */
1237 1264
1238 priv->mesh_tlv = 0x100 + 291; 1265 priv->mesh_tlv = 0x100 + 291;
1239 if (lbs_mesh_config(priv, 1, priv->curbssparams.channel)) { 1266 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1267 priv->curbssparams.channel)) {
1240 priv->mesh_tlv = 0x100 + 37; 1268 priv->mesh_tlv = 0x100 + 37;
1241 if (lbs_mesh_config(priv, 1, priv->curbssparams.channel)) 1269 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1270 priv->curbssparams.channel))
1242 priv->mesh_tlv = 0; 1271 priv->mesh_tlv = 0;
1243 } 1272 }
1244 if (priv->mesh_tlv) { 1273 if (priv->mesh_tlv) {
@@ -1262,24 +1291,28 @@ done:
1262EXPORT_SYMBOL_GPL(lbs_start_card); 1291EXPORT_SYMBOL_GPL(lbs_start_card);
1263 1292
1264 1293
1265int lbs_stop_card(struct lbs_private *priv) 1294void lbs_stop_card(struct lbs_private *priv)
1266{ 1295{
1267 struct net_device *dev = priv->dev; 1296 struct net_device *dev = priv->dev;
1268 int ret = -1;
1269 struct cmd_ctrl_node *cmdnode; 1297 struct cmd_ctrl_node *cmdnode;
1270 unsigned long flags; 1298 unsigned long flags;
1271 1299
1272 lbs_deb_enter(LBS_DEB_MAIN); 1300 lbs_deb_enter(LBS_DEB_MAIN);
1273 1301
1302 if (!priv)
1303 goto out;
1304
1274 netif_stop_queue(priv->dev); 1305 netif_stop_queue(priv->dev);
1275 netif_carrier_off(priv->dev); 1306 netif_carrier_off(priv->dev);
1276 1307
1277 lbs_debugfs_remove_one(priv); 1308 lbs_debugfs_remove_one(priv);
1278 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1309 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1279 if (priv->mesh_tlv) 1310 if (priv->mesh_tlv) {
1280 device_remove_file(&dev->dev, &dev_attr_lbs_mesh); 1311 device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
1312 }
1281 1313
1282 /* Flush pending command nodes */ 1314 /* Flush pending command nodes */
1315 del_timer_sync(&priv->command_timer);
1283 spin_lock_irqsave(&priv->driver_lock, flags); 1316 spin_lock_irqsave(&priv->driver_lock, flags);
1284 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 1317 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
1285 cmdnode->result = -ENOENT; 1318 cmdnode->result = -ENOENT;
@@ -1290,8 +1323,8 @@ int lbs_stop_card(struct lbs_private *priv)
1290 1323
1291 unregister_netdev(dev); 1324 unregister_netdev(dev);
1292 1325
1293 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); 1326out:
1294 return ret; 1327 lbs_deb_leave(LBS_DEB_MAIN);
1295} 1328}
1296EXPORT_SYMBOL_GPL(lbs_stop_card); 1329EXPORT_SYMBOL_GPL(lbs_stop_card);
1297 1330
@@ -1332,6 +1365,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
1332#ifdef WIRELESS_EXT 1365#ifdef WIRELESS_EXT
1333 mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def; 1366 mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def;
1334#endif 1367#endif
1368 mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1369 mesh_dev->set_multicast_list = lbs_set_multicast_list;
1335 /* Register virtual mesh interface */ 1370 /* Register virtual mesh interface */
1336 ret = register_netdev(mesh_dev); 1371 ret = register_netdev(mesh_dev);
1337 if (ret) { 1372 if (ret) {
@@ -1343,6 +1378,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
1343 if (ret) 1378 if (ret)
1344 goto err_unregister; 1379 goto err_unregister;
1345 1380
1381 lbs_persist_config_init(mesh_dev);
1382
1346 /* Everything successful */ 1383 /* Everything successful */
1347 ret = 0; 1384 ret = 0;
1348 goto done; 1385 goto done;
@@ -1369,8 +1406,9 @@ static void lbs_remove_mesh(struct lbs_private *priv)
1369 1406
1370 lbs_deb_enter(LBS_DEB_MESH); 1407 lbs_deb_enter(LBS_DEB_MESH);
1371 netif_stop_queue(mesh_dev); 1408 netif_stop_queue(mesh_dev);
1372 netif_carrier_off(priv->mesh_dev); 1409 netif_carrier_off(mesh_dev);
1373 sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); 1410 sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
1411 lbs_persist_config_remove(mesh_dev);
1374 unregister_netdev(mesh_dev); 1412 unregister_netdev(mesh_dev);
1375 priv->mesh_dev = NULL; 1413 priv->mesh_dev = NULL;
1376 free_netdev(mesh_dev); 1414 free_netdev(mesh_dev);
@@ -1533,10 +1571,11 @@ static void lbs_remove_rtap(struct lbs_private *priv)
1533{ 1571{
1534 lbs_deb_enter(LBS_DEB_MAIN); 1572 lbs_deb_enter(LBS_DEB_MAIN);
1535 if (priv->rtap_net_dev == NULL) 1573 if (priv->rtap_net_dev == NULL)
1536 return; 1574 goto out;
1537 unregister_netdev(priv->rtap_net_dev); 1575 unregister_netdev(priv->rtap_net_dev);
1538 free_netdev(priv->rtap_net_dev); 1576 free_netdev(priv->rtap_net_dev);
1539 priv->rtap_net_dev = NULL; 1577 priv->rtap_net_dev = NULL;
1578out:
1540 lbs_deb_leave(LBS_DEB_MAIN); 1579 lbs_deb_leave(LBS_DEB_MAIN);
1541} 1580}
1542 1581
@@ -1563,7 +1602,6 @@ static int lbs_add_rtap(struct lbs_private *priv)
1563 rtap_dev->stop = lbs_rtap_stop; 1602 rtap_dev->stop = lbs_rtap_stop;
1564 rtap_dev->get_stats = lbs_rtap_get_stats; 1603 rtap_dev->get_stats = lbs_rtap_get_stats;
1565 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; 1604 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
1566 rtap_dev->set_multicast_list = lbs_set_multicast_list;
1567 rtap_dev->priv = priv; 1605 rtap_dev->priv = priv;
1568 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); 1606 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
1569 1607
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
new file mode 100644
index 000000000000..6d0ff8decaf7
--- /dev/null
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -0,0 +1,453 @@
1#include <linux/moduleparam.h>
2#include <linux/delay.h>
3#include <linux/etherdevice.h>
4#include <linux/netdevice.h>
5#include <linux/if_arp.h>
6#include <linux/kthread.h>
7#include <linux/kfifo.h>
8
9#include "host.h"
10#include "decl.h"
11#include "dev.h"
12#include "wext.h"
13#include "debugfs.h"
14#include "scan.h"
15#include "assoc.h"
16#include "cmd.h"
17
18static int mesh_get_default_parameters(struct device *dev,
19 struct mrvl_mesh_defaults *defs)
20{
21 struct lbs_private *priv = to_net_dev(dev)->priv;
22 struct cmd_ds_mesh_config cmd;
23 int ret;
24
25 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
26 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET,
27 CMD_TYPE_MESH_GET_DEFAULTS);
28
29 if (ret)
30 return -EOPNOTSUPP;
31
32 memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults));
33
34 return 0;
35}
36
37/**
38 * @brief Get function for sysfs attribute bootflag
39 */
40static ssize_t bootflag_get(struct device *dev,
41 struct device_attribute *attr, char *buf)
42{
43 struct mrvl_mesh_defaults defs;
44 int ret;
45
46 ret = mesh_get_default_parameters(dev, &defs);
47
48 if (ret)
49 return ret;
50
51 return snprintf(buf, 12, "0x%x\n", le32_to_cpu(defs.bootflag));
52}
53
54/**
55 * @brief Set function for sysfs attribute bootflag
56 */
57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
58 const char *buf, size_t count)
59{
60 struct lbs_private *priv = to_net_dev(dev)->priv;
61 struct cmd_ds_mesh_config cmd;
62 uint32_t datum;
63 int ret;
64
65 memset(&cmd, 0, sizeof(cmd));
66 ret = sscanf(buf, "%x", &datum);
67 if (ret != 1)
68 return -EINVAL;
69
70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
71 cmd.length = cpu_to_le16(sizeof(uint32_t));
72 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
73 CMD_TYPE_MESH_SET_BOOTFLAG);
74 if (ret)
75 return ret;
76
77 return strlen(buf);
78}
79
80/**
81 * @brief Get function for sysfs attribute boottime
82 */
83static ssize_t boottime_get(struct device *dev,
84 struct device_attribute *attr, char *buf)
85{
86 struct mrvl_mesh_defaults defs;
87 int ret;
88
89 ret = mesh_get_default_parameters(dev, &defs);
90
91 if (ret)
92 return ret;
93
94 return snprintf(buf, 12, "0x%x\n", defs.boottime);
95}
96
97/**
98 * @brief Set function for sysfs attribute boottime
99 */
100static ssize_t boottime_set(struct device *dev,
101 struct device_attribute *attr, const char *buf, size_t count)
102{
103 struct lbs_private *priv = to_net_dev(dev)->priv;
104 struct cmd_ds_mesh_config cmd;
105 uint32_t datum;
106 int ret;
107
108 memset(&cmd, 0, sizeof(cmd));
109 ret = sscanf(buf, "%x", &datum);
110 if (ret != 1)
111 return -EINVAL;
112
113 /* A too small boot time will result in the device booting into
114 * standalone (no-host) mode before the host can take control of it,
115 * so the change will be hard to revert. This may be a desired
116 * feature (e.g to configure a very fast boot time for devices that
117 * will not be attached to a host), but dangerous. So I'm enforcing a
118 * lower limit of 20 seconds: remove and recompile the driver if this
119 * does not work for you.
120 */
121 datum = (datum < 20) ? 20 : datum;
122 cmd.data[0] = datum;
123 cmd.length = cpu_to_le16(sizeof(uint8_t));
124 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
125 CMD_TYPE_MESH_SET_BOOTTIME);
126 if (ret)
127 return ret;
128
129 return strlen(buf);
130}
131
132/**
133 * @brief Get function for sysfs attribute channel
134 */
135static ssize_t channel_get(struct device *dev,
136 struct device_attribute *attr, char *buf)
137{
138 struct mrvl_mesh_defaults defs;
139 int ret;
140
141 ret = mesh_get_default_parameters(dev, &defs);
142
143 if (ret)
144 return ret;
145
146 return snprintf(buf, 12, "0x%x\n", le16_to_cpu(defs.channel));
147}
148
149/**
150 * @brief Set function for sysfs attribute channel
151 */
152static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
153 const char *buf, size_t count)
154{
155 struct lbs_private *priv = to_net_dev(dev)->priv;
156 struct cmd_ds_mesh_config cmd;
157 uint16_t datum;
158 int ret;
159
160 memset(&cmd, 0, sizeof(cmd));
161 ret = sscanf(buf, "%hx", &datum);
162 if (ret != 1 || datum < 1 || datum > 11)
163 return -EINVAL;
164
165 *((__le16 *)&cmd.data[0]) = cpu_to_le16(datum);
166 cmd.length = cpu_to_le16(sizeof(uint16_t));
167 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
168 CMD_TYPE_MESH_SET_DEF_CHANNEL);
169 if (ret)
170 return ret;
171
172 return strlen(buf);
173}
174
175/**
176 * @brief Get function for sysfs attribute mesh_id
177 */
178static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
179 char *buf)
180{
181 struct mrvl_mesh_defaults defs;
182 int maxlen;
183 int ret;
184
185 ret = mesh_get_default_parameters(dev, &defs);
186
187 if (ret)
188 return ret;
189
190 if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) {
191 lbs_pr_err("inconsistent mesh ID length");
192 defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE;
193 }
194
195 /* SSID not null terminated: reserve room for \0 + \n */
196 maxlen = defs.meshie.val.mesh_id_len + 2;
197 maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
198
199 defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
200
201 return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
202}
203
204/**
205 * @brief Set function for sysfs attribute mesh_id
206 */
207static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
208 const char *buf, size_t count)
209{
210 struct cmd_ds_mesh_config cmd;
211 struct mrvl_mesh_defaults defs;
212 struct mrvl_meshie *ie;
213 struct lbs_private *priv = to_net_dev(dev)->priv;
214 int len;
215 int ret;
216
217 if (count < 2 || count > IW_ESSID_MAX_SIZE + 1)
218 return -EINVAL;
219
220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
221 ie = (struct mrvl_meshie *) &cmd.data[0];
222
223 /* fetch all other Information Element parameters */
224 ret = mesh_get_default_parameters(dev, &defs);
225
226 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
227
228 /* transfer IE elements */
229 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
230
231 len = count - 1;
232 memcpy(ie->val.mesh_id, buf, len);
233 /* SSID len */
234 ie->val.mesh_id_len = len;
235 /* IE len */
236 ie->hdr.len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len;
237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE);
240 if (ret)
241 return ret;
242
243 return strlen(buf);
244}
245
246/**
247 * @brief Get function for sysfs attribute protocol_id
248 */
249static ssize_t protocol_id_get(struct device *dev,
250 struct device_attribute *attr, char *buf)
251{
252 struct mrvl_mesh_defaults defs;
253 int ret;
254
255 ret = mesh_get_default_parameters(dev, &defs);
256
257 if (ret)
258 return ret;
259
260 return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id);
261}
262
263/**
264 * @brief Set function for sysfs attribute protocol_id
265 */
266static ssize_t protocol_id_set(struct device *dev,
267 struct device_attribute *attr, const char *buf, size_t count)
268{
269 struct cmd_ds_mesh_config cmd;
270 struct mrvl_mesh_defaults defs;
271 struct mrvl_meshie *ie;
272 struct lbs_private *priv = to_net_dev(dev)->priv;
273 uint32_t datum;
274 int ret;
275
276 memset(&cmd, 0, sizeof(cmd));
277 ret = sscanf(buf, "%x", &datum);
278 if (ret != 1)
279 return -EINVAL;
280
281 /* fetch all other Information Element parameters */
282 ret = mesh_get_default_parameters(dev, &defs);
283
284 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
285
286 /* transfer IE elements */
287 ie = (struct mrvl_meshie *) &cmd.data[0];
288 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
289 /* update protocol id */
290 ie->val.active_protocol_id = datum;
291
292 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
293 CMD_TYPE_MESH_SET_MESH_IE);
294 if (ret)
295 return ret;
296
297 return strlen(buf);
298}
299
300/**
301 * @brief Get function for sysfs attribute metric_id
302 */
303static ssize_t metric_id_get(struct device *dev,
304 struct device_attribute *attr, char *buf)
305{
306 struct mrvl_mesh_defaults defs;
307 int ret;
308
309 ret = mesh_get_default_parameters(dev, &defs);
310
311 if (ret)
312 return ret;
313
314 return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id);
315}
316
317/**
318 * @brief Set function for sysfs attribute metric_id
319 */
320static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
321 const char *buf, size_t count)
322{
323 struct cmd_ds_mesh_config cmd;
324 struct mrvl_mesh_defaults defs;
325 struct mrvl_meshie *ie;
326 struct lbs_private *priv = to_net_dev(dev)->priv;
327 uint32_t datum;
328 int ret;
329
330 memset(&cmd, 0, sizeof(cmd));
331 ret = sscanf(buf, "%x", &datum);
332 if (ret != 1)
333 return -EINVAL;
334
335 /* fetch all other Information Element parameters */
336 ret = mesh_get_default_parameters(dev, &defs);
337
338 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
339
340 /* transfer IE elements */
341 ie = (struct mrvl_meshie *) &cmd.data[0];
342 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
343 /* update metric id */
344 ie->val.active_metric_id = datum;
345
346 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
347 CMD_TYPE_MESH_SET_MESH_IE);
348 if (ret)
349 return ret;
350
351 return strlen(buf);
352}
353
354/**
355 * @brief Get function for sysfs attribute capability
356 */
357static ssize_t capability_get(struct device *dev,
358 struct device_attribute *attr, char *buf)
359{
360 struct mrvl_mesh_defaults defs;
361 int ret;
362
363 ret = mesh_get_default_parameters(dev, &defs);
364
365 if (ret)
366 return ret;
367
368 return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability);
369}
370
371/**
372 * @brief Set function for sysfs attribute capability
373 */
374static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
375 const char *buf, size_t count)
376{
377 struct cmd_ds_mesh_config cmd;
378 struct mrvl_mesh_defaults defs;
379 struct mrvl_meshie *ie;
380 struct lbs_private *priv = to_net_dev(dev)->priv;
381 uint32_t datum;
382 int ret;
383
384 memset(&cmd, 0, sizeof(cmd));
385 ret = sscanf(buf, "%x", &datum);
386 if (ret != 1)
387 return -EINVAL;
388
389 /* fetch all other Information Element parameters */
390 ret = mesh_get_default_parameters(dev, &defs);
391
392 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
393
394 /* transfer IE elements */
395 ie = (struct mrvl_meshie *) &cmd.data[0];
396 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
397 /* update value */
398 ie->val.mesh_capability = datum;
399
400 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
401 CMD_TYPE_MESH_SET_MESH_IE);
402 if (ret)
403 return ret;
404
405 return strlen(buf);
406}
407
408
409static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
410static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
411static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
412static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
413static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
414static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
415static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
416
417static struct attribute *boot_opts_attrs[] = {
418 &dev_attr_bootflag.attr,
419 &dev_attr_boottime.attr,
420 &dev_attr_channel.attr,
421 NULL
422};
423
424static struct attribute_group boot_opts_group = {
425 .name = "boot_options",
426 .attrs = boot_opts_attrs,
427};
428
429static struct attribute *mesh_ie_attrs[] = {
430 &dev_attr_mesh_id.attr,
431 &dev_attr_protocol_id.attr,
432 &dev_attr_metric_id.attr,
433 &dev_attr_capability.attr,
434 NULL
435};
436
437static struct attribute_group mesh_ie_group = {
438 .name = "mesh_ie",
439 .attrs = mesh_ie_attrs,
440};
441
442void lbs_persist_config_init(struct net_device *dev)
443{
444 int ret;
445 ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
446 ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
447}
448
449void lbs_persist_config_remove(struct net_device *dev)
450{
451 sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
452 sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
453}
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 05af7316f698..5749f22b296f 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -237,7 +237,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
237 /* Take the data rate from the rxpd structure 237 /* Take the data rate from the rxpd structure
238 * only if the rate is auto 238 * only if the rate is auto
239 */ 239 */
240 if (priv->auto_rate) 240 if (priv->enablehwauto)
241 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate); 241 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
242 242
243 lbs_compute_rssi(priv, p_rx_pd); 243 lbs_compute_rssi(priv, p_rx_pd);
@@ -383,7 +383,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
383 /* Take the data rate from the rxpd structure 383 /* Take the data rate from the rxpd structure
384 * only if the rate is auto 384 * only if the rate is auto
385 */ 385 */
386 if (priv->auto_rate) 386 if (priv->enablehwauto)
387 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate); 387 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
388 388
389 lbs_compute_rssi(priv, prxpd); 389 lbs_compute_rssi(priv, prxpd);
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 4031be420862..e0c2599da92f 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -6,6 +6,8 @@
6 6
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <asm/byteorder.h> 8#include <asm/byteorder.h>
9#include <linux/wireless.h>
10#include <net/ieee80211.h>
9 11
10struct ieeetypes_cfparamset { 12struct ieeetypes_cfparamset {
11 u8 elementid; 13 u8 elementid;
@@ -252,4 +254,32 @@ struct mrvlietypes_ledbhv {
252 struct led_bhv ledbhv[1]; 254 struct led_bhv ledbhv[1];
253} __attribute__ ((packed)); 255} __attribute__ ((packed));
254 256
257/* Meant to be packed as the value member of a struct ieee80211_info_element.
258 * Note that the len member of the ieee80211_info_element varies depending on
259 * the mesh_id_len */
260struct mrvl_meshie_val {
261 uint8_t oui[P80211_OUI_LEN];
262 uint8_t type;
263 uint8_t subtype;
264 uint8_t version;
265 uint8_t active_protocol_id;
266 uint8_t active_metric_id;
267 uint8_t mesh_capability;
268 uint8_t mesh_id_len;
269 uint8_t mesh_id[IW_ESSID_MAX_SIZE];
270} __attribute__ ((packed));
271
272struct mrvl_meshie {
273 struct ieee80211_info_element hdr;
274 struct mrvl_meshie_val val;
275} __attribute__ ((packed));
276
277struct mrvl_mesh_defaults {
278 __le32 bootflag;
279 uint8_t boottime;
280 uint8_t reserved;
281 __le16 channel;
282 struct mrvl_meshie meshie;
283} __attribute__ ((packed));
284
255#endif 285#endif
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 0973d015a520..8b3ed77860b3 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1002,7 +1002,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
1002 else if (priv->mode == IW_MODE_ADHOC) 1002 else if (priv->mode == IW_MODE_ADHOC)
1003 lbs_stop_adhoc_network(priv); 1003 lbs_stop_adhoc_network(priv);
1004 } 1004 }
1005 lbs_mesh_config(priv, 1, fwrq->m); 1005 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
1006 lbs_update_channel(priv); 1006 lbs_update_channel(priv);
1007 ret = 0; 1007 ret = 0;
1008 1008
@@ -1021,29 +1021,38 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1021 1021
1022 lbs_deb_enter(LBS_DEB_WEXT); 1022 lbs_deb_enter(LBS_DEB_WEXT);
1023 lbs_deb_wext("vwrq->value %d\n", vwrq->value); 1023 lbs_deb_wext("vwrq->value %d\n", vwrq->value);
1024 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
1025
1026 if (vwrq->fixed && vwrq->value == -1)
1027 goto out;
1024 1028
1025 /* Auto rate? */ 1029 /* Auto rate? */
1026 if (vwrq->value == -1) { 1030 priv->enablehwauto = !vwrq->fixed;
1027 priv->auto_rate = 1; 1031
1032 if (vwrq->value == -1)
1028 priv->cur_rate = 0; 1033 priv->cur_rate = 0;
1029 } else { 1034 else {
1030 if (vwrq->value % 100000) 1035 if (vwrq->value % 100000)
1031 goto out; 1036 goto out;
1032 1037
1038 new_rate = vwrq->value / 500000;
1039 priv->cur_rate = new_rate;
1040 /* the rest is only needed for lbs_set_data_rate() */
1033 memset(rates, 0, sizeof(rates)); 1041 memset(rates, 0, sizeof(rates));
1034 copy_active_data_rates(priv, rates); 1042 copy_active_data_rates(priv, rates);
1035 new_rate = vwrq->value / 500000;
1036 if (!memchr(rates, new_rate, sizeof(rates))) { 1043 if (!memchr(rates, new_rate, sizeof(rates))) {
1037 lbs_pr_alert("fixed data rate 0x%X out of range\n", 1044 lbs_pr_alert("fixed data rate 0x%X out of range\n",
1038 new_rate); 1045 new_rate);
1039 goto out; 1046 goto out;
1040 } 1047 }
1041
1042 priv->cur_rate = new_rate;
1043 priv->auto_rate = 0;
1044 } 1048 }
1045 1049
1046 ret = lbs_set_data_rate(priv, new_rate); 1050 /* Try the newer command first (Firmware Spec 5.1 and above) */
1051 ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);
1052
1053 /* Fallback to older version */
1054 if (ret)
1055 ret = lbs_set_data_rate(priv, new_rate);
1047 1056
1048out: 1057out:
1049 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1058 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
@@ -1060,7 +1069,7 @@ static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
1060 if (priv->connect_status == LBS_CONNECTED) { 1069 if (priv->connect_status == LBS_CONNECTED) {
1061 vwrq->value = priv->cur_rate * 500000; 1070 vwrq->value = priv->cur_rate * 500000;
1062 1071
1063 if (priv->auto_rate) 1072 if (priv->enablehwauto)
1064 vwrq->fixed = 0; 1073 vwrq->fixed = 0;
1065 else 1074 else
1066 vwrq->fixed = 1; 1075 vwrq->fixed = 1;
@@ -2011,7 +2020,8 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2011 priv->mesh_ssid_len = dwrq->length; 2020 priv->mesh_ssid_len = dwrq->length;
2012 } 2021 }
2013 2022
2014 lbs_mesh_config(priv, 1, priv->curbssparams.channel); 2023 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2024 priv->curbssparams.channel);
2015 out: 2025 out:
2016 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2026 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2017 return ret; 2027 return ret;
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 06d2c67f4c81..c6f27b9022f9 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -64,7 +64,7 @@ struct p54_common {
64 unsigned int tx_hdr_len; 64 unsigned int tx_hdr_len;
65 void *cached_vdcf; 65 void *cached_vdcf;
66 unsigned int fw_var; 66 unsigned int fw_var;
67 struct ieee80211_tx_queue_stats tx_stats; 67 struct ieee80211_tx_queue_stats tx_stats[4];
68}; 68};
69 69
70int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 70int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 63f9badf3f52..9f7224de6fd1 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -146,10 +146,10 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
146 146
147 if (priv->fw_var >= 0x300) { 147 if (priv->fw_var >= 0x300) {
148 /* Firmware supports QoS, use it! */ 148 /* Firmware supports QoS, use it! */
149 priv->tx_stats.data[0].limit = 3; 149 priv->tx_stats[0].limit = 3;
150 priv->tx_stats.data[1].limit = 4; 150 priv->tx_stats[1].limit = 4;
151 priv->tx_stats.data[2].limit = 3; 151 priv->tx_stats[2].limit = 3;
152 priv->tx_stats.data[3].limit = 1; 152 priv->tx_stats[3].limit = 1;
153 dev->queues = 4; 153 dev->queues = 4;
154 } 154 }
155} 155}
@@ -355,7 +355,7 @@ static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
355 struct ieee80211_rx_status rx_status = {0}; 355 struct ieee80211_rx_status rx_status = {0};
356 u16 freq = le16_to_cpu(hdr->freq); 356 u16 freq = le16_to_cpu(hdr->freq);
357 357
358 rx_status.ssi = hdr->rssi; 358 rx_status.signal = hdr->rssi;
359 /* XX correct? */ 359 /* XX correct? */
360 rx_status.rate_idx = hdr->rate & 0xf; 360 rx_status.rate_idx = hdr->rate & 0xf;
361 rx_status.freq = freq; 361 rx_status.freq = freq;
@@ -375,11 +375,8 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
375 struct p54_common *priv = dev->priv; 375 struct p54_common *priv = dev->priv;
376 int i; 376 int i;
377 377
378 /* ieee80211_start_queues is great if all queues are really empty.
379 * But, what if some are full? */
380
381 for (i = 0; i < dev->queues; i++) 378 for (i = 0; i < dev->queues; i++)
382 if (priv->tx_stats.data[i].len < priv->tx_stats.data[i].limit) 379 if (priv->tx_stats[i].len < priv->tx_stats[i].limit)
383 ieee80211_wake_queue(dev, i); 380 ieee80211_wake_queue(dev, i);
384} 381}
385 382
@@ -395,45 +392,42 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
395 u32 last_addr = priv->rx_start; 392 u32 last_addr = priv->rx_start;
396 393
397 while (entry != (struct sk_buff *)&priv->tx_queue) { 394 while (entry != (struct sk_buff *)&priv->tx_queue) {
398 range = (struct memrecord *)&entry->cb; 395 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
396 range = (void *)info->driver_data;
399 if (range->start_addr == addr) { 397 if (range->start_addr == addr) {
400 struct ieee80211_tx_status status;
401 struct p54_control_hdr *entry_hdr; 398 struct p54_control_hdr *entry_hdr;
402 struct p54_tx_control_allocdata *entry_data; 399 struct p54_tx_control_allocdata *entry_data;
403 int pad = 0; 400 int pad = 0;
404 401
405 if (entry->next != (struct sk_buff *)&priv->tx_queue) 402 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
406 freed = ((struct memrecord *)&entry->next->cb)->start_addr - last_addr; 403 struct ieee80211_tx_info *ni;
407 else 404 struct memrecord *mr;
405
406 ni = IEEE80211_SKB_CB(entry->next);
407 mr = (struct memrecord *)ni->driver_data;
408 freed = mr->start_addr - last_addr;
409 } else
408 freed = priv->rx_end - last_addr; 410 freed = priv->rx_end - last_addr;
409 411
410 last_addr = range->end_addr; 412 last_addr = range->end_addr;
411 __skb_unlink(entry, &priv->tx_queue); 413 __skb_unlink(entry, &priv->tx_queue);
412 if (!range->control) { 414 memset(&info->status, 0, sizeof(info->status));
413 kfree_skb(entry); 415 priv->tx_stats[skb_get_queue_mapping(skb)].len--;
414 break;
415 }
416 memset(&status, 0, sizeof(status));
417 memcpy(&status.control, range->control,
418 sizeof(status.control));
419 kfree(range->control);
420 priv->tx_stats.data[status.control.queue].len--;
421
422 entry_hdr = (struct p54_control_hdr *) entry->data; 416 entry_hdr = (struct p54_control_hdr *) entry->data;
423 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
424 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
425 pad = entry_data->align[0]; 419 pad = entry_data->align[0];
426 420
427 if (!(status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 421 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
428 if (!(payload->status & 0x01)) 422 if (!(payload->status & 0x01))
429 status.flags |= IEEE80211_TX_STATUS_ACK; 423 info->flags |= IEEE80211_TX_STAT_ACK;
430 else 424 else
431 status.excessive_retries = 1; 425 info->status.excessive_retries = 1;
432 } 426 }
433 status.retry_count = payload->retries - 1; 427 info->status.retry_count = payload->retries - 1;
434 status.ack_signal = le16_to_cpu(payload->ack_rssi); 428 info->status.ack_signal = le16_to_cpu(payload->ack_rssi);
435 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 429 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
436 ieee80211_tx_status_irqsafe(dev, entry, &status); 430 ieee80211_tx_status_irqsafe(dev, entry);
437 break; 431 break;
438 } else 432 } else
439 last_addr = range->end_addr; 433 last_addr = range->end_addr;
@@ -498,13 +492,11 @@ EXPORT_SYMBOL_GPL(p54_rx);
498 * allocated areas. 492 * allocated areas.
499 */ 493 */
500static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, 494static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
501 struct p54_control_hdr *data, u32 len, 495 struct p54_control_hdr *data, u32 len)
502 struct ieee80211_tx_control *control)
503{ 496{
504 struct p54_common *priv = dev->priv; 497 struct p54_common *priv = dev->priv;
505 struct sk_buff *entry = priv->tx_queue.next; 498 struct sk_buff *entry = priv->tx_queue.next;
506 struct sk_buff *target_skb = NULL; 499 struct sk_buff *target_skb = NULL;
507 struct memrecord *range;
508 u32 last_addr = priv->rx_start; 500 u32 last_addr = priv->rx_start;
509 u32 largest_hole = 0; 501 u32 largest_hole = 0;
510 u32 target_addr = priv->rx_start; 502 u32 target_addr = priv->rx_start;
@@ -516,7 +508,8 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
516 left = skb_queue_len(&priv->tx_queue); 508 left = skb_queue_len(&priv->tx_queue);
517 while (left--) { 509 while (left--) {
518 u32 hole_size; 510 u32 hole_size;
519 range = (struct memrecord *)&entry->cb; 511 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
512 struct memrecord *range = (void *)info->driver_data;
520 hole_size = range->start_addr - last_addr; 513 hole_size = range->start_addr - last_addr;
521 if (!target_skb && hole_size >= len) { 514 if (!target_skb && hole_size >= len) {
522 target_skb = entry->prev; 515 target_skb = entry->prev;
@@ -531,17 +524,18 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
531 target_skb = priv->tx_queue.prev; 524 target_skb = priv->tx_queue.prev;
532 largest_hole = max(largest_hole, priv->rx_end - last_addr - len); 525 largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
533 if (!skb_queue_empty(&priv->tx_queue)) { 526 if (!skb_queue_empty(&priv->tx_queue)) {
534 range = (struct memrecord *)&target_skb->cb; 527 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(target_skb);
528 struct memrecord *range = (void *)info->driver_data;
535 target_addr = range->end_addr; 529 target_addr = range->end_addr;
536 } 530 }
537 } else 531 } else
538 largest_hole = max(largest_hole, priv->rx_end - last_addr); 532 largest_hole = max(largest_hole, priv->rx_end - last_addr);
539 533
540 if (skb) { 534 if (skb) {
541 range = (struct memrecord *)&skb->cb; 535 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
536 struct memrecord *range = (void *)info->driver_data;
542 range->start_addr = target_addr; 537 range->start_addr = target_addr;
543 range->end_addr = target_addr + len; 538 range->end_addr = target_addr + len;
544 range->control = control;
545 __skb_queue_after(&priv->tx_queue, target_skb, skb); 539 __skb_queue_after(&priv->tx_queue, target_skb, skb);
546 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 540 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
547 sizeof(struct p54_control_hdr)) 541 sizeof(struct p54_control_hdr))
@@ -552,32 +546,27 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
552 data->req_id = cpu_to_le32(target_addr + 0x70); 546 data->req_id = cpu_to_le32(target_addr + 0x70);
553} 547}
554 548
555static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 549static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
556 struct ieee80211_tx_control *control)
557{ 550{
558 struct ieee80211_tx_queue_stats_data *current_queue; 551 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
552 struct ieee80211_tx_queue_stats *current_queue;
559 struct p54_common *priv = dev->priv; 553 struct p54_common *priv = dev->priv;
560 struct p54_control_hdr *hdr; 554 struct p54_control_hdr *hdr;
561 struct p54_tx_control_allocdata *txhdr; 555 struct p54_tx_control_allocdata *txhdr;
562 struct ieee80211_tx_control *control_copy;
563 size_t padding, len; 556 size_t padding, len;
564 u8 rate; 557 u8 rate;
565 558
566 current_queue = &priv->tx_stats.data[control->queue]; 559 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
567 if (unlikely(current_queue->len > current_queue->limit)) 560 if (unlikely(current_queue->len > current_queue->limit))
568 return NETDEV_TX_BUSY; 561 return NETDEV_TX_BUSY;
569 current_queue->len++; 562 current_queue->len++;
570 current_queue->count++; 563 current_queue->count++;
571 if (current_queue->len == current_queue->limit) 564 if (current_queue->len == current_queue->limit)
572 ieee80211_stop_queue(dev, control->queue); 565 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
573 566
574 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; 567 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
575 len = skb->len; 568 len = skb->len;
576 569
577 control_copy = kmalloc(sizeof(*control), GFP_ATOMIC);
578 if (control_copy)
579 memcpy(control_copy, control, sizeof(*control));
580
581 txhdr = (struct p54_tx_control_allocdata *) 570 txhdr = (struct p54_tx_control_allocdata *)
582 skb_push(skb, sizeof(*txhdr) + padding); 571 skb_push(skb, sizeof(*txhdr) + padding);
583 hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr)); 572 hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr));
@@ -587,35 +576,37 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
587 else 576 else
588 hdr->magic1 = cpu_to_le16(0x0010); 577 hdr->magic1 = cpu_to_le16(0x0010);
589 hdr->len = cpu_to_le16(len); 578 hdr->len = cpu_to_le16(len);
590 hdr->type = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 0 : cpu_to_le16(1); 579 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1);
591 hdr->retry1 = hdr->retry2 = control->retry_limit; 580 hdr->retry1 = hdr->retry2 = info->control.retry_limit;
592 p54_assign_address(dev, skb, hdr, skb->len, control_copy);
593 581
594 memset(txhdr->wep_key, 0x0, 16); 582 memset(txhdr->wep_key, 0x0, 16);
595 txhdr->padding = 0; 583 txhdr->padding = 0;
596 txhdr->padding2 = 0; 584 txhdr->padding2 = 0;
597 585
598 /* TODO: add support for alternate retry TX rates */ 586 /* TODO: add support for alternate retry TX rates */
599 rate = control->tx_rate->hw_value; 587 rate = ieee80211_get_tx_rate(dev, info)->hw_value;
600 if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE) 588 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
601 rate |= 0x10; 589 rate |= 0x10;
602 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 590 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
603 rate |= 0x40; 591 rate |= 0x40;
604 else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 592 else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
605 rate |= 0x20; 593 rate |= 0x20;
606 memset(txhdr->rateset, rate, 8); 594 memset(txhdr->rateset, rate, 8);
607 txhdr->wep_key_present = 0; 595 txhdr->wep_key_present = 0;
608 txhdr->wep_key_len = 0; 596 txhdr->wep_key_len = 0;
609 txhdr->frame_type = cpu_to_le32(control->queue + 4); 597 txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4);
610 txhdr->magic4 = 0; 598 txhdr->magic4 = 0;
611 txhdr->antenna = (control->antenna_sel_tx == 0) ? 599 txhdr->antenna = (info->antenna_sel_tx == 0) ?
612 2 : control->antenna_sel_tx - 1; 600 2 : info->antenna_sel_tx - 1;
613 txhdr->output_power = 0x7f; // HW Maximum 601 txhdr->output_power = 0x7f; // HW Maximum
614 txhdr->magic5 = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 602 txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
615 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); 603 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23));
616 if (padding) 604 if (padding)
617 txhdr->align[0] = padding; 605 txhdr->align[0] = padding;
618 606
607 /* modifies skb->cb and with it info, so must be last! */
608 p54_assign_address(dev, skb, hdr, skb->len);
609
619 priv->tx(dev, hdr, skb->len, 0); 610 priv->tx(dev, hdr, skb->len, 0);
620 return 0; 611 return 0;
621} 612}
@@ -638,7 +629,7 @@ static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
638 filter = (struct p54_tx_control_filter *) hdr->data; 629 filter = (struct p54_tx_control_filter *) hdr->data;
639 hdr->magic1 = cpu_to_le16(0x8001); 630 hdr->magic1 = cpu_to_le16(0x8001);
640 hdr->len = cpu_to_le16(sizeof(*filter)); 631 hdr->len = cpu_to_le16(sizeof(*filter));
641 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter), NULL); 632 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter));
642 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); 633 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET);
643 634
644 filter->filter_type = cpu_to_le16(filter_type); 635 filter->filter_type = cpu_to_le16(filter_type);
@@ -682,7 +673,7 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
682 hdr->magic1 = cpu_to_le16(0x8001); 673 hdr->magic1 = cpu_to_le16(0x8001);
683 hdr->len = cpu_to_le16(sizeof(*chan)); 674 hdr->len = cpu_to_le16(sizeof(*chan));
684 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); 675 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
685 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len, NULL); 676 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len);
686 677
687 chan->magic1 = cpu_to_le16(0x1); 678 chan->magic1 = cpu_to_le16(0x1);
688 chan->magic2 = cpu_to_le16(0x0); 679 chan->magic2 = cpu_to_le16(0x0);
@@ -755,7 +746,7 @@ static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act)
755 hdr->magic1 = cpu_to_le16(0x8001); 746 hdr->magic1 = cpu_to_le16(0x8001);
756 hdr->len = cpu_to_le16(sizeof(*led)); 747 hdr->len = cpu_to_le16(sizeof(*led));
757 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED); 748 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED);
758 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led), NULL); 749 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led));
759 750
760 led = (struct p54_tx_control_led *) hdr->data; 751 led = (struct p54_tx_control_led *) hdr->data;
761 led->mode = cpu_to_le16(mode); 752 led->mode = cpu_to_le16(mode);
@@ -805,7 +796,7 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
805 796
806 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; 797 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len;
807 798
808 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf), NULL); 799 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf));
809 800
810 vdcf = (struct p54_tx_control_vdcf *) hdr->data; 801 vdcf = (struct p54_tx_control_vdcf *) hdr->data;
811 802
@@ -841,12 +832,8 @@ static void p54_stop(struct ieee80211_hw *dev)
841{ 832{
842 struct p54_common *priv = dev->priv; 833 struct p54_common *priv = dev->priv;
843 struct sk_buff *skb; 834 struct sk_buff *skb;
844 while ((skb = skb_dequeue(&priv->tx_queue))) { 835 while ((skb = skb_dequeue(&priv->tx_queue)))
845 struct memrecord *range = (struct memrecord *)&skb->cb;
846 if (range->control)
847 kfree(range->control);
848 kfree_skb(skb); 836 kfree_skb(skb);
849 }
850 priv->stop(dev); 837 priv->stop(dev);
851 priv->mode = IEEE80211_IF_TYPE_INVALID; 838 priv->mode = IEEE80211_IF_TYPE_INVALID;
852} 839}
@@ -936,7 +923,7 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
936 } 923 }
937} 924}
938 925
939static int p54_conf_tx(struct ieee80211_hw *dev, int queue, 926static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
940 const struct ieee80211_tx_queue_params *params) 927 const struct ieee80211_tx_queue_params *params)
941{ 928{
942 struct p54_common *priv = dev->priv; 929 struct p54_common *priv = dev->priv;
@@ -945,7 +932,7 @@ static int p54_conf_tx(struct ieee80211_hw *dev, int queue,
945 vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *) 932 vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *)
946 ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data); 933 ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data);
947 934
948 if ((params) && !((queue < 0) || (queue > 4))) { 935 if ((params) && !(queue > 4)) {
949 P54_SET_QUEUE(vdcf->queue[queue], params->aifs, 936 P54_SET_QUEUE(vdcf->queue[queue], params->aifs,
950 params->cw_min, params->cw_max, params->txop); 937 params->cw_min, params->cw_max, params->txop);
951 } else 938 } else
@@ -967,11 +954,8 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
967 struct ieee80211_tx_queue_stats *stats) 954 struct ieee80211_tx_queue_stats *stats)
968{ 955{
969 struct p54_common *priv = dev->priv; 956 struct p54_common *priv = dev->priv;
970 unsigned int i;
971 957
972 for (i = 0; i < dev->queues; i++) 958 memcpy(stats, &priv->tx_stats, sizeof(stats[0]) * dev->queues);
973 memcpy(&stats->data[i], &priv->tx_stats.data[i],
974 sizeof(stats->data[i]));
975 959
976 return 0; 960 return 0;
977} 961}
@@ -1004,11 +988,12 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1004 skb_queue_head_init(&priv->tx_queue); 988 skb_queue_head_init(&priv->tx_queue);
1005 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 989 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
1006 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 990 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
1007 IEEE80211_HW_RX_INCLUDES_FCS; 991 IEEE80211_HW_RX_INCLUDES_FCS |
992 IEEE80211_HW_SIGNAL_UNSPEC;
1008 dev->channel_change_time = 1000; /* TODO: find actual value */ 993 dev->channel_change_time = 1000; /* TODO: find actual value */
1009 dev->max_rssi = 127; 994 dev->max_signal = 127;
1010 995
1011 priv->tx_stats.data[0].limit = 5; 996 priv->tx_stats[0].limit = 5;
1012 dev->queues = 1; 997 dev->queues = 1;
1013 998
1014 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 999 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index c15b56e1d75e..2245fcce92dc 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -152,7 +152,6 @@ struct pda_pa_curve_data {
152struct memrecord { 152struct memrecord {
153 u32 start_addr; 153 u32 start_addr;
154 u32 end_addr; 154 u32 end_addr;
155 struct ieee80211_tx_control *control;
156}; 155};
157 156
158struct p54_eeprom_lm86 { 157struct p54_eeprom_lm86 {
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index fa527723fbe0..7dd4add4bf4e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -665,7 +665,7 @@ static int p54p_resume(struct pci_dev *pdev)
665 665
666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) {
667 p54p_open(dev); 667 p54p_open(dev);
668 ieee80211_start_queues(dev); 668 ieee80211_wake_queues(dev);
669 } 669 }
670 670
671 return 0; 671 return 0;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 18c9931e3267..3954897d0678 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1135,7 +1135,7 @@ static int rndis_iw_get_range(struct net_device *dev,
1135 /* fill in 802.11g rates */ 1135 /* fill in 802.11g rates */
1136 if (has_80211g_rates) { 1136 if (has_80211g_rates) {
1137 num = range->num_bitrates; 1137 num = range->num_bitrates;
1138 for (i = 0; i < sizeof(rates_80211g); i++) { 1138 for (i = 0; i < ARRAY_SIZE(rates_80211g); i++) {
1139 for (j = 0; j < num; j++) { 1139 for (j = 0; j < num; j++) {
1140 if (range->bitrate[j] == 1140 if (range->bitrate[j] ==
1141 rates_80211g[i] * 1000000) 1141 rates_80211g[i] * 1000000)
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ab1029e79884..0ace76149422 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -5,12 +5,16 @@ config RT2X00
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
7 7
8 These drivers will make use of the mac80211 stack. 8 These drivers make use of the mac80211 stack.
9 9
10 When building one of the individual drivers, the rt2x00 library 10 When building one of the individual drivers, the rt2x00 library
11 will also be created. That library (when the driver is built as 11 will also be created. That library (when the driver is built as
12 a module) will be called "rt2x00lib.ko". 12 a module) will be called "rt2x00lib.ko".
13 13
14 Additionally PCI and USB libraries will also be build depending
15 on the types of drivers being selected, these libraries will be
16 called "rt2x00pci.ko" and "rt2x00usb.ko".
17
14if RT2X00 18if RT2X00
15 19
16config RT2X00_LIB 20config RT2X00_LIB
@@ -40,26 +44,27 @@ config RT2X00_LIB_LEDS
40 depends on RT2X00_LIB 44 depends on RT2X00_LIB
41 45
42config RT2400PCI 46config RT2400PCI
43 tristate "Ralink rt2400 pci/pcmcia support" 47 tristate "Ralink rt2400 (PCI/PCMCIA) support"
44 depends on PCI 48 depends on PCI
45 select RT2X00_LIB_PCI 49 select RT2X00_LIB_PCI
46 select EEPROM_93CX6 50 select EEPROM_93CX6
47 ---help--- 51 ---help---
48 This is an experimental driver for the Ralink rt2400 wireless chip. 52 This adds support for rt2400 wireless chipset family.
53 Supported chips: RT2460.
49 54
50 When compiled as a module, this driver will be called "rt2400pci.ko". 55 When compiled as a module, this driver will be called "rt2400pci.ko".
51 56
52config RT2400PCI_RFKILL 57config RT2400PCI_RFKILL
53 bool "RT2400 rfkill support" 58 bool "Ralink rt2400 rfkill support"
54 depends on RT2400PCI 59 depends on RT2400PCI
55 select RT2X00_LIB_RFKILL 60 select RT2X00_LIB_RFKILL
56 ---help--- 61 ---help---
57 This adds support for integrated rt2400 devices that feature a 62 This adds support for integrated rt2400 hardware that features a
58 hardware button to control the radio state. 63 hardware button to control the radio state.
59 This feature depends on the RF switch subsystem rfkill. 64 This feature depends on the RF switch subsystem rfkill.
60 65
61config RT2400PCI_LEDS 66config RT2400PCI_LEDS
62 bool "RT2400 leds support" 67 bool "Ralink rt2400 leds support"
63 depends on RT2400PCI 68 depends on RT2400PCI
64 select LEDS_CLASS 69 select LEDS_CLASS
65 select RT2X00_LIB_LEDS 70 select RT2X00_LIB_LEDS
@@ -67,26 +72,27 @@ config RT2400PCI_LEDS
67 This adds support for led triggers provided my mac80211. 72 This adds support for led triggers provided my mac80211.
68 73
69config RT2500PCI 74config RT2500PCI
70 tristate "Ralink rt2500 pci/pcmcia support" 75 tristate "Ralink rt2500 (PCI/PCMCIA) support"
71 depends on PCI 76 depends on PCI
72 select RT2X00_LIB_PCI 77 select RT2X00_LIB_PCI
73 select EEPROM_93CX6 78 select EEPROM_93CX6
74 ---help--- 79 ---help---
75 This is an experimental driver for the Ralink rt2500 wireless chip. 80 This adds support for rt2500 wireless chipset family.
81 Supported chips: RT2560.
76 82
77 When compiled as a module, this driver will be called "rt2500pci.ko". 83 When compiled as a module, this driver will be called "rt2500pci.ko".
78 84
79config RT2500PCI_RFKILL 85config RT2500PCI_RFKILL
80 bool "RT2500 rfkill support" 86 bool "Ralink rt2500 rfkill support"
81 depends on RT2500PCI 87 depends on RT2500PCI
82 select RT2X00_LIB_RFKILL 88 select RT2X00_LIB_RFKILL
83 ---help--- 89 ---help---
84 This adds support for integrated rt2500 devices that feature a 90 This adds support for integrated rt2500 hardware that features a
85 hardware button to control the radio state. 91 hardware button to control the radio state.
86 This feature depends on the RF switch subsystem rfkill. 92 This feature depends on the RF switch subsystem rfkill.
87 93
88config RT2500PCI_LEDS 94config RT2500PCI_LEDS
89 bool "RT2500 leds support" 95 bool "Ralink rt2500 leds support"
90 depends on RT2500PCI 96 depends on RT2500PCI
91 select LEDS_CLASS 97 select LEDS_CLASS
92 select RT2X00_LIB_LEDS 98 select RT2X00_LIB_LEDS
@@ -94,28 +100,29 @@ config RT2500PCI_LEDS
94 This adds support for led triggers provided my mac80211. 100 This adds support for led triggers provided my mac80211.
95 101
96config RT61PCI 102config RT61PCI
97 tristate "Ralink rt61 pci/pcmcia support" 103 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
98 depends on PCI 104 depends on PCI
99 select RT2X00_LIB_PCI 105 select RT2X00_LIB_PCI
100 select RT2X00_LIB_FIRMWARE 106 select RT2X00_LIB_FIRMWARE
101 select CRC_ITU_T 107 select CRC_ITU_T
102 select EEPROM_93CX6 108 select EEPROM_93CX6
103 ---help--- 109 ---help---
104 This is an experimental driver for the Ralink rt61 wireless chip. 110 This adds support for rt2501 wireless chipset family.
111 Supported chips: RT2561, RT2561S & RT2661.
105 112
106 When compiled as a module, this driver will be called "rt61pci.ko". 113 When compiled as a module, this driver will be called "rt61pci.ko".
107 114
108config RT61PCI_RFKILL 115config RT61PCI_RFKILL
109 bool "RT61 rfkill support" 116 bool "Ralink rt2501/rt61 rfkill support"
110 depends on RT61PCI 117 depends on RT61PCI
111 select RT2X00_LIB_RFKILL 118 select RT2X00_LIB_RFKILL
112 ---help--- 119 ---help---
113 This adds support for integrated rt61 devices that feature a 120 This adds support for integrated rt61 hardware that features a
114 hardware button to control the radio state. 121 hardware button to control the radio state.
115 This feature depends on the RF switch subsystem rfkill. 122 This feature depends on the RF switch subsystem rfkill.
116 123
117config RT61PCI_LEDS 124config RT61PCI_LEDS
118 bool "RT61 leds support" 125 bool "Ralink rt2501/rt61 leds support"
119 depends on RT61PCI 126 depends on RT61PCI
120 select LEDS_CLASS 127 select LEDS_CLASS
121 select RT2X00_LIB_LEDS 128 select RT2X00_LIB_LEDS
@@ -123,16 +130,17 @@ config RT61PCI_LEDS
123 This adds support for led triggers provided my mac80211. 130 This adds support for led triggers provided my mac80211.
124 131
125config RT2500USB 132config RT2500USB
126 tristate "Ralink rt2500 usb support" 133 tristate "Ralink rt2500 (USB) support"
127 depends on USB 134 depends on USB
128 select RT2X00_LIB_USB 135 select RT2X00_LIB_USB
129 ---help--- 136 ---help---
130 This is an experimental driver for the Ralink rt2500 wireless chip. 137 This adds support for rt2500 wireless chipset family.
138 Supported chips: RT2571 & RT2572.
131 139
132 When compiled as a module, this driver will be called "rt2500usb.ko". 140 When compiled as a module, this driver will be called "rt2500usb.ko".
133 141
134config RT2500USB_LEDS 142config RT2500USB_LEDS
135 bool "RT2500 leds support" 143 bool "Ralink rt2500 leds support"
136 depends on RT2500USB 144 depends on RT2500USB
137 select LEDS_CLASS 145 select LEDS_CLASS
138 select RT2X00_LIB_LEDS 146 select RT2X00_LIB_LEDS
@@ -140,18 +148,19 @@ config RT2500USB_LEDS
140 This adds support for led triggers provided my mac80211. 148 This adds support for led triggers provided my mac80211.
141 149
142config RT73USB 150config RT73USB
143 tristate "Ralink rt73 usb support" 151 tristate "Ralink rt2501/rt73 (USB) support"
144 depends on USB 152 depends on USB
145 select RT2X00_LIB_USB 153 select RT2X00_LIB_USB
146 select RT2X00_LIB_FIRMWARE 154 select RT2X00_LIB_FIRMWARE
147 select CRC_ITU_T 155 select CRC_ITU_T
148 ---help--- 156 ---help---
149 This is an experimental driver for the Ralink rt73 wireless chip. 157 This adds support for rt2501 wireless chipset family.
158 Supported chips: RT2571W, RT2573 & RT2671.
150 159
151 When compiled as a module, this driver will be called "rt73usb.ko". 160 When compiled as a module, this driver will be called "rt73usb.ko".
152 161
153config RT73USB_LEDS 162config RT73USB_LEDS
154 bool "RT73 leds support" 163 bool "Ralink rt2501/rt73 leds support"
155 depends on RT73USB 164 depends on RT73USB
156 select LEDS_CLASS 165 select LEDS_CLASS
157 select RT2X00_LIB_LEDS 166 select RT2X00_LIB_LEDS
@@ -164,7 +173,7 @@ config RT2X00_LIB_DEBUGFS
164 ---help--- 173 ---help---
165 Enable creation of debugfs files for the rt2x00 drivers. 174 Enable creation of debugfs files for the rt2x00 drivers.
166 These debugfs files support both reading and writing of the 175 These debugfs files support both reading and writing of the
167 most important register types of the rt2x00 devices. 176 most important register types of the rt2x00 hardware.
168 177
169config RT2X00_DEBUG 178config RT2X00_DEBUG
170 bool "Ralink debug output" 179 bool "Ralink debug output"
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 560b9c73c0b9..900140d3b304 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -620,48 +620,38 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev)
620static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 620static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
621 struct queue_entry *entry) 621 struct queue_entry *entry)
622{ 622{
623 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 623 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
624 u32 word; 624 u32 word;
625 625
626 rt2x00_desc_read(priv_rx->desc, 2, &word); 626 rt2x00_desc_read(entry_priv->desc, 2, &word);
627 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, 627 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH,
628 entry->queue->data_size); 628 entry->queue->data_size);
629 rt2x00_desc_write(priv_rx->desc, 2, word); 629 rt2x00_desc_write(entry_priv->desc, 2, word);
630 630
631 rt2x00_desc_read(priv_rx->desc, 1, &word); 631 rt2x00_desc_read(entry_priv->desc, 1, &word);
632 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma); 632 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
633 rt2x00_desc_write(priv_rx->desc, 1, word); 633 rt2x00_desc_write(entry_priv->desc, 1, word);
634 634
635 rt2x00_desc_read(priv_rx->desc, 0, &word); 635 rt2x00_desc_read(entry_priv->desc, 0, &word);
636 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 636 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
637 rt2x00_desc_write(priv_rx->desc, 0, word); 637 rt2x00_desc_write(entry_priv->desc, 0, word);
638} 638}
639 639
640static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev, 640static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev,
641 struct queue_entry *entry) 641 struct queue_entry *entry)
642{ 642{
643 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 643 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
644 u32 word; 644 u32 word;
645 645
646 rt2x00_desc_read(priv_tx->desc, 1, &word); 646 rt2x00_desc_read(entry_priv->desc, 0, &word);
647 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
648 rt2x00_desc_write(priv_tx->desc, 1, word);
649
650 rt2x00_desc_read(priv_tx->desc, 2, &word);
651 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH,
652 entry->queue->data_size);
653 rt2x00_desc_write(priv_tx->desc, 2, word);
654
655 rt2x00_desc_read(priv_tx->desc, 0, &word);
656 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 647 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
657 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 648 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
658 rt2x00_desc_write(priv_tx->desc, 0, word); 649 rt2x00_desc_write(entry_priv->desc, 0, word);
659} 650}
660 651
661static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) 652static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
662{ 653{
663 struct queue_entry_priv_pci_rx *priv_rx; 654 struct queue_entry_priv_pci *entry_priv;
664 struct queue_entry_priv_pci_tx *priv_tx;
665 u32 reg; 655 u32 reg;
666 656
667 /* 657 /*
@@ -674,28 +664,28 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
674 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 664 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
675 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 665 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
676 666
677 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 667 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
678 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 668 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
679 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 669 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
680 priv_tx->desc_dma); 670 entry_priv->desc_dma);
681 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 671 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
682 672
683 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 673 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
684 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 674 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
685 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 675 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
686 priv_tx->desc_dma); 676 entry_priv->desc_dma);
687 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 677 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
688 678
689 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data; 679 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
690 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 680 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
691 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 681 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
692 priv_tx->desc_dma); 682 entry_priv->desc_dma);
693 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 683 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
694 684
695 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data; 685 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
696 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 686 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
697 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 687 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
698 priv_tx->desc_dma); 688 entry_priv->desc_dma);
699 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 689 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
700 690
701 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 691 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
@@ -703,9 +693,10 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
703 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 693 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
704 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 694 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
705 695
706 priv_rx = rt2x00dev->rx->entries[0].priv_data; 696 entry_priv = rt2x00dev->rx->entries[0].priv_data;
707 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 697 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
708 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma); 698 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
699 entry_priv->desc_dma);
709 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 700 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
710 701
711 return 0; 702 return 0;
@@ -1001,17 +992,22 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1001 */ 992 */
1002static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 993static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1003 struct sk_buff *skb, 994 struct sk_buff *skb,
1004 struct txentry_desc *txdesc, 995 struct txentry_desc *txdesc)
1005 struct ieee80211_tx_control *control)
1006{ 996{
1007 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 997 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
998 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1008 __le32 *txd = skbdesc->desc; 999 __le32 *txd = skbdesc->desc;
1009 u32 word; 1000 u32 word;
1010 1001
1011 /* 1002 /*
1012 * Start writing the descriptor words. 1003 * Start writing the descriptor words.
1013 */ 1004 */
1005 rt2x00_desc_read(entry_priv->desc, 1, &word);
1006 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
1007 rt2x00_desc_write(entry_priv->desc, 1, word);
1008
1014 rt2x00_desc_read(txd, 2, &word); 1009 rt2x00_desc_read(txd, 2, &word);
1010 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skbdesc->data_len);
1015 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skbdesc->data_len); 1011 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skbdesc->data_len);
1016 rt2x00_desc_write(txd, 2, word); 1012 rt2x00_desc_write(txd, 2, word);
1017 1013
@@ -1046,8 +1042,7 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1046 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); 1042 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1047 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1043 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1048 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1044 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1049 !!(control->flags & 1045 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1050 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1051 rt2x00_desc_write(txd, 0, word); 1046 rt2x00_desc_write(txd, 0, word);
1052} 1047}
1053 1048
@@ -1055,11 +1050,11 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1055 * TX data initialization 1050 * TX data initialization
1056 */ 1051 */
1057static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1052static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1058 const unsigned int queue) 1053 const enum data_queue_qid queue)
1059{ 1054{
1060 u32 reg; 1055 u32 reg;
1061 1056
1062 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1057 if (queue == QID_BEACON) {
1063 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1058 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1064 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1059 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1065 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 1060 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
@@ -1071,12 +1066,9 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1071 } 1066 }
1072 1067
1073 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1068 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1074 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1069 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1075 (queue == IEEE80211_TX_QUEUE_DATA0)); 1070 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
1076 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1071 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
1077 (queue == IEEE80211_TX_QUEUE_DATA1));
1078 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1079 (queue == RT2X00_BCN_QUEUE_ATIM));
1080 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1072 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1081} 1073}
1082 1074
@@ -1086,16 +1078,15 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1086static void rt2400pci_fill_rxdone(struct queue_entry *entry, 1078static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1087 struct rxdone_entry_desc *rxdesc) 1079 struct rxdone_entry_desc *rxdesc)
1088{ 1080{
1089 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1081 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1090 u32 word0; 1082 u32 word0;
1091 u32 word2; 1083 u32 word2;
1092 u32 word3; 1084 u32 word3;
1093 1085
1094 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1086 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1095 rt2x00_desc_read(priv_rx->desc, 2, &word2); 1087 rt2x00_desc_read(entry_priv->desc, 2, &word2);
1096 rt2x00_desc_read(priv_rx->desc, 3, &word3); 1088 rt2x00_desc_read(entry_priv->desc, 3, &word3);
1097 1089
1098 rxdesc->flags = 0;
1099 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1090 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1100 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1091 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1101 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1092 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1111,7 +1102,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1111 entry->queue->rt2x00dev->rssi_offset; 1102 entry->queue->rt2x00dev->rssi_offset;
1112 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1103 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1113 1104
1114 rxdesc->dev_flags = RXDONE_SIGNAL_PLCP; 1105 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1115 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1106 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1116 rxdesc->dev_flags |= RXDONE_MY_BSS; 1107 rxdesc->dev_flags |= RXDONE_MY_BSS;
1117} 1108}
@@ -1120,18 +1111,18 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1120 * Interrupt functions. 1111 * Interrupt functions.
1121 */ 1112 */
1122static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, 1113static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1123 const enum ieee80211_tx_queue queue_idx) 1114 const enum data_queue_qid queue_idx)
1124{ 1115{
1125 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1116 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1126 struct queue_entry_priv_pci_tx *priv_tx; 1117 struct queue_entry_priv_pci *entry_priv;
1127 struct queue_entry *entry; 1118 struct queue_entry *entry;
1128 struct txdone_entry_desc txdesc; 1119 struct txdone_entry_desc txdesc;
1129 u32 word; 1120 u32 word;
1130 1121
1131 while (!rt2x00queue_empty(queue)) { 1122 while (!rt2x00queue_empty(queue)) {
1132 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 1123 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1133 priv_tx = entry->priv_data; 1124 entry_priv = entry->priv_data;
1134 rt2x00_desc_read(priv_tx->desc, 0, &word); 1125 rt2x00_desc_read(entry_priv->desc, 0, &word);
1135 1126
1136 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1127 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1137 !rt2x00_get_field32(word, TXD_W0_VALID)) 1128 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1140,7 +1131,18 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1140 /* 1131 /*
1141 * Obtain the status about this packet. 1132 * Obtain the status about this packet.
1142 */ 1133 */
1143 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT); 1134 txdesc.flags = 0;
1135 switch (rt2x00_get_field32(word, TXD_W0_RESULT)) {
1136 case 0: /* Success */
1137 case 1: /* Success with retry */
1138 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1139 break;
1140 case 2: /* Failure, excessive retries */
1141 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1142 /* Don't break, this is a failed frame! */
1143 default: /* Failure */
1144 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1145 }
1144 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1146 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1145 1147
1146 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1148 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
@@ -1187,19 +1189,19 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1187 * 3 - Atim ring transmit done interrupt. 1189 * 3 - Atim ring transmit done interrupt.
1188 */ 1190 */
1189 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1191 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1190 rt2400pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 1192 rt2400pci_txdone(rt2x00dev, QID_ATIM);
1191 1193
1192 /* 1194 /*
1193 * 4 - Priority ring transmit done interrupt. 1195 * 4 - Priority ring transmit done interrupt.
1194 */ 1196 */
1195 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1197 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1196 rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); 1198 rt2400pci_txdone(rt2x00dev, QID_AC_BE);
1197 1199
1198 /* 1200 /*
1199 * 5 - Tx ring transmit done interrupt. 1201 * 5 - Tx ring transmit done interrupt.
1200 */ 1202 */
1201 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1203 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1202 rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); 1204 rt2400pci_txdone(rt2x00dev, QID_AC_BK);
1203 1205
1204 return IRQ_HANDLED; 1206 return IRQ_HANDLED;
1205} 1207}
@@ -1364,11 +1366,9 @@ static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1364 /* 1366 /*
1365 * Initialize all hw fields. 1367 * Initialize all hw fields.
1366 */ 1368 */
1367 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1369 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1370 IEEE80211_HW_SIGNAL_DBM;
1368 rt2x00dev->hw->extra_tx_headroom = 0; 1371 rt2x00dev->hw->extra_tx_headroom = 0;
1369 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1370 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1371 rt2x00dev->hw->queues = 2;
1372 1372
1373 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 1373 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
1374 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1374 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1445,8 +1445,7 @@ static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw,
1445 return 0; 1445 return 0;
1446} 1446}
1447 1447
1448static int rt2400pci_conf_tx(struct ieee80211_hw *hw, 1448static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
1449 int queue,
1450 const struct ieee80211_tx_queue_params *params) 1449 const struct ieee80211_tx_queue_params *params)
1451{ 1450{
1452 struct rt2x00_dev *rt2x00dev = hw->priv; 1451 struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1456,7 +1455,7 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
1456 * per queue. So by default we only configure the TX queue, 1455 * per queue. So by default we only configure the TX queue,
1457 * and ignore all other configurations. 1456 * and ignore all other configurations.
1458 */ 1457 */
1459 if (queue != IEEE80211_TX_QUEUE_DATA0) 1458 if (queue != 0)
1460 return -EINVAL; 1459 return -EINVAL;
1461 1460
1462 if (rt2x00mac_conf_tx(hw, queue, params)) 1461 if (rt2x00mac_conf_tx(hw, queue, params))
@@ -1485,18 +1484,27 @@ static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw)
1485 return tsf; 1484 return tsf;
1486} 1485}
1487 1486
1488static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1487static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1489 struct ieee80211_tx_control *control)
1490{ 1488{
1491 struct rt2x00_dev *rt2x00dev = hw->priv; 1489 struct rt2x00_dev *rt2x00dev = hw->priv;
1492 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1490 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1493 struct queue_entry_priv_pci_tx *priv_tx; 1491 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1492 struct queue_entry_priv_pci *entry_priv;
1494 struct skb_frame_desc *skbdesc; 1493 struct skb_frame_desc *skbdesc;
1494 struct txentry_desc txdesc;
1495 u32 reg; 1495 u32 reg;
1496 1496
1497 if (unlikely(!intf->beacon)) 1497 if (unlikely(!intf->beacon))
1498 return -ENOBUFS; 1498 return -ENOBUFS;
1499 priv_tx = intf->beacon->priv_data; 1499 entry_priv = intf->beacon->priv_data;
1500
1501 /*
1502 * Copy all TX descriptor information into txdesc,
1503 * after that we are free to use the skb->cb array
1504 * for our information.
1505 */
1506 intf->beacon->skb = skb;
1507 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1500 1508
1501 /* 1509 /*
1502 * Fill in skb descriptor 1510 * Fill in skb descriptor
@@ -1506,7 +1514,7 @@ static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1506 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 1514 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1507 skbdesc->data = skb->data; 1515 skbdesc->data = skb->data;
1508 skbdesc->data_len = skb->len; 1516 skbdesc->data_len = skb->len;
1509 skbdesc->desc = priv_tx->desc; 1517 skbdesc->desc = entry_priv->desc;
1510 skbdesc->desc_len = intf->beacon->queue->desc_size; 1518 skbdesc->desc_len = intf->beacon->queue->desc_size;
1511 skbdesc->entry = intf->beacon; 1519 skbdesc->entry = intf->beacon;
1512 1520
@@ -1521,20 +1529,13 @@ static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1521 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1529 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1522 1530
1523 /* 1531 /*
1524 * mac80211 doesn't provide the control->queue variable
1525 * for beacons. Set our own queue identification so
1526 * it can be used during descriptor initialization.
1527 */
1528 control->queue = RT2X00_BCN_QUEUE_BEACON;
1529 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1530
1531 /*
1532 * Enable beacon generation. 1532 * Enable beacon generation.
1533 * Write entire beacon with descriptor to register, 1533 * Write entire beacon with descriptor to register,
1534 * and kick the beacon generator. 1534 * and kick the beacon generator.
1535 */ 1535 */
1536 memcpy(priv_tx->data, skb->data, skb->len); 1536 memcpy(entry_priv->data, skb->data, skb->len);
1537 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 1537 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1538 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
1538 1539
1539 return 0; 1540 return 0;
1540} 1541}
@@ -1593,28 +1594,28 @@ static const struct data_queue_desc rt2400pci_queue_rx = {
1593 .entry_num = RX_ENTRIES, 1594 .entry_num = RX_ENTRIES,
1594 .data_size = DATA_FRAME_SIZE, 1595 .data_size = DATA_FRAME_SIZE,
1595 .desc_size = RXD_DESC_SIZE, 1596 .desc_size = RXD_DESC_SIZE,
1596 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 1597 .priv_size = sizeof(struct queue_entry_priv_pci),
1597}; 1598};
1598 1599
1599static const struct data_queue_desc rt2400pci_queue_tx = { 1600static const struct data_queue_desc rt2400pci_queue_tx = {
1600 .entry_num = TX_ENTRIES, 1601 .entry_num = TX_ENTRIES,
1601 .data_size = DATA_FRAME_SIZE, 1602 .data_size = DATA_FRAME_SIZE,
1602 .desc_size = TXD_DESC_SIZE, 1603 .desc_size = TXD_DESC_SIZE,
1603 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1604 .priv_size = sizeof(struct queue_entry_priv_pci),
1604}; 1605};
1605 1606
1606static const struct data_queue_desc rt2400pci_queue_bcn = { 1607static const struct data_queue_desc rt2400pci_queue_bcn = {
1607 .entry_num = BEACON_ENTRIES, 1608 .entry_num = BEACON_ENTRIES,
1608 .data_size = MGMT_FRAME_SIZE, 1609 .data_size = MGMT_FRAME_SIZE,
1609 .desc_size = TXD_DESC_SIZE, 1610 .desc_size = TXD_DESC_SIZE,
1610 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1611 .priv_size = sizeof(struct queue_entry_priv_pci),
1611}; 1612};
1612 1613
1613static const struct data_queue_desc rt2400pci_queue_atim = { 1614static const struct data_queue_desc rt2400pci_queue_atim = {
1614 .entry_num = ATIM_ENTRIES, 1615 .entry_num = ATIM_ENTRIES,
1615 .data_size = DATA_FRAME_SIZE, 1616 .data_size = DATA_FRAME_SIZE,
1616 .desc_size = TXD_DESC_SIZE, 1617 .desc_size = TXD_DESC_SIZE,
1617 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1618 .priv_size = sizeof(struct queue_entry_priv_pci),
1618}; 1619};
1619 1620
1620static const struct rt2x00_ops rt2400pci_ops = { 1621static const struct rt2x00_ops rt2400pci_ops = {
@@ -1623,6 +1624,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1623 .max_ap_intf = 1, 1624 .max_ap_intf = 1,
1624 .eeprom_size = EEPROM_SIZE, 1625 .eeprom_size = EEPROM_SIZE,
1625 .rf_size = RF_SIZE, 1626 .rf_size = RF_SIZE,
1627 .tx_queues = NUM_TX_QUEUES,
1626 .rx = &rt2400pci_queue_rx, 1628 .rx = &rt2400pci_queue_rx,
1627 .tx = &rt2400pci_queue_tx, 1629 .tx = &rt2400pci_queue_tx,
1628 .bcn = &rt2400pci_queue_bcn, 1630 .bcn = &rt2400pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index a5210f9a3360..e9aa326be9f6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -52,6 +52,11 @@
52#define RF_SIZE 0x0010 52#define RF_SIZE 0x0010
53 53
54/* 54/*
55 * Number of TX queues.
56 */
57#define NUM_TX_QUEUES 2
58
59/*
55 * Control/Status Registers(CSR). 60 * Control/Status Registers(CSR).
56 * Some values are set in TU, whereas 1 TU == 1024 us. 61 * Some values are set in TU, whereas 1 TU == 1024 us.
57 */ 62 */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a5ed54b69262..673350953b89 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -317,8 +317,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
317 struct rt2x00intf_conf *conf, 317 struct rt2x00intf_conf *conf,
318 const unsigned int flags) 318 const unsigned int flags)
319{ 319{
320 struct data_queue *queue = 320 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
321 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
322 unsigned int bcn_preload; 321 unsigned int bcn_preload;
323 u32 reg; 322 u32 reg;
324 323
@@ -716,38 +715,33 @@ dynamic_cca_tune:
716static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 715static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
717 struct queue_entry *entry) 716 struct queue_entry *entry)
718{ 717{
719 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 718 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
720 u32 word; 719 u32 word;
721 720
722 rt2x00_desc_read(priv_rx->desc, 1, &word); 721 rt2x00_desc_read(entry_priv->desc, 1, &word);
723 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma); 722 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
724 rt2x00_desc_write(priv_rx->desc, 1, word); 723 rt2x00_desc_write(entry_priv->desc, 1, word);
725 724
726 rt2x00_desc_read(priv_rx->desc, 0, &word); 725 rt2x00_desc_read(entry_priv->desc, 0, &word);
727 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 726 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
728 rt2x00_desc_write(priv_rx->desc, 0, word); 727 rt2x00_desc_write(entry_priv->desc, 0, word);
729} 728}
730 729
731static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev, 730static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev,
732 struct queue_entry *entry) 731 struct queue_entry *entry)
733{ 732{
734 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 733 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
735 u32 word; 734 u32 word;
736 735
737 rt2x00_desc_read(priv_tx->desc, 1, &word); 736 rt2x00_desc_read(entry_priv->desc, 0, &word);
738 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
739 rt2x00_desc_write(priv_tx->desc, 1, word);
740
741 rt2x00_desc_read(priv_tx->desc, 0, &word);
742 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 737 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
743 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 738 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
744 rt2x00_desc_write(priv_tx->desc, 0, word); 739 rt2x00_desc_write(entry_priv->desc, 0, word);
745} 740}
746 741
747static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) 742static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
748{ 743{
749 struct queue_entry_priv_pci_rx *priv_rx; 744 struct queue_entry_priv_pci *entry_priv;
750 struct queue_entry_priv_pci_tx *priv_tx;
751 u32 reg; 745 u32 reg;
752 746
753 /* 747 /*
@@ -760,28 +754,28 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
760 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 754 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
761 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 755 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
762 756
763 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 757 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
764 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 758 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
765 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 759 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
766 priv_tx->desc_dma); 760 entry_priv->desc_dma);
767 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 761 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
768 762
769 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 763 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
770 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 764 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
771 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 765 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
772 priv_tx->desc_dma); 766 entry_priv->desc_dma);
773 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 767 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
774 768
775 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data; 769 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
776 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 770 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
777 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 771 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
778 priv_tx->desc_dma); 772 entry_priv->desc_dma);
779 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 773 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
780 774
781 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data; 775 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
782 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 776 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
783 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 777 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
784 priv_tx->desc_dma); 778 entry_priv->desc_dma);
785 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 779 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
786 780
787 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 781 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
@@ -789,9 +783,10 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
789 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 783 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
790 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 784 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
791 785
792 priv_rx = rt2x00dev->rx->entries[0].priv_data; 786 entry_priv = rt2x00dev->rx->entries[0].priv_data;
793 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 787 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
794 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma); 788 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
789 entry_priv->desc_dma);
795 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 790 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
796 791
797 return 0; 792 return 0;
@@ -1156,16 +1151,20 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1156 */ 1151 */
1157static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1152static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1158 struct sk_buff *skb, 1153 struct sk_buff *skb,
1159 struct txentry_desc *txdesc, 1154 struct txentry_desc *txdesc)
1160 struct ieee80211_tx_control *control)
1161{ 1155{
1162 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1156 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1157 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1163 __le32 *txd = skbdesc->desc; 1158 __le32 *txd = skbdesc->desc;
1164 u32 word; 1159 u32 word;
1165 1160
1166 /* 1161 /*
1167 * Start writing the descriptor words. 1162 * Start writing the descriptor words.
1168 */ 1163 */
1164 rt2x00_desc_read(entry_priv->desc, 1, &word);
1165 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
1166 rt2x00_desc_write(entry_priv->desc, 1, word);
1167
1169 rt2x00_desc_read(txd, 2, &word); 1168 rt2x00_desc_read(txd, 2, &word);
1170 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1169 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
1171 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs); 1170 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
@@ -1199,9 +1198,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1199 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1198 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
1200 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1199 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1201 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1200 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1202 !!(control->flags & 1201 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1203 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1204 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1205 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1202 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1206 rt2x00_desc_write(txd, 0, word); 1203 rt2x00_desc_write(txd, 0, word);
1207} 1204}
@@ -1210,11 +1207,11 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1210 * TX data initialization 1207 * TX data initialization
1211 */ 1208 */
1212static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1209static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1213 const unsigned int queue) 1210 const enum data_queue_qid queue)
1214{ 1211{
1215 u32 reg; 1212 u32 reg;
1216 1213
1217 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1214 if (queue == QID_BEACON) {
1218 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1215 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1219 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1216 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1220 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 1217 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
@@ -1226,12 +1223,9 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1226 } 1223 }
1227 1224
1228 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1225 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1229 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1226 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1230 (queue == IEEE80211_TX_QUEUE_DATA0)); 1227 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
1231 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1228 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
1232 (queue == IEEE80211_TX_QUEUE_DATA1));
1233 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1234 (queue == RT2X00_BCN_QUEUE_ATIM));
1235 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1229 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1236} 1230}
1237 1231
@@ -1241,14 +1235,13 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1241static void rt2500pci_fill_rxdone(struct queue_entry *entry, 1235static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1242 struct rxdone_entry_desc *rxdesc) 1236 struct rxdone_entry_desc *rxdesc)
1243{ 1237{
1244 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1238 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1245 u32 word0; 1239 u32 word0;
1246 u32 word2; 1240 u32 word2;
1247 1241
1248 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1242 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1249 rt2x00_desc_read(priv_rx->desc, 2, &word2); 1243 rt2x00_desc_read(entry_priv->desc, 2, &word2);
1250 1244
1251 rxdesc->flags = 0;
1252 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1245 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1253 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1246 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1254 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1247 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1265,7 +1258,6 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1265 entry->queue->rt2x00dev->rssi_offset; 1258 entry->queue->rt2x00dev->rssi_offset;
1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1259 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1267 1260
1268 rxdesc->dev_flags = 0;
1269 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1261 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1270 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1262 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1271 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1263 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1276,18 +1268,18 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1276 * Interrupt functions. 1268 * Interrupt functions.
1277 */ 1269 */
1278static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, 1270static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1279 const enum ieee80211_tx_queue queue_idx) 1271 const enum data_queue_qid queue_idx)
1280{ 1272{
1281 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1273 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1282 struct queue_entry_priv_pci_tx *priv_tx; 1274 struct queue_entry_priv_pci *entry_priv;
1283 struct queue_entry *entry; 1275 struct queue_entry *entry;
1284 struct txdone_entry_desc txdesc; 1276 struct txdone_entry_desc txdesc;
1285 u32 word; 1277 u32 word;
1286 1278
1287 while (!rt2x00queue_empty(queue)) { 1279 while (!rt2x00queue_empty(queue)) {
1288 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 1280 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1289 priv_tx = entry->priv_data; 1281 entry_priv = entry->priv_data;
1290 rt2x00_desc_read(priv_tx->desc, 0, &word); 1282 rt2x00_desc_read(entry_priv->desc, 0, &word);
1291 1283
1292 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1284 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1293 !rt2x00_get_field32(word, TXD_W0_VALID)) 1285 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1296,7 +1288,18 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1296 /* 1288 /*
1297 * Obtain the status about this packet. 1289 * Obtain the status about this packet.
1298 */ 1290 */
1299 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT); 1291 txdesc.flags = 0;
1292 switch (rt2x00_get_field32(word, TXD_W0_RESULT)) {
1293 case 0: /* Success */
1294 case 1: /* Success with retry */
1295 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1296 break;
1297 case 2: /* Failure, excessive retries */
1298 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1299 /* Don't break, this is a failed frame! */
1300 default: /* Failure */
1301 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1302 }
1300 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1303 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1301 1304
1302 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1305 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
@@ -1343,19 +1346,19 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1343 * 3 - Atim ring transmit done interrupt. 1346 * 3 - Atim ring transmit done interrupt.
1344 */ 1347 */
1345 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1348 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1346 rt2500pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 1349 rt2500pci_txdone(rt2x00dev, QID_ATIM);
1347 1350
1348 /* 1351 /*
1349 * 4 - Priority ring transmit done interrupt. 1352 * 4 - Priority ring transmit done interrupt.
1350 */ 1353 */
1351 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1354 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1352 rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); 1355 rt2500pci_txdone(rt2x00dev, QID_AC_BE);
1353 1356
1354 /* 1357 /*
1355 * 5 - Tx ring transmit done interrupt. 1358 * 5 - Tx ring transmit done interrupt.
1356 */ 1359 */
1357 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1360 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1358 rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); 1361 rt2500pci_txdone(rt2x00dev, QID_AC_BK);
1359 1362
1360 return IRQ_HANDLED; 1363 return IRQ_HANDLED;
1361} 1364}
@@ -1684,11 +1687,10 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1684 /* 1687 /*
1685 * Initialize all hw fields. 1688 * Initialize all hw fields.
1686 */ 1689 */
1687 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1690 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1691 IEEE80211_HW_SIGNAL_DBM;
1692
1688 rt2x00dev->hw->extra_tx_headroom = 0; 1693 rt2x00dev->hw->extra_tx_headroom = 0;
1689 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1690 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1691 rt2x00dev->hw->queues = 2;
1692 1694
1693 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 1695 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
1694 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1696 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1797,19 +1799,28 @@ static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
1797 return tsf; 1799 return tsf;
1798} 1800}
1799 1801
1800static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1802static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1801 struct ieee80211_tx_control *control)
1802{ 1803{
1803 struct rt2x00_dev *rt2x00dev = hw->priv; 1804 struct rt2x00_dev *rt2x00dev = hw->priv;
1804 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1805 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1805 struct queue_entry_priv_pci_tx *priv_tx; 1806 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1807 struct queue_entry_priv_pci *entry_priv;
1806 struct skb_frame_desc *skbdesc; 1808 struct skb_frame_desc *skbdesc;
1809 struct txentry_desc txdesc;
1807 u32 reg; 1810 u32 reg;
1808 1811
1809 if (unlikely(!intf->beacon)) 1812 if (unlikely(!intf->beacon))
1810 return -ENOBUFS; 1813 return -ENOBUFS;
1811 1814
1812 priv_tx = intf->beacon->priv_data; 1815 entry_priv = intf->beacon->priv_data;
1816
1817 /*
1818 * Copy all TX descriptor information into txdesc,
1819 * after that we are free to use the skb->cb array
1820 * for our information.
1821 */
1822 intf->beacon->skb = skb;
1823 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1813 1824
1814 /* 1825 /*
1815 * Fill in skb descriptor 1826 * Fill in skb descriptor
@@ -1819,7 +1830,7 @@ static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1819 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 1830 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1820 skbdesc->data = skb->data; 1831 skbdesc->data = skb->data;
1821 skbdesc->data_len = skb->len; 1832 skbdesc->data_len = skb->len;
1822 skbdesc->desc = priv_tx->desc; 1833 skbdesc->desc = entry_priv->desc;
1823 skbdesc->desc_len = intf->beacon->queue->desc_size; 1834 skbdesc->desc_len = intf->beacon->queue->desc_size;
1824 skbdesc->entry = intf->beacon; 1835 skbdesc->entry = intf->beacon;
1825 1836
@@ -1834,20 +1845,13 @@ static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1834 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1845 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1835 1846
1836 /* 1847 /*
1837 * mac80211 doesn't provide the control->queue variable
1838 * for beacons. Set our own queue identification so
1839 * it can be used during descriptor initialization.
1840 */
1841 control->queue = RT2X00_BCN_QUEUE_BEACON;
1842 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1843
1844 /*
1845 * Enable beacon generation. 1848 * Enable beacon generation.
1846 * Write entire beacon with descriptor to register, 1849 * Write entire beacon with descriptor to register,
1847 * and kick the beacon generator. 1850 * and kick the beacon generator.
1848 */ 1851 */
1849 memcpy(priv_tx->data, skb->data, skb->len); 1852 memcpy(entry_priv->data, skb->data, skb->len);
1850 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 1853 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1854 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
1851 1855
1852 return 0; 1856 return 0;
1853} 1857}
@@ -1906,28 +1910,28 @@ static const struct data_queue_desc rt2500pci_queue_rx = {
1906 .entry_num = RX_ENTRIES, 1910 .entry_num = RX_ENTRIES,
1907 .data_size = DATA_FRAME_SIZE, 1911 .data_size = DATA_FRAME_SIZE,
1908 .desc_size = RXD_DESC_SIZE, 1912 .desc_size = RXD_DESC_SIZE,
1909 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 1913 .priv_size = sizeof(struct queue_entry_priv_pci),
1910}; 1914};
1911 1915
1912static const struct data_queue_desc rt2500pci_queue_tx = { 1916static const struct data_queue_desc rt2500pci_queue_tx = {
1913 .entry_num = TX_ENTRIES, 1917 .entry_num = TX_ENTRIES,
1914 .data_size = DATA_FRAME_SIZE, 1918 .data_size = DATA_FRAME_SIZE,
1915 .desc_size = TXD_DESC_SIZE, 1919 .desc_size = TXD_DESC_SIZE,
1916 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1920 .priv_size = sizeof(struct queue_entry_priv_pci),
1917}; 1921};
1918 1922
1919static const struct data_queue_desc rt2500pci_queue_bcn = { 1923static const struct data_queue_desc rt2500pci_queue_bcn = {
1920 .entry_num = BEACON_ENTRIES, 1924 .entry_num = BEACON_ENTRIES,
1921 .data_size = MGMT_FRAME_SIZE, 1925 .data_size = MGMT_FRAME_SIZE,
1922 .desc_size = TXD_DESC_SIZE, 1926 .desc_size = TXD_DESC_SIZE,
1923 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1927 .priv_size = sizeof(struct queue_entry_priv_pci),
1924}; 1928};
1925 1929
1926static const struct data_queue_desc rt2500pci_queue_atim = { 1930static const struct data_queue_desc rt2500pci_queue_atim = {
1927 .entry_num = ATIM_ENTRIES, 1931 .entry_num = ATIM_ENTRIES,
1928 .data_size = DATA_FRAME_SIZE, 1932 .data_size = DATA_FRAME_SIZE,
1929 .desc_size = TXD_DESC_SIZE, 1933 .desc_size = TXD_DESC_SIZE,
1930 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1934 .priv_size = sizeof(struct queue_entry_priv_pci),
1931}; 1935};
1932 1936
1933static const struct rt2x00_ops rt2500pci_ops = { 1937static const struct rt2x00_ops rt2500pci_ops = {
@@ -1936,6 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
1936 .max_ap_intf = 1, 1940 .max_ap_intf = 1,
1937 .eeprom_size = EEPROM_SIZE, 1941 .eeprom_size = EEPROM_SIZE,
1938 .rf_size = RF_SIZE, 1942 .rf_size = RF_SIZE,
1943 .tx_queues = NUM_TX_QUEUES,
1939 .rx = &rt2500pci_queue_rx, 1944 .rx = &rt2500pci_queue_rx,
1940 .tx = &rt2500pci_queue_tx, 1945 .tx = &rt2500pci_queue_tx,
1941 .bcn = &rt2500pci_queue_bcn, 1946 .bcn = &rt2500pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 13899550465a..ea93b8f423a9 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -63,6 +63,11 @@
63#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
64 64
65/* 65/*
66 * Number of TX queues.
67 */
68#define NUM_TX_QUEUES 2
69
70/*
66 * Control/Status Registers(CSR). 71 * Control/Status Registers(CSR).
67 * Some values are set in TU, whereas 1 TU == 1024 us. 72 * Some values are set in TU, whereas 1 TU == 1024 us.
68 */ 73 */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index fdbd0ef2be4b..cca1504550dc 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -76,10 +76,10 @@ static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
76 const unsigned int offset, 76 const unsigned int offset,
77 void *value, const u16 length) 77 void *value, const u16 length)
78{ 78{
79 int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
80 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 79 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
81 USB_VENDOR_REQUEST_IN, offset, 80 USB_VENDOR_REQUEST_IN, offset,
82 value, length, timeout); 81 value, length,
82 REGISTER_TIMEOUT16(length));
83} 83}
84 84
85static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, 85static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -106,10 +106,10 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
106 const unsigned int offset, 106 const unsigned int offset,
107 void *value, const u16 length) 107 void *value, const u16 length)
108{ 108{
109 int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
110 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 109 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
111 USB_VENDOR_REQUEST_OUT, offset, 110 USB_VENDOR_REQUEST_OUT, offset,
112 value, length, timeout); 111 value, length,
112 REGISTER_TIMEOUT16(length));
113} 113}
114 114
115static u16 rt2500usb_bbp_check(struct rt2x00_dev *rt2x00dev) 115static u16 rt2500usb_bbp_check(struct rt2x00_dev *rt2x00dev)
@@ -1033,8 +1033,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1033 */ 1033 */
1034static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1034static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1035 struct sk_buff *skb, 1035 struct sk_buff *skb,
1036 struct txentry_desc *txdesc, 1036 struct txentry_desc *txdesc)
1037 struct ieee80211_tx_control *control)
1038{ 1037{
1039 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1038 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1040 __le32 *txd = skbdesc->desc; 1039 __le32 *txd = skbdesc->desc;
@@ -1058,7 +1057,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1058 rt2x00_desc_write(txd, 2, word); 1057 rt2x00_desc_write(txd, 2, word);
1059 1058
1060 rt2x00_desc_read(txd, 0, &word); 1059 rt2x00_desc_read(txd, 0, &word);
1061 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit); 1060 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
1062 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1061 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1063 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 1062 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1064 rt2x00_set_field32(&word, TXD_W0_ACK, 1063 rt2x00_set_field32(&word, TXD_W0_ACK,
@@ -1068,7 +1067,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1068 rt2x00_set_field32(&word, TXD_W0_OFDM, 1067 rt2x00_set_field32(&word, TXD_W0_OFDM,
1069 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1068 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1070 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1069 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1071 !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT)); 1070 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1072 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1071 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1073 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1072 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1074 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1073 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
@@ -1094,11 +1093,11 @@ static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1094 * TX data initialization 1093 * TX data initialization
1095 */ 1094 */
1096static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1095static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1097 const unsigned int queue) 1096 const enum data_queue_qid queue)
1098{ 1097{
1099 u16 reg; 1098 u16 reg;
1100 1099
1101 if (queue != RT2X00_BCN_QUEUE_BEACON) 1100 if (queue != QID_BEACON)
1102 return; 1101 return;
1103 1102
1104 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 1103 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
@@ -1125,30 +1124,32 @@ static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1125static void rt2500usb_fill_rxdone(struct queue_entry *entry, 1124static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1126 struct rxdone_entry_desc *rxdesc) 1125 struct rxdone_entry_desc *rxdesc)
1127{ 1126{
1128 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data; 1127 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
1129 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1128 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1130 __le32 *rxd = 1129 __le32 *rxd =
1131 (__le32 *)(entry->skb->data + 1130 (__le32 *)(entry->skb->data +
1132 (priv_rx->urb->actual_length - entry->queue->desc_size)); 1131 (entry_priv->urb->actual_length -
1133 unsigned int offset = entry->queue->desc_size + 2; 1132 entry->queue->desc_size));
1134 u32 word0; 1133 u32 word0;
1135 u32 word1; 1134 u32 word1;
1136 1135
1137 /* 1136 /*
1138 * Copy descriptor to the available headroom inside the skbuffer. 1137 * Copy descriptor to the skb->cb array, this has 2 benefits:
1138 * 1) Each descriptor word is 4 byte aligned.
1139 * 2) Descriptor is safe from moving of frame data in rt2x00usb.
1139 */ 1140 */
1140 skb_push(entry->skb, offset); 1141 skbdesc->desc_len =
1141 memcpy(entry->skb->data, rxd, entry->queue->desc_size); 1142 min_t(u16, entry->queue->desc_size, sizeof(entry->skb->cb));
1142 rxd = (__le32 *)entry->skb->data; 1143 memcpy(entry->skb->cb, rxd, skbdesc->desc_len);
1144 skbdesc->desc = entry->skb->cb;
1145 rxd = (__le32 *)skbdesc->desc;
1143 1146
1144 /* 1147 /*
1145 * The descriptor is now aligned to 4 bytes and thus it is 1148 * It is now safe to read the descriptor on all architectures.
1146 * now safe to read it on all architectures.
1147 */ 1149 */
1148 rt2x00_desc_read(rxd, 0, &word0); 1150 rt2x00_desc_read(rxd, 0, &word0);
1149 rt2x00_desc_read(rxd, 1, &word1); 1151 rt2x00_desc_read(rxd, 1, &word1);
1150 1152
1151 rxdesc->flags = 0;
1152 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1153 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1153 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1154 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1154 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1155 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1165,7 +1166,6 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1165 entry->queue->rt2x00dev->rssi_offset; 1166 entry->queue->rt2x00dev->rssi_offset;
1166 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1167 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1167 1168
1168 rxdesc->dev_flags = 0;
1169 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1169 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1170 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1170 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1171 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1171 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1174,16 +1174,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1174 /* 1174 /*
1175 * Adjust the skb memory window to the frame boundaries. 1175 * Adjust the skb memory window to the frame boundaries.
1176 */ 1176 */
1177 skb_pull(entry->skb, offset);
1178 skb_trim(entry->skb, rxdesc->size); 1177 skb_trim(entry->skb, rxdesc->size);
1179
1180 /*
1181 * Set descriptor and data pointer.
1182 */
1183 skbdesc->data = entry->skb->data; 1178 skbdesc->data = entry->skb->data;
1184 skbdesc->data_len = rxdesc->size; 1179 skbdesc->data_len = rxdesc->size;
1185 skbdesc->desc = rxd;
1186 skbdesc->desc_len = entry->queue->desc_size;
1187} 1180}
1188 1181
1189/* 1182/*
@@ -1192,7 +1185,7 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1192static void rt2500usb_beacondone(struct urb *urb) 1185static void rt2500usb_beacondone(struct urb *urb)
1193{ 1186{
1194 struct queue_entry *entry = (struct queue_entry *)urb->context; 1187 struct queue_entry *entry = (struct queue_entry *)urb->context;
1195 struct queue_entry_priv_usb_bcn *priv_bcn = entry->priv_data; 1188 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1196 1189
1197 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) 1190 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
1198 return; 1191 return;
@@ -1203,9 +1196,9 @@ static void rt2500usb_beacondone(struct urb *urb)
1203 * Otherwise we should free the sk_buffer, the device 1196 * Otherwise we should free the sk_buffer, the device
1204 * should be doing the rest of the work now. 1197 * should be doing the rest of the work now.
1205 */ 1198 */
1206 if (priv_bcn->guardian_urb == urb) { 1199 if (bcn_priv->guardian_urb == urb) {
1207 usb_submit_urb(priv_bcn->urb, GFP_ATOMIC); 1200 usb_submit_urb(bcn_priv->urb, GFP_ATOMIC);
1208 } else if (priv_bcn->urb == urb) { 1201 } else if (bcn_priv->urb == urb) {
1209 dev_kfree_skb(entry->skb); 1202 dev_kfree_skb(entry->skb);
1210 entry->skb = NULL; 1203 entry->skb = NULL;
1211 } 1204 }
@@ -1587,11 +1580,10 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1587 rt2x00dev->hw->flags = 1580 rt2x00dev->hw->flags =
1588 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 1581 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1589 IEEE80211_HW_RX_INCLUDES_FCS | 1582 IEEE80211_HW_RX_INCLUDES_FCS |
1590 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1583 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1584 IEEE80211_HW_SIGNAL_DBM;
1585
1591 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1586 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1592 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1593 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1594 rt2x00dev->hw->queues = 2;
1595 1587
1596 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); 1588 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev);
1597 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1589 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1674,15 +1666,15 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1674/* 1666/*
1675 * IEEE80211 stack callback functions. 1667 * IEEE80211 stack callback functions.
1676 */ 1668 */
1677static int rt2500usb_beacon_update(struct ieee80211_hw *hw, 1669static int rt2500usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1678 struct sk_buff *skb,
1679 struct ieee80211_tx_control *control)
1680{ 1670{
1681 struct rt2x00_dev *rt2x00dev = hw->priv; 1671 struct rt2x00_dev *rt2x00dev = hw->priv;
1682 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 1672 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
1683 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1673 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1684 struct queue_entry_priv_usb_bcn *priv_bcn; 1674 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1675 struct queue_entry_priv_usb_bcn *bcn_priv;
1685 struct skb_frame_desc *skbdesc; 1676 struct skb_frame_desc *skbdesc;
1677 struct txentry_desc txdesc;
1686 int pipe = usb_sndbulkpipe(usb_dev, 1); 1678 int pipe = usb_sndbulkpipe(usb_dev, 1);
1687 int length; 1679 int length;
1688 u16 reg; 1680 u16 reg;
@@ -1690,7 +1682,15 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1690 if (unlikely(!intf->beacon)) 1682 if (unlikely(!intf->beacon))
1691 return -ENOBUFS; 1683 return -ENOBUFS;
1692 1684
1693 priv_bcn = intf->beacon->priv_data; 1685 bcn_priv = intf->beacon->priv_data;
1686
1687 /*
1688 * Copy all TX descriptor information into txdesc,
1689 * after that we are free to use the skb->cb array
1690 * for our information.
1691 */
1692 intf->beacon->skb = skb;
1693 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1694 1694
1695 /* 1695 /*
1696 * Add the descriptor in front of the skb. 1696 * Add the descriptor in front of the skb.
@@ -1720,13 +1720,7 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1720 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); 1720 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
1721 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 1721 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1722 1722
1723 /* 1723 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1724 * mac80211 doesn't provide the control->queue variable
1725 * for beacons. Set our own queue identification so
1726 * it can be used during descriptor initialization.
1727 */
1728 control->queue = RT2X00_BCN_QUEUE_BEACON;
1729 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1730 1724
1731 /* 1725 /*
1732 * USB devices cannot blindly pass the skb->len as the 1726 * USB devices cannot blindly pass the skb->len as the
@@ -1735,7 +1729,7 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1735 */ 1729 */
1736 length = rt2500usb_get_tx_data_len(rt2x00dev, skb); 1730 length = rt2500usb_get_tx_data_len(rt2x00dev, skb);
1737 1731
1738 usb_fill_bulk_urb(priv_bcn->urb, usb_dev, pipe, 1732 usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe,
1739 skb->data, length, rt2500usb_beacondone, 1733 skb->data, length, rt2500usb_beacondone,
1740 intf->beacon); 1734 intf->beacon);
1741 1735
@@ -1744,20 +1738,20 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1744 * We only need a single byte, so lets recycle 1738 * We only need a single byte, so lets recycle
1745 * the 'flags' field we are not using for beacons. 1739 * the 'flags' field we are not using for beacons.
1746 */ 1740 */
1747 priv_bcn->guardian_data = 0; 1741 bcn_priv->guardian_data = 0;
1748 usb_fill_bulk_urb(priv_bcn->guardian_urb, usb_dev, pipe, 1742 usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe,
1749 &priv_bcn->guardian_data, 1, rt2500usb_beacondone, 1743 &bcn_priv->guardian_data, 1, rt2500usb_beacondone,
1750 intf->beacon); 1744 intf->beacon);
1751 1745
1752 /* 1746 /*
1753 * Send out the guardian byte. 1747 * Send out the guardian byte.
1754 */ 1748 */
1755 usb_submit_urb(priv_bcn->guardian_urb, GFP_ATOMIC); 1749 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
1756 1750
1757 /* 1751 /*
1758 * Enable beacon generation. 1752 * Enable beacon generation.
1759 */ 1753 */
1760 rt2500usb_kick_tx_queue(rt2x00dev, control->queue); 1754 rt2500usb_kick_tx_queue(rt2x00dev, QID_BEACON);
1761 1755
1762 return 0; 1756 return 0;
1763} 1757}
@@ -1803,14 +1797,14 @@ static const struct data_queue_desc rt2500usb_queue_rx = {
1803 .entry_num = RX_ENTRIES, 1797 .entry_num = RX_ENTRIES,
1804 .data_size = DATA_FRAME_SIZE, 1798 .data_size = DATA_FRAME_SIZE,
1805 .desc_size = RXD_DESC_SIZE, 1799 .desc_size = RXD_DESC_SIZE,
1806 .priv_size = sizeof(struct queue_entry_priv_usb_rx), 1800 .priv_size = sizeof(struct queue_entry_priv_usb),
1807}; 1801};
1808 1802
1809static const struct data_queue_desc rt2500usb_queue_tx = { 1803static const struct data_queue_desc rt2500usb_queue_tx = {
1810 .entry_num = TX_ENTRIES, 1804 .entry_num = TX_ENTRIES,
1811 .data_size = DATA_FRAME_SIZE, 1805 .data_size = DATA_FRAME_SIZE,
1812 .desc_size = TXD_DESC_SIZE, 1806 .desc_size = TXD_DESC_SIZE,
1813 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 1807 .priv_size = sizeof(struct queue_entry_priv_usb),
1814}; 1808};
1815 1809
1816static const struct data_queue_desc rt2500usb_queue_bcn = { 1810static const struct data_queue_desc rt2500usb_queue_bcn = {
@@ -1824,7 +1818,7 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
1824 .entry_num = ATIM_ENTRIES, 1818 .entry_num = ATIM_ENTRIES,
1825 .data_size = DATA_FRAME_SIZE, 1819 .data_size = DATA_FRAME_SIZE,
1826 .desc_size = TXD_DESC_SIZE, 1820 .desc_size = TXD_DESC_SIZE,
1827 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 1821 .priv_size = sizeof(struct queue_entry_priv_usb),
1828}; 1822};
1829 1823
1830static const struct rt2x00_ops rt2500usb_ops = { 1824static const struct rt2x00_ops rt2500usb_ops = {
@@ -1833,6 +1827,7 @@ static const struct rt2x00_ops rt2500usb_ops = {
1833 .max_ap_intf = 1, 1827 .max_ap_intf = 1,
1834 .eeprom_size = EEPROM_SIZE, 1828 .eeprom_size = EEPROM_SIZE,
1835 .rf_size = RF_SIZE, 1829 .rf_size = RF_SIZE,
1830 .tx_queues = NUM_TX_QUEUES,
1836 .rx = &rt2500usb_queue_rx, 1831 .rx = &rt2500usb_queue_rx,
1837 .tx = &rt2500usb_queue_tx, 1832 .tx = &rt2500usb_queue_tx,
1838 .bcn = &rt2500usb_queue_bcn, 1833 .bcn = &rt2500usb_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index a37a068d0c71..7d50098f0cc5 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -63,6 +63,11 @@
63#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
64 64
65/* 65/*
66 * Number of TX queues.
67 */
68#define NUM_TX_QUEUES 2
69
70/*
66 * Control/Status Registers(CSR). 71 * Control/Status Registers(CSR).
67 * Some values are set in TU, whereas 1 TU == 1024 us. 72 * Some values are set in TU, whereas 1 TU == 1024 us.
68 */ 73 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 611d98320593..15ec797c5ec1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
44/* 44/*
45 * Module information. 45 * Module information.
46 */ 46 */
47#define DRV_VERSION "2.1.4" 47#define DRV_VERSION "2.1.6"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 49
50/* 50/*
@@ -409,7 +409,7 @@ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
409 * @supported_rates: Rate types which are supported (CCK, OFDM). 409 * @supported_rates: Rate types which are supported (CCK, OFDM).
410 * @num_channels: Number of supported channels. This is used as array size 410 * @num_channels: Number of supported channels. This is used as array size
411 * for @tx_power_a, @tx_power_bg and @channels. 411 * for @tx_power_a, @tx_power_bg and @channels.
412 * channels: Device/chipset specific channel values (See &struct rf_channel). 412 * @channels: Device/chipset specific channel values (See &struct rf_channel).
413 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL). 413 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL).
414 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL). 414 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
415 * @tx_power_default: Default TX power value to use when either 415 * @tx_power_default: Default TX power value to use when either
@@ -545,15 +545,13 @@ struct rt2x00lib_ops {
545 */ 545 */
546 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 546 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
547 struct sk_buff *skb, 547 struct sk_buff *skb,
548 struct txentry_desc *txdesc, 548 struct txentry_desc *txdesc);
549 struct ieee80211_tx_control *control);
550 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev, 549 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev,
551 struct data_queue *queue, struct sk_buff *skb, 550 struct data_queue *queue, struct sk_buff *skb);
552 struct ieee80211_tx_control *control);
553 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, 551 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev,
554 struct sk_buff *skb); 552 struct sk_buff *skb);
555 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 553 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
556 const unsigned int queue); 554 const enum data_queue_qid queue);
557 555
558 /* 556 /*
559 * RX control handlers 557 * RX control handlers
@@ -597,6 +595,7 @@ struct rt2x00_ops {
597 const unsigned int max_ap_intf; 595 const unsigned int max_ap_intf;
598 const unsigned int eeprom_size; 596 const unsigned int eeprom_size;
599 const unsigned int rf_size; 597 const unsigned int rf_size;
598 const unsigned int tx_queues;
600 const struct data_queue_desc *rx; 599 const struct data_queue_desc *rx;
601 const struct data_queue_desc *tx; 600 const struct data_queue_desc *tx;
602 const struct data_queue_desc *bcn; 601 const struct data_queue_desc *bcn;
@@ -626,7 +625,6 @@ enum rt2x00_flags {
626 /* 625 /*
627 * Driver features 626 * Driver features
628 */ 627 */
629 DRIVER_SUPPORT_MIXED_INTERFACES,
630 DRIVER_REQUIRE_FIRMWARE, 628 DRIVER_REQUIRE_FIRMWARE,
631 DRIVER_REQUIRE_BEACON_GUARD, 629 DRIVER_REQUIRE_BEACON_GUARD,
632 DRIVER_REQUIRE_ATIM_QUEUE, 630 DRIVER_REQUIRE_ATIM_QUEUE,
@@ -933,17 +931,49 @@ static inline u16 get_duration_res(const unsigned int size, const u8 rate)
933} 931}
934 932
935/** 933/**
936 * rt2x00queue_get_queue - Convert mac80211 queue index to rt2x00 queue 934 * rt2x00queue_create_tx_descriptor - Create TX descriptor from mac80211 input
935 * @entry: The entry which will be used to transfer the TX frame.
936 * @txdesc: rt2x00 TX descriptor which will be initialized by this function.
937 *
938 * This function will initialize the &struct txentry_desc based on information
939 * from mac80211. This descriptor can then be used by rt2x00lib and the drivers
940 * to correctly initialize the hardware descriptor.
941 * Note that before calling this function the skb->cb array must be untouched
942 * by rt2x00lib. Only after this function completes will it be save to
943 * overwrite the skb->cb information.
944 * The reason for this is that mac80211 writes its own tx information into
945 * the skb->cb array, and this function will use that information to initialize
946 * the &struct txentry_desc structure.
947 */
948void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
949 struct txentry_desc *txdesc);
950
951/**
952 * rt2x00queue_write_tx_descriptor - Write TX descriptor to hardware
953 * @entry: The entry which will be used to transfer the TX frame.
954 * @txdesc: TX descriptor which will be used to write hardware descriptor
955 *
956 * This function will write a TX descriptor initialized by
957 * &rt2x00queue_create_tx_descriptor to the hardware. After this call
958 * has completed the frame is now owned by the hardware, the hardware
959 * queue will have automatically be kicked unless this frame was generated
960 * by rt2x00lib, in which case the frame is "special" and must be kicked
961 * by the caller.
962 */
963void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
964 struct txentry_desc *txdesc);
965
966/**
967 * rt2x00queue_get_queue - Convert queue index to queue pointer
937 * @rt2x00dev: Pointer to &struct rt2x00_dev. 968 * @rt2x00dev: Pointer to &struct rt2x00_dev.
938 * @queue: mac80211/rt2x00 queue index 969 * @queue: rt2x00 queue index (see &enum data_queue_qid).
939 * (see &enum ieee80211_tx_queue and &enum rt2x00_bcn_queue).
940 */ 970 */
941struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 971struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
942 const unsigned int queue); 972 const enum data_queue_qid queue);
943 973
944/** 974/**
945 * rt2x00queue_get_entry - Get queue entry where the given index points to. 975 * rt2x00queue_get_entry - Get queue entry where the given index points to.
946 * @rt2x00dev: Pointer to &struct rt2x00_dev. 976 * @queue: Pointer to &struct data_queue from where we obtain the entry.
947 * @index: Index identifier for obtaining the correct index. 977 * @index: Index identifier for obtaining the correct index.
948 */ 978 */
949struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 979struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
@@ -952,7 +982,7 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
952/** 982/**
953 * rt2x00queue_index_inc - Index incrementation function 983 * rt2x00queue_index_inc - Index incrementation function
954 * @queue: Queue (&struct data_queue) to perform the action on. 984 * @queue: Queue (&struct data_queue) to perform the action on.
955 * @action: Index type (&enum queue_index) to perform the action on. 985 * @index: Index type (&enum queue_index) to perform the action on.
956 * 986 *
957 * This function will increase the requested index on the queue, 987 * This function will increase the requested index on the queue,
958 * it will grab the appropriate locks and handle queue overflow events by 988 * it will grab the appropriate locks and handle queue overflow events by
@@ -971,17 +1001,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
971 struct rxdone_entry_desc *rxdesc); 1001 struct rxdone_entry_desc *rxdesc);
972 1002
973/* 1003/*
974 * TX descriptor initializer
975 */
976void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
977 struct sk_buff *skb,
978 struct ieee80211_tx_control *control);
979
980/*
981 * mac80211 handlers. 1004 * mac80211 handlers.
982 */ 1005 */
983int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 1006int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
984 struct ieee80211_tx_control *control);
985int rt2x00mac_start(struct ieee80211_hw *hw); 1007int rt2x00mac_start(struct ieee80211_hw *hw);
986void rt2x00mac_stop(struct ieee80211_hw *hw); 1008void rt2x00mac_stop(struct ieee80211_hw *hw);
987int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1009int rt2x00mac_add_interface(struct ieee80211_hw *hw,
@@ -1004,7 +1026,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1004 struct ieee80211_vif *vif, 1026 struct ieee80211_vif *vif,
1005 struct ieee80211_bss_conf *bss_conf, 1027 struct ieee80211_bss_conf *bss_conf,
1006 u32 changes); 1028 u32 changes);
1007int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, 1029int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1008 const struct ieee80211_tx_queue_params *params); 1030 const struct ieee80211_tx_queue_params *params);
1009 1031
1010/* 1032/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index bfab3b8780d6..bd92cb8e68e0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -115,7 +115,7 @@ struct rt2x00debug_intf {
115}; 115};
116 116
117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
118 struct sk_buff *skb) 118 enum rt2x00_dump_type type, struct sk_buff *skb)
119{ 119{
120 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; 120 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
121 struct skb_frame_desc *desc = get_skb_frame_desc(skb); 121 struct skb_frame_desc *desc = get_skb_frame_desc(skb);
@@ -148,7 +148,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
148 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); 148 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
149 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); 149 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
150 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev); 150 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
151 dump_hdr->type = cpu_to_le16(desc->frame_type); 151 dump_hdr->type = cpu_to_le16(type);
152 dump_hdr->queue_index = desc->entry->queue->qid; 152 dump_hdr->queue_index = desc->entry->queue->qid;
153 dump_hdr->entry_index = desc->entry->entry_idx; 153 dump_hdr->entry_index = desc->entry->entry_idx;
154 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); 154 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 2673d568bcac..dc5ab90a52c3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -28,7 +28,6 @@
28 28
29#include "rt2x00.h" 29#include "rt2x00.h"
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31#include "rt2x00dump.h"
32 31
33/* 32/*
34 * Link tuning handlers 33 * Link tuning handlers
@@ -126,7 +125,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
126 /* 125 /*
127 * Start the TX queues. 126 * Start the TX queues.
128 */ 127 */
129 ieee80211_start_queues(rt2x00dev->hw); 128 ieee80211_wake_queues(rt2x00dev->hw);
130 129
131 return 0; 130 return 0;
132} 131}
@@ -416,7 +415,6 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
416 struct rt2x00_dev *rt2x00dev = data; 415 struct rt2x00_dev *rt2x00dev = data;
417 struct rt2x00_intf *intf = vif_to_intf(vif); 416 struct rt2x00_intf *intf = vif_to_intf(vif);
418 struct sk_buff *skb; 417 struct sk_buff *skb;
419 struct ieee80211_tx_control control;
420 struct ieee80211_bss_conf conf; 418 struct ieee80211_bss_conf conf;
421 int delayed_flags; 419 int delayed_flags;
422 420
@@ -434,9 +432,9 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
434 spin_unlock(&intf->lock); 432 spin_unlock(&intf->lock);
435 433
436 if (delayed_flags & DELAYED_UPDATE_BEACON) { 434 if (delayed_flags & DELAYED_UPDATE_BEACON) {
437 skb = ieee80211_beacon_get(rt2x00dev->hw, vif, &control); 435 skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
438 if (skb && rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, 436 if (skb &&
439 skb, &control)) 437 rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb))
440 dev_kfree_skb(skb); 438 dev_kfree_skb(skb);
441 } 439 }
442 440
@@ -495,64 +493,55 @@ void rt2x00lib_txdone(struct queue_entry *entry,
495 struct txdone_entry_desc *txdesc) 493 struct txdone_entry_desc *txdesc)
496{ 494{
497 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 495 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
498 struct skb_frame_desc *skbdesc; 496 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
499 struct ieee80211_tx_status tx_status; 497
500 int success = !!(txdesc->status == TX_SUCCESS || 498 /*
501 txdesc->status == TX_SUCCESS_RETRY); 499 * Send frame to debugfs immediately, after this call is completed
502 int fail = !!(txdesc->status == TX_FAIL_RETRY || 500 * we are going to overwrite the skb->cb array.
503 txdesc->status == TX_FAIL_INVALID || 501 */
504 txdesc->status == TX_FAIL_OTHER); 502 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
505 503
506 /* 504 /*
507 * Update TX statistics. 505 * Update TX statistics.
508 */ 506 */
509 rt2x00dev->link.qual.tx_success += success; 507 rt2x00dev->link.qual.tx_success +=
510 rt2x00dev->link.qual.tx_failed += fail; 508 test_bit(TXDONE_SUCCESS, &txdesc->flags);
509 rt2x00dev->link.qual.tx_failed +=
510 test_bit(TXDONE_FAILURE, &txdesc->flags);
511 511
512 /* 512 /*
513 * Initialize TX status 513 * Initialize TX status
514 */ 514 */
515 tx_status.flags = 0; 515 memset(&tx_info->status, 0, sizeof(tx_info->status));
516 tx_status.ack_signal = 0; 516 tx_info->status.ack_signal = 0;
517 tx_status.excessive_retries = (txdesc->status == TX_FAIL_RETRY); 517 tx_info->status.excessive_retries =
518 tx_status.retry_count = txdesc->retry; 518 test_bit(TXDONE_EXCESSIVE_RETRY, &txdesc->flags);
519 memcpy(&tx_status.control, txdesc->control, sizeof(*txdesc->control)); 519 tx_info->status.retry_count = txdesc->retry;
520 520
521 if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 521 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
522 if (success) 522 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
523 tx_status.flags |= IEEE80211_TX_STATUS_ACK; 523 tx_info->flags |= IEEE80211_TX_STAT_ACK;
524 else 524 else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
525 rt2x00dev->low_level_stats.dot11ACKFailureCount++; 525 rt2x00dev->low_level_stats.dot11ACKFailureCount++;
526 } 526 }
527 527
528 tx_status.queue_length = entry->queue->limit; 528 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
529 tx_status.queue_number = tx_status.control.queue; 529 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
530
531 if (tx_status.control.flags & IEEE80211_TXCTL_USE_RTS_CTS) {
532 if (success)
533 rt2x00dev->low_level_stats.dot11RTSSuccessCount++; 530 rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
534 else 531 else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
535 rt2x00dev->low_level_stats.dot11RTSFailureCount++; 532 rt2x00dev->low_level_stats.dot11RTSFailureCount++;
536 } 533 }
537 534
538 /* 535 /*
539 * Send the tx_status to debugfs. Only send the status report 536 * Only send the status report to mac80211 when TX status was
540 * to mac80211 when the frame originated from there. If this was 537 * requested by it. If this was a extra frame coming through
541 * a extra frame coming through a mac80211 library call (RTS/CTS) 538 * a mac80211 library call (RTS/CTS) then we should not send the
542 * then we should not send the status report back. 539 * status report back.
543 * If send to mac80211, mac80211 will clean up the skb structure,
544 * otherwise we have to do it ourself.
545 */ 540 */
546 skbdesc = get_skb_frame_desc(entry->skb); 541 if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
547 skbdesc->frame_type = DUMP_FRAME_TXDONE; 542 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
548
549 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
550
551 if (!(skbdesc->flags & FRAME_DESC_DRIVER_GENERATED))
552 ieee80211_tx_status_irqsafe(rt2x00dev->hw,
553 entry->skb, &tx_status);
554 else 543 else
555 dev_kfree_skb(entry->skb); 544 dev_kfree_skb_irq(entry->skb);
556 entry->skb = NULL; 545 entry->skb = NULL;
557} 546}
558EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 547EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
@@ -603,9 +592,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
603 rt2x00dev->link.qual.rx_success++; 592 rt2x00dev->link.qual.rx_success++;
604 593
605 rx_status->rate_idx = idx; 594 rx_status->rate_idx = idx;
606 rx_status->signal = 595 rx_status->qual =
607 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi); 596 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi);
608 rx_status->ssi = rxdesc->rssi; 597 rx_status->signal = rxdesc->rssi;
609 rx_status->flag = rxdesc->flags; 598 rx_status->flag = rxdesc->flags;
610 rx_status->antenna = rt2x00dev->link.ant.active.rx; 599 rx_status->antenna = rt2x00dev->link.ant.active.rx;
611 600
@@ -613,155 +602,13 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
613 * Send frame to mac80211 & debugfs. 602 * Send frame to mac80211 & debugfs.
614 * mac80211 will clean up the skb structure. 603 * mac80211 will clean up the skb structure.
615 */ 604 */
616 get_skb_frame_desc(entry->skb)->frame_type = DUMP_FRAME_RXDONE; 605 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
617 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
618 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status); 606 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status);
619 entry->skb = NULL; 607 entry->skb = NULL;
620} 608}
621EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 609EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
622 610
623/* 611/*
624 * TX descriptor initializer
625 */
626void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
627 struct sk_buff *skb,
628 struct ieee80211_tx_control *control)
629{
630 struct txentry_desc txdesc;
631 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
632 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skbdesc->data;
633 const struct rt2x00_rate *rate;
634 int tx_rate;
635 int length;
636 int duration;
637 int residual;
638 u16 frame_control;
639 u16 seq_ctrl;
640
641 memset(&txdesc, 0, sizeof(txdesc));
642
643 txdesc.queue = skbdesc->entry->queue->qid;
644 txdesc.cw_min = skbdesc->entry->queue->cw_min;
645 txdesc.cw_max = skbdesc->entry->queue->cw_max;
646 txdesc.aifs = skbdesc->entry->queue->aifs;
647
648 /*
649 * Read required fields from ieee80211 header.
650 */
651 frame_control = le16_to_cpu(hdr->frame_control);
652 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
653
654 tx_rate = control->tx_rate->hw_value;
655
656 /*
657 * Check whether this frame is to be acked
658 */
659 if (!(control->flags & IEEE80211_TXCTL_NO_ACK))
660 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
661
662 /*
663 * Check if this is a RTS/CTS frame
664 */
665 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
666 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
667 if (is_rts_frame(frame_control)) {
668 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags);
669 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
670 } else
671 __clear_bit(ENTRY_TXD_ACK, &txdesc.flags);
672 if (control->rts_cts_rate)
673 tx_rate = control->rts_cts_rate->hw_value;
674 }
675
676 rate = rt2x00_get_rate(tx_rate);
677
678 /*
679 * Check if more fragments are pending
680 */
681 if (ieee80211_get_morefrag(hdr)) {
682 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
683 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc.flags);
684 }
685
686 /*
687 * Beacons and probe responses require the tsf timestamp
688 * to be inserted into the frame.
689 */
690 if (control->queue == RT2X00_BCN_QUEUE_BEACON ||
691 is_probe_resp(frame_control))
692 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc.flags);
693
694 /*
695 * Determine with what IFS priority this frame should be send.
696 * Set ifs to IFS_SIFS when the this is not the first fragment,
697 * or this fragment came after RTS/CTS.
698 */
699 if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 ||
700 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags))
701 txdesc.ifs = IFS_SIFS;
702 else
703 txdesc.ifs = IFS_BACKOFF;
704
705 /*
706 * PLCP setup
707 * Length calculation depends on OFDM/CCK rate.
708 */
709 txdesc.signal = rate->plcp;
710 txdesc.service = 0x04;
711
712 length = skbdesc->data_len + FCS_LEN;
713 if (rate->flags & DEV_RATE_OFDM) {
714 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc.flags);
715
716 txdesc.length_high = (length >> 6) & 0x3f;
717 txdesc.length_low = length & 0x3f;
718 } else {
719 /*
720 * Convert length to microseconds.
721 */
722 residual = get_duration_res(length, rate->bitrate);
723 duration = get_duration(length, rate->bitrate);
724
725 if (residual != 0) {
726 duration++;
727
728 /*
729 * Check if we need to set the Length Extension
730 */
731 if (rate->bitrate == 110 && residual <= 30)
732 txdesc.service |= 0x80;
733 }
734
735 txdesc.length_high = (duration >> 8) & 0xff;
736 txdesc.length_low = duration & 0xff;
737
738 /*
739 * When preamble is enabled we should set the
740 * preamble bit for the signal.
741 */
742 if (rt2x00_get_rate_preamble(tx_rate))
743 txdesc.signal |= 0x08;
744 }
745
746 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &txdesc, control);
747
748 /*
749 * Update queue entry.
750 */
751 skbdesc->entry->skb = skb;
752
753 /*
754 * The frame has been completely initialized and ready
755 * for sending to the device. The caller will push the
756 * frame to the device, but we are going to push the
757 * frame to debugfs here.
758 */
759 skbdesc->frame_type = DUMP_FRAME_TX;
760 rt2x00debug_dump_frame(rt2x00dev, skb);
761}
762EXPORT_SYMBOL_GPL(rt2x00lib_write_tx_desc);
763
764/*
765 * Driver initialization handlers. 612 * Driver initialization handlers.
766 */ 613 */
767const struct rt2x00_rate rt2x00_supported_rates[12] = { 614const struct rt2x00_rate rt2x00_supported_rates[12] = {
@@ -977,6 +824,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
977 return status; 824 return status;
978 825
979 /* 826 /*
827 * Initialize HW fields.
828 */
829 rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues;
830
831 /*
980 * Register HW. 832 * Register HW.
981 */ 833 */
982 status = ieee80211_register_hw(rt2x00dev->hw); 834 status = ieee80211_register_hw(rt2x00dev->hw);
@@ -1331,7 +1183,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1331 * In that case we have disabled the TX queue and should 1183 * In that case we have disabled the TX queue and should
1332 * now enable it again 1184 * now enable it again
1333 */ 1185 */
1334 ieee80211_start_queues(rt2x00dev->hw); 1186 ieee80211_wake_queues(rt2x00dev->hw);
1335 1187
1336 /* 1188 /*
1337 * During interface iteration we might have changed the 1189 * During interface iteration we might have changed the
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 41ee02cd2825..c4ce534e3cdb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -26,6 +26,8 @@
26#ifndef RT2X00LIB_H 26#ifndef RT2X00LIB_H
27#define RT2X00LIB_H 27#define RT2X00LIB_H
28 28
29#include "rt2x00dump.h"
30
29/* 31/*
30 * Interval defines 32 * Interval defines
31 * Both the link tuner as the rfkill will be called once per second. 33 * Both the link tuner as the rfkill will be called once per second.
@@ -128,7 +130,8 @@ static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev)
128#ifdef CONFIG_RT2X00_LIB_DEBUGFS 130#ifdef CONFIG_RT2X00_LIB_DEBUGFS
129void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); 131void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
130void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); 132void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
131void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 133void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
134 enum rt2x00_dump_type type, struct sk_buff *skb);
132#else 135#else
133static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 136static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
134{ 137{
@@ -139,6 +142,7 @@ static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
139} 142}
140 143
141static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 144static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
145 enum rt2x00_dump_type type,
142 struct sk_buff *skb) 146 struct sk_buff *skb)
143{ 147{
144} 148}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 87e280a21971..b02dbc8a666e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -31,14 +31,15 @@
31 31
32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, 32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
33 struct data_queue *queue, 33 struct data_queue *queue,
34 struct sk_buff *frag_skb, 34 struct sk_buff *frag_skb)
35 struct ieee80211_tx_control *control)
36{ 35{
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
37 struct skb_frame_desc *skbdesc; 37 struct skb_frame_desc *skbdesc;
38 struct ieee80211_tx_info *rts_info;
38 struct sk_buff *skb; 39 struct sk_buff *skb;
39 int size; 40 int size;
40 41
41 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 42 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
42 size = sizeof(struct ieee80211_cts); 43 size = sizeof(struct ieee80211_cts);
43 else 44 else
44 size = sizeof(struct ieee80211_rts); 45 size = sizeof(struct ieee80211_rts);
@@ -52,13 +53,33 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
52 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); 53 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
53 skb_put(skb, size); 54 skb_put(skb, size);
54 55
55 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 56 /*
56 ieee80211_ctstoself_get(rt2x00dev->hw, control->vif, 57 * Copy TX information over from original frame to
57 frag_skb->data, frag_skb->len, control, 58 * RTS/CTS frame. Note that we set the no encryption flag
59 * since we don't want this frame to be encrypted.
60 * RTS frames should be acked, while CTS-to-self frames
61 * should not. The ready for TX flag is cleared to prevent
62 * it being automatically send when the descriptor is
63 * written to the hardware.
64 */
65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
66 rts_info = IEEE80211_SKB_CB(skb);
67 rts_info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
70
71 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
72 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
73 else
74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
75
76 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
77 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
78 frag_skb->data, size, tx_info,
58 (struct ieee80211_cts *)(skb->data)); 79 (struct ieee80211_cts *)(skb->data));
59 else 80 else
60 ieee80211_rts_get(rt2x00dev->hw, control->vif, 81 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
61 frag_skb->data, frag_skb->len, control, 82 frag_skb->data, size, tx_info,
62 (struct ieee80211_rts *)(skb->data)); 83 (struct ieee80211_rts *)(skb->data));
63 84
64 /* 85 /*
@@ -68,7 +89,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
68 memset(skbdesc, 0, sizeof(*skbdesc)); 89 memset(skbdesc, 0, sizeof(*skbdesc));
69 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 90 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
70 91
71 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) { 92 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) {
72 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 93 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
73 return NETDEV_TX_BUSY; 94 return NETDEV_TX_BUSY;
74 } 95 }
@@ -76,13 +97,13 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
76 return NETDEV_TX_OK; 97 return NETDEV_TX_OK;
77} 98}
78 99
79int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 100int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
80 struct ieee80211_tx_control *control)
81{ 101{
82 struct rt2x00_dev *rt2x00dev = hw->priv; 102 struct rt2x00_dev *rt2x00dev = hw->priv;
103 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
83 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 104 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
105 enum data_queue_qid qid = skb_get_queue_mapping(skb);
84 struct data_queue *queue; 106 struct data_queue *queue;
85 struct skb_frame_desc *skbdesc;
86 u16 frame_control; 107 u16 frame_control;
87 108
88 /* 109 /*
@@ -100,16 +121,15 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
100 /* 121 /*
101 * Determine which queue to put packet on. 122 * Determine which queue to put packet on.
102 */ 123 */
103 if (control->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM && 124 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
104 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) 125 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
105 queue = rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 126 queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
106 else 127 else
107 queue = rt2x00queue_get_queue(rt2x00dev, control->queue); 128 queue = rt2x00queue_get_queue(rt2x00dev, qid);
108 if (unlikely(!queue)) { 129 if (unlikely(!queue)) {
109 ERROR(rt2x00dev, 130 ERROR(rt2x00dev,
110 "Attempt to send packet over invalid queue %d.\n" 131 "Attempt to send packet over invalid queue %d.\n"
111 "Please file bug report to %s.\n", 132 "Please file bug report to %s.\n", qid, DRV_PROJECT);
112 control->queue, DRV_PROJECT);
113 dev_kfree_skb_any(skb); 133 dev_kfree_skb_any(skb);
114 return NETDEV_TX_OK; 134 return NETDEV_TX_OK;
115 } 135 }
@@ -119,38 +139,37 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
119 * create and queue that frame first. But make sure we have 139 * create and queue that frame first. But make sure we have
120 * at least enough entries available to send this CTS/RTS 140 * at least enough entries available to send this CTS/RTS
121 * frame as well as the data frame. 141 * frame as well as the data frame.
142 * Note that when the driver has set the set_rts_threshold()
143 * callback function it doesn't need software generation of
144 * neither RTS or CTS-to-self frames and handles everything
145 * inside the hardware.
122 */ 146 */
123 frame_control = le16_to_cpu(ieee80211hdr->frame_control); 147 frame_control = le16_to_cpu(ieee80211hdr->frame_control);
124 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) && 148 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) &&
125 (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS | 149 (tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS |
126 IEEE80211_TXCTL_USE_CTS_PROTECT))) { 150 IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
151 !rt2x00dev->ops->hw->set_rts_threshold) {
127 if (rt2x00queue_available(queue) <= 1) { 152 if (rt2x00queue_available(queue) <= 1) {
128 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 153 ieee80211_stop_queue(rt2x00dev->hw, qid);
129 return NETDEV_TX_BUSY; 154 return NETDEV_TX_BUSY;
130 } 155 }
131 156
132 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb, control)) { 157 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
133 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 158 ieee80211_stop_queue(rt2x00dev->hw, qid);
134 return NETDEV_TX_BUSY; 159 return NETDEV_TX_BUSY;
135 } 160 }
136 } 161 }
137 162
138 /* 163 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) {
139 * Initialize skb descriptor 164 ieee80211_stop_queue(rt2x00dev->hw, qid);
140 */
141 skbdesc = get_skb_frame_desc(skb);
142 memset(skbdesc, 0, sizeof(*skbdesc));
143
144 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
145 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
146 return NETDEV_TX_BUSY; 165 return NETDEV_TX_BUSY;
147 } 166 }
148 167
149 if (rt2x00queue_full(queue)) 168 if (rt2x00queue_full(queue))
150 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 169 ieee80211_stop_queue(rt2x00dev->hw, qid);
151 170
152 if (rt2x00dev->ops->lib->kick_tx_queue) 171 if (rt2x00dev->ops->lib->kick_tx_queue)
153 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 172 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, qid);
154 173
155 return NETDEV_TX_OK; 174 return NETDEV_TX_OK;
156} 175}
@@ -183,8 +202,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
183{ 202{
184 struct rt2x00_dev *rt2x00dev = hw->priv; 203 struct rt2x00_dev *rt2x00dev = hw->priv;
185 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 204 struct rt2x00_intf *intf = vif_to_intf(conf->vif);
186 struct data_queue *queue = 205 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
187 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
188 struct queue_entry *entry = NULL; 206 struct queue_entry *entry = NULL;
189 unsigned int i; 207 unsigned int i;
190 208
@@ -197,13 +215,12 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
197 return -ENODEV; 215 return -ENODEV;
198 216
199 /* 217 /*
200 * When we don't support mixed interfaces (a combination 218 * We don't support mixed combinations of sta and ap virtual
201 * of sta and ap virtual interfaces) then we can only 219 * interfaces. We can only add this interface when the rival
202 * add this interface when the rival interface count is 0. 220 * interface count is 0.
203 */ 221 */
204 if (!test_bit(DRIVER_SUPPORT_MIXED_INTERFACES, &rt2x00dev->flags) && 222 if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) ||
205 ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) || 223 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count))
206 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)))
207 return -ENOBUFS; 224 return -ENOBUFS;
208 225
209 /* 226 /*
@@ -378,9 +395,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
378 if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon) 395 if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon)
379 return 0; 396 return 0;
380 397
381 status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, 398 status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, conf->beacon);
382 conf->beacon,
383 conf->beacon_control);
384 if (status) 399 if (status)
385 dev_kfree_skb(conf->beacon); 400 dev_kfree_skb(conf->beacon);
386 401
@@ -454,10 +469,10 @@ int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
454 struct rt2x00_dev *rt2x00dev = hw->priv; 469 struct rt2x00_dev *rt2x00dev = hw->priv;
455 unsigned int i; 470 unsigned int i;
456 471
457 for (i = 0; i < hw->queues; i++) { 472 for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
458 stats->data[i].len = rt2x00dev->tx[i].length; 473 stats[i].len = rt2x00dev->tx[i].length;
459 stats->data[i].limit = rt2x00dev->tx[i].limit; 474 stats[i].limit = rt2x00dev->tx[i].limit;
460 stats->data[i].count = rt2x00dev->tx[i].count; 475 stats[i].count = rt2x00dev->tx[i].count;
461 } 476 }
462 477
463 return 0; 478 return 0;
@@ -515,7 +530,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
515} 530}
516EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); 531EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
517 532
518int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue_idx, 533int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
519 const struct ieee80211_tx_queue_params *params) 534 const struct ieee80211_tx_queue_params *params)
520{ 535{
521 struct rt2x00_dev *rt2x00dev = hw->priv; 536 struct rt2x00_dev *rt2x00dev = hw->priv;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 971af2546b59..70a3d135f64e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -35,42 +35,50 @@
35 * TX data handlers. 35 * TX data handlers.
36 */ 36 */
37int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 37int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
38 struct data_queue *queue, struct sk_buff *skb, 38 struct data_queue *queue, struct sk_buff *skb)
39 struct ieee80211_tx_control *control)
40{ 39{
41 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 40 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
42 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 41 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
43 struct skb_frame_desc *skbdesc; 42 struct skb_frame_desc *skbdesc;
43 struct txentry_desc txdesc;
44 u32 word; 44 u32 word;
45 45
46 if (rt2x00queue_full(queue)) 46 if (rt2x00queue_full(queue))
47 return -EINVAL; 47 return -EINVAL;
48 48
49 rt2x00_desc_read(priv_tx->desc, 0, &word); 49 rt2x00_desc_read(entry_priv->desc, 0, &word);
50 50
51 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || 51 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
52 rt2x00_get_field32(word, TXD_ENTRY_VALID)) { 52 rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
53 ERROR(rt2x00dev, 53 ERROR(rt2x00dev,
54 "Arrived at non-free entry in the non-full queue %d.\n" 54 "Arrived at non-free entry in the non-full queue %d.\n"
55 "Please file bug report to %s.\n", 55 "Please file bug report to %s.\n",
56 control->queue, DRV_PROJECT); 56 entry->queue->qid, DRV_PROJECT);
57 return -EINVAL; 57 return -EINVAL;
58 } 58 }
59 59
60 /* 60 /*
61 * Copy all TX descriptor information into txdesc,
62 * after that we are free to use the skb->cb array
63 * for our information.
64 */
65 entry->skb = skb;
66 rt2x00queue_create_tx_descriptor(entry, &txdesc);
67
68 /*
61 * Fill in skb descriptor 69 * Fill in skb descriptor
62 */ 70 */
63 skbdesc = get_skb_frame_desc(skb); 71 skbdesc = get_skb_frame_desc(skb);
72 memset(skbdesc, 0, sizeof(*skbdesc));
64 skbdesc->data = skb->data; 73 skbdesc->data = skb->data;
65 skbdesc->data_len = skb->len; 74 skbdesc->data_len = skb->len;
66 skbdesc->desc = priv_tx->desc; 75 skbdesc->desc = entry_priv->desc;
67 skbdesc->desc_len = queue->desc_size; 76 skbdesc->desc_len = queue->desc_size;
68 skbdesc->entry = entry; 77 skbdesc->entry = entry;
69 78
70 memcpy(&priv_tx->control, control, sizeof(priv_tx->control)); 79 memcpy(entry_priv->data, skb->data, skb->len);
71 memcpy(priv_tx->data, skb->data, skb->len);
72 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
73 80
81 rt2x00queue_write_tx_descriptor(entry, &txdesc);
74 rt2x00queue_index_inc(queue, Q_INDEX); 82 rt2x00queue_index_inc(queue, Q_INDEX);
75 83
76 return 0; 84 return 0;
@@ -84,7 +92,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
84{ 92{
85 struct data_queue *queue = rt2x00dev->rx; 93 struct data_queue *queue = rt2x00dev->rx;
86 struct queue_entry *entry; 94 struct queue_entry *entry;
87 struct queue_entry_priv_pci_rx *priv_rx; 95 struct queue_entry_priv_pci *entry_priv;
88 struct ieee80211_hdr *hdr; 96 struct ieee80211_hdr *hdr;
89 struct skb_frame_desc *skbdesc; 97 struct skb_frame_desc *skbdesc;
90 struct rxdone_entry_desc rxdesc; 98 struct rxdone_entry_desc rxdesc;
@@ -94,8 +102,8 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
94 102
95 while (1) { 103 while (1) {
96 entry = rt2x00queue_get_entry(queue, Q_INDEX); 104 entry = rt2x00queue_get_entry(queue, Q_INDEX);
97 priv_rx = entry->priv_data; 105 entry_priv = entry->priv_data;
98 rt2x00_desc_read(priv_rx->desc, 0, &word); 106 rt2x00_desc_read(entry_priv->desc, 0, &word);
99 107
100 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) 108 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
101 break; 109 break;
@@ -103,7 +111,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
103 memset(&rxdesc, 0, sizeof(rxdesc)); 111 memset(&rxdesc, 0, sizeof(rxdesc));
104 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 112 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
105 113
106 hdr = (struct ieee80211_hdr *)priv_rx->data; 114 hdr = (struct ieee80211_hdr *)entry_priv->data;
107 header_size = 115 header_size =
108 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); 116 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
109 117
@@ -123,7 +131,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
123 131
124 skb_reserve(entry->skb, align); 132 skb_reserve(entry->skb, align);
125 memcpy(skb_put(entry->skb, rxdesc.size), 133 memcpy(skb_put(entry->skb, rxdesc.size),
126 priv_rx->data, rxdesc.size); 134 entry_priv->data, rxdesc.size);
127 135
128 /* 136 /*
129 * Fill in skb descriptor 137 * Fill in skb descriptor
@@ -132,7 +140,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
132 memset(skbdesc, 0, sizeof(*skbdesc)); 140 memset(skbdesc, 0, sizeof(*skbdesc));
133 skbdesc->data = entry->skb->data; 141 skbdesc->data = entry->skb->data;
134 skbdesc->data_len = entry->skb->len; 142 skbdesc->data_len = entry->skb->len;
135 skbdesc->desc = priv_rx->desc; 143 skbdesc->desc = entry_priv->desc;
136 skbdesc->desc_len = queue->desc_size; 144 skbdesc->desc_len = queue->desc_size;
137 skbdesc->entry = entry; 145 skbdesc->entry = entry;
138 146
@@ -143,7 +151,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
143 151
144 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) { 152 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
145 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1); 153 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
146 rt2x00_desc_write(priv_rx->desc, 0, word); 154 rt2x00_desc_write(entry_priv->desc, 0, word);
147 } 155 }
148 156
149 rt2x00queue_index_inc(queue, Q_INDEX); 157 rt2x00queue_index_inc(queue, Q_INDEX);
@@ -154,10 +162,10 @@ EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
154void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry, 162void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
155 struct txdone_entry_desc *txdesc) 163 struct txdone_entry_desc *txdesc)
156{ 164{
157 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 165 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
166 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
158 u32 word; 167 u32 word;
159 168
160 txdesc->control = &priv_tx->control;
161 rt2x00lib_txdone(entry, txdesc); 169 rt2x00lib_txdone(entry, txdesc);
162 170
163 /* 171 /*
@@ -165,10 +173,10 @@ void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
165 */ 173 */
166 entry->flags = 0; 174 entry->flags = 0;
167 175
168 rt2x00_desc_read(priv_tx->desc, 0, &word); 176 rt2x00_desc_read(entry_priv->desc, 0, &word);
169 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0); 177 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
170 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0); 178 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
171 rt2x00_desc_write(priv_tx->desc, 0, word); 179 rt2x00_desc_write(entry_priv->desc, 0, word);
172 180
173 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 181 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
174 182
@@ -178,7 +186,7 @@ void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
178 * is reenabled when the txdone handler has finished. 186 * is reenabled when the txdone handler has finished.
179 */ 187 */
180 if (!rt2x00queue_full(entry->queue)) 188 if (!rt2x00queue_full(entry->queue))
181 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue); 189 ieee80211_wake_queue(rt2x00dev->hw, qid);
182 190
183} 191}
184EXPORT_SYMBOL_GPL(rt2x00pci_txdone); 192EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
@@ -217,14 +225,9 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
217 struct data_queue *queue) 225 struct data_queue *queue)
218{ 226{
219 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 227 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
220 struct queue_entry_priv_pci_rx *priv_rx; 228 struct queue_entry_priv_pci *entry_priv;
221 struct queue_entry_priv_pci_tx *priv_tx;
222 void *addr; 229 void *addr;
223 dma_addr_t dma; 230 dma_addr_t dma;
224 void *desc_addr;
225 dma_addr_t desc_dma;
226 void *data_addr;
227 dma_addr_t data_dma;
228 unsigned int i; 231 unsigned int i;
229 232
230 /* 233 /*
@@ -240,24 +243,11 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
240 * Initialize all queue entries to contain valid addresses. 243 * Initialize all queue entries to contain valid addresses.
241 */ 244 */
242 for (i = 0; i < queue->limit; i++) { 245 for (i = 0; i < queue->limit; i++) {
243 desc_addr = desc_offset(queue, addr, i); 246 entry_priv = queue->entries[i].priv_data;
244 desc_dma = desc_offset(queue, dma, i); 247 entry_priv->desc = desc_offset(queue, addr, i);
245 data_addr = data_offset(queue, addr, i); 248 entry_priv->desc_dma = desc_offset(queue, dma, i);
246 data_dma = data_offset(queue, dma, i); 249 entry_priv->data = data_offset(queue, addr, i);
247 250 entry_priv->data_dma = data_offset(queue, dma, i);
248 if (queue->qid == QID_RX) {
249 priv_rx = queue->entries[i].priv_data;
250 priv_rx->desc = desc_addr;
251 priv_rx->desc_dma = desc_dma;
252 priv_rx->data = data_addr;
253 priv_rx->data_dma = data_dma;
254 } else {
255 priv_tx = queue->entries[i].priv_data;
256 priv_tx->desc = desc_addr;
257 priv_tx->desc_dma = desc_dma;
258 priv_tx->data = data_addr;
259 priv_tx->data_dma = data_dma;
260 }
261 } 251 }
262 252
263 return 0; 253 return 0;
@@ -267,28 +257,13 @@ static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
267 struct data_queue *queue) 257 struct data_queue *queue)
268{ 258{
269 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 259 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
270 struct queue_entry_priv_pci_rx *priv_rx; 260 struct queue_entry_priv_pci *entry_priv =
271 struct queue_entry_priv_pci_tx *priv_tx; 261 queue->entries[0].priv_data;
272 void *data_addr;
273 dma_addr_t data_dma;
274
275 if (queue->qid == QID_RX) {
276 priv_rx = queue->entries[0].priv_data;
277 data_addr = priv_rx->data;
278 data_dma = priv_rx->data_dma;
279
280 priv_rx->data = NULL;
281 } else {
282 priv_tx = queue->entries[0].priv_data;
283 data_addr = priv_tx->data;
284 data_dma = priv_tx->data_dma;
285
286 priv_tx->data = NULL;
287 }
288 262
289 if (data_addr) 263 if (entry_priv->data)
290 pci_free_consistent(pci_dev, dma_size(queue), 264 pci_free_consistent(pci_dev, dma_size(queue),
291 data_addr, data_dma); 265 entry_priv->data, entry_priv->data_dma);
266 entry_priv->data = NULL;
292} 267}
293 268
294int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) 269int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 9d1cdb99431c..37c851e442c1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -91,40 +91,22 @@ rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
91 * TX data handlers. 91 * TX data handlers.
92 */ 92 */
93int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 93int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
94 struct data_queue *queue, struct sk_buff *skb, 94 struct data_queue *queue, struct sk_buff *skb);
95 struct ieee80211_tx_control *control);
96 95
97/** 96/**
98 * struct queue_entry_priv_pci_rx: Per RX entry PCI specific information 97 * struct queue_entry_priv_pci: Per entry PCI specific information
99 *
100 * @desc: Pointer to device descriptor.
101 * @data: Pointer to device's entry memory.
102 * @dma: DMA pointer to &data.
103 */
104struct queue_entry_priv_pci_rx {
105 __le32 *desc;
106 dma_addr_t desc_dma;
107
108 void *data;
109 dma_addr_t data_dma;
110};
111
112/**
113 * struct queue_entry_priv_pci_tx: Per TX entry PCI specific information
114 * 98 *
115 * @desc: Pointer to device descriptor 99 * @desc: Pointer to device descriptor
100 * @desc_dma: DMA pointer to &desc.
116 * @data: Pointer to device's entry memory. 101 * @data: Pointer to device's entry memory.
117 * @dma: DMA pointer to &data. 102 * @data_dma: DMA pointer to &data.
118 * @control: mac80211 control structure used to transmit data.
119 */ 103 */
120struct queue_entry_priv_pci_tx { 104struct queue_entry_priv_pci {
121 __le32 *desc; 105 __le32 *desc;
122 dma_addr_t desc_dma; 106 dma_addr_t desc_dma;
123 107
124 void *data; 108 void *data;
125 dma_addr_t data_dma; 109 dma_addr_t data_dma;
126
127 struct ieee80211_tx_control control;
128}; 110};
129 111
130/** 112/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 659e9f44c40c..e69ef4b19239 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -29,20 +29,179 @@
29#include "rt2x00.h" 29#include "rt2x00.h"
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31 31
32void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
33 struct txentry_desc *txdesc)
34{
35 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
37 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
38 struct ieee80211_rate *rate =
39 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
40 const struct rt2x00_rate *hwrate;
41 unsigned int data_length;
42 unsigned int duration;
43 unsigned int residual;
44 u16 frame_control;
45
46 memset(txdesc, 0, sizeof(*txdesc));
47
48 /*
49 * Initialize information from queue
50 */
51 txdesc->queue = entry->queue->qid;
52 txdesc->cw_min = entry->queue->cw_min;
53 txdesc->cw_max = entry->queue->cw_max;
54 txdesc->aifs = entry->queue->aifs;
55
56 /* Data length should be extended with 4 bytes for CRC */
57 data_length = entry->skb->len + 4;
58
59 /*
60 * Read required fields from ieee80211 header.
61 */
62 frame_control = le16_to_cpu(hdr->frame_control);
63
64 /*
65 * Check whether this frame is to be acked.
66 */
67 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
68 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
69
70 /*
71 * Check if this is a RTS/CTS frame
72 */
73 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
74 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
75 if (is_rts_frame(frame_control))
76 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
77 else
78 __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
79 if (tx_info->control.rts_cts_rate_idx >= 0)
80 rate =
81 ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
82 }
83
84 /*
85 * Determine retry information.
86 */
87 txdesc->retry_limit = tx_info->control.retry_limit;
88 if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
89 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
90
91 /*
92 * Check if more fragments are pending
93 */
94 if (ieee80211_get_morefrag(hdr)) {
95 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
96 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
97 }
98
99 /*
100 * Beacons and probe responses require the tsf timestamp
101 * to be inserted into the frame.
102 */
103 if (txdesc->queue == QID_BEACON || is_probe_resp(frame_control))
104 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
105
106 /*
107 * Determine with what IFS priority this frame should be send.
108 * Set ifs to IFS_SIFS when the this is not the first fragment,
109 * or this fragment came after RTS/CTS.
110 */
111 if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
112 txdesc->ifs = IFS_SIFS;
113 } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
114 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
115 txdesc->ifs = IFS_BACKOFF;
116 } else {
117 txdesc->ifs = IFS_SIFS;
118 }
119
120 /*
121 * PLCP setup
122 * Length calculation depends on OFDM/CCK rate.
123 */
124 hwrate = rt2x00_get_rate(rate->hw_value);
125 txdesc->signal = hwrate->plcp;
126 txdesc->service = 0x04;
127
128 if (hwrate->flags & DEV_RATE_OFDM) {
129 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);
130
131 txdesc->length_high = (data_length >> 6) & 0x3f;
132 txdesc->length_low = data_length & 0x3f;
133 } else {
134 /*
135 * Convert length to microseconds.
136 */
137 residual = get_duration_res(data_length, hwrate->bitrate);
138 duration = get_duration(data_length, hwrate->bitrate);
139
140 if (residual != 0) {
141 duration++;
142
143 /*
144 * Check if we need to set the Length Extension
145 */
146 if (hwrate->bitrate == 110 && residual <= 30)
147 txdesc->service |= 0x80;
148 }
149
150 txdesc->length_high = (duration >> 8) & 0xff;
151 txdesc->length_low = duration & 0xff;
152
153 /*
154 * When preamble is enabled we should set the
155 * preamble bit for the signal.
156 */
157 if (rt2x00_get_rate_preamble(rate->hw_value))
158 txdesc->signal |= 0x08;
159 }
160}
161EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);
162
163void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
164 struct txentry_desc *txdesc)
165{
166 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
167 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
168
169 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
170
171 /*
172 * All processing on the frame has been completed, this means
173 * it is now ready to be dumped to userspace through debugfs.
174 */
175 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
176
177 /*
178 * We are done writing the frame to the queue entry,
179 * also kick the queue in case the correct flags are set,
180 * note that this will automatically filter beacons and
181 * RTS/CTS frames since those frames don't have this flag
182 * set.
183 */
184 if (rt2x00dev->ops->lib->kick_tx_queue &&
185 !(skbdesc->flags & FRAME_DESC_DRIVER_GENERATED))
186 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev,
187 entry->queue->qid);
188}
189EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);
190
32struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 191struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
33 const unsigned int queue) 192 const enum data_queue_qid queue)
34{ 193{
35 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 194 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
36 195
37 if (queue < rt2x00dev->hw->queues && rt2x00dev->tx) 196 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
38 return &rt2x00dev->tx[queue]; 197 return &rt2x00dev->tx[queue];
39 198
40 if (!rt2x00dev->bcn) 199 if (!rt2x00dev->bcn)
41 return NULL; 200 return NULL;
42 201
43 if (queue == RT2X00_BCN_QUEUE_BEACON) 202 if (queue == QID_BEACON)
44 return &rt2x00dev->bcn[0]; 203 return &rt2x00dev->bcn[0];
45 else if (queue == RT2X00_BCN_QUEUE_ATIM && atim) 204 else if (queue == QID_ATIM && atim)
46 return &rt2x00dev->bcn[1]; 205 return &rt2x00dev->bcn[1];
47 206
48 return NULL; 207 return NULL;
@@ -255,11 +414,11 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
255 /* 414 /*
256 * We need the following queues: 415 * We need the following queues:
257 * RX: 1 416 * RX: 1
258 * TX: hw->queues 417 * TX: ops->tx_queues
259 * Beacon: 1 418 * Beacon: 1
260 * Atim: 1 (if required) 419 * Atim: 1 (if required)
261 */ 420 */
262 rt2x00dev->data_queues = 2 + rt2x00dev->hw->queues + req_atim; 421 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
263 422
264 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); 423 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
265 if (!queue) { 424 if (!queue) {
@@ -272,7 +431,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
272 */ 431 */
273 rt2x00dev->rx = queue; 432 rt2x00dev->rx = queue;
274 rt2x00dev->tx = &queue[1]; 433 rt2x00dev->tx = &queue[1];
275 rt2x00dev->bcn = &queue[1 + rt2x00dev->hw->queues]; 434 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
276 435
277 /* 436 /*
278 * Initialize queue parameters. 437 * Initialize queue parameters.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 7027c9f47d3f..4d00ced14cc7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -54,6 +54,17 @@
54 54
55/** 55/**
56 * enum data_queue_qid: Queue identification 56 * enum data_queue_qid: Queue identification
57 *
58 * @QID_AC_BE: AC BE queue
59 * @QID_AC_BK: AC BK queue
60 * @QID_AC_VI: AC VI queue
61 * @QID_AC_VO: AC VO queue
62 * @QID_HCCA: HCCA queue
63 * @QID_MGMT: MGMT queue (prio queue)
64 * @QID_RX: RX queue
65 * @QID_OTHER: None of the above (don't use, only present for completeness)
66 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
67 * @QID_ATIM: Atim queue (value unspeficied, don't send it to device)
57 */ 68 */
58enum data_queue_qid { 69enum data_queue_qid {
59 QID_AC_BE = 0, 70 QID_AC_BE = 0,
@@ -64,21 +75,8 @@ enum data_queue_qid {
64 QID_MGMT = 13, 75 QID_MGMT = 13,
65 QID_RX = 14, 76 QID_RX = 14,
66 QID_OTHER = 15, 77 QID_OTHER = 15,
67}; 78 QID_BEACON,
68 79 QID_ATIM,
69/**
70 * enum rt2x00_bcn_queue: Beacon queue index
71 *
72 * Start counting with a high offset, this because this enumeration
73 * supplements &enum ieee80211_tx_queue and we should prevent value
74 * conflicts.
75 *
76 * @RT2X00_BCN_QUEUE_BEACON: Beacon queue
77 * @RT2X00_BCN_QUEUE_ATIM: Atim queue (sends frame after beacon)
78 */
79enum rt2x00_bcn_queue {
80 RT2X00_BCN_QUEUE_BEACON = 100,
81 RT2X00_BCN_QUEUE_ATIM = 101,
82}; 80};
83 81
84/** 82/**
@@ -94,38 +92,39 @@ enum skb_frame_desc_flags {
94/** 92/**
95 * struct skb_frame_desc: Descriptor information for the skb buffer 93 * struct skb_frame_desc: Descriptor information for the skb buffer
96 * 94 *
97 * This structure is placed over the skb->cb array, this means that 95 * This structure is placed over the driver_data array, this means that
98 * this structure should not exceed the size of that array (48 bytes). 96 * this structure should not exceed the size of that array (40 bytes).
99 * 97 *
100 * @flags: Frame flags, see &enum skb_frame_desc_flags. 98 * @flags: Frame flags, see &enum skb_frame_desc_flags.
101 * @frame_type: Frame type, see &enum rt2x00_dump_type.
102 * @data: Pointer to data part of frame (Start of ieee80211 header). 99 * @data: Pointer to data part of frame (Start of ieee80211 header).
103 * @desc: Pointer to descriptor part of the frame. 100 * @desc: Pointer to descriptor part of the frame.
104 * Note that this pointer could point to something outside 101 * Note that this pointer could point to something outside
105 * of the scope of the skb->data pointer. 102 * of the scope of the skb->data pointer.
106 * @data_len: Length of the frame data. 103 * @data_len: Length of the frame data.
107 * @desc_len: Length of the frame descriptor. 104 * @desc_len: Length of the frame descriptor.
108
109 * @entry: The entry to which this sk buffer belongs. 105 * @entry: The entry to which this sk buffer belongs.
110 */ 106 */
111struct skb_frame_desc { 107struct skb_frame_desc {
112 unsigned int flags; 108 unsigned int flags;
113 109
114 unsigned int frame_type; 110 unsigned short data_len;
111 unsigned short desc_len;
115 112
116 void *data; 113 void *data;
117 void *desc; 114 void *desc;
118 115
119 unsigned int data_len;
120 unsigned int desc_len;
121
122 struct queue_entry *entry; 116 struct queue_entry *entry;
123}; 117};
124 118
119/**
120 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
121 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
122 */
125static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb) 123static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
126{ 124{
127 BUILD_BUG_ON(sizeof(struct skb_frame_desc) > sizeof(skb->cb)); 125 BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
128 return (struct skb_frame_desc *)&skb->cb[0]; 126 IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
127 return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
129} 128}
130 129
131/** 130/**
@@ -161,18 +160,32 @@ struct rxdone_entry_desc {
161}; 160};
162 161
163/** 162/**
163 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
164 *
165 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
166 * @TXDONE_SUCCESS: Frame was successfully send
167 * @TXDONE_FAILURE: Frame was not successfully send
168 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
169 * frame transmission failed due to excessive retries.
170 */
171enum txdone_entry_desc_flags {
172 TXDONE_UNKNOWN = 1 << 0,
173 TXDONE_SUCCESS = 1 << 1,
174 TXDONE_FAILURE = 1 << 2,
175 TXDONE_EXCESSIVE_RETRY = 1 << 3,
176};
177
178/**
164 * struct txdone_entry_desc: TX done entry descriptor 179 * struct txdone_entry_desc: TX done entry descriptor
165 * 180 *
166 * Summary of information that has been read from the TX frame descriptor 181 * Summary of information that has been read from the TX frame descriptor
167 * after the device is done with transmission. 182 * after the device is done with transmission.
168 * 183 *
169 * @control: Control structure which was used to transmit the frame. 184 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
170 * @status: TX status (See &enum tx_status).
171 * @retry: Retry count. 185 * @retry: Retry count.
172 */ 186 */
173struct txdone_entry_desc { 187struct txdone_entry_desc {
174 struct ieee80211_tx_control *control; 188 unsigned long flags;
175 int status;
176 int retry; 189 int retry;
177}; 190};
178 191
@@ -180,19 +193,25 @@ struct txdone_entry_desc {
180 * enum txentry_desc_flags: Status flags for TX entry descriptor 193 * enum txentry_desc_flags: Status flags for TX entry descriptor
181 * 194 *
182 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. 195 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
196 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
183 * @ENTRY_TXD_OFDM_RATE: This frame is send out with an OFDM rate. 197 * @ENTRY_TXD_OFDM_RATE: This frame is send out with an OFDM rate.
198 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
184 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. 199 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
185 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted. 200 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
186 * @ENTRY_TXD_BURST: This frame belongs to the same burst event. 201 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
187 * @ENTRY_TXD_ACK: An ACK is required for this frame. 202 * @ENTRY_TXD_ACK: An ACK is required for this frame.
203 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
188 */ 204 */
189enum txentry_desc_flags { 205enum txentry_desc_flags {
190 ENTRY_TXD_RTS_FRAME, 206 ENTRY_TXD_RTS_FRAME,
207 ENTRY_TXD_CTS_FRAME,
191 ENTRY_TXD_OFDM_RATE, 208 ENTRY_TXD_OFDM_RATE,
209 ENTRY_TXD_FIRST_FRAGMENT,
192 ENTRY_TXD_MORE_FRAG, 210 ENTRY_TXD_MORE_FRAG,
193 ENTRY_TXD_REQ_TIMESTAMP, 211 ENTRY_TXD_REQ_TIMESTAMP,
194 ENTRY_TXD_BURST, 212 ENTRY_TXD_BURST,
195 ENTRY_TXD_ACK, 213 ENTRY_TXD_ACK,
214 ENTRY_TXD_RETRY_MODE,
196}; 215};
197 216
198/** 217/**
@@ -206,6 +225,7 @@ enum txentry_desc_flags {
206 * @length_low: PLCP length low word. 225 * @length_low: PLCP length low word.
207 * @signal: PLCP signal. 226 * @signal: PLCP signal.
208 * @service: PLCP service. 227 * @service: PLCP service.
228 * @retry_limit: Max number of retries.
209 * @aifs: AIFS value. 229 * @aifs: AIFS value.
210 * @ifs: IFS value. 230 * @ifs: IFS value.
211 * @cw_min: cwmin value. 231 * @cw_min: cwmin value.
@@ -221,10 +241,11 @@ struct txentry_desc {
221 u16 signal; 241 u16 signal;
222 u16 service; 242 u16 service;
223 243
224 int aifs; 244 short retry_limit;
225 int ifs; 245 short aifs;
226 int cw_min; 246 short ifs;
227 int cw_max; 247 short cw_min;
248 short cw_max;
228}; 249};
229 250
230/** 251/**
@@ -240,7 +261,6 @@ struct txentry_desc {
240 * encryption or decryption. The entry should only be touched after 261 * encryption or decryption. The entry should only be touched after
241 * the device has signaled it is done with it. 262 * the device has signaled it is done with it.
242 */ 263 */
243
244enum queue_entry_flags { 264enum queue_entry_flags {
245 ENTRY_BCN_ASSIGNED, 265 ENTRY_BCN_ASSIGNED,
246 ENTRY_OWNER_DEVICE_DATA, 266 ENTRY_OWNER_DEVICE_DATA,
@@ -369,7 +389,7 @@ struct data_queue_desc {
369 * the end of the TX queue array. 389 * the end of the TX queue array.
370 */ 390 */
371#define tx_queue_end(__dev) \ 391#define tx_queue_end(__dev) \
372 &(__dev)->tx[(__dev)->hw->queues] 392 &(__dev)->tx[(__dev)->ops->tx_queues]
373 393
374/** 394/**
375 * queue_loop - Loop through the queues within a specific range (HELPER MACRO). 395 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 0325bed2fbf5..3f255df58b78 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -27,17 +27,6 @@
27#define RT2X00REG_H 27#define RT2X00REG_H
28 28
29/* 29/*
30 * TX result flags.
31 */
32enum tx_status {
33 TX_SUCCESS = 0,
34 TX_SUCCESS_RETRY = 1,
35 TX_FAIL_RETRY = 2,
36 TX_FAIL_INVALID = 3,
37 TX_FAIL_OTHER = 4,
38};
39
40/*
41 * Antenna values 30 * Antenna values
42 */ 31 */
43enum antenna { 32enum antenna {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 5a331674dcb2..52d12fdc0ccf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -129,9 +129,9 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
129{ 129{
130 struct queue_entry *entry = (struct queue_entry *)urb->context; 130 struct queue_entry *entry = (struct queue_entry *)urb->context;
131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
132 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
133 struct txdone_entry_desc txdesc; 132 struct txdone_entry_desc txdesc;
134 __le32 *txd = (__le32 *)entry->skb->data; 133 __le32 *txd = (__le32 *)entry->skb->data;
134 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
135 u32 word; 135 u32 word;
136 136
137 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 137 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
@@ -147,10 +147,18 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
147 147
148 /* 148 /*
149 * Obtain the status about this packet. 149 * Obtain the status about this packet.
150 * Note that when the status is 0 it does not mean the
151 * frame was send out correctly. It only means the frame
152 * was succesfully pushed to the hardware, we have no
153 * way to determine the transmission status right now.
154 * (Only indirectly by looking at the failed TX counters
155 * in the register).
150 */ 156 */
151 txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY; 157 if (!urb->status)
158 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
159 else
160 __set_bit(TXDONE_FAILURE, &txdesc.flags);
152 txdesc.retry = 0; 161 txdesc.retry = 0;
153 txdesc.control = &priv_tx->control;
154 162
155 rt2x00lib_txdone(entry, &txdesc); 163 rt2x00lib_txdone(entry, &txdesc);
156 164
@@ -166,17 +174,17 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
166 * is reenabled when the txdone handler has finished. 174 * is reenabled when the txdone handler has finished.
167 */ 175 */
168 if (!rt2x00queue_full(entry->queue)) 176 if (!rt2x00queue_full(entry->queue))
169 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue); 177 ieee80211_wake_queue(rt2x00dev->hw, qid);
170} 178}
171 179
172int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 180int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
173 struct data_queue *queue, struct sk_buff *skb, 181 struct data_queue *queue, struct sk_buff *skb)
174 struct ieee80211_tx_control *control)
175{ 182{
176 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 183 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
177 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 184 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
178 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data; 185 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
179 struct skb_frame_desc *skbdesc; 186 struct skb_frame_desc *skbdesc;
187 struct txentry_desc txdesc;
180 u32 length; 188 u32 length;
181 189
182 if (rt2x00queue_full(queue)) 190 if (rt2x00queue_full(queue))
@@ -186,11 +194,19 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
186 ERROR(rt2x00dev, 194 ERROR(rt2x00dev,
187 "Arrived at non-free entry in the non-full queue %d.\n" 195 "Arrived at non-free entry in the non-full queue %d.\n"
188 "Please file bug report to %s.\n", 196 "Please file bug report to %s.\n",
189 control->queue, DRV_PROJECT); 197 entry->queue->qid, DRV_PROJECT);
190 return -EINVAL; 198 return -EINVAL;
191 } 199 }
192 200
193 /* 201 /*
202 * Copy all TX descriptor information into txdesc,
203 * after that we are free to use the skb->cb array
204 * for our information.
205 */
206 entry->skb = skb;
207 rt2x00queue_create_tx_descriptor(entry, &txdesc);
208
209 /*
194 * Add the descriptor in front of the skb. 210 * Add the descriptor in front of the skb.
195 */ 211 */
196 skb_push(skb, queue->desc_size); 212 skb_push(skb, queue->desc_size);
@@ -200,14 +216,14 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
200 * Fill in skb descriptor 216 * Fill in skb descriptor
201 */ 217 */
202 skbdesc = get_skb_frame_desc(skb); 218 skbdesc = get_skb_frame_desc(skb);
219 memset(skbdesc, 0, sizeof(*skbdesc));
203 skbdesc->data = skb->data + queue->desc_size; 220 skbdesc->data = skb->data + queue->desc_size;
204 skbdesc->data_len = skb->len - queue->desc_size; 221 skbdesc->data_len = skb->len - queue->desc_size;
205 skbdesc->desc = skb->data; 222 skbdesc->desc = skb->data;
206 skbdesc->desc_len = queue->desc_size; 223 skbdesc->desc_len = queue->desc_size;
207 skbdesc->entry = entry; 224 skbdesc->entry = entry;
208 225
209 memcpy(&priv_tx->control, control, sizeof(priv_tx->control)); 226 rt2x00queue_write_tx_descriptor(entry, &txdesc);
210 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
211 227
212 /* 228 /*
213 * USB devices cannot blindly pass the skb->len as the 229 * USB devices cannot blindly pass the skb->len as the
@@ -220,9 +236,9 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
220 * Initialize URB and send the frame to the device. 236 * Initialize URB and send the frame to the device.
221 */ 237 */
222 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 238 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
223 usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1), 239 usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
224 skb->data, length, rt2x00usb_interrupt_txdone, entry); 240 skb->data, length, rt2x00usb_interrupt_txdone, entry);
225 usb_submit_urb(priv_tx->urb, GFP_ATOMIC); 241 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
226 242
227 rt2x00queue_index_inc(queue, Q_INDEX); 243 rt2x00queue_index_inc(queue, Q_INDEX);
228 244
@@ -237,22 +253,35 @@ static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
237{ 253{
238 struct sk_buff *skb; 254 struct sk_buff *skb;
239 unsigned int frame_size; 255 unsigned int frame_size;
256 unsigned int reserved_size;
240 257
241 /* 258 /*
242 * As alignment we use 2 and not NET_IP_ALIGN because we need 259 * The frame size includes descriptor size, because the
243 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN 260 * hardware directly receive the frame into the skbuffer.
244 * can be 0 on some hardware). We use these 2 bytes for frame
245 * alignment later, we assume that the chance that
246 * header_size % 4 == 2 is bigger then header_size % 2 == 0
247 * and thus optimize alignment by reserving the 2 bytes in
248 * advance.
249 */ 261 */
250 frame_size = queue->data_size + queue->desc_size; 262 frame_size = queue->data_size + queue->desc_size;
251 skb = dev_alloc_skb(queue->desc_size + frame_size + 2); 263
264 /*
265 * For the allocation we should keep a few things in mind:
266 * 1) 4byte alignment of 802.11 payload
267 *
268 * For (1) we need at most 4 bytes to guarentee the correct
269 * alignment. We are going to optimize the fact that the chance
270 * that the 802.11 header_size % 4 == 2 is much bigger then
271 * anything else. However since we need to move the frame up
272 * to 3 bytes to the front, which means we need to preallocate
273 * 6 bytes.
274 */
275 reserved_size = 6;
276
277 /*
278 * Allocate skbuffer.
279 */
280 skb = dev_alloc_skb(frame_size + reserved_size);
252 if (!skb) 281 if (!skb)
253 return NULL; 282 return NULL;
254 283
255 skb_reserve(skb, queue->desc_size + 2); 284 skb_reserve(skb, reserved_size);
256 skb_put(skb, frame_size); 285 skb_put(skb, frame_size);
257 286
258 return skb; 287 return skb;
@@ -265,7 +294,8 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
265 struct sk_buff *skb; 294 struct sk_buff *skb;
266 struct skb_frame_desc *skbdesc; 295 struct skb_frame_desc *skbdesc;
267 struct rxdone_entry_desc rxdesc; 296 struct rxdone_entry_desc rxdesc;
268 int header_size; 297 unsigned int header_size;
298 unsigned int align;
269 299
270 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 300 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
271 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 301 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
@@ -289,19 +319,29 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
289 memset(&rxdesc, 0, sizeof(rxdesc)); 319 memset(&rxdesc, 0, sizeof(rxdesc));
290 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 320 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
291 321
322 header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
323
292 /* 324 /*
293 * The data behind the ieee80211 header must be 325 * The data behind the ieee80211 header must be
294 * aligned on a 4 byte boundary. 326 * aligned on a 4 byte boundary. We already reserved
327 * 2 bytes for header_size % 4 == 2 optimization.
328 * To determine the number of bytes which the data
329 * should be moved to the left, we must add these
330 * 2 bytes to the header_size.
295 */ 331 */
296 header_size = ieee80211_get_hdrlen_from_skb(entry->skb); 332 align = (header_size + 2) % 4;
297 if (header_size % 4 == 0) { 333
298 skb_push(entry->skb, 2); 334 if (align) {
299 memmove(entry->skb->data, entry->skb->data + 2, 335 skb_push(entry->skb, align);
300 entry->skb->len - 2); 336 /* Move entire frame in 1 command */
301 skbdesc->data = entry->skb->data; 337 memmove(entry->skb->data, entry->skb->data + align,
302 skb_trim(entry->skb,entry->skb->len - 2); 338 rxdesc.size);
303 } 339 }
304 340
341 /* Update data pointers, trim buffer to correct size */
342 skbdesc->data = entry->skb->data;
343 skb_trim(entry->skb, rxdesc.size);
344
305 /* 345 /*
306 * Allocate a new sk buffer to replace the current one. 346 * Allocate a new sk buffer to replace the current one.
307 * If allocation fails, we should drop the current frame 347 * If allocation fails, we should drop the current frame
@@ -338,44 +378,28 @@ skip_entry:
338 */ 378 */
339void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) 379void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
340{ 380{
341 struct queue_entry_priv_usb_rx *priv_rx; 381 struct queue_entry_priv_usb *entry_priv;
342 struct queue_entry_priv_usb_tx *priv_tx; 382 struct queue_entry_priv_usb_bcn *bcn_priv;
343 struct queue_entry_priv_usb_bcn *priv_bcn;
344 struct data_queue *queue;
345 unsigned int i; 383 unsigned int i;
346 384
347 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000, 385 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
348 REGISTER_TIMEOUT); 386 REGISTER_TIMEOUT);
349 387
350 /* 388 /*
351 * Cancel all queues. 389 * Cancel all queues.
352 */ 390 */
353 for (i = 0; i < rt2x00dev->rx->limit; i++) { 391 for (i = 0; i < rt2x00dev->rx->limit; i++) {
354 priv_rx = rt2x00dev->rx->entries[i].priv_data; 392 entry_priv = rt2x00dev->rx->entries[i].priv_data;
355 usb_kill_urb(priv_rx->urb); 393 usb_kill_urb(entry_priv->urb);
356 }
357
358 tx_queue_for_each(rt2x00dev, queue) {
359 for (i = 0; i < queue->limit; i++) {
360 priv_tx = queue->entries[i].priv_data;
361 usb_kill_urb(priv_tx->urb);
362 }
363 } 394 }
364 395
396 /*
397 * Kill guardian urb.
398 */
365 for (i = 0; i < rt2x00dev->bcn->limit; i++) { 399 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
366 priv_bcn = rt2x00dev->bcn->entries[i].priv_data; 400 bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
367 usb_kill_urb(priv_bcn->urb); 401 if (bcn_priv->guardian_urb)
368 402 usb_kill_urb(bcn_priv->guardian_urb);
369 if (priv_bcn->guardian_urb)
370 usb_kill_urb(priv_bcn->guardian_urb);
371 }
372
373 if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
374 return;
375
376 for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
377 priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
378 usb_kill_urb(priv_tx->urb);
379 } 403 }
380} 404}
381EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 405EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
@@ -387,15 +411,15 @@ void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
387 struct queue_entry *entry) 411 struct queue_entry *entry)
388{ 412{
389 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 413 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
390 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data; 414 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
391 415
392 usb_fill_bulk_urb(priv_rx->urb, usb_dev, 416 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
393 usb_rcvbulkpipe(usb_dev, 1), 417 usb_rcvbulkpipe(usb_dev, 1),
394 entry->skb->data, entry->skb->len, 418 entry->skb->data, entry->skb->len,
395 rt2x00usb_interrupt_rxdone, entry); 419 rt2x00usb_interrupt_rxdone, entry);
396 420
397 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 421 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
398 usb_submit_urb(priv_rx->urb, GFP_ATOMIC); 422 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
399} 423}
400EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 424EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
401 425
@@ -409,38 +433,31 @@ EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
409static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, 433static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
410 struct data_queue *queue) 434 struct data_queue *queue)
411{ 435{
412 struct queue_entry_priv_usb_rx *priv_rx; 436 struct queue_entry_priv_usb *entry_priv;
413 struct queue_entry_priv_usb_tx *priv_tx; 437 struct queue_entry_priv_usb_bcn *bcn_priv;
414 struct queue_entry_priv_usb_bcn *priv_bcn;
415 struct urb *urb;
416 unsigned int guardian =
417 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
418 unsigned int i; 438 unsigned int i;
419 439
440 for (i = 0; i < queue->limit; i++) {
441 entry_priv = queue->entries[i].priv_data;
442 entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
443 if (!entry_priv->urb)
444 return -ENOMEM;
445 }
446
420 /* 447 /*
421 * Allocate the URB's 448 * If this is not the beacon queue or
449 * no guardian byte was required for the beacon,
450 * then we are done.
422 */ 451 */
452 if (rt2x00dev->bcn != queue ||
453 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
454 return 0;
455
423 for (i = 0; i < queue->limit; i++) { 456 for (i = 0; i < queue->limit; i++) {
424 urb = usb_alloc_urb(0, GFP_KERNEL); 457 bcn_priv = queue->entries[i].priv_data;
425 if (!urb) 458 bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
459 if (!bcn_priv->guardian_urb)
426 return -ENOMEM; 460 return -ENOMEM;
427
428 if (queue->qid == QID_RX) {
429 priv_rx = queue->entries[i].priv_data;
430 priv_rx->urb = urb;
431 } else if (queue->qid == QID_MGMT && guardian) {
432 priv_bcn = queue->entries[i].priv_data;
433 priv_bcn->urb = urb;
434
435 urb = usb_alloc_urb(0, GFP_KERNEL);
436 if (!urb)
437 return -ENOMEM;
438
439 priv_bcn->guardian_urb = urb;
440 } else {
441 priv_tx = queue->entries[i].priv_data;
442 priv_tx->urb = urb;
443 }
444 } 461 }
445 462
446 return 0; 463 return 0;
@@ -449,38 +466,35 @@ static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
449static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, 466static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
450 struct data_queue *queue) 467 struct data_queue *queue)
451{ 468{
452 struct queue_entry_priv_usb_rx *priv_rx; 469 struct queue_entry_priv_usb *entry_priv;
453 struct queue_entry_priv_usb_tx *priv_tx; 470 struct queue_entry_priv_usb_bcn *bcn_priv;
454 struct queue_entry_priv_usb_bcn *priv_bcn;
455 struct urb *urb;
456 unsigned int guardian =
457 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
458 unsigned int i; 471 unsigned int i;
459 472
460 if (!queue->entries) 473 if (!queue->entries)
461 return; 474 return;
462 475
463 for (i = 0; i < queue->limit; i++) { 476 for (i = 0; i < queue->limit; i++) {
464 if (queue->qid == QID_RX) { 477 entry_priv = queue->entries[i].priv_data;
465 priv_rx = queue->entries[i].priv_data; 478 usb_kill_urb(entry_priv->urb);
466 urb = priv_rx->urb; 479 usb_free_urb(entry_priv->urb);
467 } else if (queue->qid == QID_MGMT && guardian) {
468 priv_bcn = queue->entries[i].priv_data;
469
470 usb_kill_urb(priv_bcn->guardian_urb);
471 usb_free_urb(priv_bcn->guardian_urb);
472
473 urb = priv_bcn->urb;
474 } else {
475 priv_tx = queue->entries[i].priv_data;
476 urb = priv_tx->urb;
477 }
478
479 usb_kill_urb(urb);
480 usb_free_urb(urb);
481 if (queue->entries[i].skb) 480 if (queue->entries[i].skb)
482 kfree_skb(queue->entries[i].skb); 481 kfree_skb(queue->entries[i].skb);
483 } 482 }
483
484 /*
485 * If this is not the beacon queue or
486 * no guardian byte was required for the beacon,
487 * then we are done.
488 */
489 if (rt2x00dev->bcn != queue ||
490 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
491 return;
492
493 for (i = 0; i < queue->limit; i++) {
494 bcn_priv = queue->entries[i].priv_data;
495 usb_kill_urb(bcn_priv->guardian_urb);
496 usb_free_urb(bcn_priv->guardian_urb);
497 }
484} 498}
485 499
486int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) 500int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 11e55180cbaf..26f53f868af6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -47,6 +47,20 @@
47#define REGISTER_TIMEOUT 500 47#define REGISTER_TIMEOUT 500
48#define REGISTER_TIMEOUT_FIRMWARE 1000 48#define REGISTER_TIMEOUT_FIRMWARE 1000
49 49
50/**
51 * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access
52 * @__datalen: Data length
53 */
54#define REGISTER_TIMEOUT16(__datalen) \
55 ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) )
56
57/**
58 * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access
59 * @__datalen: Data length
60 */
61#define REGISTER_TIMEOUT32(__datalen) \
62 ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) )
63
50/* 64/*
51 * Cache size 65 * Cache size
52 */ 66 */
@@ -185,13 +199,12 @@ static inline int rt2x00usb_vendor_request_sw(struct rt2x00_dev *rt2x00dev,
185 * kmalloc for correct handling inside the kernel USB layer. 199 * kmalloc for correct handling inside the kernel USB layer.
186 */ 200 */
187static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev, 201static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
188 __le16 *eeprom, const u16 lenght) 202 __le16 *eeprom, const u16 length)
189{ 203{
190 int timeout = REGISTER_TIMEOUT * (lenght / sizeof(u16));
191
192 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, 204 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
193 USB_VENDOR_REQUEST_IN, 0, 0, 205 USB_VENDOR_REQUEST_IN, 0, 0,
194 eeprom, lenght, timeout); 206 eeprom, length,
207 REGISTER_TIMEOUT16(length));
195} 208}
196 209
197/* 210/*
@@ -203,47 +216,31 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
203 * TX data handlers. 216 * TX data handlers.
204 */ 217 */
205int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 218int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
206 struct data_queue *queue, struct sk_buff *skb, 219 struct data_queue *queue, struct sk_buff *skb);
207 struct ieee80211_tx_control *control);
208
209/**
210 * struct queue_entry_priv_usb_rx: Per RX entry USB specific information
211 *
212 * @urb: Urb structure used for device communication.
213 */
214struct queue_entry_priv_usb_rx {
215 struct urb *urb;
216};
217 220
218/** 221/**
219 * struct queue_entry_priv_usb_tx: Per TX entry USB specific information 222 * struct queue_entry_priv_usb: Per entry USB specific information
220 * 223 *
221 * @urb: Urb structure used for device communication. 224 * @urb: Urb structure used for device communication.
222 * @control: mac80211 control structure used to transmit data.
223 */ 225 */
224struct queue_entry_priv_usb_tx { 226struct queue_entry_priv_usb {
225 struct urb *urb; 227 struct urb *urb;
226
227 struct ieee80211_tx_control control;
228}; 228};
229 229
230/** 230/**
231 * struct queue_entry_priv_usb_tx: Per TX entry USB specific information 231 * struct queue_entry_priv_usb_bcn: Per TX entry USB specific information
232 * 232 *
233 * The first section should match &struct queue_entry_priv_usb_tx exactly. 233 * The first section should match &struct queue_entry_priv_usb exactly.
234 * rt2500usb can use this structure to send a guardian byte when working 234 * rt2500usb can use this structure to send a guardian byte when working
235 * with beacons. 235 * with beacons.
236 * 236 *
237 * @urb: Urb structure used for device communication. 237 * @urb: Urb structure used for device communication.
238 * @control: mac80211 control structure used to transmit data.
239 * @guardian_data: Set to 0, used for sending the guardian data. 238 * @guardian_data: Set to 0, used for sending the guardian data.
240 * @guardian_urb: Urb structure used to send the guardian data. 239 * @guardian_urb: Urb structure used to send the guardian data.
241 */ 240 */
242struct queue_entry_priv_usb_bcn { 241struct queue_entry_priv_usb_bcn {
243 struct urb *urb; 242 struct urb *urb;
244 243
245 struct ieee80211_tx_control control;
246
247 unsigned int guardian_data; 244 unsigned int guardian_data;
248 struct urb *guardian_urb; 245 struct urb *guardian_urb;
249}; 246};
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 14bc7b281659..e13ed5ced26e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1018,49 +1018,34 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
1018static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 1018static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
1019 struct queue_entry *entry) 1019 struct queue_entry *entry)
1020{ 1020{
1021 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1021 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1022 u32 word; 1022 u32 word;
1023 1023
1024 rt2x00_desc_read(priv_rx->desc, 5, &word); 1024 rt2x00_desc_read(entry_priv->desc, 5, &word);
1025 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, 1025 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
1026 priv_rx->data_dma); 1026 entry_priv->data_dma);
1027 rt2x00_desc_write(priv_rx->desc, 5, word); 1027 rt2x00_desc_write(entry_priv->desc, 5, word);
1028 1028
1029 rt2x00_desc_read(priv_rx->desc, 0, &word); 1029 rt2x00_desc_read(entry_priv->desc, 0, &word);
1030 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 1030 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
1031 rt2x00_desc_write(priv_rx->desc, 0, word); 1031 rt2x00_desc_write(entry_priv->desc, 0, word);
1032} 1032}
1033 1033
1034static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev, 1034static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev,
1035 struct queue_entry *entry) 1035 struct queue_entry *entry)
1036{ 1036{
1037 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 1037 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1038 u32 word; 1038 u32 word;
1039 1039
1040 rt2x00_desc_read(priv_tx->desc, 1, &word); 1040 rt2x00_desc_read(entry_priv->desc, 0, &word);
1041 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1042 rt2x00_desc_write(priv_tx->desc, 1, word);
1043
1044 rt2x00_desc_read(priv_tx->desc, 5, &word);
1045 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
1046 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
1047 rt2x00_desc_write(priv_tx->desc, 5, word);
1048
1049 rt2x00_desc_read(priv_tx->desc, 6, &word);
1050 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1051 priv_tx->data_dma);
1052 rt2x00_desc_write(priv_tx->desc, 6, word);
1053
1054 rt2x00_desc_read(priv_tx->desc, 0, &word);
1055 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 1041 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
1056 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 1042 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
1057 rt2x00_desc_write(priv_tx->desc, 0, word); 1043 rt2x00_desc_write(entry_priv->desc, 0, word);
1058} 1044}
1059 1045
1060static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) 1046static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1061{ 1047{
1062 struct queue_entry_priv_pci_rx *priv_rx; 1048 struct queue_entry_priv_pci *entry_priv;
1063 struct queue_entry_priv_pci_tx *priv_tx;
1064 u32 reg; 1049 u32 reg;
1065 1050
1066 /* 1051 /*
@@ -1082,28 +1067,28 @@ static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1082 rt2x00dev->tx[0].desc_size / 4); 1067 rt2x00dev->tx[0].desc_size / 4);
1083 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); 1068 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg);
1084 1069
1085 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 1070 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
1086 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg); 1071 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg);
1087 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER, 1072 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER,
1088 priv_tx->desc_dma); 1073 entry_priv->desc_dma);
1089 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); 1074 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg);
1090 1075
1091 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 1076 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
1092 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg); 1077 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg);
1093 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER, 1078 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER,
1094 priv_tx->desc_dma); 1079 entry_priv->desc_dma);
1095 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); 1080 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg);
1096 1081
1097 priv_tx = rt2x00dev->tx[2].entries[0].priv_data; 1082 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
1098 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg); 1083 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg);
1099 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER, 1084 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER,
1100 priv_tx->desc_dma); 1085 entry_priv->desc_dma);
1101 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); 1086 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg);
1102 1087
1103 priv_tx = rt2x00dev->tx[3].entries[0].priv_data; 1088 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
1104 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg); 1089 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg);
1105 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER, 1090 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER,
1106 priv_tx->desc_dma); 1091 entry_priv->desc_dma);
1107 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); 1092 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg);
1108 1093
1109 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg); 1094 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg);
@@ -1113,10 +1098,10 @@ static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1113 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); 1098 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4);
1114 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); 1099 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg);
1115 1100
1116 priv_rx = rt2x00dev->rx->entries[0].priv_data; 1101 entry_priv = rt2x00dev->rx->entries[0].priv_data;
1117 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg); 1102 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg);
1118 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER, 1103 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER,
1119 priv_rx->desc_dma); 1104 entry_priv->desc_dma);
1120 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); 1105 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg);
1121 1106
1122 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg); 1107 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg);
@@ -1526,10 +1511,10 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1526 */ 1511 */
1527static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1512static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1528 struct sk_buff *skb, 1513 struct sk_buff *skb,
1529 struct txentry_desc *txdesc, 1514 struct txentry_desc *txdesc)
1530 struct ieee80211_tx_control *control)
1531{ 1515{
1532 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1516 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1517 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1533 __le32 *txd = skbdesc->desc; 1518 __le32 *txd = skbdesc->desc;
1534 u32 word; 1519 u32 word;
1535 1520
@@ -1543,6 +1528,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1543 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1528 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1544 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1529 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1545 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1530 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
1531 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1546 rt2x00_desc_write(txd, 1, word); 1532 rt2x00_desc_write(txd, 1, word);
1547 1533
1548 rt2x00_desc_read(txd, 2, &word); 1534 rt2x00_desc_read(txd, 2, &word);
@@ -1553,11 +1539,19 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1553 rt2x00_desc_write(txd, 2, word); 1539 rt2x00_desc_write(txd, 2, word);
1554 1540
1555 rt2x00_desc_read(txd, 5, &word); 1541 rt2x00_desc_read(txd, 5, &word);
1542 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
1543 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
1544 skbdesc->entry->entry_idx);
1556 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1545 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1557 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1546 TXPOWER_TO_DEV(rt2x00dev->tx_power));
1558 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1547 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1559 rt2x00_desc_write(txd, 5, word); 1548 rt2x00_desc_write(txd, 5, word);
1560 1549
1550 rt2x00_desc_read(txd, 6, &word);
1551 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1552 entry_priv->data_dma);
1553 rt2x00_desc_write(txd, 6, word);
1554
1561 if (skbdesc->desc_len > TXINFO_SIZE) { 1555 if (skbdesc->desc_len > TXINFO_SIZE) {
1562 rt2x00_desc_read(txd, 11, &word); 1556 rt2x00_desc_read(txd, 11, &word);
1563 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len); 1557 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len);
@@ -1577,8 +1571,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1577 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1571 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1578 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1572 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1579 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1573 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1580 !!(control->flags & 1574 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1581 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1582 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1575 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1583 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1576 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1584 rt2x00_set_field32(&word, TXD_W0_BURST, 1577 rt2x00_set_field32(&word, TXD_W0_BURST,
@@ -1591,11 +1584,11 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1591 * TX data initialization 1584 * TX data initialization
1592 */ 1585 */
1593static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1586static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1594 const unsigned int queue) 1587 const enum data_queue_qid queue)
1595{ 1588{
1596 u32 reg; 1589 u32 reg;
1597 1590
1598 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1591 if (queue == QID_BEACON) {
1599 /* 1592 /*
1600 * For Wi-Fi faily generated beacons between participating 1593 * For Wi-Fi faily generated beacons between participating
1601 * stations. Set TBTT phase adaptive adjustment step to 8us. 1594 * stations. Set TBTT phase adaptive adjustment step to 8us.
@@ -1613,14 +1606,10 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1613 } 1606 }
1614 1607
1615 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1608 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1616 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1609 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
1617 (queue == IEEE80211_TX_QUEUE_DATA0)); 1610 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
1618 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1611 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI));
1619 (queue == IEEE80211_TX_QUEUE_DATA1)); 1612 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO));
1620 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2,
1621 (queue == IEEE80211_TX_QUEUE_DATA2));
1622 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3,
1623 (queue == IEEE80211_TX_QUEUE_DATA3));
1624 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1613 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1625} 1614}
1626 1615
@@ -1671,14 +1660,13 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1671static void rt61pci_fill_rxdone(struct queue_entry *entry, 1660static void rt61pci_fill_rxdone(struct queue_entry *entry,
1672 struct rxdone_entry_desc *rxdesc) 1661 struct rxdone_entry_desc *rxdesc)
1673{ 1662{
1674 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1663 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1675 u32 word0; 1664 u32 word0;
1676 u32 word1; 1665 u32 word1;
1677 1666
1678 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1667 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1679 rt2x00_desc_read(priv_rx->desc, 1, &word1); 1668 rt2x00_desc_read(entry_priv->desc, 1, &word1);
1680 1669
1681 rxdesc->flags = 0;
1682 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1670 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1683 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1671 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1684 1672
@@ -1692,7 +1680,6 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1692 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1); 1680 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1);
1693 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1681 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1694 1682
1695 rxdesc->dev_flags = 0;
1696 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1683 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1697 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1684 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1698 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1685 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1707,7 +1694,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1707 struct data_queue *queue; 1694 struct data_queue *queue;
1708 struct queue_entry *entry; 1695 struct queue_entry *entry;
1709 struct queue_entry *entry_done; 1696 struct queue_entry *entry_done;
1710 struct queue_entry_priv_pci_tx *priv_tx; 1697 struct queue_entry_priv_pci *entry_priv;
1711 struct txdone_entry_desc txdesc; 1698 struct txdone_entry_desc txdesc;
1712 u32 word; 1699 u32 word;
1713 u32 reg; 1700 u32 reg;
@@ -1752,8 +1739,8 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1752 continue; 1739 continue;
1753 1740
1754 entry = &queue->entries[index]; 1741 entry = &queue->entries[index];
1755 priv_tx = entry->priv_data; 1742 entry_priv = entry->priv_data;
1756 rt2x00_desc_read(priv_tx->desc, 0, &word); 1743 rt2x00_desc_read(entry_priv->desc, 0, &word);
1757 1744
1758 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1745 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1759 !rt2x00_get_field32(word, TXD_W0_VALID)) 1746 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1768,7 +1755,8 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1768 "TX status report missed for entry %d\n", 1755 "TX status report missed for entry %d\n",
1769 entry_done->entry_idx); 1756 entry_done->entry_idx);
1770 1757
1771 txdesc.status = TX_FAIL_OTHER; 1758 txdesc.flags = 0;
1759 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
1772 txdesc.retry = 0; 1760 txdesc.retry = 0;
1773 1761
1774 rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc); 1762 rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc);
@@ -1778,7 +1766,17 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1778 /* 1766 /*
1779 * Obtain the status about this packet. 1767 * Obtain the status about this packet.
1780 */ 1768 */
1781 txdesc.status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT); 1769 txdesc.flags = 0;
1770 switch (rt2x00_get_field32(reg, STA_CSR4_TX_RESULT)) {
1771 case 0: /* Success, maybe with retry */
1772 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1773 break;
1774 case 6: /* Failure, excessive retries */
1775 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1776 /* Don't break, this is a failed frame! */
1777 default: /* Failure */
1778 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1779 }
1782 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); 1780 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
1783 1781
1784 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1782 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
@@ -2249,11 +2247,9 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2249 */ 2247 */
2250 rt2x00dev->hw->flags = 2248 rt2x00dev->hw->flags =
2251 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 2249 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
2252 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 2250 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2251 IEEE80211_HW_SIGNAL_DBM;
2253 rt2x00dev->hw->extra_tx_headroom = 0; 2252 rt2x00dev->hw->extra_tx_headroom = 0;
2254 rt2x00dev->hw->max_signal = MAX_SIGNAL;
2255 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
2256 rt2x00dev->hw->queues = 4;
2257 2253
2258 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 2254 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
2259 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 2255 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -2361,21 +2357,30 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
2361 return tsf; 2357 return tsf;
2362} 2358}
2363 2359
2364static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 2360static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2365 struct ieee80211_tx_control *control)
2366{ 2361{
2367 struct rt2x00_dev *rt2x00dev = hw->priv; 2362 struct rt2x00_dev *rt2x00dev = hw->priv;
2368 struct rt2x00_intf *intf = vif_to_intf(control->vif); 2363 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2369 struct queue_entry_priv_pci_tx *priv_tx; 2364 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
2365 struct queue_entry_priv_pci *entry_priv;
2370 struct skb_frame_desc *skbdesc; 2366 struct skb_frame_desc *skbdesc;
2367 struct txentry_desc txdesc;
2371 unsigned int beacon_base; 2368 unsigned int beacon_base;
2372 u32 reg; 2369 u32 reg;
2373 2370
2374 if (unlikely(!intf->beacon)) 2371 if (unlikely(!intf->beacon))
2375 return -ENOBUFS; 2372 return -ENOBUFS;
2376 2373
2377 priv_tx = intf->beacon->priv_data; 2374 /*
2378 memset(priv_tx->desc, 0, intf->beacon->queue->desc_size); 2375 * Copy all TX descriptor information into txdesc,
2376 * after that we are free to use the skb->cb array
2377 * for our information.
2378 */
2379 intf->beacon->skb = skb;
2380 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
2381
2382 entry_priv = intf->beacon->priv_data;
2383 memset(entry_priv->desc, 0, intf->beacon->queue->desc_size);
2379 2384
2380 /* 2385 /*
2381 * Fill in skb descriptor 2386 * Fill in skb descriptor
@@ -2385,7 +2390,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2385 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 2390 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
2386 skbdesc->data = skb->data; 2391 skbdesc->data = skb->data;
2387 skbdesc->data_len = skb->len; 2392 skbdesc->data_len = skb->len;
2388 skbdesc->desc = priv_tx->desc; 2393 skbdesc->desc = entry_priv->desc;
2389 skbdesc->desc_len = intf->beacon->queue->desc_size; 2394 skbdesc->desc_len = intf->beacon->queue->desc_size;
2390 skbdesc->entry = intf->beacon; 2395 skbdesc->entry = intf->beacon;
2391 2396
@@ -2400,24 +2405,17 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2400 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2405 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2401 2406
2402 /* 2407 /*
2403 * mac80211 doesn't provide the control->queue variable
2404 * for beacons. Set our own queue identification so
2405 * it can be used during descriptor initialization.
2406 */
2407 control->queue = RT2X00_BCN_QUEUE_BEACON;
2408 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2409
2410 /*
2411 * Write entire beacon with descriptor to register, 2408 * Write entire beacon with descriptor to register,
2412 * and kick the beacon generator. 2409 * and kick the beacon generator.
2413 */ 2410 */
2411 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
2414 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2412 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2415 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2413 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
2416 skbdesc->desc, skbdesc->desc_len); 2414 skbdesc->desc, skbdesc->desc_len);
2417 rt2x00pci_register_multiwrite(rt2x00dev, 2415 rt2x00pci_register_multiwrite(rt2x00dev,
2418 beacon_base + skbdesc->desc_len, 2416 beacon_base + skbdesc->desc_len,
2419 skbdesc->data, skbdesc->data_len); 2417 skbdesc->data, skbdesc->data_len);
2420 rt61pci_kick_tx_queue(rt2x00dev, control->queue); 2418 rt61pci_kick_tx_queue(rt2x00dev, QID_BEACON);
2421 2419
2422 return 0; 2420 return 0;
2423} 2421}
@@ -2469,21 +2467,21 @@ static const struct data_queue_desc rt61pci_queue_rx = {
2469 .entry_num = RX_ENTRIES, 2467 .entry_num = RX_ENTRIES,
2470 .data_size = DATA_FRAME_SIZE, 2468 .data_size = DATA_FRAME_SIZE,
2471 .desc_size = RXD_DESC_SIZE, 2469 .desc_size = RXD_DESC_SIZE,
2472 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 2470 .priv_size = sizeof(struct queue_entry_priv_pci),
2473}; 2471};
2474 2472
2475static const struct data_queue_desc rt61pci_queue_tx = { 2473static const struct data_queue_desc rt61pci_queue_tx = {
2476 .entry_num = TX_ENTRIES, 2474 .entry_num = TX_ENTRIES,
2477 .data_size = DATA_FRAME_SIZE, 2475 .data_size = DATA_FRAME_SIZE,
2478 .desc_size = TXD_DESC_SIZE, 2476 .desc_size = TXD_DESC_SIZE,
2479 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2477 .priv_size = sizeof(struct queue_entry_priv_pci),
2480}; 2478};
2481 2479
2482static const struct data_queue_desc rt61pci_queue_bcn = { 2480static const struct data_queue_desc rt61pci_queue_bcn = {
2483 .entry_num = 4 * BEACON_ENTRIES, 2481 .entry_num = 4 * BEACON_ENTRIES,
2484 .data_size = 0, /* No DMA required for beacons */ 2482 .data_size = 0, /* No DMA required for beacons */
2485 .desc_size = TXINFO_SIZE, 2483 .desc_size = TXINFO_SIZE,
2486 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2484 .priv_size = sizeof(struct queue_entry_priv_pci),
2487}; 2485};
2488 2486
2489static const struct rt2x00_ops rt61pci_ops = { 2487static const struct rt2x00_ops rt61pci_ops = {
@@ -2492,6 +2490,7 @@ static const struct rt2x00_ops rt61pci_ops = {
2492 .max_ap_intf = 4, 2490 .max_ap_intf = 4,
2493 .eeprom_size = EEPROM_SIZE, 2491 .eeprom_size = EEPROM_SIZE,
2494 .rf_size = RF_SIZE, 2492 .rf_size = RF_SIZE,
2493 .tx_queues = NUM_TX_QUEUES,
2495 .rx = &rt61pci_queue_rx, 2494 .rx = &rt61pci_queue_rx,
2496 .tx = &rt61pci_queue_tx, 2495 .tx = &rt61pci_queue_tx,
2497 .bcn = &rt61pci_queue_bcn, 2496 .bcn = &rt61pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 3511bba7ff65..c5a04b9329d2 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -54,6 +54,11 @@
54#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
55 55
56/* 56/*
57 * Number of TX queues.
58 */
59#define NUM_TX_QUEUES 4
60
61/*
57 * PCI registers. 62 * PCI registers.
58 */ 63 */
59 64
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index da19a3a91f4d..26c2e0a1a308 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -74,10 +74,10 @@ static inline void rt73usb_register_multiread(struct rt2x00_dev *rt2x00dev,
74 const unsigned int offset, 74 const unsigned int offset,
75 void *value, const u32 length) 75 void *value, const u32 length)
76{ 76{
77 int timeout = REGISTER_TIMEOUT * (length / sizeof(u32));
78 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 77 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
79 USB_VENDOR_REQUEST_IN, offset, 78 USB_VENDOR_REQUEST_IN, offset,
80 value, length, timeout); 79 value, length,
80 REGISTER_TIMEOUT32(length));
81} 81}
82 82
83static inline void rt73usb_register_write(struct rt2x00_dev *rt2x00dev, 83static inline void rt73usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -102,10 +102,10 @@ static inline void rt73usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
102 const unsigned int offset, 102 const unsigned int offset,
103 void *value, const u32 length) 103 void *value, const u32 length)
104{ 104{
105 int timeout = REGISTER_TIMEOUT * (length / sizeof(u32));
106 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 105 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
107 USB_VENDOR_REQUEST_OUT, offset, 106 USB_VENDOR_REQUEST_OUT, offset,
108 value, length, timeout); 107 value, length,
108 REGISTER_TIMEOUT32(length));
109} 109}
110 110
111static u32 rt73usb_bbp_check(struct rt2x00_dev *rt2x00dev) 111static u32 rt73usb_bbp_check(struct rt2x00_dev *rt2x00dev)
@@ -876,7 +876,6 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
876 char *ptr = data; 876 char *ptr = data;
877 char *cache; 877 char *cache;
878 int buflen; 878 int buflen;
879 int timeout;
880 879
881 /* 880 /*
882 * Wait for stable hardware. 881 * Wait for stable hardware.
@@ -907,14 +906,14 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
907 906
908 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) { 907 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) {
909 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE); 908 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE);
910 timeout = REGISTER_TIMEOUT * (buflen / sizeof(u32));
911 909
912 memcpy(cache, ptr, buflen); 910 memcpy(cache, ptr, buflen);
913 911
914 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 912 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
915 USB_VENDOR_REQUEST_OUT, 913 USB_VENDOR_REQUEST_OUT,
916 FIRMWARE_IMAGE_BASE + i, 0, 914 FIRMWARE_IMAGE_BASE + i, 0,
917 cache, buflen, timeout); 915 cache, buflen,
916 REGISTER_TIMEOUT32(buflen));
918 917
919 ptr += buflen; 918 ptr += buflen;
920 } 919 }
@@ -1256,8 +1255,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1256 */ 1255 */
1257static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1256static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1258 struct sk_buff *skb, 1257 struct sk_buff *skb,
1259 struct txentry_desc *txdesc, 1258 struct txentry_desc *txdesc)
1260 struct ieee80211_tx_control *control)
1261{ 1259{
1262 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1260 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1263 __le32 *txd = skbdesc->desc; 1261 __le32 *txd = skbdesc->desc;
@@ -1302,8 +1300,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1302 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1300 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1303 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1301 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1304 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1302 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1305 !!(control->flags & 1303 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1306 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1307 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1304 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1308 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1305 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1309 rt2x00_set_field32(&word, TXD_W0_BURST2, 1306 rt2x00_set_field32(&word, TXD_W0_BURST2,
@@ -1331,11 +1328,11 @@ static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1331 * TX data initialization 1328 * TX data initialization
1332 */ 1329 */
1333static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1330static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1334 const unsigned int queue) 1331 const enum data_queue_qid queue)
1335{ 1332{
1336 u32 reg; 1333 u32 reg;
1337 1334
1338 if (queue != RT2X00_BCN_QUEUE_BEACON) 1335 if (queue != QID_BEACON)
1339 return; 1336 return;
1340 1337
1341 /* 1338 /*
@@ -1406,25 +1403,26 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1406{ 1403{
1407 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1404 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1408 __le32 *rxd = (__le32 *)entry->skb->data; 1405 __le32 *rxd = (__le32 *)entry->skb->data;
1409 unsigned int offset = entry->queue->desc_size + 2;
1410 u32 word0; 1406 u32 word0;
1411 u32 word1; 1407 u32 word1;
1412 1408
1413 /* 1409 /*
1414 * Copy descriptor to the available headroom inside the skbuffer. 1410 * Copy descriptor to the skb->cb array, this has 2 benefits:
1411 * 1) Each descriptor word is 4 byte aligned.
1412 * 2) Descriptor is safe from moving of frame data in rt2x00usb.
1415 */ 1413 */
1416 skb_push(entry->skb, offset); 1414 skbdesc->desc_len =
1417 memcpy(entry->skb->data, rxd, entry->queue->desc_size); 1415 min_t(u16, entry->queue->desc_size, sizeof(entry->skb->cb));
1418 rxd = (__le32 *)entry->skb->data; 1416 memcpy(entry->skb->cb, rxd, skbdesc->desc_len);
1417 skbdesc->desc = entry->skb->cb;
1418 rxd = (__le32 *)skbdesc->desc;
1419 1419
1420 /* 1420 /*
1421 * The descriptor is now aligned to 4 bytes and thus it is 1421 * It is now safe to read the descriptor on all architectures.
1422 * now safe to read it on all architectures.
1423 */ 1422 */
1424 rt2x00_desc_read(rxd, 0, &word0); 1423 rt2x00_desc_read(rxd, 0, &word0);
1425 rt2x00_desc_read(rxd, 1, &word1); 1424 rt2x00_desc_read(rxd, 1, &word1);
1426 1425
1427 rxdesc->flags = 0;
1428 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1426 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1429 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1427 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1430 1428
@@ -1438,25 +1436,18 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1438 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1); 1436 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1);
1439 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1437 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1440 1438
1441 rxdesc->dev_flags = 0;
1442 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1439 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1443 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1440 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1444 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1441 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1445 rxdesc->dev_flags |= RXDONE_MY_BSS; 1442 rxdesc->dev_flags |= RXDONE_MY_BSS;
1446 1443
1447 /* 1444 /*
1448 * Adjust the skb memory window to the frame boundaries. 1445 * Set skb pointers, and update frame information.
1449 */ 1446 */
1450 skb_pull(entry->skb, offset + entry->queue->desc_size); 1447 skb_pull(entry->skb, entry->queue->desc_size);
1451 skb_trim(entry->skb, rxdesc->size); 1448 skb_trim(entry->skb, rxdesc->size);
1452
1453 /*
1454 * Set descriptor and data pointer.
1455 */
1456 skbdesc->data = entry->skb->data; 1449 skbdesc->data = entry->skb->data;
1457 skbdesc->data_len = rxdesc->size; 1450 skbdesc->data_len = rxdesc->size;
1458 skbdesc->desc = rxd;
1459 skbdesc->desc_len = entry->queue->desc_size;
1460} 1451}
1461 1452
1462/* 1453/*
@@ -1831,11 +1822,9 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1831 */ 1822 */
1832 rt2x00dev->hw->flags = 1823 rt2x00dev->hw->flags =
1833 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 1824 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1834 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1825 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1826 IEEE80211_HW_SIGNAL_DBM;
1835 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1827 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1836 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1837 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1838 rt2x00dev->hw->queues = 4;
1839 1828
1840 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); 1829 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev);
1841 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1830 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1959,20 +1948,28 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
1959#define rt73usb_get_tsf NULL 1948#define rt73usb_get_tsf NULL
1960#endif 1949#endif
1961 1950
1962static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1951static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1963 struct ieee80211_tx_control *control)
1964{ 1952{
1965 struct rt2x00_dev *rt2x00dev = hw->priv; 1953 struct rt2x00_dev *rt2x00dev = hw->priv;
1966 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1954 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1955 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1967 struct skb_frame_desc *skbdesc; 1956 struct skb_frame_desc *skbdesc;
1957 struct txentry_desc txdesc;
1968 unsigned int beacon_base; 1958 unsigned int beacon_base;
1969 unsigned int timeout;
1970 u32 reg; 1959 u32 reg;
1971 1960
1972 if (unlikely(!intf->beacon)) 1961 if (unlikely(!intf->beacon))
1973 return -ENOBUFS; 1962 return -ENOBUFS;
1974 1963
1975 /* 1964 /*
1965 * Copy all TX descriptor information into txdesc,
1966 * after that we are free to use the skb->cb array
1967 * for our information.
1968 */
1969 intf->beacon->skb = skb;
1970 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1971
1972 /*
1976 * Add the descriptor in front of the skb. 1973 * Add the descriptor in front of the skb.
1977 */ 1974 */
1978 skb_push(skb, intf->beacon->queue->desc_size); 1975 skb_push(skb, intf->beacon->queue->desc_size);
@@ -2001,23 +1998,16 @@ static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2001 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1998 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
2002 1999
2003 /* 2000 /*
2004 * mac80211 doesn't provide the control->queue variable
2005 * for beacons. Set our own queue identification so
2006 * it can be used during descriptor initialization.
2007 */
2008 control->queue = RT2X00_BCN_QUEUE_BEACON;
2009 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2010
2011 /*
2012 * Write entire beacon with descriptor to register, 2001 * Write entire beacon with descriptor to register,
2013 * and kick the beacon generator. 2002 * and kick the beacon generator.
2014 */ 2003 */
2004 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
2015 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2005 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2016 timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32));
2017 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 2006 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
2018 USB_VENDOR_REQUEST_OUT, beacon_base, 0, 2007 USB_VENDOR_REQUEST_OUT, beacon_base, 0,
2019 skb->data, skb->len, timeout); 2008 skb->data, skb->len,
2020 rt73usb_kick_tx_queue(rt2x00dev, control->queue); 2009 REGISTER_TIMEOUT32(skb->len));
2010 rt73usb_kick_tx_queue(rt2x00dev, QID_BEACON);
2021 2011
2022 return 0; 2012 return 0;
2023} 2013}
@@ -2068,21 +2058,21 @@ static const struct data_queue_desc rt73usb_queue_rx = {
2068 .entry_num = RX_ENTRIES, 2058 .entry_num = RX_ENTRIES,
2069 .data_size = DATA_FRAME_SIZE, 2059 .data_size = DATA_FRAME_SIZE,
2070 .desc_size = RXD_DESC_SIZE, 2060 .desc_size = RXD_DESC_SIZE,
2071 .priv_size = sizeof(struct queue_entry_priv_usb_rx), 2061 .priv_size = sizeof(struct queue_entry_priv_usb),
2072}; 2062};
2073 2063
2074static const struct data_queue_desc rt73usb_queue_tx = { 2064static const struct data_queue_desc rt73usb_queue_tx = {
2075 .entry_num = TX_ENTRIES, 2065 .entry_num = TX_ENTRIES,
2076 .data_size = DATA_FRAME_SIZE, 2066 .data_size = DATA_FRAME_SIZE,
2077 .desc_size = TXD_DESC_SIZE, 2067 .desc_size = TXD_DESC_SIZE,
2078 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 2068 .priv_size = sizeof(struct queue_entry_priv_usb),
2079}; 2069};
2080 2070
2081static const struct data_queue_desc rt73usb_queue_bcn = { 2071static const struct data_queue_desc rt73usb_queue_bcn = {
2082 .entry_num = 4 * BEACON_ENTRIES, 2072 .entry_num = 4 * BEACON_ENTRIES,
2083 .data_size = MGMT_FRAME_SIZE, 2073 .data_size = MGMT_FRAME_SIZE,
2084 .desc_size = TXINFO_SIZE, 2074 .desc_size = TXINFO_SIZE,
2085 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 2075 .priv_size = sizeof(struct queue_entry_priv_usb),
2086}; 2076};
2087 2077
2088static const struct rt2x00_ops rt73usb_ops = { 2078static const struct rt2x00_ops rt73usb_ops = {
@@ -2091,6 +2081,7 @@ static const struct rt2x00_ops rt73usb_ops = {
2091 .max_ap_intf = 4, 2081 .max_ap_intf = 4,
2092 .eeprom_size = EEPROM_SIZE, 2082 .eeprom_size = EEPROM_SIZE,
2093 .rf_size = RF_SIZE, 2083 .rf_size = RF_SIZE,
2084 .tx_queues = NUM_TX_QUEUES,
2094 .rx = &rt73usb_queue_rx, 2085 .rx = &rt73usb_queue_rx,
2095 .tx = &rt73usb_queue_tx, 2086 .tx = &rt73usb_queue_tx,
2096 .bcn = &rt73usb_queue_bcn, 2087 .bcn = &rt73usb_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 06d687425fef..25cdcc9bf7c4 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -54,6 +54,11 @@
54#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
55 55
56/* 56/*
57 * Number of TX queues.
58 */
59#define NUM_TX_QUEUES 4
60
61/*
57 * USB registers. 62 * USB registers.
58 */ 63 */
59 64
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index c181f23e930d..b7172a12c057 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -132,8 +132,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
132 132
133 rx_status.antenna = (flags2 >> 15) & 1; 133 rx_status.antenna = (flags2 >> 15) & 1;
134 /* TODO: improve signal/rssi reporting */ 134 /* TODO: improve signal/rssi reporting */
135 rx_status.signal = flags2 & 0xFF; 135 rx_status.qual = flags2 & 0xFF;
136 rx_status.ssi = (flags2 >> 8) & 0x7F; 136 rx_status.signal = (flags2 >> 8) & 0x7F;
137 /* XXX: is this correct? */ 137 /* XXX: is this correct? */
138 rx_status.rate_idx = (flags >> 20) & 0xF; 138 rx_status.rate_idx = (flags >> 20) & 0xF;
139 rx_status.freq = dev->conf.channel->center_freq; 139 rx_status.freq = dev->conf.channel->center_freq;
@@ -170,34 +170,29 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
170 while (skb_queue_len(&ring->queue)) { 170 while (skb_queue_len(&ring->queue)) {
171 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; 171 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
172 struct sk_buff *skb; 172 struct sk_buff *skb;
173 struct ieee80211_tx_status status; 173 struct ieee80211_tx_info *info;
174 struct ieee80211_tx_control *control;
175 u32 flags = le32_to_cpu(entry->flags); 174 u32 flags = le32_to_cpu(entry->flags);
176 175
177 if (flags & RTL8180_TX_DESC_FLAG_OWN) 176 if (flags & RTL8180_TX_DESC_FLAG_OWN)
178 return; 177 return;
179 178
180 memset(&status, 0, sizeof(status));
181
182 ring->idx = (ring->idx + 1) % ring->entries; 179 ring->idx = (ring->idx + 1) % ring->entries;
183 skb = __skb_dequeue(&ring->queue); 180 skb = __skb_dequeue(&ring->queue);
184 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), 181 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
185 skb->len, PCI_DMA_TODEVICE); 182 skb->len, PCI_DMA_TODEVICE);
186 183
187 control = *((struct ieee80211_tx_control **)skb->cb); 184 info = IEEE80211_SKB_CB(skb);
188 if (control) 185 memset(&info->status, 0, sizeof(info->status));
189 memcpy(&status.control, control, sizeof(*control));
190 kfree(control);
191 186
192 if (!(status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
193 if (flags & RTL8180_TX_DESC_FLAG_TX_OK) 188 if (flags & RTL8180_TX_DESC_FLAG_TX_OK)
194 status.flags = IEEE80211_TX_STATUS_ACK; 189 info->flags |= IEEE80211_TX_STAT_ACK;
195 else 190 else
196 status.excessive_retries = 1; 191 info->status.excessive_retries = 1;
197 } 192 }
198 status.retry_count = flags & 0xFF; 193 info->status.retry_count = flags & 0xFF;
199 194
200 ieee80211_tx_status_irqsafe(dev, skb, &status); 195 ieee80211_tx_status_irqsafe(dev, skb);
201 if (ring->entries - skb_queue_len(&ring->queue) == 2) 196 if (ring->entries - skb_queue_len(&ring->queue) == 2)
202 ieee80211_wake_queue(dev, prio); 197 ieee80211_wake_queue(dev, prio);
203 } 198 }
@@ -238,9 +233,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
238 return IRQ_HANDLED; 233 return IRQ_HANDLED;
239} 234}
240 235
241static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 236static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
242 struct ieee80211_tx_control *control)
243{ 237{
238 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
244 struct rtl8180_priv *priv = dev->priv; 239 struct rtl8180_priv *priv = dev->priv;
245 struct rtl8180_tx_ring *ring; 240 struct rtl8180_tx_ring *ring;
246 struct rtl8180_tx_desc *entry; 241 struct rtl8180_tx_desc *entry;
@@ -251,46 +246,40 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
251 u16 plcp_len = 0; 246 u16 plcp_len = 0;
252 __le16 rts_duration = 0; 247 __le16 rts_duration = 0;
253 248
254 prio = control->queue; 249 prio = skb_get_queue_mapping(skb);
255 ring = &priv->tx_ring[prio]; 250 ring = &priv->tx_ring[prio];
256 251
257 mapping = pci_map_single(priv->pdev, skb->data, 252 mapping = pci_map_single(priv->pdev, skb->data,
258 skb->len, PCI_DMA_TODEVICE); 253 skb->len, PCI_DMA_TODEVICE);
259 254
260 BUG_ON(!control->tx_rate);
261
262 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS | 255 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS |
263 RTL8180_TX_DESC_FLAG_LS | 256 RTL8180_TX_DESC_FLAG_LS |
264 (control->tx_rate->hw_value << 24) | skb->len; 257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
258 skb->len;
265 259
266 if (priv->r8185) 260 if (priv->r8185)
267 tx_flags |= RTL8180_TX_DESC_FLAG_DMA | 261 tx_flags |= RTL8180_TX_DESC_FLAG_DMA |
268 RTL8180_TX_DESC_FLAG_NO_ENC; 262 RTL8180_TX_DESC_FLAG_NO_ENC;
269 263
270 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
271 BUG_ON(!control->rts_cts_rate);
272 tx_flags |= RTL8180_TX_DESC_FLAG_RTS; 265 tx_flags |= RTL8180_TX_DESC_FLAG_RTS;
273 tx_flags |= control->rts_cts_rate->hw_value << 19; 266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
274 } else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
275 BUG_ON(!control->rts_cts_rate);
276 tx_flags |= RTL8180_TX_DESC_FLAG_CTS; 268 tx_flags |= RTL8180_TX_DESC_FLAG_CTS;
277 tx_flags |= control->rts_cts_rate->hw_value << 19; 269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
278 } 270 }
279 271
280 *((struct ieee80211_tx_control **) skb->cb) = 272 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
281 kmemdup(control, sizeof(*control), GFP_ATOMIC);
282
283 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
284 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len, 273 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
285 control); 274 info);
286 275
287 if (!priv->r8185) { 276 if (!priv->r8185) {
288 unsigned int remainder; 277 unsigned int remainder;
289 278
290 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4), 279 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
291 (control->tx_rate->bitrate * 2) / 10); 280 (ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
292 remainder = (16 * (skb->len + 4)) % 281 remainder = (16 * (skb->len + 4)) %
293 ((control->tx_rate->bitrate * 2) / 10); 282 ((ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
294 if (remainder > 0 && remainder <= 6) 283 if (remainder > 0 && remainder <= 6)
295 plcp_len |= 1 << 15; 284 plcp_len |= 1 << 15;
296 } 285 }
@@ -303,13 +292,13 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
303 entry->plcp_len = cpu_to_le16(plcp_len); 292 entry->plcp_len = cpu_to_le16(plcp_len);
304 entry->tx_buf = cpu_to_le32(mapping); 293 entry->tx_buf = cpu_to_le32(mapping);
305 entry->frame_len = cpu_to_le32(skb->len); 294 entry->frame_len = cpu_to_le32(skb->len);
306 entry->flags2 = control->alt_retry_rate != NULL ? 295 entry->flags2 = info->control.alt_retry_rate_idx >= 0 ?
307 control->alt_retry_rate->bitrate << 4 : 0; 296 ieee80211_get_alt_retry_rate(dev, info)->bitrate << 4 : 0;
308 entry->retry_limit = control->retry_limit; 297 entry->retry_limit = info->control.retry_limit;
309 entry->flags = cpu_to_le32(tx_flags); 298 entry->flags = cpu_to_le32(tx_flags);
310 __skb_queue_tail(&ring->queue, skb); 299 __skb_queue_tail(&ring->queue, skb);
311 if (ring->entries - skb_queue_len(&ring->queue) < 2) 300 if (ring->entries - skb_queue_len(&ring->queue) < 2)
312 ieee80211_stop_queue(dev, control->queue); 301 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
313 spin_unlock_irqrestore(&priv->lock, flags); 302 spin_unlock_irqrestore(&priv->lock, flags);
314 303
315 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 304 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -525,7 +514,6 @@ static void rtl8180_free_tx_ring(struct ieee80211_hw *dev, unsigned int prio)
525 514
526 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), 515 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
527 skb->len, PCI_DMA_TODEVICE); 516 skb->len, PCI_DMA_TODEVICE);
528 kfree(*((struct ieee80211_tx_control **) skb->cb));
529 kfree_skb(skb); 517 kfree_skb(skb);
530 ring->idx = (ring->idx + 1) % ring->entries; 518 ring->idx = (ring->idx + 1) % ring->entries;
531 } 519 }
@@ -894,9 +882,10 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
894 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 882 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
895 883
896 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 884 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
897 IEEE80211_HW_RX_INCLUDES_FCS; 885 IEEE80211_HW_RX_INCLUDES_FCS |
886 IEEE80211_HW_SIGNAL_UNSPEC;
898 dev->queues = 1; 887 dev->queues = 1;
899 dev->max_rssi = 65; 888 dev->max_signal = 65;
900 889
901 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 890 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
902 reg &= RTL818X_TX_CONF_HWVER_MASK; 891 reg &= RTL818X_TX_CONF_HWVER_MASK;
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 076d88b6db0e..a0cfb666de0e 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -44,12 +44,6 @@ struct rtl8187_rx_hdr {
44 __le64 mac_time; 44 __le64 mac_time;
45} __attribute__((packed)); 45} __attribute__((packed));
46 46
47struct rtl8187_tx_info {
48 struct ieee80211_tx_control *control;
49 struct urb *urb;
50 struct ieee80211_hw *dev;
51};
52
53struct rtl8187_tx_hdr { 47struct rtl8187_tx_hdr {
54 __le32 flags; 48 __le32 flags;
55#define RTL8187_TX_FLAG_NO_ENCRYPT (1 << 15) 49#define RTL8187_TX_FLAG_NO_ENCRYPT (1 << 15)
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 9223ada5f00e..0078c7e9918c 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -150,27 +150,22 @@ void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
150 150
151static void rtl8187_tx_cb(struct urb *urb) 151static void rtl8187_tx_cb(struct urb *urb)
152{ 152{
153 struct ieee80211_tx_status status;
154 struct sk_buff *skb = (struct sk_buff *)urb->context; 153 struct sk_buff *skb = (struct sk_buff *)urb->context;
155 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb; 154 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
155 struct ieee80211_hw *hw = info->driver_data[0];
156 156
157 memset(&status, 0, sizeof(status)); 157 usb_free_urb(info->driver_data[1]);
158
159 usb_free_urb(info->urb);
160 if (info->control)
161 memcpy(&status.control, info->control, sizeof(status.control));
162 kfree(info->control);
163 skb_pull(skb, sizeof(struct rtl8187_tx_hdr)); 158 skb_pull(skb, sizeof(struct rtl8187_tx_hdr));
164 status.flags |= IEEE80211_TX_STATUS_ACK; 159 memset(&info->status, 0, sizeof(info->status));
165 ieee80211_tx_status_irqsafe(info->dev, skb, &status); 160 info->flags |= IEEE80211_TX_STAT_ACK;
161 ieee80211_tx_status_irqsafe(hw, skb);
166} 162}
167 163
168static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 164static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
169 struct ieee80211_tx_control *control)
170{ 165{
171 struct rtl8187_priv *priv = dev->priv; 166 struct rtl8187_priv *priv = dev->priv;
167 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
172 struct rtl8187_tx_hdr *hdr; 168 struct rtl8187_tx_hdr *hdr;
173 struct rtl8187_tx_info *info;
174 struct urb *urb; 169 struct urb *urb;
175 __le16 rts_dur = 0; 170 __le16 rts_dur = 0;
176 u32 flags; 171 u32 flags;
@@ -185,33 +180,27 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
185 flags = skb->len; 180 flags = skb->len;
186 flags |= RTL8187_TX_FLAG_NO_ENCRYPT; 181 flags |= RTL8187_TX_FLAG_NO_ENCRYPT;
187 182
188 BUG_ON(!control->tx_rate); 183 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
189
190 flags |= control->tx_rate->hw_value << 24;
191 if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb->data)) 184 if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb->data))
192 flags |= RTL8187_TX_FLAG_MORE_FRAG; 185 flags |= RTL8187_TX_FLAG_MORE_FRAG;
193 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 186 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
194 BUG_ON(!control->rts_cts_rate);
195 flags |= RTL8187_TX_FLAG_RTS; 187 flags |= RTL8187_TX_FLAG_RTS;
196 flags |= control->rts_cts_rate->hw_value << 19; 188 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
197 rts_dur = ieee80211_rts_duration(dev, priv->vif, 189 rts_dur = ieee80211_rts_duration(dev, priv->vif,
198 skb->len, control); 190 skb->len, info);
199 } else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 191 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
200 BUG_ON(!control->rts_cts_rate);
201 flags |= RTL8187_TX_FLAG_CTS; 192 flags |= RTL8187_TX_FLAG_CTS;
202 flags |= control->rts_cts_rate->hw_value << 19; 193 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
203 } 194 }
204 195
205 hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr)); 196 hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
206 hdr->flags = cpu_to_le32(flags); 197 hdr->flags = cpu_to_le32(flags);
207 hdr->len = 0; 198 hdr->len = 0;
208 hdr->rts_duration = rts_dur; 199 hdr->rts_duration = rts_dur;
209 hdr->retry = cpu_to_le32(control->retry_limit << 8); 200 hdr->retry = cpu_to_le32(info->control.retry_limit << 8);
210 201
211 info = (struct rtl8187_tx_info *)skb->cb; 202 info->driver_data[0] = dev;
212 info->control = kmemdup(control, sizeof(*control), GFP_ATOMIC); 203 info->driver_data[1] = urb;
213 info->urb = urb;
214 info->dev = dev;
215 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2), 204 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
216 hdr, skb->len, rtl8187_tx_cb, skb); 205 hdr, skb->len, rtl8187_tx_cb, skb);
217 rc = usb_submit_urb(urb, GFP_ATOMIC); 206 rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -271,8 +260,8 @@ static void rtl8187_rx_cb(struct urb *urb)
271 } 260 }
272 261
273 rx_status.antenna = (hdr->signal >> 7) & 1; 262 rx_status.antenna = (hdr->signal >> 7) & 1;
274 rx_status.signal = 64 - min(hdr->noise, (u8)64); 263 rx_status.qual = 64 - min(hdr->noise, (u8)64);
275 rx_status.ssi = signal; 264 rx_status.signal = signal;
276 rx_status.rate_idx = rate; 265 rx_status.rate_idx = rate;
277 rx_status.freq = dev->conf.channel->center_freq; 266 rx_status.freq = dev->conf.channel->center_freq;
278 rx_status.band = dev->conf.channel->band; 267 rx_status.band = dev->conf.channel->band;
@@ -750,11 +739,11 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
750 739
751 priv->mode = IEEE80211_IF_TYPE_MNTR; 740 priv->mode = IEEE80211_IF_TYPE_MNTR;
752 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 741 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
753 IEEE80211_HW_RX_INCLUDES_FCS; 742 IEEE80211_HW_RX_INCLUDES_FCS |
743 IEEE80211_HW_SIGNAL_UNSPEC;
754 dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr); 744 dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr);
755 dev->queues = 1; 745 dev->queues = 1;
756 dev->max_rssi = 65; 746 dev->max_signal = 65;
757 dev->max_signal = 64;
758 747
759 eeprom.data = dev; 748 eeprom.data = dev;
760 eeprom.register_read = rtl8187_eeprom_register_read; 749 eeprom.register_read = rtl8187_eeprom_register_read;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 418606ac1c3b..6d86b365f150 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -224,36 +224,6 @@ out:
224 return r; 224 return r;
225} 225}
226 226
227/**
228 * clear_tx_skb_control_block - clears the control block of tx skbuffs
229 * @skb: a &struct sk_buff pointer
230 *
231 * This clears the control block of skbuff buffers, which were transmitted to
232 * the device. Notify that the function is not thread-safe, so prevent
233 * multiple calls.
234 */
235static void clear_tx_skb_control_block(struct sk_buff *skb)
236{
237 struct zd_tx_skb_control_block *cb =
238 (struct zd_tx_skb_control_block *)skb->cb;
239
240 kfree(cb->control);
241 cb->control = NULL;
242}
243
244/**
245 * kfree_tx_skb - frees a tx skbuff
246 * @skb: a &struct sk_buff pointer
247 *
248 * Frees the tx skbuff. Frees also the allocated control structure in the
249 * control block if necessary.
250 */
251static void kfree_tx_skb(struct sk_buff *skb)
252{
253 clear_tx_skb_control_block(skb);
254 dev_kfree_skb_any(skb);
255}
256
257static void zd_op_stop(struct ieee80211_hw *hw) 227static void zd_op_stop(struct ieee80211_hw *hw)
258{ 228{
259 struct zd_mac *mac = zd_hw_mac(hw); 229 struct zd_mac *mac = zd_hw_mac(hw);
@@ -276,40 +246,15 @@ static void zd_op_stop(struct ieee80211_hw *hw)
276 246
277 247
278 while ((skb = skb_dequeue(ack_wait_queue))) 248 while ((skb = skb_dequeue(ack_wait_queue)))
279 kfree_tx_skb(skb); 249 dev_kfree_skb_any(skb);
280}
281
282/**
283 * init_tx_skb_control_block - initializes skb control block
284 * @skb: a &sk_buff pointer
285 * @dev: pointer to the mac80221 device
286 * @control: mac80211 tx control applying for the frame in @skb
287 *
288 * Initializes the control block of the skbuff to be transmitted.
289 */
290static int init_tx_skb_control_block(struct sk_buff *skb,
291 struct ieee80211_hw *hw,
292 struct ieee80211_tx_control *control)
293{
294 struct zd_tx_skb_control_block *cb =
295 (struct zd_tx_skb_control_block *)skb->cb;
296
297 ZD_ASSERT(sizeof(*cb) <= sizeof(skb->cb));
298 memset(cb, 0, sizeof(*cb));
299 cb->hw= hw;
300 cb->control = kmalloc(sizeof(*control), GFP_ATOMIC);
301 if (cb->control == NULL)
302 return -ENOMEM;
303 memcpy(cb->control, control, sizeof(*control));
304
305 return 0;
306} 250}
307 251
308/** 252/**
309 * tx_status - reports tx status of a packet if required 253 * tx_status - reports tx status of a packet if required
310 * @hw - a &struct ieee80211_hw pointer 254 * @hw - a &struct ieee80211_hw pointer
311 * @skb - a sk-buffer 255 * @skb - a sk-buffer
312 * @status - the tx status of the packet without control information 256 * @flags: extra flags to set in the TX status info
257 * @ackssi: ACK signal strength
313 * @success - True for successfull transmission of the frame 258 * @success - True for successfull transmission of the frame
314 * 259 *
315 * This information calls ieee80211_tx_status_irqsafe() if required by the 260 * This information calls ieee80211_tx_status_irqsafe() if required by the
@@ -319,18 +264,17 @@ static int init_tx_skb_control_block(struct sk_buff *skb,
319 * If no status information has been requested, the skb is freed. 264 * If no status information has been requested, the skb is freed.
320 */ 265 */
321static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 266static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
322 struct ieee80211_tx_status *status, 267 u32 flags, int ackssi, bool success)
323 bool success)
324{ 268{
325 struct zd_tx_skb_control_block *cb = (struct zd_tx_skb_control_block *) 269 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
326 skb->cb; 270
271 memset(&info->status, 0, sizeof(info->status));
327 272
328 ZD_ASSERT(cb->control != NULL);
329 memcpy(&status->control, cb->control, sizeof(status->control));
330 if (!success) 273 if (!success)
331 status->excessive_retries = 1; 274 info->status.excessive_retries = 1;
332 clear_tx_skb_control_block(skb); 275 info->flags |= flags;
333 ieee80211_tx_status_irqsafe(hw, skb, status); 276 info->status.ack_signal = ackssi;
277 ieee80211_tx_status_irqsafe(hw, skb);
334} 278}
335 279
336/** 280/**
@@ -345,15 +289,12 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
345{ 289{
346 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 290 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
347 struct sk_buff *skb; 291 struct sk_buff *skb;
348 struct ieee80211_tx_status status;
349 292
350 skb = skb_dequeue(q); 293 skb = skb_dequeue(q);
351 if (skb == NULL) 294 if (skb == NULL)
352 return; 295 return;
353 296
354 memset(&status, 0, sizeof(status)); 297 tx_status(hw, skb, 0, 0, 0);
355
356 tx_status(hw, skb, &status, 0);
357} 298}
358 299
359/** 300/**
@@ -368,28 +309,20 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
368 */ 309 */
369void zd_mac_tx_to_dev(struct sk_buff *skb, int error) 310void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
370{ 311{
371 struct zd_tx_skb_control_block *cb = 312 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
372 (struct zd_tx_skb_control_block *)skb->cb; 313 struct ieee80211_hw *hw = info->driver_data[0];
373 struct ieee80211_hw *hw = cb->hw;
374
375 if (likely(cb->control)) {
376 skb_pull(skb, sizeof(struct zd_ctrlset));
377 if (unlikely(error ||
378 (cb->control->flags & IEEE80211_TXCTL_NO_ACK)))
379 {
380 struct ieee80211_tx_status status;
381 memset(&status, 0, sizeof(status));
382 tx_status(hw, skb, &status, !error);
383 } else {
384 struct sk_buff_head *q =
385 &zd_hw_mac(hw)->ack_wait_queue;
386 314
387 skb_queue_tail(q, skb); 315 skb_pull(skb, sizeof(struct zd_ctrlset));
388 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) 316 if (unlikely(error ||
389 zd_mac_tx_failed(hw); 317 (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
390 } 318 tx_status(hw, skb, 0, 0, !error);
391 } else { 319 } else {
392 kfree_tx_skb(skb); 320 struct sk_buff_head *q =
321 &zd_hw_mac(hw)->ack_wait_queue;
322
323 skb_queue_tail(q, skb);
324 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS)
325 zd_mac_tx_failed(hw);
393 } 326 }
394} 327}
395 328
@@ -454,7 +387,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
454 cs->control = 0; 387 cs->control = 0;
455 388
456 /* First fragment */ 389 /* First fragment */
457 if (flags & IEEE80211_TXCTL_FIRST_FRAGMENT) 390 if (flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
458 cs->control |= ZD_CS_NEED_RANDOM_BACKOFF; 391 cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
459 392
460 /* Multicast */ 393 /* Multicast */
@@ -466,10 +399,10 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
466 (IEEE80211_FTYPE_CTL|IEEE80211_STYPE_PSPOLL)) 399 (IEEE80211_FTYPE_CTL|IEEE80211_STYPE_PSPOLL))
467 cs->control |= ZD_CS_PS_POLL_FRAME; 400 cs->control |= ZD_CS_PS_POLL_FRAME;
468 401
469 if (flags & IEEE80211_TXCTL_USE_RTS_CTS) 402 if (flags & IEEE80211_TX_CTL_USE_RTS_CTS)
470 cs->control |= ZD_CS_RTS; 403 cs->control |= ZD_CS_RTS;
471 404
472 if (flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 405 if (flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
473 cs->control |= ZD_CS_SELF_CTS; 406 cs->control |= ZD_CS_SELF_CTS;
474 407
475 /* FIXME: Management frame? */ 408 /* FIXME: Management frame? */
@@ -516,25 +449,28 @@ void zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
516} 449}
517 450
518static int fill_ctrlset(struct zd_mac *mac, 451static int fill_ctrlset(struct zd_mac *mac,
519 struct sk_buff *skb, 452 struct sk_buff *skb)
520 struct ieee80211_tx_control *control)
521{ 453{
522 int r; 454 int r;
523 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 455 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
524 unsigned int frag_len = skb->len + FCS_LEN; 456 unsigned int frag_len = skb->len + FCS_LEN;
525 unsigned int packet_length; 457 unsigned int packet_length;
458 struct ieee80211_rate *txrate;
526 struct zd_ctrlset *cs = (struct zd_ctrlset *) 459 struct zd_ctrlset *cs = (struct zd_ctrlset *)
527 skb_push(skb, sizeof(struct zd_ctrlset)); 460 skb_push(skb, sizeof(struct zd_ctrlset));
461 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
528 462
529 ZD_ASSERT(frag_len <= 0xffff); 463 ZD_ASSERT(frag_len <= 0xffff);
530 464
531 cs->modulation = control->tx_rate->hw_value; 465 txrate = ieee80211_get_tx_rate(mac->hw, info);
532 if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE) 466
533 cs->modulation = control->tx_rate->hw_value_short; 467 cs->modulation = txrate->hw_value;
468 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
469 cs->modulation = txrate->hw_value_short;
534 470
535 cs->tx_length = cpu_to_le16(frag_len); 471 cs->tx_length = cpu_to_le16(frag_len);
536 472
537 cs_set_control(mac, cs, hdr, control->flags); 473 cs_set_control(mac, cs, hdr, info->flags);
538 474
539 packet_length = frag_len + sizeof(struct zd_ctrlset) + 10; 475 packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
540 ZD_ASSERT(packet_length <= 0xffff); 476 ZD_ASSERT(packet_length <= 0xffff);
@@ -579,24 +515,21 @@ static int fill_ctrlset(struct zd_mac *mac,
579 * control block of the skbuff will be initialized. If necessary the incoming 515 * control block of the skbuff will be initialized. If necessary the incoming
580 * mac80211 queues will be stopped. 516 * mac80211 queues will be stopped.
581 */ 517 */
582static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 518static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
583 struct ieee80211_tx_control *control)
584{ 519{
585 struct zd_mac *mac = zd_hw_mac(hw); 520 struct zd_mac *mac = zd_hw_mac(hw);
521 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
586 int r; 522 int r;
587 523
588 r = fill_ctrlset(mac, skb, control); 524 r = fill_ctrlset(mac, skb);
589 if (r) 525 if (r)
590 return r; 526 return r;
591 527
592 r = init_tx_skb_control_block(skb, hw, control); 528 info->driver_data[0] = hw;
593 if (r) 529
594 return r;
595 r = zd_usb_tx(&mac->chip.usb, skb); 530 r = zd_usb_tx(&mac->chip.usb, skb);
596 if (r) { 531 if (r)
597 clear_tx_skb_control_block(skb);
598 return r; 532 return r;
599 }
600 return 0; 533 return 0;
601} 534}
602 535
@@ -634,13 +567,8 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
634 tx_hdr = (struct ieee80211_hdr *)skb->data; 567 tx_hdr = (struct ieee80211_hdr *)skb->data;
635 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 568 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
636 { 569 {
637 struct ieee80211_tx_status status;
638
639 memset(&status, 0, sizeof(status));
640 status.flags = IEEE80211_TX_STATUS_ACK;
641 status.ack_signal = stats->ssi;
642 __skb_unlink(skb, q); 570 __skb_unlink(skb, q);
643 tx_status(hw, skb, &status, 1); 571 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
644 goto out; 572 goto out;
645 } 573 }
646 } 574 }
@@ -691,8 +619,8 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
691 619
692 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; 620 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
693 stats.band = IEEE80211_BAND_2GHZ; 621 stats.band = IEEE80211_BAND_2GHZ;
694 stats.ssi = status->signal_strength; 622 stats.signal = status->signal_strength;
695 stats.signal = zd_rx_qual_percent(buffer, 623 stats.qual = zd_rx_qual_percent(buffer,
696 length - sizeof(struct rx_status), 624 length - sizeof(struct rx_status),
697 status); 625 status);
698 626
@@ -751,6 +679,7 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
751 case IEEE80211_IF_TYPE_MNTR: 679 case IEEE80211_IF_TYPE_MNTR:
752 case IEEE80211_IF_TYPE_MESH_POINT: 680 case IEEE80211_IF_TYPE_MESH_POINT:
753 case IEEE80211_IF_TYPE_STA: 681 case IEEE80211_IF_TYPE_STA:
682 case IEEE80211_IF_TYPE_IBSS:
754 mac->type = conf->type; 683 mac->type = conf->type;
755 break; 684 break;
756 default: 685 default:
@@ -781,7 +710,8 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
781 struct zd_mac *mac = zd_hw_mac(hw); 710 struct zd_mac *mac = zd_hw_mac(hw);
782 int associated; 711 int associated;
783 712
784 if (mac->type == IEEE80211_IF_TYPE_MESH_POINT) { 713 if (mac->type == IEEE80211_IF_TYPE_MESH_POINT ||
714 mac->type == IEEE80211_IF_TYPE_IBSS) {
785 associated = true; 715 associated = true;
786 if (conf->beacon) { 716 if (conf->beacon) {
787 zd_mac_config_beacon(hw, conf->beacon); 717 zd_mac_config_beacon(hw, conf->beacon);
@@ -941,6 +871,17 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
941 } 871 }
942} 872}
943 873
874static int zd_op_beacon_update(struct ieee80211_hw *hw,
875 struct sk_buff *skb)
876{
877 struct zd_mac *mac = zd_hw_mac(hw);
878 zd_mac_config_beacon(hw, skb);
879 kfree_skb(skb);
880 zd_set_beacon_interval(&mac->chip, BCN_MODE_IBSS |
881 hw->conf.beacon_int);
882 return 0;
883}
884
944static const struct ieee80211_ops zd_ops = { 885static const struct ieee80211_ops zd_ops = {
945 .tx = zd_op_tx, 886 .tx = zd_op_tx,
946 .start = zd_op_start, 887 .start = zd_op_start,
@@ -951,6 +892,7 @@ static const struct ieee80211_ops zd_ops = {
951 .config_interface = zd_op_config_interface, 892 .config_interface = zd_op_config_interface,
952 .configure_filter = zd_op_configure_filter, 893 .configure_filter = zd_op_configure_filter,
953 .bss_info_changed = zd_op_bss_info_changed, 894 .bss_info_changed = zd_op_bss_info_changed,
895 .beacon_update = zd_op_beacon_update,
954}; 896};
955 897
956struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) 898struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
@@ -982,10 +924,10 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
982 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; 924 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
983 925
984 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 926 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
985 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; 927 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
986 hw->max_rssi = 100; 928 IEEE80211_HW_SIGNAL_DB;
987 hw->max_signal = 100;
988 929
930 hw->max_signal = 100;
989 hw->queues = 1; 931 hw->queues = 1;
990 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 932 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
991 933
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 71170244d2c9..18c1d56d3dd7 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -149,22 +149,6 @@ struct housekeeping {
149 struct delayed_work link_led_work; 149 struct delayed_work link_led_work;
150}; 150};
151 151
152/**
153 * struct zd_tx_skb_control_block - control block for tx skbuffs
154 * @control: &struct ieee80211_tx_control pointer
155 * @context: context pointer
156 *
157 * This structure is used to fill the cb field in an &sk_buff to transmit.
158 * The control field is NULL, if there is no requirement from the mac80211
159 * stack to report about the packet ACK. This is the case if the flag
160 * IEEE80211_TXCTL_NO_ACK is not set in &struct ieee80211_tx_control.
161 */
162struct zd_tx_skb_control_block {
163 struct ieee80211_tx_control *control;
164 struct ieee80211_hw *hw;
165 void *context;
166};
167
168#define ZD_MAC_STATS_BUFFER_SIZE 16 152#define ZD_MAC_STATS_BUFFER_SIZE 16
169 153
170#define ZD_MAC_MAX_ACK_WAITERS 10 154#define ZD_MAC_MAX_ACK_WAITERS 10
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 8941f5eb96c2..1ccff240bf97 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -169,10 +169,11 @@ static int upload_code(struct usb_device *udev,
169 if (flags & REBOOT) { 169 if (flags & REBOOT) {
170 u8 ret; 170 u8 ret;
171 171
172 /* Use "DMA-aware" buffer. */
172 r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 173 r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
173 USB_REQ_FIRMWARE_CONFIRM, 174 USB_REQ_FIRMWARE_CONFIRM,
174 USB_DIR_IN | USB_TYPE_VENDOR, 175 USB_DIR_IN | USB_TYPE_VENDOR,
175 0, 0, &ret, sizeof(ret), 5000 /* ms */); 176 0, 0, p, sizeof(ret), 5000 /* ms */);
176 if (r != sizeof(ret)) { 177 if (r != sizeof(ret)) {
177 dev_err(&udev->dev, 178 dev_err(&udev->dev,
178 "control request firmeware confirmation failed." 179 "control request firmeware confirmation failed."
@@ -181,6 +182,7 @@ static int upload_code(struct usb_device *udev,
181 r = -ENODEV; 182 r = -ENODEV;
182 goto error; 183 goto error;
183 } 184 }
185 ret = p[0];
184 if (ret & 0x80) { 186 if (ret & 0x80) {
185 dev_err(&udev->dev, 187 dev_err(&udev->dev,
186 "Internal error while downloading." 188 "Internal error while downloading."
@@ -312,22 +314,31 @@ int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
312{ 314{
313 int r; 315 int r;
314 struct usb_device *udev = zd_usb_to_usbdev(usb); 316 struct usb_device *udev = zd_usb_to_usbdev(usb);
317 u8 *buf;
315 318
319 /* Use "DMA-aware" buffer. */
320 buf = kmalloc(len, GFP_KERNEL);
321 if (!buf)
322 return -ENOMEM;
316 r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 323 r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
317 USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0, 324 USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0,
318 data, len, 5000); 325 buf, len, 5000);
319 if (r < 0) { 326 if (r < 0) {
320 dev_err(&udev->dev, 327 dev_err(&udev->dev,
321 "read over firmware interface failed: %d\n", r); 328 "read over firmware interface failed: %d\n", r);
322 return r; 329 goto exit;
323 } else if (r != len) { 330 } else if (r != len) {
324 dev_err(&udev->dev, 331 dev_err(&udev->dev,
325 "incomplete read over firmware interface: %d/%d\n", 332 "incomplete read over firmware interface: %d/%d\n",
326 r, len); 333 r, len);
327 return -EIO; 334 r = -EIO;
335 goto exit;
328 } 336 }
329 337 r = 0;
330 return 0; 338 memcpy(data, buf, len);
339exit:
340 kfree(buf);
341 return r;
331} 342}
332 343
333#define urb_dev(urb) (&(urb)->dev->dev) 344#define urb_dev(urb) (&(urb)->dev->dev)
@@ -869,7 +880,7 @@ static void tx_urb_complete(struct urb *urb)
869{ 880{
870 int r; 881 int r;
871 struct sk_buff *skb; 882 struct sk_buff *skb;
872 struct zd_tx_skb_control_block *cb; 883 struct ieee80211_tx_info *info;
873 struct zd_usb *usb; 884 struct zd_usb *usb;
874 885
875 switch (urb->status) { 886 switch (urb->status) {
@@ -893,8 +904,8 @@ free_urb:
893 * grab 'usb' pointer before handing off the skb (since 904 * grab 'usb' pointer before handing off the skb (since
894 * it might be freed by zd_mac_tx_to_dev or mac80211) 905 * it might be freed by zd_mac_tx_to_dev or mac80211)
895 */ 906 */
896 cb = (struct zd_tx_skb_control_block *)skb->cb; 907 info = IEEE80211_SKB_CB(skb);
897 usb = &zd_hw_mac(cb->hw)->chip.usb; 908 usb = &zd_hw_mac(info->driver_data[0])->chip.usb;
898 zd_mac_tx_to_dev(skb, urb->status); 909 zd_mac_tx_to_dev(skb, urb->status);
899 free_tx_urb(usb, urb); 910 free_tx_urb(usb, urb);
900 tx_dec_submitted_urbs(usb); 911 tx_dec_submitted_urbs(usb);
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 57c4ccfab1ee..f883dcfffe06 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -510,17 +510,15 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
510 sprom_do_read(bus, buf); 510 sprom_do_read(bus, buf);
511 err = sprom_check_crc(buf, bus->sprom_size); 511 err = sprom_check_crc(buf, bus->sprom_size);
512 if (err) { 512 if (err) {
513 /* check for rev 4 sprom - has special signature */ 513 /* try for a 440 byte SPROM - revision 4 and higher */
514 if (buf[32] == 0x5372) { 514 kfree(buf);
515 kfree(buf); 515 buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
516 buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), 516 GFP_KERNEL);
517 GFP_KERNEL); 517 if (!buf)
518 if (!buf) 518 goto out;
519 goto out; 519 bus->sprom_size = SSB_SPROMSIZE_WORDS_R4;
520 bus->sprom_size = SSB_SPROMSIZE_WORDS_R4; 520 sprom_do_read(bus, buf);
521 sprom_do_read(bus, buf); 521 err = sprom_check_crc(buf, bus->sprom_size);
522 err = sprom_check_crc(buf, bus->sprom_size);
523 }
524 if (err) 522 if (err)
525 ssb_printk(KERN_WARNING PFX "WARNING: Invalid" 523 ssb_printk(KERN_WARNING PFX "WARNING: Invalid"
526 " SPROM CRC (corrupt SPROM)\n"); 524 " SPROM CRC (corrupt SPROM)\n");
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
new file mode 100644
index 000000000000..9b64b6d67873
--- /dev/null
+++ b/include/linux/brcmphy.h
@@ -0,0 +1,6 @@
1#define PHY_BRCM_WIRESPEED_ENABLE 0x00000001
2#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000002
3#define PHY_BRCM_APD_CLK125_ENABLE 0x00000004
4#define PHY_BRCM_STD_IBND_DISABLE 0x00000008
5#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00000010
6#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00000020
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0b5e03eae6d2..9300f37cd7e8 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -306,20 +306,32 @@ struct ieee80211_ht_addt_info {
306#define IEEE80211_HT_CAP_SGI_40 0x0040 306#define IEEE80211_HT_CAP_SGI_40 0x0040
307#define IEEE80211_HT_CAP_DELAY_BA 0x0400 307#define IEEE80211_HT_CAP_DELAY_BA 0x0400
308#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 308#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
309/* 802.11n HT capability AMPDU settings */
309#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 310#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
310#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C 311#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
312/* 802.11n HT capability MSC set */
313#define IEEE80211_SUPP_MCS_SET_UEQM 4
314#define IEEE80211_HT_CAP_MAX_STREAMS 4
315#define IEEE80211_SUPP_MCS_SET_LEN 10
316/* maximum streams the spec allows */
317#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
318#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
319#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
320#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
311/* 802.11n HT IE masks */ 321/* 802.11n HT IE masks */
312#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03 322#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
323#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
324#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
313#define IEEE80211_HT_IE_CHA_WIDTH 0x04 325#define IEEE80211_HT_IE_CHA_WIDTH 0x04
314#define IEEE80211_HT_IE_HT_PROTECTION 0x0003 326#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
315#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 327#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
316#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 328#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
317 329
318/* MIMO Power Save Modes */ 330/* MIMO Power Save Modes */
319#define WLAN_HT_CAP_MIMO_PS_STATIC 0 331#define WLAN_HT_CAP_MIMO_PS_STATIC 0
320#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 332#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
321#define WLAN_HT_CAP_MIMO_PS_INVALID 2 333#define WLAN_HT_CAP_MIMO_PS_INVALID 2
322#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 334#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
323 335
324/* Authentication algorithms */ 336/* Authentication algorithms */
325#define WLAN_AUTH_OPEN 0 337#define WLAN_AUTH_OPEN 0
@@ -552,16 +564,17 @@ enum ieee80211_back_parties {
552 */ 564 */
553static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) 565static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
554{ 566{
555 u8 *raw = (u8 *) hdr; 567 __le16 fc = hdr->frame_control;
556 u8 tofrom = (*(raw+1)) & 3; /* get the TODS and FROMDS bits */ 568 fc &= cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
557 569
558 switch (tofrom) { 570 switch (fc) {
559 case 2: 571 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
560 return hdr->addr3; 572 return hdr->addr3;
561 case 3: 573 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS):
562 return hdr->addr4; 574 return hdr->addr4;
575 default:
576 return hdr->addr2;
563 } 577 }
564 return hdr->addr2;
565} 578}
566 579
567/** 580/**
@@ -577,12 +590,13 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
577 */ 590 */
578static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) 591static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
579{ 592{
580 u8 *raw = (u8 *) hdr; 593 __le16 fc = hdr->frame_control;
581 u8 to_ds = (*(raw+1)) & 1; /* get the TODS bit */ 594 fc &= cpu_to_le16(IEEE80211_FCTL_TODS);
582 595
583 if (to_ds) 596 if (fc)
584 return hdr->addr3; 597 return hdr->addr3;
585 return hdr->addr1; 598 else
599 return hdr->addr1;
586} 600}
587 601
588/** 602/**
@@ -595,8 +609,8 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
595 */ 609 */
596static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr) 610static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr)
597{ 611{
598 return (le16_to_cpu(hdr->frame_control) & 612 __le16 fc = hdr->frame_control;
599 IEEE80211_FCTL_MOREFRAGS) != 0; 613 return !!(fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS));
600} 614}
601 615
602#endif /* IEEE80211_H */ 616#endif /* IEEE80211_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 950e13d09e06..6badb3e2c4e4 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: if_bridge.h,v 1.1 2000/02/18 16:47:01 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h
index 0f2f70d4e48c..c3b1f8562709 100644
--- a/include/linux/if_ppp.h
+++ b/include/linux/if_ppp.h
@@ -1,5 +1,3 @@
1/* $Id: if_ppp.h,v 1.21 2000/03/27 06:03:36 paulus Exp $ */
2
3/* 1/*
4 * if_ppp.h - Point-to-Point Protocol definitions. 2 * if_ppp.h - Point-to-Point Protocol definitions.
5 * 3 *
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 8c71fe2fb1f5..18f31b6187a3 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -11,8 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * $Id: if_tun.h,v 1.2 2001/06/01 18:39:47 davem Exp $
16 */ 14 */
17 15
18#ifndef __IF_TUN_H 16#ifndef __IF_TUN_H
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index af3f4a70f3df..1e7cc4af40de 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -1,7 +1,3 @@
1/*
2 * $Id$
3 */
4
5#ifndef _IP6_TUNNEL_H 1#ifndef _IP6_TUNNEL_H
6#define _IP6_TUNNEL_H 2#define _IP6_TUNNEL_H
7 3
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 0a383ac083cb..759bc043dc65 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -81,6 +81,7 @@ enum ctattr_protoinfo {
81 CTA_PROTOINFO_UNSPEC, 81 CTA_PROTOINFO_UNSPEC,
82 CTA_PROTOINFO_TCP, 82 CTA_PROTOINFO_TCP,
83 CTA_PROTOINFO_DCCP, 83 CTA_PROTOINFO_DCCP,
84 CTA_PROTOINFO_SCTP,
84 __CTA_PROTOINFO_MAX 85 __CTA_PROTOINFO_MAX
85}; 86};
86#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) 87#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
@@ -103,6 +104,15 @@ enum ctattr_protoinfo_dccp {
103}; 104};
104#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) 105#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
105 106
107enum ctattr_protoinfo_sctp {
108 CTA_PROTOINFO_SCTP_UNSPEC,
109 CTA_PROTOINFO_SCTP_STATE,
110 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
111 CTA_PROTOINFO_SCTP_VTAG_REPLY,
112 __CTA_PROTOINFO_SCTP_MAX
113};
114#define CTA_PROTOINFO_SCTP_MAX (__CTA_PROTOINFO_SCTP_MAX - 1)
115
106enum ctattr_counters { 116enum ctattr_counters {
107 CTA_COUNTERS_UNSPEC, 117 CTA_COUNTERS_UNSPEC,
108 CTA_COUNTERS_PACKETS, /* old 64bit counters */ 118 CTA_COUNTERS_PACKETS, /* old 64bit counters */
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
new file mode 100644
index 000000000000..2273c3ae33ca
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -0,0 +1,40 @@
1/*
2 * ebt_ip6
3 *
4 * Authors:
5 * Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
6 * Manohar Castelino <manohar.r.castelino@intel.com>
7 *
8 * Jan 11, 2008
9 *
10 */
11
12#ifndef __LINUX_BRIDGE_EBT_IP6_H
13#define __LINUX_BRIDGE_EBT_IP6_H
14
15#define EBT_IP6_SOURCE 0x01
16#define EBT_IP6_DEST 0x02
17#define EBT_IP6_TCLASS 0x04
18#define EBT_IP6_PROTO 0x08
19#define EBT_IP6_SPORT 0x10
20#define EBT_IP6_DPORT 0x20
21#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
22 EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
23#define EBT_IP6_MATCH "ip6"
24
25/* the same values are used for the invflags */
26struct ebt_ip6_info
27{
28 struct in6_addr saddr;
29 struct in6_addr daddr;
30 struct in6_addr smsk;
31 struct in6_addr dmsk;
32 uint8_t tclass;
33 uint8_t protocol;
34 uint8_t bitmask;
35 uint8_t invflags;
36 uint16_t sport[2];
37 uint16_t dport[2];
38};
39
40#endif
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index 96e231ae7554..b76e653157e5 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -4,7 +4,8 @@
4#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */ 4#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
5#define EBT_LOG_ARP 0x02 5#define EBT_LOG_ARP 0x02
6#define EBT_LOG_NFLOG 0x04 6#define EBT_LOG_NFLOG 0x04
7#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP) 7#define EBT_LOG_IP6 0x08
8#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP | EBT_LOG_IP6)
8#define EBT_LOG_PREFIX_SIZE 30 9#define EBT_LOG_PREFIX_SIZE 30
9#define EBT_LOG_WATCHER "log" 10#define EBT_LOG_WATCHER "log"
10 11
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 650318b0c405..29c7727ff0e8 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -60,6 +60,7 @@ enum nf_ip_hook_priorities {
60 NF_IP_PRI_MANGLE = -150, 60 NF_IP_PRI_MANGLE = -150,
61 NF_IP_PRI_NAT_DST = -100, 61 NF_IP_PRI_NAT_DST = -100,
62 NF_IP_PRI_FILTER = 0, 62 NF_IP_PRI_FILTER = 0,
63 NF_IP_PRI_SECURITY = 50,
63 NF_IP_PRI_NAT_SRC = 100, 64 NF_IP_PRI_NAT_SRC = 100,
64 NF_IP_PRI_SELINUX_LAST = 225, 65 NF_IP_PRI_SELINUX_LAST = 225,
65 NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, 66 NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 3475a65dae9b..fd50988b83ec 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -64,6 +64,7 @@ enum nf_ip6_hook_priorities {
64 NF_IP6_PRI_MANGLE = -150, 64 NF_IP6_PRI_MANGLE = -150,
65 NF_IP6_PRI_NAT_DST = -100, 65 NF_IP6_PRI_NAT_DST = -100,
66 NF_IP6_PRI_FILTER = 0, 66 NF_IP6_PRI_FILTER = 0,
67 NF_IP6_PRI_SECURITY = 50,
67 NF_IP6_PRI_NAT_SRC = 100, 68 NF_IP6_PRI_NAT_SRC = 100,
68 NF_IP6_PRI_SELINUX_LAST = 225, 69 NF_IP6_PRI_SELINUX_LAST = 225,
69 NF_IP6_PRI_LAST = INT_MAX, 70 NF_IP6_PRI_LAST = INT_MAX,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index bec1062a25a1..9ff1b54908f3 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -193,7 +193,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
193 193
194/* finegrained unicast helpers: */ 194/* finegrained unicast helpers: */
195struct sock *netlink_getsockbyfilp(struct file *filp); 195struct sock *netlink_getsockbyfilp(struct file *filp);
196int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, 196int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
197 long *timeo, struct sock *ssk); 197 long *timeo, struct sock *ssk);
198void netlink_detachskb(struct sock *sk, struct sk_buff *skb); 198void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
199int netlink_sendskb(struct sock *sk, struct sk_buff *skb); 199int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eafc9d6d2b35..caa000596b25 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1981,6 +1981,7 @@
1981#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 1981#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
1982#define PCI_DEVICE_ID_TIGON3_5782 0x1696 1982#define PCI_DEVICE_ID_TIGON3_5782 0x1696
1983#define PCI_DEVICE_ID_TIGON3_5784 0x1698 1983#define PCI_DEVICE_ID_TIGON3_5784 0x1698
1984#define PCI_DEVICE_ID_TIGON3_5785 0x1699
1984#define PCI_DEVICE_ID_TIGON3_5786 0x169a 1985#define PCI_DEVICE_ID_TIGON3_5786 0x169a
1985#define PCI_DEVICE_ID_TIGON3_5787 0x169b 1986#define PCI_DEVICE_ID_TIGON3_5787 0x169b
1986#define PCI_DEVICE_ID_TIGON3_5788 0x169c 1987#define PCI_DEVICE_ID_TIGON3_5788 0x169c
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index e86a7a5cf355..b8d4ddd22736 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -23,8 +23,6 @@
23 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO 23 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
24 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, 24 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
25 * OR MODIFICATIONS. 25 * OR MODIFICATIONS.
26 *
27 * $Id: ppp-comp.h,v 1.6 1997/11/27 06:04:44 paulus Exp $
28 */ 26 */
29 27
30/* 28/*
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index c6b13ff85028..6e8adc77522c 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -1,5 +1,3 @@
1/* $Id: ppp_defs.h,v 1.2 1994/09/21 01:31:06 paulus Exp $ */
2
3/* 1/*
4 * ppp_defs.h - PPP definitions. 2 * ppp_defs.h - PPP definitions.
5 * 3 *
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
new file mode 100644
index 000000000000..b58f54c24183
--- /dev/null
+++ b/include/linux/smc911x.h
@@ -0,0 +1,12 @@
1#ifndef __SMC911X_H__
2#define __SMC911X_H__
3
4#define SMC911X_USE_16BIT (1 << 0)
5#define SMC911X_USE_32BIT (1 << 1)
6
7struct smc911x_platdata {
8 unsigned long flags;
9 unsigned long irq_flags; /* IRQF_... */
10};
11
12#endif /* __SMC911X_H__ */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index fec6899bf355..d48d4e605f74 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -7,8 +7,6 @@
7 * Andy Adamson <andros@umich.edu> 7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu> 8 * Bruce Fields <bfields@umich.edu>
9 * Copyright (c) 2000 The Regents of the University of Michigan 9 * Copyright (c) 2000 The Regents of the University of Michigan
10 *
11 * $Id$
12 */ 10 */
13 11
14#ifndef _LINUX_SUNRPC_AUTH_GSS_H 12#ifndef _LINUX_SUNRPC_AUTH_GSS_H
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 459c5fc11d51..03f33330ece2 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -7,8 +7,6 @@
7 * Andy Adamson <andros@umich.edu> 7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu> 8 * Bruce Fields <bfields@umich.edu>
9 * Copyright (c) 2000 The Regents of the University of Michigan 9 * Copyright (c) 2000 The Regents of the University of Michigan
10 *
11 * $Id$
12 */ 10 */
13 11
14#ifndef _LINUX_SUNRPC_GSS_API_H 12#ifndef _LINUX_SUNRPC_GSS_API_H
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 417a1def56db..c9165d9771a8 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -3,9 +3,6 @@
3 * 3 *
4 * Bruce Fields <bfields@umich.edu> 4 * Bruce Fields <bfields@umich.edu>
5 * Copyright (c) 2002 The Regents of the Unviersity of Michigan 5 * Copyright (c) 2002 The Regents of the Unviersity of Michigan
6 *
7 * $Id$
8 *
9 */ 6 */
10 7
11#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H 8#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b31b6b74aa28..07e79bdb9cdf 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -291,10 +291,9 @@ struct tcp_sock {
291 u32 rcv_ssthresh; /* Current window clamp */ 291 u32 rcv_ssthresh; /* Current window clamp */
292 292
293 u32 frto_highmark; /* snd_nxt when RTO occurred */ 293 u32 frto_highmark; /* snd_nxt when RTO occurred */
294 u8 reordering; /* Packet reordering metric. */ 294 u16 advmss; /* Advertised MSS */
295 u8 frto_counter; /* Number of new acks after RTO */ 295 u8 frto_counter; /* Number of new acks after RTO */
296 u8 nonagle; /* Disable Nagle algorithm? */ 296 u8 nonagle; /* Disable Nagle algorithm? */
297 u8 keepalive_probes; /* num of allowed keep alive probes */
298 297
299/* RTT measurement */ 298/* RTT measurement */
300 u32 srtt; /* smoothed round trip time << 3 */ 299 u32 srtt; /* smoothed round trip time << 3 */
@@ -305,6 +304,10 @@ struct tcp_sock {
305 304
306 u32 packets_out; /* Packets which are "in flight" */ 305 u32 packets_out; /* Packets which are "in flight" */
307 u32 retrans_out; /* Retransmitted packets out */ 306 u32 retrans_out; /* Retransmitted packets out */
307
308 u16 urg_data; /* Saved octet of OOB data and control flags */
309 u8 urg_mode; /* In urgent mode */
310 u8 ecn_flags; /* ECN status bits. */
308/* 311/*
309 * Options received (usually on last packet, some only on SYN packets). 312 * Options received (usually on last packet, some only on SYN packets).
310 */ 313 */
@@ -320,13 +323,24 @@ struct tcp_sock {
320 u32 snd_cwnd_used; 323 u32 snd_cwnd_used;
321 u32 snd_cwnd_stamp; 324 u32 snd_cwnd_stamp;
322 325
323 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
324
325 u32 rcv_wnd; /* Current receiver window */ 326 u32 rcv_wnd; /* Current receiver window */
326 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ 327 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
327 u32 pushed_seq; /* Last pushed seq, required to talk to windows */ 328 u32 pushed_seq; /* Last pushed seq, required to talk to windows */
329 u32 lost_out; /* Lost packets */
330 u32 sacked_out; /* SACK'd packets */
331 u32 fackets_out; /* FACK'd packets */
332 u32 tso_deferred;
333 u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
328 334
329/* SACKs data */ 335 /* from STCP, retrans queue hinting */
336 struct sk_buff* lost_skb_hint;
337 struct sk_buff *scoreboard_skb_hint;
338 struct sk_buff *retransmit_skb_hint;
339 struct sk_buff *forward_skb_hint;
340
341 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
342
343 /* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */
330 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 344 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
331 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ 345 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
332 346
@@ -337,23 +351,14 @@ struct tcp_sock {
337 * sacked_out > 0) 351 * sacked_out > 0)
338 */ 352 */
339 353
340 /* from STCP, retrans queue hinting */
341 struct sk_buff* lost_skb_hint;
342
343 struct sk_buff *scoreboard_skb_hint;
344 struct sk_buff *retransmit_skb_hint;
345 struct sk_buff *forward_skb_hint;
346
347 int lost_cnt_hint; 354 int lost_cnt_hint;
348 int retransmit_cnt_hint; 355 int retransmit_cnt_hint;
349 356
350 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ 357 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
351 358
352 u16 advmss; /* Advertised MSS */ 359 u8 reordering; /* Packet reordering metric. */
360 u8 keepalive_probes; /* num of allowed keep alive probes */
353 u32 prior_ssthresh; /* ssthresh saved at recovery start */ 361 u32 prior_ssthresh; /* ssthresh saved at recovery start */
354 u32 lost_out; /* Lost packets */
355 u32 sacked_out; /* SACK'd packets */
356 u32 fackets_out; /* FACK'd packets */
357 u32 high_seq; /* snd_nxt at onset of congestion */ 362 u32 high_seq; /* snd_nxt at onset of congestion */
358 363
359 u32 retrans_stamp; /* Timestamp of the last retransmit, 364 u32 retrans_stamp; /* Timestamp of the last retransmit,
@@ -361,23 +366,16 @@ struct tcp_sock {
361 * the first SYN. */ 366 * the first SYN. */
362 u32 undo_marker; /* tracking retrans started here. */ 367 u32 undo_marker; /* tracking retrans started here. */
363 int undo_retrans; /* number of undoable retransmissions. */ 368 int undo_retrans; /* number of undoable retransmissions. */
369 u32 total_retrans; /* Total retransmits for entire connection */
370
364 u32 urg_seq; /* Seq of received urgent pointer */ 371 u32 urg_seq; /* Seq of received urgent pointer */
365 u16 urg_data; /* Saved octet of OOB data and control flags */
366 u8 urg_mode; /* In urgent mode */
367 u8 ecn_flags; /* ECN status bits. */
368 u32 snd_up; /* Urgent pointer */ 372 u32 snd_up; /* Urgent pointer */
369 373
370 u32 total_retrans; /* Total retransmits for entire connection */
371 u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
372
373 unsigned int keepalive_time; /* time before keep alive takes place */ 374 unsigned int keepalive_time; /* time before keep alive takes place */
374 unsigned int keepalive_intvl; /* time interval between keep alive probes */ 375 unsigned int keepalive_intvl; /* time interval between keep alive probes */
375 int linger2;
376 376
377 unsigned long last_synq_overflow; 377 unsigned long last_synq_overflow;
378 378
379 u32 tso_deferred;
380
381/* Receiver side RTT estimation */ 379/* Receiver side RTT estimation */
382 struct { 380 struct {
383 u32 rtt; 381 u32 rtt;
@@ -405,6 +403,8 @@ struct tcp_sock {
405/* TCP MD5 Signagure Option information */ 403/* TCP MD5 Signagure Option information */
406 struct tcp_md5sig_info *md5sig_info; 404 struct tcp_md5sig_info *md5sig_info;
407#endif 405#endif
406
407 int linger2;
408}; 408};
409 409
410static inline struct tcp_sock *tcp_sk(const struct sock *sk) 410static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index b0c916d1f375..2bc6fa4adeb5 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -2,7 +2,7 @@
2 * include/linux/tipc_config.h: Include file for TIPC configuration interface 2 * include/linux/tipc_config.h: Include file for TIPC configuration interface
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -136,6 +136,14 @@
136#define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */ 136#define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */
137 137
138/* 138/*
139 * Reserved commands:
140 * May not be issued by any process.
141 * Used internally by TIPC.
142 */
143
144#define TIPC_CMD_NOT_NET_ADMIN 0xC001 /* tx none, rx none */
145
146/*
139 * TLV types defined for TIPC 147 * TLV types defined for TIPC
140 */ 148 */
141 149
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index 3add87465b1f..e0aa39612eba 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -522,7 +522,7 @@ extern int wanrouter_proc_init(void);
522extern void wanrouter_proc_cleanup(void); 522extern void wanrouter_proc_cleanup(void);
523extern int wanrouter_proc_add(struct wan_device *wandev); 523extern int wanrouter_proc_add(struct wan_device *wandev);
524extern int wanrouter_proc_delete(struct wan_device *wandev); 524extern int wanrouter_proc_delete(struct wan_device *wandev);
525extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); 525extern long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
526 526
527/* Public Data */ 527/* Public Data */
528/* list of registered devices */ 528/* list of registered devices */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 0a9b5b41ed67..4a95a0e5eeca 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -611,6 +611,7 @@
611#define IW_ENCODE_ALG_WEP 1 611#define IW_ENCODE_ALG_WEP 1
612#define IW_ENCODE_ALG_TKIP 2 612#define IW_ENCODE_ALG_TKIP 2
613#define IW_ENCODE_ALG_CCMP 3 613#define IW_ENCODE_ALG_CCMP 3
614#define IW_ENCODE_ALG_PMK 4
614/* struct iw_encode_ext ->ext_flags */ 615/* struct iw_encode_ext ->ext_flags */
615#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 616#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
616#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 617#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
@@ -630,6 +631,7 @@
630#define IW_ENC_CAPA_WPA2 0x00000002 631#define IW_ENC_CAPA_WPA2 0x00000002
631#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004 632#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004
632#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008 633#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008
634#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
633 635
634/* Event capability macros - in (struct iw_range *)->event_capa 636/* Event capability macros - in (struct iw_range *)->event_capa
635 * Because we have more than 32 possible events, we use an array of 637 * Because we have more than 32 possible events, we use an array of
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index bbd3d583c6e6..06b28142b3ab 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -121,7 +121,8 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
121 */ 121 */
122extern int ipv6_addr_label_init(void); 122extern int ipv6_addr_label_init(void);
123extern void ipv6_addr_label_rtnl_register(void); 123extern void ipv6_addr_label_rtnl_register(void);
124extern u32 ipv6_addr_label(const struct in6_addr *addr, 124extern u32 ipv6_addr_label(struct net *net,
125 const struct in6_addr *addr,
125 int type, int ifindex); 126 int type, int ifindex);
126 127
127/* 128/*
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index 529816bfbc52..b31399e1fd83 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -1262,9 +1262,6 @@ extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
1262/* ieee80211_tx.c */ 1262/* ieee80211_tx.c */
1263extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); 1263extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
1264extern void ieee80211_txb_free(struct ieee80211_txb *); 1264extern void ieee80211_txb_free(struct ieee80211_txb *);
1265extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
1266 struct ieee80211_hdr *frame, int hdr_len,
1267 int total_len, int encrypt_mpdu);
1268 1265
1269/* ieee80211_rx.c */ 1266/* ieee80211_rx.c */
1270extern void ieee80211_rx_any(struct ieee80211_device *ieee, 1267extern void ieee80211_rx_any(struct ieee80211_device *ieee,
@@ -1312,14 +1309,6 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
1312extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, 1309extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
1313 struct iw_request_info *info, 1310 struct iw_request_info *info,
1314 union iwreq_data *wrqu, char *extra); 1311 union iwreq_data *wrqu, char *extra);
1315extern int ieee80211_wx_set_auth(struct net_device *dev,
1316 struct iw_request_info *info,
1317 union iwreq_data *wrqu,
1318 char *extra);
1319extern int ieee80211_wx_get_auth(struct net_device *dev,
1320 struct iw_request_info *info,
1321 union iwreq_data *wrqu,
1322 char *extra);
1323 1312
1324static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) 1313static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
1325{ 1314{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index b2cfc4927257..db66c7927743 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -148,7 +148,6 @@ struct ifacaddr6
148#define IFA_HOST IPV6_ADDR_LOOPBACK 148#define IFA_HOST IPV6_ADDR_LOOPBACK
149#define IFA_LINK IPV6_ADDR_LINKLOCAL 149#define IFA_LINK IPV6_ADDR_LINKLOCAL
150#define IFA_SITE IPV6_ADDR_SITELOCAL 150#define IFA_SITE IPV6_ADDR_SITELOCAL
151#define IFA_GLOBAL 0x0000U
152 151
153struct ipv6_devstat { 152struct ipv6_devstat {
154 struct proc_dir_entry *proc_dir_entry; 153 struct proc_dir_entry *proc_dir_entry;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index ad8404b56113..15e1f8fe4c1f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -1,8 +1,6 @@
1/* 1/*
2 * INETPEER - A storage for permanent information about peers 2 * INETPEER - A storage for permanent information about peers
3 * 3 *
4 * Version: $Id: inetpeer.h,v 1.2 2002/01/12 07:54:56 davem Exp $
5 *
6 * Authors: Andrey V. Savochkin <saw@msu.ru> 4 * Authors: Andrey V. Savochkin <saw@msu.ru>
7 */ 5 */
8 6
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 6512d85f11b3..83b4e008b16d 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -1,7 +1,3 @@
1/*
2 * $Id$
3 */
4
5#ifndef _NET_IP6_TUNNEL_H 1#ifndef _NET_IP6_TUNNEL_H
6#define _NET_IP6_TUNNEL_H 2#define _NET_IP6_TUNNEL_H
7 3
@@ -19,7 +15,6 @@
19struct ip6_tnl { 15struct ip6_tnl {
20 struct ip6_tnl *next; /* next tunnel in list */ 16 struct ip6_tnl *next; /* next tunnel in list */
21 struct net_device *dev; /* virtual device associated with tunnel */ 17 struct net_device *dev; /* virtual device associated with tunnel */
22 struct net_device_stats stat; /* statistics for tunnel device */
23 int recursion; /* depth of hard_start_xmit recursion */ 18 int recursion; /* depth of hard_start_xmit recursion */
24 struct ip6_tnl_parm parms; /* tunnel configuration parameters */ 19 struct ip6_tnl_parm parms; /* tunnel configuration parameters */
25 struct flowi fl; /* flowi template for xmit */ 20 struct flowi fl; /* flowi template for xmit */
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
index 3924d7d2cb11..c74cc1bd5a02 100644
--- a/include/net/ipconfig.h
+++ b/include/net/ipconfig.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ipconfig.h,v 1.4 2001/04/30 04:51:46 davem Exp $
3 *
4 * Copyright (C) 1997 Martin Mares 2 * Copyright (C) 1997 Martin Mares
5 * 3 *
6 * Automatic IP Layer Configuration 4 * Automatic IP Layer Configuration
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 633ed4def8e3..a85bda64b852 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -11,7 +11,6 @@ struct ip_tunnel
11{ 11{
12 struct ip_tunnel *next; 12 struct ip_tunnel *next;
13 struct net_device *dev; 13 struct net_device *dev;
14 struct net_device_stats stat;
15 14
16 int recursion; /* Depth of hard_start_xmit recursion */ 15 int recursion; /* Depth of hard_start_xmit recursion */
17 int err_count; /* Number of arrived ICMP errors */ 16 int err_count; /* Number of arrived ICMP errors */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e0a612bc9c4e..7f7db8d57934 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt> 5 * Pedro Roque <roque@di.fc.ul.pt>
6 * 6 *
7 * $Id: ipv6.h,v 1.1 2002/05/20 15:13:07 jgrimm Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dae3f9ec1154..1196de85f8db 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -98,6 +98,18 @@ struct ieee80211_ht_bss_info {
98}; 98};
99 99
100/** 100/**
101 * enum ieee80211_max_queues - maximum number of queues
102 *
103 * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
104 * @IEEE80211_MAX_AMPDU_QUEUES: Maximum number of queues usable
105 * for A-MPDU operation.
106 */
107enum ieee80211_max_queues {
108 IEEE80211_MAX_QUEUES = 16,
109 IEEE80211_MAX_AMPDU_QUEUES = 16,
110};
111
112/**
101 * struct ieee80211_tx_queue_params - transmit queue configuration 113 * struct ieee80211_tx_queue_params - transmit queue configuration
102 * 114 *
103 * The information provided in this structure is required for QoS 115 * The information provided in this structure is required for QoS
@@ -117,58 +129,18 @@ struct ieee80211_tx_queue_params {
117}; 129};
118 130
119/** 131/**
120 * struct ieee80211_tx_queue_stats_data - transmit queue statistics 132 * struct ieee80211_tx_queue_stats - transmit queue statistics
121 * 133 *
122 * @len: number of packets in queue 134 * @len: number of packets in queue
123 * @limit: queue length limit 135 * @limit: queue length limit
124 * @count: number of frames sent 136 * @count: number of frames sent
125 */ 137 */
126struct ieee80211_tx_queue_stats_data { 138struct ieee80211_tx_queue_stats {
127 unsigned int len; 139 unsigned int len;
128 unsigned int limit; 140 unsigned int limit;
129 unsigned int count; 141 unsigned int count;
130}; 142};
131 143
132/**
133 * enum ieee80211_tx_queue - transmit queue number
134 *
135 * These constants are used with some callbacks that take a
136 * queue number to set parameters for a queue.
137 *
138 * @IEEE80211_TX_QUEUE_DATA0: data queue 0
139 * @IEEE80211_TX_QUEUE_DATA1: data queue 1
140 * @IEEE80211_TX_QUEUE_DATA2: data queue 2
141 * @IEEE80211_TX_QUEUE_DATA3: data queue 3
142 * @IEEE80211_TX_QUEUE_DATA4: data queue 4
143 * @IEEE80211_TX_QUEUE_SVP: ??
144 * @NUM_TX_DATA_QUEUES: number of data queues
145 * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be
146 * sent after a beacon
147 * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames
148 * @NUM_TX_DATA_QUEUES_AMPDU: adding more queues for A-MPDU
149 */
150enum ieee80211_tx_queue {
151 IEEE80211_TX_QUEUE_DATA0,
152 IEEE80211_TX_QUEUE_DATA1,
153 IEEE80211_TX_QUEUE_DATA2,
154 IEEE80211_TX_QUEUE_DATA3,
155 IEEE80211_TX_QUEUE_DATA4,
156 IEEE80211_TX_QUEUE_SVP,
157
158 NUM_TX_DATA_QUEUES,
159
160/* due to stupidity in the sub-ioctl userspace interface, the items in
161 * this struct need to have fixed values. As soon as it is removed, we can
162 * fix these entries. */
163 IEEE80211_TX_QUEUE_AFTER_BEACON = 6,
164 IEEE80211_TX_QUEUE_BEACON = 7,
165 NUM_TX_DATA_QUEUES_AMPDU = 16
166};
167
168struct ieee80211_tx_queue_stats {
169 struct ieee80211_tx_queue_stats_data data[NUM_TX_DATA_QUEUES_AMPDU];
170};
171
172struct ieee80211_low_level_stats { 144struct ieee80211_low_level_stats {
173 unsigned int dot11ACKFailureCount; 145 unsigned int dot11ACKFailureCount;
174 unsigned int dot11RTSFailureCount; 146 unsigned int dot11RTSFailureCount;
@@ -229,91 +201,128 @@ struct ieee80211_bss_conf {
229}; 201};
230 202
231/** 203/**
232 * enum mac80211_tx_control_flags - flags to describe Tx configuration for 204 * enum mac80211_tx_flags - flags to transmission information/status
233 * the Tx frame 205 *
234 * 206 * These flags are used with the @flags member of &ieee80211_tx_info
235 * These flags are used with the @flags member of &ieee80211_tx_control 207 *
236 * 208 * @IEEE80211_TX_CTL_REQ_TX_STATUS: request TX status callback for this frame.
237 * @IEEE80211_TXCTL_REQ_TX_STATUS: request TX status callback for this frame. 209 * @IEEE80211_TX_CTL_DO_NOT_ENCRYPT: send this frame without encryption;
238 * @IEEE80211_TXCTL_DO_NOT_ENCRYPT: send this frame without encryption; 210 * e.g., for EAPOL frame
239 * e.g., for EAPOL frame 211 * @IEEE80211_TX_CTL_USE_RTS_CTS: use RTS-CTS before sending frame
240 * @IEEE80211_TXCTL_USE_RTS_CTS: use RTS-CTS before sending frame 212 * @IEEE80211_TX_CTL_USE_CTS_PROTECT: use CTS protection for the frame (e.g.,
241 * @IEEE80211_TXCTL_USE_CTS_PROTECT: use CTS protection for the frame (e.g., 213 * for combined 802.11g / 802.11b networks)
242 * for combined 802.11g / 802.11b networks) 214 * @IEEE80211_TX_CTL_NO_ACK: tell the low level not to wait for an ack
243 * @IEEE80211_TXCTL_NO_ACK: tell the low level not to wait for an ack 215 * @IEEE80211_TX_CTL_RATE_CTRL_PROBE
244 * @IEEE80211_TXCTL_RATE_CTRL_PROBE 216 * @IEEE80211_TX_CTL_CLEAR_PS_FILT: clear powersave filter for destination
245 * @EEE80211_TXCTL_CLEAR_PS_FILT: clear powersave filter 217 * station
246 * for destination station 218 * @IEEE80211_TX_CTL_REQUEUE:
247 * @IEEE80211_TXCTL_REQUEUE: 219 * @IEEE80211_TX_CTL_FIRST_FRAGMENT: this is a first fragment of the frame
248 * @IEEE80211_TXCTL_FIRST_FRAGMENT: this is a first fragment of the frame 220 * @IEEE80211_TX_CTL_LONG_RETRY_LIMIT: this frame should be send using the
249 * @IEEE80211_TXCTL_LONG_RETRY_LIMIT: this frame should be send using the 221 * through set_retry_limit configured long retry value
250 * through set_retry_limit configured long 222 * @IEEE80211_TX_CTL_EAPOL_FRAME: internal to mac80211
251 * retry value 223 * @IEEE80211_TX_CTL_SEND_AFTER_DTIM: send this frame after DTIM beacon
252 * @IEEE80211_TXCTL_EAPOL_FRAME: internal to mac80211 224 * @IEEE80211_TX_CTL_AMPDU: this frame should be sent as part of an A-MPDU
253 * @IEEE80211_TXCTL_SEND_AFTER_DTIM: send this frame after DTIM beacon 225 * @IEEE80211_TX_CTL_OFDM_HT: this frame can be sent in HT OFDM rates. number
254 * @IEEE80211_TXCTL_AMPDU: this frame should be sent as part of an A-MPDU 226 * of streams when this flag is on can be extracted from antenna_sel_tx,
255 * @IEEE80211_TXCTL_OFDM_HT: this frame can be sent in HT OFDM rates. number 227 * so if 1 antenna is marked use SISO, 2 antennas marked use MIMO, n
256 * of streams when this flag is on can be extracted 228 * antennas marked use MIMO_n.
257 * from antenna_sel_tx, so if 1 antenna is marked 229 * @IEEE80211_TX_CTL_GREEN_FIELD: use green field protection for this frame
258 * use SISO, 2 antennas marked use MIMO, n antennas 230 * @IEEE80211_TX_CTL_40_MHZ_WIDTH: send this frame using 40 Mhz channel width
259 * marked use MIMO_n. 231 * @IEEE80211_TX_CTL_DUP_DATA: duplicate data frame on both 20 Mhz channels
260 * @IEEE80211_TXCTL_GREEN_FIELD: use green field protection for this frame 232 * @IEEE80211_TX_CTL_SHORT_GI: send this frame using short guard interval
261 * @IEEE80211_TXCTL_40_MHZ_WIDTH: send this frame using 40 Mhz channel width 233 * @IEEE80211_TX_STAT_TX_FILTERED: The frame was not transmitted
262 * @IEEE80211_TXCTL_DUP_DATA: duplicate data frame on both 20 Mhz channels 234 * because the destination STA was in powersave mode.
263 * @IEEE80211_TXCTL_SHORT_GI: send this frame using short guard interval 235 * @IEEE80211_TX_STAT_ACK: Frame was acknowledged
236 * @IEEE80211_TX_STAT_AMPDU: The frame was aggregated, so status
237 * is for the whole aggregation.
264 */ 238 */
265enum mac80211_tx_control_flags { 239enum mac80211_tx_control_flags {
266 IEEE80211_TXCTL_REQ_TX_STATUS = (1<<0), 240 IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
267 IEEE80211_TXCTL_DO_NOT_ENCRYPT = (1<<1), 241 IEEE80211_TX_CTL_DO_NOT_ENCRYPT = BIT(1),
268 IEEE80211_TXCTL_USE_RTS_CTS = (1<<2), 242 IEEE80211_TX_CTL_USE_RTS_CTS = BIT(2),
269 IEEE80211_TXCTL_USE_CTS_PROTECT = (1<<3), 243 IEEE80211_TX_CTL_USE_CTS_PROTECT = BIT(3),
270 IEEE80211_TXCTL_NO_ACK = (1<<4), 244 IEEE80211_TX_CTL_NO_ACK = BIT(4),
271 IEEE80211_TXCTL_RATE_CTRL_PROBE = (1<<5), 245 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(5),
272 IEEE80211_TXCTL_CLEAR_PS_FILT = (1<<6), 246 IEEE80211_TX_CTL_CLEAR_PS_FILT = BIT(6),
273 IEEE80211_TXCTL_REQUEUE = (1<<7), 247 IEEE80211_TX_CTL_REQUEUE = BIT(7),
274 IEEE80211_TXCTL_FIRST_FRAGMENT = (1<<8), 248 IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(8),
275 IEEE80211_TXCTL_SHORT_PREAMBLE = (1<<9), 249 IEEE80211_TX_CTL_SHORT_PREAMBLE = BIT(9),
276 IEEE80211_TXCTL_LONG_RETRY_LIMIT = (1<<10), 250 IEEE80211_TX_CTL_LONG_RETRY_LIMIT = BIT(10),
277 IEEE80211_TXCTL_EAPOL_FRAME = (1<<11), 251 IEEE80211_TX_CTL_EAPOL_FRAME = BIT(11),
278 IEEE80211_TXCTL_SEND_AFTER_DTIM = (1<<12), 252 IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(12),
279 IEEE80211_TXCTL_AMPDU = (1<<13), 253 IEEE80211_TX_CTL_AMPDU = BIT(13),
280 IEEE80211_TXCTL_OFDM_HT = (1<<14), 254 IEEE80211_TX_CTL_OFDM_HT = BIT(14),
281 IEEE80211_TXCTL_GREEN_FIELD = (1<<15), 255 IEEE80211_TX_CTL_GREEN_FIELD = BIT(15),
282 IEEE80211_TXCTL_40_MHZ_WIDTH = (1<<16), 256 IEEE80211_TX_CTL_40_MHZ_WIDTH = BIT(16),
283 IEEE80211_TXCTL_DUP_DATA = (1<<17), 257 IEEE80211_TX_CTL_DUP_DATA = BIT(17),
284 IEEE80211_TXCTL_SHORT_GI = (1<<18), 258 IEEE80211_TX_CTL_SHORT_GI = BIT(18),
259 IEEE80211_TX_CTL_INJECTED = BIT(19),
260 IEEE80211_TX_STAT_TX_FILTERED = BIT(20),
261 IEEE80211_TX_STAT_ACK = BIT(21),
262 IEEE80211_TX_STAT_AMPDU = BIT(22),
285}; 263};
286 264
287/* Transmit control fields. This data structure is passed to low-level driver
288 * with each TX frame. The low-level driver is responsible for configuring
289 * the hardware to use given values (depending on what is supported). */
290 265
291struct ieee80211_tx_control { 266#define IEEE80211_TX_INFO_DRIVER_DATA_SIZE \
292 struct ieee80211_vif *vif; 267 (sizeof(((struct sk_buff *)0)->cb) - 8)
293 struct ieee80211_rate *tx_rate; 268#define IEEE80211_TX_INFO_DRIVER_DATA_PTRS \
294 269 (IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *))
295 /* Transmit rate for RTS/CTS frame */ 270
296 struct ieee80211_rate *rts_cts_rate; 271/**
297 272 * struct ieee80211_tx_info - skb transmit information
298 /* retry rate for the last retries */ 273 *
299 struct ieee80211_rate *alt_retry_rate; 274 * This structure is placed in skb->cb for three uses:
300 275 * (1) mac80211 TX control - mac80211 tells the driver what to do
301 u32 flags; /* tx control flags defined above */ 276 * (2) driver internal use (if applicable)
302 u8 key_idx; /* keyidx from hw->set_key(), undefined if 277 * (3) TX status information - driver tells mac80211 what happened
303 * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */ 278 *
304 u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. 279 * @flags: transmit info flags, defined above
305 * This could be used when set_retry_limit 280 * @retry_count: number of retries
306 * is not implemented by the driver */ 281 * @excessive_retries: set to 1 if the frame was retried many times
307 u8 antenna_sel_tx; /* 0 = default/diversity, otherwise bit 282 * but not acknowledged
308 * position represents antenna number used */ 283 * @ampdu_ack_len: number of aggregated frames.
309 u8 icv_len; /* length of the ICV/MIC field in octets */ 284 * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
310 u8 iv_len; /* length of the IV field in octets */ 285 * @ampdu_ack_map: block ack bit map for the aggregation.
311 u8 queue; /* hardware queue to use for this frame; 286 * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
312 * 0 = highest, hw->queues-1 = lowest */ 287 * @ack_signal: signal strength of the ACK frame
313 u16 aid; /* Station AID */ 288 */
314 int type; /* internal */ 289struct ieee80211_tx_info {
290 /* common information */
291 u32 flags;
292 u8 band;
293 s8 tx_rate_idx;
294 u8 antenna_sel_tx;
295
296 /* 1 byte hole */
297
298 union {
299 struct {
300 struct ieee80211_vif *vif;
301 struct ieee80211_key_conf *hw_key;
302 unsigned long jiffies;
303 int ifindex;
304 u16 aid;
305 s8 rts_cts_rate_idx, alt_retry_rate_idx;
306 u8 retry_limit;
307 u8 icv_len;
308 u8 iv_len;
309 } control;
310 struct {
311 u64 ampdu_ack_map;
312 int ack_signal;
313 u8 retry_count;
314 bool excessive_retries;
315 u8 ampdu_ack_len;
316 } status;
317 void *driver_data[IEEE80211_TX_INFO_DRIVER_DATA_PTRS];
318 };
315}; 319};
316 320
321static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
322{
323 return (struct ieee80211_tx_info *)skb->cb;
324}
325
317 326
318/** 327/**
319 * enum mac80211_rx_flags - receive flags 328 * enum mac80211_rx_flags - receive flags
@@ -353,13 +362,16 @@ enum mac80211_rx_flags {
353 * The low-level driver should provide this information (the subset 362 * The low-level driver should provide this information (the subset
354 * supported by hardware) to the 802.11 code with each received 363 * supported by hardware) to the 802.11 code with each received
355 * frame. 364 * frame.
365 *
356 * @mactime: value in microseconds of the 64-bit Time Synchronization Function 366 * @mactime: value in microseconds of the 64-bit Time Synchronization Function
357 * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. 367 * (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
358 * @band: the active band when this frame was received 368 * @band: the active band when this frame was received
359 * @freq: frequency the radio was tuned to when receiving this frame, in MHz 369 * @freq: frequency the radio was tuned to when receiving this frame, in MHz
360 * @ssi: signal strength when receiving this frame 370 * @signal: signal strength when receiving this frame, either in dBm, in dB or
361 * @signal: used as 'qual' in statistics reporting 371 * unspecified depending on the hardware capabilities flags
362 * @noise: PHY noise when receiving this frame 372 * @IEEE80211_HW_SIGNAL_*
373 * @noise: noise when receiving this frame, in dBm.
374 * @qual: overall signal quality indication, in percent (0-100).
363 * @antenna: antenna used 375 * @antenna: antenna used
364 * @rate_idx: index of data rate into band's supported rates 376 * @rate_idx: index of data rate into band's supported rates
365 * @flag: %RX_FLAG_* 377 * @flag: %RX_FLAG_*
@@ -368,64 +380,15 @@ struct ieee80211_rx_status {
368 u64 mactime; 380 u64 mactime;
369 enum ieee80211_band band; 381 enum ieee80211_band band;
370 int freq; 382 int freq;
371 int ssi;
372 int signal; 383 int signal;
373 int noise; 384 int noise;
385 int qual;
374 int antenna; 386 int antenna;
375 int rate_idx; 387 int rate_idx;
376 int flag; 388 int flag;
377}; 389};
378 390
379/** 391/**
380 * enum ieee80211_tx_status_flags - transmit status flags
381 *
382 * Status flags to indicate various transmit conditions.
383 *
384 * @IEEE80211_TX_STATUS_TX_FILTERED: The frame was not transmitted
385 * because the destination STA was in powersave mode.
386 * @IEEE80211_TX_STATUS_ACK: Frame was acknowledged
387 * @IEEE80211_TX_STATUS_AMPDU: The frame was aggregated, so status
388 * is for the whole aggregation.
389 */
390enum ieee80211_tx_status_flags {
391 IEEE80211_TX_STATUS_TX_FILTERED = 1<<0,
392 IEEE80211_TX_STATUS_ACK = 1<<1,
393 IEEE80211_TX_STATUS_AMPDU = 1<<2,
394};
395
396/**
397 * struct ieee80211_tx_status - transmit status
398 *
399 * As much information as possible should be provided for each transmitted
400 * frame with ieee80211_tx_status().
401 *
402 * @control: a copy of the &struct ieee80211_tx_control passed to the driver
403 * in the tx() callback.
404 * @flags: transmit status flags, defined above
405 * @retry_count: number of retries
406 * @excessive_retries: set to 1 if the frame was retried many times
407 * but not acknowledged
408 * @ampdu_ack_len: number of aggregated frames.
409 * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
410 * @ampdu_ack_map: block ack bit map for the aggregation.
411 * relevant only if IEEE80211_TX_STATUS_AMPDU was set.
412 * @ack_signal: signal strength of the ACK frame
413 * @queue_length: ?? REMOVE
414 * @queue_number: ?? REMOVE
415 */
416struct ieee80211_tx_status {
417 struct ieee80211_tx_control control;
418 u8 flags;
419 u8 retry_count;
420 bool excessive_retries;
421 u8 ampdu_ack_len;
422 u64 ampdu_ack_map;
423 int ack_signal;
424 int queue_length;
425 int queue_number;
426};
427
428/**
429 * enum ieee80211_conf_flags - configuration flags 392 * enum ieee80211_conf_flags - configuration flags
430 * 393 *
431 * Flags to define PHY configuration options 394 * Flags to define PHY configuration options
@@ -580,7 +543,6 @@ struct ieee80211_if_conf {
580 u8 *ssid; 543 u8 *ssid;
581 size_t ssid_len; 544 size_t ssid_len;
582 struct sk_buff *beacon; 545 struct sk_buff *beacon;
583 struct ieee80211_tx_control *beacon_control;
584}; 546};
585 547
586/** 548/**
@@ -610,11 +572,14 @@ enum ieee80211_key_alg {
610 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by 572 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
611 * the driver for a TKIP key if it requires Michael MIC 573 * the driver for a TKIP key if it requires Michael MIC
612 * generation in software. 574 * generation in software.
575 * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
576 * that the key is pairwise rather then a shared key.
613 */ 577 */
614enum ieee80211_key_flags { 578enum ieee80211_key_flags {
615 IEEE80211_KEY_FLAG_WMM_STA = 1<<0, 579 IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
616 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, 580 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
617 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 581 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
582 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
618}; 583};
619 584
620/** 585/**
@@ -721,6 +686,25 @@ enum ieee80211_tkip_key_type {
721 * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE: 686 * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
722 * Hardware is not capable of receiving frames with short preamble on 687 * Hardware is not capable of receiving frames with short preamble on
723 * the 2.4 GHz band. 688 * the 2.4 GHz band.
689 *
690 * @IEEE80211_HW_SIGNAL_UNSPEC:
691 * Hardware can provide signal values but we don't know its units. We
692 * expect values between 0 and @max_signal.
693 * If possible please provide dB or dBm instead.
694 *
695 * @IEEE80211_HW_SIGNAL_DB:
696 * Hardware gives signal values in dB, decibel difference from an
697 * arbitrary, fixed reference. We expect values between 0 and @max_signal.
698 * If possible please provide dBm instead.
699 *
700 * @IEEE80211_HW_SIGNAL_DBM:
701 * Hardware gives signal values in dBm, decibel difference from
702 * one milliwatt. This is the preferred method since it is standardized
703 * between different devices. @max_signal does not need to be set.
704 *
705 * @IEEE80211_HW_NOISE_DBM:
706 * Hardware can provide noise (radio interference) values in units dBm,
707 * decibel difference from one milliwatt.
724 */ 708 */
725enum ieee80211_hw_flags { 709enum ieee80211_hw_flags {
726 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0, 710 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0,
@@ -728,6 +712,10 @@ enum ieee80211_hw_flags {
728 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, 712 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2,
729 IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, 713 IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
730 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, 714 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
715 IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
716 IEEE80211_HW_SIGNAL_DB = 1<<6,
717 IEEE80211_HW_SIGNAL_DBM = 1<<7,
718 IEEE80211_HW_NOISE_DBM = 1<<8,
731}; 719};
732 720
733/** 721/**
@@ -758,15 +746,18 @@ enum ieee80211_hw_flags {
758 * 746 *
759 * @channel_change_time: time (in microseconds) it takes to change channels. 747 * @channel_change_time: time (in microseconds) it takes to change channels.
760 * 748 *
761 * @max_rssi: Maximum value for ssi in RX information, use 749 * @max_signal: Maximum value for signal (rssi) in RX information, used
762 * negative numbers for dBm and 0 to indicate no support. 750 * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
763 *
764 * @max_signal: like @max_rssi, but for the signal value.
765 *
766 * @max_noise: like @max_rssi, but for the noise value.
767 * 751 *
768 * @queues: number of available hardware transmit queues for 752 * @queues: number of available hardware transmit queues for
769 * data packets. WMM/QoS requires at least four. 753 * data packets. WMM/QoS requires at least four, these
754 * queues need to have configurable access parameters.
755 *
756 * @ampdu_queues: number of available hardware transmit queues
757 * for A-MPDU packets, these have no access parameters
758 * because they're used only for A-MPDU frames. Note that
759 * mac80211 will not currently use any of the regular queues
760 * for aggregation.
770 * 761 *
771 * @rate_control_algorithm: rate control algorithm for this hardware. 762 * @rate_control_algorithm: rate control algorithm for this hardware.
772 * If unset (NULL), the default algorithm will be used. Must be 763 * If unset (NULL), the default algorithm will be used. Must be
@@ -785,10 +776,8 @@ struct ieee80211_hw {
785 unsigned int extra_tx_headroom; 776 unsigned int extra_tx_headroom;
786 int channel_change_time; 777 int channel_change_time;
787 int vif_data_size; 778 int vif_data_size;
788 u8 queues; 779 u16 queues, ampdu_queues;
789 s8 max_rssi;
790 s8 max_signal; 780 s8 max_signal;
791 s8 max_noise;
792}; 781};
793 782
794/** 783/**
@@ -813,6 +802,51 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
813 memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); 802 memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
814} 803}
815 804
805static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw)
806{
807#ifdef CONFIG_MAC80211_QOS
808 return hw->queues;
809#else
810 return 1;
811#endif
812}
813
814static inline int ieee80211_num_queues(struct ieee80211_hw *hw)
815{
816#ifdef CONFIG_MAC80211_QOS
817 return hw->queues + hw->ampdu_queues;
818#else
819 return 1;
820#endif
821}
822
823static inline struct ieee80211_rate *
824ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
825 const struct ieee80211_tx_info *c)
826{
827 if (WARN_ON(c->tx_rate_idx < 0))
828 return NULL;
829 return &hw->wiphy->bands[c->band]->bitrates[c->tx_rate_idx];
830}
831
832static inline struct ieee80211_rate *
833ieee80211_get_rts_cts_rate(const struct ieee80211_hw *hw,
834 const struct ieee80211_tx_info *c)
835{
836 if (c->control.rts_cts_rate_idx < 0)
837 return NULL;
838 return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx];
839}
840
841static inline struct ieee80211_rate *
842ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
843 const struct ieee80211_tx_info *c)
844{
845 if (c->control.alt_retry_rate_idx < 0)
846 return NULL;
847 return &hw->wiphy->bands[c->band]->bitrates[c->control.alt_retry_rate_idx];
848}
849
816/** 850/**
817 * DOC: Hardware crypto acceleration 851 * DOC: Hardware crypto acceleration
818 * 852 *
@@ -970,8 +1004,10 @@ enum ieee80211_ampdu_mlme_action {
970 * @tx: Handler that 802.11 module calls for each transmitted frame. 1004 * @tx: Handler that 802.11 module calls for each transmitted frame.
971 * skb contains the buffer starting from the IEEE 802.11 header. 1005 * skb contains the buffer starting from the IEEE 802.11 header.
972 * The low-level driver should send the frame out based on 1006 * The low-level driver should send the frame out based on
973 * configuration in the TX control data. Must be implemented and 1007 * configuration in the TX control data. This handler should,
974 * atomic. 1008 * preferably, never fail and stop queues appropriately, more
1009 * importantly, however, it must never fail for A-MPDU-queues.
1010 * Must be implemented and atomic.
975 * 1011 *
976 * @start: Called before the first netdevice attached to the hardware 1012 * @start: Called before the first netdevice attached to the hardware
977 * is enabled. This should turn on the hardware and must turn on 1013 * is enabled. This should turn on the hardware and must turn on
@@ -1063,15 +1099,13 @@ enum ieee80211_ampdu_mlme_action {
1063 * of assocaited station or AP. 1099 * of assocaited station or AP.
1064 * 1100 *
1065 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 1101 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1066 * bursting) for a hardware TX queue. The @queue parameter uses the 1102 * bursting) for a hardware TX queue. Must be atomic.
1067 * %IEEE80211_TX_QUEUE_* constants. Must be atomic.
1068 * 1103 *
1069 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1104 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1070 * to get number of currently queued packets (queue length), maximum queue 1105 * to get number of currently queued packets (queue length), maximum queue
1071 * size (limit), and total number of packets sent using each TX queue 1106 * size (limit), and total number of packets sent using each TX queue
1072 * (count). This information is used for WMM to find out which TX 1107 * (count). The 'stats' pointer points to an array that has hw->queues +
1073 * queues have room for more packets and by hostapd to provide 1108 * hw->ampdu_queues items.
1074 * statistics about the current queueing state to external programs.
1075 * 1109 *
1076 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, 1110 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
1077 * this is only used for IBSS mode debugging and, as such, is not a 1111 * this is only used for IBSS mode debugging and, as such, is not a
@@ -1107,8 +1141,7 @@ enum ieee80211_ampdu_mlme_action {
1107 * that TX/RX_STOP can pass NULL for this parameter. 1141 * that TX/RX_STOP can pass NULL for this parameter.
1108 */ 1142 */
1109struct ieee80211_ops { 1143struct ieee80211_ops {
1110 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, 1144 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
1111 struct ieee80211_tx_control *control);
1112 int (*start)(struct ieee80211_hw *hw); 1145 int (*start)(struct ieee80211_hw *hw);
1113 void (*stop)(struct ieee80211_hw *hw); 1146 void (*stop)(struct ieee80211_hw *hw);
1114 int (*add_interface)(struct ieee80211_hw *hw, 1147 int (*add_interface)(struct ieee80211_hw *hw,
@@ -1145,15 +1178,14 @@ struct ieee80211_ops {
1145 u32 short_retry, u32 long_retr); 1178 u32 short_retry, u32 long_retr);
1146 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1179 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1147 enum sta_notify_cmd, const u8 *addr); 1180 enum sta_notify_cmd, const u8 *addr);
1148 int (*conf_tx)(struct ieee80211_hw *hw, int queue, 1181 int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
1149 const struct ieee80211_tx_queue_params *params); 1182 const struct ieee80211_tx_queue_params *params);
1150 int (*get_tx_stats)(struct ieee80211_hw *hw, 1183 int (*get_tx_stats)(struct ieee80211_hw *hw,
1151 struct ieee80211_tx_queue_stats *stats); 1184 struct ieee80211_tx_queue_stats *stats);
1152 u64 (*get_tsf)(struct ieee80211_hw *hw); 1185 u64 (*get_tsf)(struct ieee80211_hw *hw);
1153 void (*reset_tsf)(struct ieee80211_hw *hw); 1186 void (*reset_tsf)(struct ieee80211_hw *hw);
1154 int (*beacon_update)(struct ieee80211_hw *hw, 1187 int (*beacon_update)(struct ieee80211_hw *hw,
1155 struct sk_buff *skb, 1188 struct sk_buff *skb);
1156 struct ieee80211_tx_control *control);
1157 int (*tx_last_beacon)(struct ieee80211_hw *hw); 1189 int (*tx_last_beacon)(struct ieee80211_hw *hw);
1158 int (*ampdu_action)(struct ieee80211_hw *hw, 1190 int (*ampdu_action)(struct ieee80211_hw *hw,
1159 enum ieee80211_ampdu_mlme_action action, 1191 enum ieee80211_ampdu_mlme_action action,
@@ -1349,13 +1381,9 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw,
1349 * 1381 *
1350 * @hw: the hardware the frame was transmitted by 1382 * @hw: the hardware the frame was transmitted by
1351 * @skb: the frame that was transmitted, owned by mac80211 after this call 1383 * @skb: the frame that was transmitted, owned by mac80211 after this call
1352 * @status: status information for this frame; the status pointer need not
1353 * be valid after this function returns and is not freed by mac80211,
1354 * it is recommended that it points to a stack area
1355 */ 1384 */
1356void ieee80211_tx_status(struct ieee80211_hw *hw, 1385void ieee80211_tx_status(struct ieee80211_hw *hw,
1357 struct sk_buff *skb, 1386 struct sk_buff *skb);
1358 struct ieee80211_tx_status *status);
1359 1387
1360/** 1388/**
1361 * ieee80211_tx_status_irqsafe - irq-safe transmit status callback 1389 * ieee80211_tx_status_irqsafe - irq-safe transmit status callback
@@ -1368,13 +1396,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
1368 * 1396 *
1369 * @hw: the hardware the frame was transmitted by 1397 * @hw: the hardware the frame was transmitted by
1370 * @skb: the frame that was transmitted, owned by mac80211 after this call 1398 * @skb: the frame that was transmitted, owned by mac80211 after this call
1371 * @status: status information for this frame; the status pointer need not
1372 * be valid after this function returns and is not freed by mac80211,
1373 * it is recommended that it points to a stack area
1374 */ 1399 */
1375void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 1400void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1376 struct sk_buff *skb, 1401 struct sk_buff *skb);
1377 struct ieee80211_tx_status *status);
1378 1402
1379/** 1403/**
1380 * ieee80211_beacon_get - beacon generation function 1404 * ieee80211_beacon_get - beacon generation function
@@ -1390,8 +1414,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1390 * is responsible of freeing it. 1414 * is responsible of freeing it.
1391 */ 1415 */
1392struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 1416struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1393 struct ieee80211_vif *vif, 1417 struct ieee80211_vif *vif);
1394 struct ieee80211_tx_control *control);
1395 1418
1396/** 1419/**
1397 * ieee80211_rts_get - RTS frame generation function 1420 * ieee80211_rts_get - RTS frame generation function
@@ -1399,7 +1422,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1399 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1422 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
1400 * @frame: pointer to the frame that is going to be protected by the RTS. 1423 * @frame: pointer to the frame that is going to be protected by the RTS.
1401 * @frame_len: the frame length (in octets). 1424 * @frame_len: the frame length (in octets).
1402 * @frame_txctl: &struct ieee80211_tx_control of the frame. 1425 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1403 * @rts: The buffer where to store the RTS frame. 1426 * @rts: The buffer where to store the RTS frame.
1404 * 1427 *
1405 * If the RTS frames are generated by the host system (i.e., not in 1428 * If the RTS frames are generated by the host system (i.e., not in
@@ -1409,7 +1432,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1409 */ 1432 */
1410void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1433void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1411 const void *frame, size_t frame_len, 1434 const void *frame, size_t frame_len,
1412 const struct ieee80211_tx_control *frame_txctl, 1435 const struct ieee80211_tx_info *frame_txctl,
1413 struct ieee80211_rts *rts); 1436 struct ieee80211_rts *rts);
1414 1437
1415/** 1438/**
@@ -1417,7 +1440,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1417 * @hw: pointer obtained from ieee80211_alloc_hw(). 1440 * @hw: pointer obtained from ieee80211_alloc_hw().
1418 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1441 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
1419 * @frame_len: the length of the frame that is going to be protected by the RTS. 1442 * @frame_len: the length of the frame that is going to be protected by the RTS.
1420 * @frame_txctl: &struct ieee80211_tx_control of the frame. 1443 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1421 * 1444 *
1422 * If the RTS is generated in firmware, but the host system must provide 1445 * If the RTS is generated in firmware, but the host system must provide
1423 * the duration field, the low-level driver uses this function to receive 1446 * the duration field, the low-level driver uses this function to receive
@@ -1425,7 +1448,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1425 */ 1448 */
1426__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, 1449__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
1427 struct ieee80211_vif *vif, size_t frame_len, 1450 struct ieee80211_vif *vif, size_t frame_len,
1428 const struct ieee80211_tx_control *frame_txctl); 1451 const struct ieee80211_tx_info *frame_txctl);
1429 1452
1430/** 1453/**
1431 * ieee80211_ctstoself_get - CTS-to-self frame generation function 1454 * ieee80211_ctstoself_get - CTS-to-self frame generation function
@@ -1433,7 +1456,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
1433 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1456 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
1434 * @frame: pointer to the frame that is going to be protected by the CTS-to-self. 1457 * @frame: pointer to the frame that is going to be protected by the CTS-to-self.
1435 * @frame_len: the frame length (in octets). 1458 * @frame_len: the frame length (in octets).
1436 * @frame_txctl: &struct ieee80211_tx_control of the frame. 1459 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1437 * @cts: The buffer where to store the CTS-to-self frame. 1460 * @cts: The buffer where to store the CTS-to-self frame.
1438 * 1461 *
1439 * If the CTS-to-self frames are generated by the host system (i.e., not in 1462 * If the CTS-to-self frames are generated by the host system (i.e., not in
@@ -1444,7 +1467,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
1444void ieee80211_ctstoself_get(struct ieee80211_hw *hw, 1467void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
1445 struct ieee80211_vif *vif, 1468 struct ieee80211_vif *vif,
1446 const void *frame, size_t frame_len, 1469 const void *frame, size_t frame_len,
1447 const struct ieee80211_tx_control *frame_txctl, 1470 const struct ieee80211_tx_info *frame_txctl,
1448 struct ieee80211_cts *cts); 1471 struct ieee80211_cts *cts);
1449 1472
1450/** 1473/**
@@ -1452,7 +1475,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
1452 * @hw: pointer obtained from ieee80211_alloc_hw(). 1475 * @hw: pointer obtained from ieee80211_alloc_hw().
1453 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1476 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
1454 * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. 1477 * @frame_len: the length of the frame that is going to be protected by the CTS-to-self.
1455 * @frame_txctl: &struct ieee80211_tx_control of the frame. 1478 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1456 * 1479 *
1457 * If the CTS-to-self is generated in firmware, but the host system must provide 1480 * If the CTS-to-self is generated in firmware, but the host system must provide
1458 * the duration field, the low-level driver uses this function to receive 1481 * the duration field, the low-level driver uses this function to receive
@@ -1461,7 +1484,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
1461__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, 1484__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
1462 struct ieee80211_vif *vif, 1485 struct ieee80211_vif *vif,
1463 size_t frame_len, 1486 size_t frame_len,
1464 const struct ieee80211_tx_control *frame_txctl); 1487 const struct ieee80211_tx_info *frame_txctl);
1465 1488
1466/** 1489/**
1467 * ieee80211_generic_frame_duration - Calculate the duration field for a frame 1490 * ieee80211_generic_frame_duration - Calculate the duration field for a frame
@@ -1500,8 +1523,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
1500 * use common code for all beacons. 1523 * use common code for all beacons.
1501 */ 1524 */
1502struct sk_buff * 1525struct sk_buff *
1503ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1526ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1504 struct ieee80211_tx_control *control);
1505 1527
1506/** 1528/**
1507 * ieee80211_get_hdrlen_from_skb - get header length from data 1529 * ieee80211_get_hdrlen_from_skb - get header length from data
@@ -1559,14 +1581,6 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
1559void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); 1581void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
1560 1582
1561/** 1583/**
1562 * ieee80211_start_queues - start all queues
1563 * @hw: pointer to as obtained from ieee80211_alloc_hw().
1564 *
1565 * Drivers should use this function instead of netif_start_queue.
1566 */
1567void ieee80211_start_queues(struct ieee80211_hw *hw);
1568
1569/**
1570 * ieee80211_stop_queues - stop all queues 1584 * ieee80211_stop_queues - stop all queues
1571 * @hw: pointer as obtained from ieee80211_alloc_hw(). 1585 * @hw: pointer as obtained from ieee80211_alloc_hw().
1572 * 1586 *
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index aa540e6be502..8df751b3be55 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -201,8 +201,11 @@ extern void unregister_pernet_gen_device(int id, struct pernet_operations *);
201struct ctl_path; 201struct ctl_path;
202struct ctl_table; 202struct ctl_table;
203struct ctl_table_header; 203struct ctl_table_header;
204
204extern struct ctl_table_header *register_net_sysctl_table(struct net *net, 205extern struct ctl_table_header *register_net_sysctl_table(struct net *net,
205 const struct ctl_path *path, struct ctl_table *table); 206 const struct ctl_path *path, struct ctl_table *table);
207extern struct ctl_table_header *register_net_sysctl_rotable(
208 const struct ctl_path *path, struct ctl_table *table);
206extern void unregister_net_sysctl_table(struct ctl_table_header *header); 209extern void unregister_net_sysctl_table(struct ctl_table_header *header);
207 210
208#endif /* __NET_NET_NAMESPACE_H */ 211#endif /* __NET_NET_NAMESPACE_H */
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 9bf059817aec..7573d52a4346 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -9,8 +9,6 @@
9#ifndef _NF_CONNTRACK_IPV4_H 9#ifndef _NF_CONNTRACK_IPV4_H
10#define _NF_CONNTRACK_IPV4_H 10#define _NF_CONNTRACK_IPV4_H
11 11
12/* Returns new sk_buff, or NULL */
13struct sk_buff *nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
14 12
15extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; 13extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
16 14
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 2dbd6c015b94..d77dec768dc2 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -223,6 +223,25 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
223 __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); 223 __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
224} 224}
225 225
226extern void __nf_ct_kill_acct(struct nf_conn *ct,
227 enum ip_conntrack_info ctinfo,
228 const struct sk_buff *skb,
229 int do_acct);
230
231/* kill conntrack and do accounting */
232static inline void nf_ct_kill_acct(struct nf_conn *ct,
233 enum ip_conntrack_info ctinfo,
234 const struct sk_buff *skb)
235{
236 __nf_ct_kill_acct(ct, ctinfo, skb, 1);
237}
238
239/* kill conntrack without accounting */
240static inline void nf_ct_kill(struct nf_conn *ct)
241{
242 __nf_ct_kill_acct(ct, 0, NULL, 0);
243}
244
226/* These are for NAT. Icky. */ 245/* These are for NAT. Icky. */
227/* Update TCP window tracking data when NAT mangles the packet */ 246/* Update TCP window tracking data when NAT mangles the packet */
228extern void nf_conntrack_tcp_update(const struct sk_buff *skb, 247extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 34ee348a2cf2..6ef90b5fafb3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -36,6 +36,7 @@ struct netns_ipv4 {
36 struct xt_table *iptable_mangle; 36 struct xt_table *iptable_mangle;
37 struct xt_table *iptable_raw; 37 struct xt_table *iptable_raw;
38 struct xt_table *arptable_filter; 38 struct xt_table *arptable_filter;
39 struct xt_table *iptable_security;
39#endif 40#endif
40 41
41 int sysctl_icmp_echo_ignore_all; 42 int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index ac053be6c256..5bacd838e88b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -35,6 +35,7 @@ struct netns_ipv6 {
35 struct xt_table *ip6table_filter; 35 struct xt_table *ip6table_filter;
36 struct xt_table *ip6table_mangle; 36 struct xt_table *ip6table_mangle;
37 struct xt_table *ip6table_raw; 37 struct xt_table *ip6table_raw;
38 struct xt_table *ip6table_security;
38#endif 39#endif
39 struct rt6_info *ip6_null_entry; 40 struct rt6_info *ip6_null_entry;
40 struct rt6_statistics *rt6_stats; 41 struct rt6_statistics *rt6_stats;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 90b1e8d23b16..5672d489e924 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -179,6 +179,8 @@ int sctp_eps_proc_init(void);
179void sctp_eps_proc_exit(void); 179void sctp_eps_proc_exit(void);
180int sctp_assocs_proc_init(void); 180int sctp_assocs_proc_init(void);
181void sctp_assocs_proc_exit(void); 181void sctp_assocs_proc_exit(void);
182int sctp_remaddr_proc_init(void);
183void sctp_remaddr_proc_exit(void);
182 184
183 185
184/* 186/*
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 7f25195f9855..fbc27ac8a09e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -300,6 +300,7 @@ struct sctp_sock {
300 300
301 /* The default SACK delay timeout for new associations. */ 301 /* The default SACK delay timeout for new associations. */
302 __u32 sackdelay; 302 __u32 sackdelay;
303 __u32 sackfreq;
303 304
304 /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ 305 /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
305 __u32 param_flags; 306 __u32 param_flags;
@@ -946,6 +947,7 @@ struct sctp_transport {
946 947
947 /* SACK delay timeout */ 948 /* SACK delay timeout */
948 unsigned long sackdelay; 949 unsigned long sackdelay;
950 __u32 sackfreq;
949 951
950 /* When was the last time (in jiffies) that we heard from this 952 /* When was the last time (in jiffies) that we heard from this
951 * transport? We use this to pick new active and retran paths. 953 * transport? We use this to pick new active and retran paths.
@@ -1553,6 +1555,7 @@ struct sctp_association {
1553 * : SACK's are not delayed (see Section 6). 1555 * : SACK's are not delayed (see Section 6).
1554 */ 1556 */
1555 __u8 sack_needed; /* Do we need to sack the peer? */ 1557 __u8 sack_needed; /* Do we need to sack the peer? */
1558 __u32 sack_cnt;
1556 1559
1557 /* These are capabilities which our peer advertised. */ 1560 /* These are capabilities which our peer advertised. */
1558 __u8 ecn_capable; /* Can peer do ECN? */ 1561 __u8 ecn_capable; /* Can peer do ECN? */
@@ -1662,6 +1665,7 @@ struct sctp_association {
1662 1665
1663 /* SACK delay timeout */ 1666 /* SACK delay timeout */
1664 unsigned long sackdelay; 1667 unsigned long sackdelay;
1668 __u32 sackfreq;
1665 1669
1666 1670
1667 unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES]; 1671 unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 9619b9d35c9e..f205b10f0ab9 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -93,8 +93,9 @@ enum sctp_optname {
93#define SCTP_STATUS SCTP_STATUS 93#define SCTP_STATUS SCTP_STATUS
94 SCTP_GET_PEER_ADDR_INFO, 94 SCTP_GET_PEER_ADDR_INFO,
95#define SCTP_GET_PEER_ADDR_INFO SCTP_GET_PEER_ADDR_INFO 95#define SCTP_GET_PEER_ADDR_INFO SCTP_GET_PEER_ADDR_INFO
96 SCTP_DELAYED_ACK_TIME, 96 SCTP_DELAYED_ACK,
97#define SCTP_DELAYED_ACK_TIME SCTP_DELAYED_ACK_TIME 97#define SCTP_DELAYED_ACK_TIME SCTP_DELAYED_ACK
98#define SCTP_DELAYED_ACK SCTP_DELAYED_ACK
98 SCTP_CONTEXT, /* Receive Context */ 99 SCTP_CONTEXT, /* Receive Context */
99#define SCTP_CONTEXT SCTP_CONTEXT 100#define SCTP_CONTEXT SCTP_CONTEXT
100 SCTP_FRAGMENT_INTERLEAVE, 101 SCTP_FRAGMENT_INTERLEAVE,
@@ -136,12 +137,14 @@ enum sctp_optname {
136#define SCTP_GET_LOCAL_ADDRS_NUM_OLD SCTP_GET_LOCAL_ADDRS_NUM_OLD 137#define SCTP_GET_LOCAL_ADDRS_NUM_OLD SCTP_GET_LOCAL_ADDRS_NUM_OLD
137 SCTP_GET_LOCAL_ADDRS_OLD, /* Get all local addresss. */ 138 SCTP_GET_LOCAL_ADDRS_OLD, /* Get all local addresss. */
138#define SCTP_GET_LOCAL_ADDRS_OLD SCTP_GET_LOCAL_ADDRS_OLD 139#define SCTP_GET_LOCAL_ADDRS_OLD SCTP_GET_LOCAL_ADDRS_OLD
139 SCTP_SOCKOPT_CONNECTX, /* CONNECTX requests. */ 140 SCTP_SOCKOPT_CONNECTX_OLD, /* CONNECTX old requests. */
140#define SCTP_SOCKOPT_CONNECTX SCTP_SOCKOPT_CONNECTX 141#define SCTP_SOCKOPT_CONNECTX_OLD SCTP_SOCKOPT_CONNECTX_OLD
141 SCTP_GET_PEER_ADDRS, /* Get all peer addresss. */ 142 SCTP_GET_PEER_ADDRS, /* Get all peer addresss. */
142#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS 143#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS
143 SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. */ 144 SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. */
144#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS 145#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
146 SCTP_SOCKOPT_CONNECTX, /* CONNECTX requests. */
147#define SCTP_SOCKOPT_CONNECTX SCTP_SOCKOPT_CONNECTX
145}; 148};
146 149
147/* 150/*
@@ -618,13 +621,26 @@ struct sctp_authkeyid {
618}; 621};
619 622
620 623
621/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 624/*
625 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
622 * 626 *
623 * This options will get or set the delayed ack timer. The time is set 627 * This option will effect the way delayed acks are performed. This
624 * in milliseconds. If the assoc_id is 0, then this sets or gets the 628 * option allows you to get or set the delayed ack time, in
625 * endpoints default delayed ack timer value. If the assoc_id field is 629 * milliseconds. It also allows changing the delayed ack frequency.
626 * non-zero, then the set or get effects the specified association. 630 * Changing the frequency to 1 disables the delayed sack algorithm. If
631 * the assoc_id is 0, then this sets or gets the endpoints default
632 * values. If the assoc_id field is non-zero, then the set or get
633 * effects the specified association for the one to many model (the
634 * assoc_id field is ignored by the one to one model). Note that if
635 * sack_delay or sack_freq are 0 when setting this option, then the
636 * current values will remain unchanged.
627 */ 637 */
638struct sctp_sack_info {
639 sctp_assoc_t sack_assoc_id;
640 uint32_t sack_delay;
641 uint32_t sack_freq;
642};
643
628struct sctp_assoc_value { 644struct sctp_assoc_value {
629 sctp_assoc_t assoc_id; 645 sctp_assoc_t assoc_id;
630 uint32_t assoc_value; 646 uint32_t assoc_value;
diff --git a/include/net/snmp.h b/include/net/snmp.h
index ce2f48507510..57c93628695f 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -14,8 +14,6 @@
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 * 16 *
17 * $Id: snmp.h,v 1.19 2001/06/14 13:40:46 davem Exp $
18 *
19 */ 17 */
20 18
21#ifndef _SNMP_H 19#ifndef _SNMP_H
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf54034019d9..b5a1b9eb12e8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -398,6 +398,8 @@ extern void tcp_parse_options(struct sk_buff *skb,
398 struct tcp_options_received *opt_rx, 398 struct tcp_options_received *opt_rx,
399 int estab); 399 int estab);
400 400
401extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
402
401/* 403/*
402 * TCP v4 functions exported for the inet6 API 404 * TCP v4 functions exported for the inet6 API
403 */ 405 */
@@ -1113,13 +1115,19 @@ struct tcp_md5sig_pool {
1113#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */ 1115#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */
1114 1116
1115/* - functions */ 1117/* - functions */
1118extern int tcp_calc_md5_hash(char *md5_hash,
1119 struct tcp_md5sig_key *key,
1120 int bplen,
1121 struct tcphdr *th,
1122 unsigned int tcplen,
1123 struct tcp_md5sig_pool *hp);
1124
1116extern int tcp_v4_calc_md5_hash(char *md5_hash, 1125extern int tcp_v4_calc_md5_hash(char *md5_hash,
1117 struct tcp_md5sig_key *key, 1126 struct tcp_md5sig_key *key,
1118 struct sock *sk, 1127 struct sock *sk,
1119 struct dst_entry *dst, 1128 struct dst_entry *dst,
1120 struct request_sock *req, 1129 struct request_sock *req,
1121 struct tcphdr *th, 1130 struct tcphdr *th,
1122 int protocol,
1123 unsigned int tcplen); 1131 unsigned int tcplen);
1124extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 1132extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1125 struct sock *addr_sk); 1133 struct sock *addr_sk);
@@ -1132,6 +1140,16 @@ extern int tcp_v4_md5_do_add(struct sock *sk,
1132extern int tcp_v4_md5_do_del(struct sock *sk, 1140extern int tcp_v4_md5_do_del(struct sock *sk,
1133 __be32 addr); 1141 __be32 addr);
1134 1142
1143#ifdef CONFIG_TCP_MD5SIG
1144#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \
1145 &(struct tcp_md5sig_key) { \
1146 .key = (twsk)->tw_md5_key, \
1147 .keylen = (twsk)->tw_md5_keylen, \
1148 } : NULL)
1149#else
1150#define tcp_twsk_md5_key(twsk) NULL
1151#endif
1152
1135extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void); 1153extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
1136extern void tcp_free_md5sig_pool(void); 1154extern void tcp_free_md5sig_pool(void);
1137 1155
@@ -1369,7 +1387,6 @@ struct tcp_sock_af_ops {
1369 struct dst_entry *dst, 1387 struct dst_entry *dst,
1370 struct request_sock *req, 1388 struct request_sock *req,
1371 struct tcphdr *th, 1389 struct tcphdr *th,
1372 int protocol,
1373 unsigned int len); 1390 unsigned int len);
1374 int (*md5_add) (struct sock *sk, 1391 int (*md5_add) (struct sock *sk,
1375 struct sock *addr_sk, 1392 struct sock *addr_sk,
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
index 11105bcc4457..9923e41a8215 100644
--- a/include/net/tipc/tipc_port.h
+++ b/include/net/tipc/tipc_port.h
@@ -84,7 +84,8 @@ struct tipc_port {
84u32 tipc_createport_raw(void *usr_handle, 84u32 tipc_createport_raw(void *usr_handle,
85 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 85 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
86 void (*wakeup)(struct tipc_port *), 86 void (*wakeup)(struct tipc_port *),
87 const u32 importance); 87 const u32 importance,
88 struct tipc_port **tp_ptr);
88 89
89int tipc_reject_msg(struct sk_buff *buf, u32 err); 90int tipc_reject_msg(struct sk_buff *buf, u32 err);
90 91
diff --git a/include/net/udp.h b/include/net/udp.h
index ccce83707046..7a8684855245 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -196,8 +196,8 @@ struct udp_seq_afinfo {
196struct udp_iter_state { 196struct udp_iter_state {
197 struct seq_net_private p; 197 struct seq_net_private p;
198 sa_family_t family; 198 sa_family_t family;
199 struct hlist_head *hashtable;
200 int bucket; 199 int bucket;
200 struct hlist_head *hashtable;
201}; 201};
202 202
203#ifdef CONFIG_PROC_FS 203#ifdef CONFIG_PROC_FS
diff --git a/include/net/wireless.h b/include/net/wireless.h
index 667b4080d30f..9324f8dd183e 100644
--- a/include/net/wireless.h
+++ b/include/net/wireless.h
@@ -39,12 +39,18 @@ enum ieee80211_band {
39 * on this channel. 39 * on this channel.
40 * @IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel. 40 * @IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
41 * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. 41 * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
42 * @IEEE80211_CHAN_NO_FAT_ABOVE: extension channel above this channel
43 * is not permitted.
44 * @IEEE80211_CHAN_NO_FAT_BELOW: extension channel below this channel
45 * is not permitted.
42 */ 46 */
43enum ieee80211_channel_flags { 47enum ieee80211_channel_flags {
44 IEEE80211_CHAN_DISABLED = 1<<0, 48 IEEE80211_CHAN_DISABLED = 1<<0,
45 IEEE80211_CHAN_PASSIVE_SCAN = 1<<1, 49 IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
46 IEEE80211_CHAN_NO_IBSS = 1<<2, 50 IEEE80211_CHAN_NO_IBSS = 1<<2,
47 IEEE80211_CHAN_RADAR = 1<<3, 51 IEEE80211_CHAN_RADAR = 1<<3,
52 IEEE80211_CHAN_NO_FAT_ABOVE = 1<<4,
53 IEEE80211_CHAN_NO_FAT_BELOW = 1<<5,
48}; 54};
49 55
50/** 56/**
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index b3b69fd51330..3e84b958186b 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1054,7 +1054,7 @@ retry:
1054 } 1054 }
1055 1055
1056 timeo = MAX_SCHEDULE_TIMEOUT; 1056 timeo = MAX_SCHEDULE_TIMEOUT;
1057 ret = netlink_attachskb(sock, nc, 0, &timeo, NULL); 1057 ret = netlink_attachskb(sock, nc, &timeo, NULL);
1058 if (ret == 1) 1058 if (ret == 1)
1059 goto retry; 1059 goto retry;
1060 if (ret) { 1060 if (ret) {
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e69244dd8de8..b69bf4e7c48b 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -16,10 +16,6 @@
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18 18
19/*
20 * $Id: bnep.h,v 1.5 2002/08/04 21:23:58 maxk Exp $
21 */
22
23#ifndef _BNEP_H 19#ifndef _BNEP_H
24#define _BNEP_H 20#define _BNEP_H
25 21
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f85d94643aaf..1d98a1b80da7 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -25,10 +25,6 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/*
29 * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $
30 */
31
32#include <linux/module.h> 28#include <linux/module.h>
33 29
34#include <linux/kernel.h> 30#include <linux/kernel.h>
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 95e3837e4312..d9fa0ab2c87f 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,10 +25,6 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/*
29 * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $
30 */
31
32#include <linux/module.h> 28#include <linux/module.h>
33 29
34#include <linux/socket.h> 30#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 201e5b1ce473..8ffb57f2303a 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,10 +24,6 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/*
28 * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $
29 */
30
31#include <linux/module.h> 27#include <linux/module.h>
32 28
33#include <linux/types.h> 29#include <linux/types.h>
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 0c2c93735e93..b4fb84e398e5 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * Bluetooth RFCOMM core. 25 * Bluetooth RFCOMM core.
26 *
27 * $Id: core.c,v 1.42 2002/10/01 23:26:25 maxk Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5083adcbfae5..c9054487670a 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 *
27 * $Id: sock.c,v 1.24 2002/10/03 01:00:34 maxk Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c9191871c1e0..be84f4fc1477 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * RFCOMM TTY. 25 * RFCOMM TTY.
26 *
27 * $Id: tty.c,v 1.24 2002/10/03 01:54:38 holtmann Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 8f3c58e5f7a5..cede010f4ddd 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br.c,v 1.47 2001/12/24 00:56:41 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index bf7787395fe0..a6ffc6c2a69f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_device.c,v 1.6 2001/12/24 00:59:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -21,12 +19,6 @@
21#include <asm/uaccess.h> 19#include <asm/uaccess.h>
22#include "br_private.h" 20#include "br_private.h"
23 21
24static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
25{
26 struct net_bridge *br = netdev_priv(dev);
27 return &br->statistics;
28}
29
30/* net device transmit always called with no BH (preempt_disabled) */ 22/* net device transmit always called with no BH (preempt_disabled) */
31int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 23int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
32{ 24{
@@ -34,8 +26,8 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
34 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
35 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
36 28
37 br->statistics.tx_packets++; 29 dev->stats.tx_packets++;
38 br->statistics.tx_bytes += skb->len; 30 dev->stats.tx_bytes += skb->len;
39 31
40 skb_reset_mac_header(skb); 32 skb_reset_mac_header(skb);
41 skb_pull(skb, ETH_HLEN); 33 skb_pull(skb, ETH_HLEN);
@@ -161,7 +153,6 @@ void br_dev_setup(struct net_device *dev)
161 ether_setup(dev); 153 ether_setup(dev);
162 154
163 dev->do_ioctl = br_dev_ioctl; 155 dev->do_ioctl = br_dev_ioctl;
164 dev->get_stats = br_dev_get_stats;
165 dev->hard_start_xmit = br_dev_xmit; 156 dev->hard_start_xmit = br_dev_xmit;
166 dev->open = br_dev_open; 157 dev->open = br_dev_open;
167 dev->set_multicast_list = br_dev_set_multicast_list; 158 dev->set_multicast_list = br_dev_set_multicast_list;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 72c5976a5ce3..4de74cdd091d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_fdb.c,v 1.6 2002/01/17 00:57:07 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd7c35c3c7b..512645727f51 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -115,7 +113,7 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
115 struct sk_buff *skb2; 113 struct sk_buff *skb2;
116 114
117 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) { 115 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
118 br->statistics.tx_dropped++; 116 br->dev->stats.tx_dropped++;
119 kfree_skb(skb); 117 kfree_skb(skb);
120 return; 118 return;
121 } 119 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c2397f503b0f..143c954681b8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_if.c,v 1.7 2001/12/24 00:59:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 255c00f60ce7..0145e9416714 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_input.c,v 1.10 2001/12/24 04:50:20 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -24,13 +22,13 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
24 22
25static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
26{ 24{
27 struct net_device *indev; 25 struct net_device *indev, *brdev = br->dev;
28 26
29 br->statistics.rx_packets++; 27 brdev->stats.rx_packets++;
30 br->statistics.rx_bytes += skb->len; 28 brdev->stats.rx_bytes += skb->len;
31 29
32 indev = skb->dev; 30 indev = skb->dev;
33 skb->dev = br->dev; 31 skb->dev = brdev;
34 32
35 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
36 netif_receive_skb); 34 netif_receive_skb);
@@ -64,7 +62,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
64 dst = NULL; 62 dst = NULL;
65 63
66 if (is_multicast_ether_addr(dest)) { 64 if (is_multicast_ether_addr(dest)) {
67 br->statistics.multicast++; 65 br->dev->stats.multicast++;
68 skb2 = skb; 66 skb2 = skb;
69 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
70 skb2 = skb; 68 skb2 = skb;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 0655a5f07f58..eeee218eed80 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_ioctl.c,v 1.4 2000/11/08 05:16:40 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 00644a544e3c..88d8ec7b3142 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_notify.c,v 1.2 2000/02/21 15:51:34 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c11b554fd109..83ff5861c2d2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private.h,v 1.7 2001/12/24 00:59:55 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
@@ -90,7 +88,6 @@ struct net_bridge
90 spinlock_t lock; 88 spinlock_t lock;
91 struct list_head port_list; 89 struct list_head port_list;
92 struct net_device *dev; 90 struct net_device *dev;
93 struct net_device_stats statistics;
94 spinlock_t hash_lock; 91 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 92 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list; 93 struct list_head age_list;
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index e29f01ac1adf..8b650f7fbfa0 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private_stp.h,v 1.3 2001/02/05 06:03:47 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e38034aa56f5..284d1b2fa1ff 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp.c,v 1.4 2000/06/19 10:13:35 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index ddeb6e5d45d6..9dc2de656965 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_bpdu.c,v 1.3 2001/11/10 02:35:25 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 1a430eccec9b..1a4e5c37a0cf 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_if.c,v 1.4 2001/04/14 21:14:39 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 77f5255e6915..772a140bfdf0 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_timer.c,v 1.3 2000/05/05 02:17:17 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7beeefa0f9c0..fb684c2ff8b6 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -83,6 +83,15 @@ config BRIDGE_EBT_IP
83 83
84 To compile it as a module, choose M here. If unsure, say N. 84 To compile it as a module, choose M here. If unsure, say N.
85 85
86config BRIDGE_EBT_IP6
87 tristate "ebt: IP6 filter support"
88 depends on BRIDGE_NF_EBTABLES
89 help
90 This option adds the IP6 match, which allows basic IPV6 header field
91 filtering.
92
93 To compile it as a module, choose M here. If unsure, say N.
94
86config BRIDGE_EBT_LIMIT 95config BRIDGE_EBT_LIMIT
87 tristate "ebt: limit match support" 96 tristate "ebt: limit match support"
88 depends on BRIDGE_NF_EBTABLES 97 depends on BRIDGE_NF_EBTABLES
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 83715d73a503..dd960645b413 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o
14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o 14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o
15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o 15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o
16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o 16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o
 17obj-$(CONFIG_BRIDGE_EBT_IP6) += ebt_ip6.o
17obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o 18obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o
18obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o 19obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o
19obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o 20obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
new file mode 100644
index 000000000000..36efb3a75249
--- /dev/null
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -0,0 +1,144 @@
1/*
2 * ebt_ip6
3 *
4 * Authors:
5 * Manohar Castelino <manohar.r.castelino@intel.com>
6 * Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
7 * Jan Engelhardt <jengelh@computergmbh.de>
8 *
9 * Summary:
10 * This is just a modification of the IPv4 code written by
11 * Bart De Schuymer <bdschuym@pandora.be>
12 * with the changes required to support IPv6
13 *
14 * Jan, 2008
15 */
16
17#include <linux/netfilter_bridge/ebtables.h>
18#include <linux/netfilter_bridge/ebt_ip6.h>
19#include <linux/ipv6.h>
20#include <net/ipv6.h>
21#include <linux/in.h>
22#include <linux/module.h>
23#include <net/dsfield.h>
24
25struct tcpudphdr {
26 __be16 src;
27 __be16 dst;
28};
29
30static int ebt_filter_ip6(const struct sk_buff *skb,
31 const struct net_device *in,
32 const struct net_device *out, const void *data,
33 unsigned int datalen)
34{
35 const struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
36 const struct ipv6hdr *ih6;
37 struct ipv6hdr _ip6h;
38 const struct tcpudphdr *pptr;
39 struct tcpudphdr _ports;
40 struct in6_addr tmp_addr;
41 int i;
42
43 ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
44 if (ih6 == NULL)
45 return EBT_NOMATCH;
46 if (info->bitmask & EBT_IP6_TCLASS &&
47 FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
48 return EBT_NOMATCH;
49 for (i = 0; i < 4; i++)
50 tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
51 info->smsk.in6_u.u6_addr32[i];
52 if (info->bitmask & EBT_IP6_SOURCE &&
53 FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
54 EBT_IP6_SOURCE))
55 return EBT_NOMATCH;
56 for (i = 0; i < 4; i++)
57 tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
58 info->dmsk.in6_u.u6_addr32[i];
59 if (info->bitmask & EBT_IP6_DEST &&
60 FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
61 return EBT_NOMATCH;
62 if (info->bitmask & EBT_IP6_PROTO) {
63 uint8_t nexthdr = ih6->nexthdr;
64 int offset_ph;
65
66 offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
67 if (offset_ph == -1)
68 return EBT_NOMATCH;
69 if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
70 return EBT_NOMATCH;
71 if (!(info->bitmask & EBT_IP6_DPORT) &&
72 !(info->bitmask & EBT_IP6_SPORT))
73 return EBT_MATCH;
74 pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
75 &_ports);
76 if (pptr == NULL)
77 return EBT_NOMATCH;
78 if (info->bitmask & EBT_IP6_DPORT) {
79 u32 dst = ntohs(pptr->dst);
80 if (FWINV(dst < info->dport[0] ||
81 dst > info->dport[1], EBT_IP6_DPORT))
82 return EBT_NOMATCH;
83 }
84 if (info->bitmask & EBT_IP6_SPORT) {
85 u32 src = ntohs(pptr->src);
86 if (FWINV(src < info->sport[0] ||
87 src > info->sport[1], EBT_IP6_SPORT))
88 return EBT_NOMATCH;
89 }
90 return EBT_MATCH;
91 }
92 return EBT_MATCH;
93}
94
95static int ebt_ip6_check(const char *tablename, unsigned int hookmask,
96 const struct ebt_entry *e, void *data, unsigned int datalen)
97{
98 struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
99
100 if (datalen != EBT_ALIGN(sizeof(struct ebt_ip6_info)))
101 return -EINVAL;
102 if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
103 return -EINVAL;
104 if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
105 return -EINVAL;
106 if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
107 if (info->invflags & EBT_IP6_PROTO)
108 return -EINVAL;
109 if (info->protocol != IPPROTO_TCP &&
110 info->protocol != IPPROTO_UDP &&
111 info->protocol != IPPROTO_UDPLITE &&
112 info->protocol != IPPROTO_SCTP &&
113 info->protocol != IPPROTO_DCCP)
114 return -EINVAL;
115 }
116 if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
117 return -EINVAL;
118 if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
119 return -EINVAL;
120 return 0;
121}
122
123static struct ebt_match filter_ip6 =
124{
125 .name = EBT_IP6_MATCH,
126 .match = ebt_filter_ip6,
127 .check = ebt_ip6_check,
128 .me = THIS_MODULE,
129};
130
131static int __init ebt_ip6_init(void)
132{
133 return ebt_register_match(&filter_ip6);
134}
135
136static void __exit ebt_ip6_fini(void)
137{
138 ebt_unregister_match(&filter_ip6);
139}
140
141module_init(ebt_ip6_init);
142module_exit(ebt_ip6_fini);
143MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
144MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 0b209e4aad0a..c883ec8a28b4 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -18,6 +18,9 @@
18#include <linux/if_arp.h> 18#include <linux/if_arp.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <net/netfilter/nf_log.h> 20#include <net/netfilter/nf_log.h>
21#include <linux/ipv6.h>
22#include <net/ipv6.h>
23#include <linux/in6.h>
21 24
22static DEFINE_SPINLOCK(ebt_log_lock); 25static DEFINE_SPINLOCK(ebt_log_lock);
23 26
@@ -58,6 +61,27 @@ static void print_MAC(const unsigned char *p)
58 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); 61 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':');
59} 62}
60 63
64static void
65print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
66{
67 if (protocol == IPPROTO_TCP ||
68 protocol == IPPROTO_UDP ||
69 protocol == IPPROTO_UDPLITE ||
70 protocol == IPPROTO_SCTP ||
71 protocol == IPPROTO_DCCP) {
72 const struct tcpudphdr *pptr;
73 struct tcpudphdr _ports;
74
75 pptr = skb_header_pointer(skb, offset,
76 sizeof(_ports), &_ports);
77 if (pptr == NULL) {
78 printk(" INCOMPLETE TCP/UDP header");
79 return;
80 }
81 printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
82 }
83}
84
61#define myNIPQUAD(a) a[0], a[1], a[2], a[3] 85#define myNIPQUAD(a) a[0], a[1], a[2], a[3]
62static void 86static void
63ebt_log_packet(unsigned int pf, unsigned int hooknum, 87ebt_log_packet(unsigned int pf, unsigned int hooknum,
@@ -95,23 +119,31 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
95 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP " 119 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP "
96 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), 120 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
97 NIPQUAD(ih->daddr), ih->tos, ih->protocol); 121 NIPQUAD(ih->daddr), ih->tos, ih->protocol);
98 if (ih->protocol == IPPROTO_TCP || 122 print_ports(skb, ih->protocol, ih->ihl*4);
99 ih->protocol == IPPROTO_UDP || 123 goto out;
100 ih->protocol == IPPROTO_UDPLITE || 124 }
101 ih->protocol == IPPROTO_SCTP || 125
102 ih->protocol == IPPROTO_DCCP) { 126 if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
103 const struct tcpudphdr *pptr; 127 htons(ETH_P_IPV6)) {
104 struct tcpudphdr _ports; 128 const struct ipv6hdr *ih;
105 129 struct ipv6hdr _iph;
106 pptr = skb_header_pointer(skb, ih->ihl*4, 130 uint8_t nexthdr;
107 sizeof(_ports), &_ports); 131 int offset_ph;
108 if (pptr == NULL) { 132
109 printk(" INCOMPLETE TCP/UDP header"); 133 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
110 goto out; 134 if (ih == NULL) {
111 } 135 printk(" INCOMPLETE IPv6 header");
112 printk(" SPT=%u DPT=%u", ntohs(pptr->src), 136 goto out;
113 ntohs(pptr->dst));
114 } 137 }
138 printk(" IPv6 SRC=%x:%x:%x:%x:%x:%x:%x:%x "
139 "IPv6 DST=%x:%x:%x:%x:%x:%x:%x:%x, IPv6 "
140 "priority=0x%01X, Next Header=%d", NIP6(ih->saddr),
141 NIP6(ih->daddr), ih->priority, ih->nexthdr);
142 nexthdr = ih->nexthdr;
143 offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
144 if (offset_ph == -1)
145 goto out;
146 print_ports(skb, nexthdr, offset_ph);
115 goto out; 147 goto out;
116 } 148 }
117 149
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 90e2177af081..dccd737ea2e3 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -242,11 +242,11 @@ static ssize_t netstat_show(const struct device *d,
242 offset % sizeof(unsigned long) != 0); 242 offset % sizeof(unsigned long) != 0);
243 243
244 read_lock(&dev_base_lock); 244 read_lock(&dev_base_lock);
245 if (dev_isalive(dev) && dev->get_stats && 245 if (dev_isalive(dev)) {
246 (stats = (*dev->get_stats)(dev))) 246 stats = dev->get_stats(dev);
247 ret = sprintf(buf, fmt_ulong, 247 ret = sprintf(buf, fmt_ulong,
248 *(unsigned long *)(((u8 *) stats) + offset)); 248 *(unsigned long *)(((u8 *) stats) + offset));
249 249 }
250 read_unlock(&dev_base_lock); 250 read_unlock(&dev_base_lock);
251 return ret; 251 return ret;
252} 252}
@@ -457,8 +457,7 @@ int netdev_register_kobject(struct net_device *net)
457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); 457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);
458 458
459#ifdef CONFIG_SYSFS 459#ifdef CONFIG_SYSFS
460 if (net->get_stats) 460 *groups++ = &netstat_group;
461 *groups++ = &netstat_group;
462 461
463#ifdef CONFIG_WIRELESS_EXT 462#ifdef CONFIG_WIRELESS_EXT
464 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) 463 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a9a77216310e..6c8d7f0ea01a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -607,6 +607,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
607{ 607{
608 struct ifinfomsg *ifm; 608 struct ifinfomsg *ifm;
609 struct nlmsghdr *nlh; 609 struct nlmsghdr *nlh;
610 struct net_device_stats *stats;
611 struct nlattr *attr;
610 612
611 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 613 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
612 if (nlh == NULL) 614 if (nlh == NULL)
@@ -653,19 +655,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
653 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); 655 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
654 } 656 }
655 657
656 if (dev->get_stats) { 658 attr = nla_reserve(skb, IFLA_STATS,
657 struct net_device_stats *stats = dev->get_stats(dev); 659 sizeof(struct rtnl_link_stats));
658 if (stats) { 660 if (attr == NULL)
659 struct nlattr *attr; 661 goto nla_put_failure;
660 662
661 attr = nla_reserve(skb, IFLA_STATS, 663 stats = dev->get_stats(dev);
662 sizeof(struct rtnl_link_stats)); 664 copy_rtnl_link_stats(nla_data(attr), stats);
663 if (attr == NULL)
664 goto nla_put_failure;
665
666 copy_rtnl_link_stats(nla_data(attr), stats);
667 }
668 }
669 665
670 if (dev->rtnl_link_ops) { 666 if (dev->rtnl_link_ops) {
671 if (rtnl_link_fill(skb, dev) < 0) 667 if (rtnl_link_fill(skb, dev) < 0)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1e556d312117..3e18f8525e82 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,8 +4,6 @@
4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk> 4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
5 * Florian La Roche <rzsfl@rz.uni-sb.de> 5 * Florian La Roche <rzsfl@rz.uni-sb.de>
6 * 6 *
7 * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
8 *
9 * Fixes: 7 * Fixes:
10 * Alan Cox : Fixed the worst of the load 8 * Alan Cox : Fixed the worst of the load
11 * balancer bugs. 9 * balancer bugs.
diff --git a/net/core/sock.c b/net/core/sock.c
index 88094cb09c06..3879bf65897e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,8 +7,6 @@
7 * handler for protocols to use and generic option handler. 7 * handler for protocols to use and generic option handler.
8 * 8 *
9 * 9 *
10 * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
11 *
12 * Authors: Ross Biro 10 * Authors: Ross Biro
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de> 12 * Florian La Roche, <flla@stud.uni-sb.de>
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5fc801057244..a570e2af22cb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -125,14 +125,6 @@ static struct ctl_table net_core_table[] = {
125#endif /* CONFIG_XFRM */ 125#endif /* CONFIG_XFRM */
126#endif /* CONFIG_NET */ 126#endif /* CONFIG_NET */
127 { 127 {
128 .ctl_name = NET_CORE_SOMAXCONN,
129 .procname = "somaxconn",
130 .data = &init_net.core.sysctl_somaxconn,
131 .maxlen = sizeof(int),
132 .mode = 0644,
133 .proc_handler = &proc_dointvec
134 },
135 {
136 .ctl_name = NET_CORE_BUDGET, 128 .ctl_name = NET_CORE_BUDGET,
137 .procname = "netdev_budget", 129 .procname = "netdev_budget",
138 .data = &netdev_budget, 130 .data = &netdev_budget,
@@ -151,6 +143,18 @@ static struct ctl_table net_core_table[] = {
151 { .ctl_name = 0 } 143 { .ctl_name = 0 }
152}; 144};
153 145
146static struct ctl_table netns_core_table[] = {
147 {
148 .ctl_name = NET_CORE_SOMAXCONN,
149 .procname = "somaxconn",
150 .data = &init_net.core.sysctl_somaxconn,
151 .maxlen = sizeof(int),
152 .mode = 0644,
153 .proc_handler = &proc_dointvec
154 },
155 { .ctl_name = 0 }
156};
157
154static __net_initdata struct ctl_path net_core_path[] = { 158static __net_initdata struct ctl_path net_core_path[] = {
155 { .procname = "net", .ctl_name = CTL_NET, }, 159 { .procname = "net", .ctl_name = CTL_NET, },
156 { .procname = "core", .ctl_name = NET_CORE, }, 160 { .procname = "core", .ctl_name = NET_CORE, },
@@ -159,23 +163,17 @@ static __net_initdata struct ctl_path net_core_path[] = {
159 163
160static __net_init int sysctl_core_net_init(struct net *net) 164static __net_init int sysctl_core_net_init(struct net *net)
161{ 165{
162 struct ctl_table *tbl, *tmp; 166 struct ctl_table *tbl;
163 167
164 net->core.sysctl_somaxconn = SOMAXCONN; 168 net->core.sysctl_somaxconn = SOMAXCONN;
165 169
166 tbl = net_core_table; 170 tbl = netns_core_table;
167 if (net != &init_net) { 171 if (net != &init_net) {
168 tbl = kmemdup(tbl, sizeof(net_core_table), GFP_KERNEL); 172 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
169 if (tbl == NULL) 173 if (tbl == NULL)
170 goto err_dup; 174 goto err_dup;
171 175
172 for (tmp = tbl; tmp->procname; tmp++) { 176 tbl[0].data = &net->core.sysctl_somaxconn;
173 if (tmp->data >= (void *)&init_net &&
174 tmp->data < (void *)(&init_net + 1))
175 tmp->data += (char *)net - (char *)&init_net;
176 else
177 tmp->mode &= ~0222;
178 }
179 } 177 }
180 178
181 net->core.sysctl_hdr = register_net_sysctl_table(net, 179 net->core.sysctl_hdr = register_net_sysctl_table(net,
@@ -186,7 +184,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
186 return 0; 184 return 0;
187 185
188err_reg: 186err_reg:
189 if (tbl != net_core_table) 187 if (tbl != netns_core_table)
190 kfree(tbl); 188 kfree(tbl);
191err_dup: 189err_dup:
192 return -ENOMEM; 190 return -ENOMEM;
@@ -198,7 +196,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
198 196
199 tbl = net->core.sysctl_hdr->ctl_table_arg; 197 tbl = net->core.sysctl_hdr->ctl_table_arg;
200 unregister_net_sysctl_table(net->core.sysctl_hdr); 198 unregister_net_sysctl_table(net->core.sysctl_hdr);
201 BUG_ON(tbl == net_core_table); 199 BUG_ON(tbl == netns_core_table);
202 kfree(tbl); 200 kfree(tbl);
203} 201}
204 202
@@ -209,6 +207,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
209 207
210static __init int sysctl_core_init(void) 208static __init int sysctl_core_init(void)
211{ 209{
210 register_net_sysctl_rotable(net_core_path, net_core_table);
212 return register_pernet_subsys(&sysctl_core_ops); 211 return register_pernet_subsys(&sysctl_core_ops);
213} 212}
214 213
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 200ee1e63728..69dbc342a464 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -391,7 +391,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
391 391
392 wstats.updated = 0; 392 wstats.updated = 0;
393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { 393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
394 wstats.level = rx_stats->rssi; 394 wstats.level = rx_stats->signal;
395 wstats.updated |= IW_QUAL_LEVEL_UPDATED; 395 wstats.updated |= IW_QUAL_LEVEL_UPDATED;
396 } else 396 } else
397 wstats.updated |= IW_QUAL_LEVEL_INVALID; 397 wstats.updated |= IW_QUAL_LEVEL_INVALID;
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index d8b02603cbe5..d996547f7a62 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -542,90 +542,4 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
542 return 1; 542 return 1;
543} 543}
544 544
545/* Incoming 802.11 strucure is converted to a TXB
546 * a block of 802.11 fragment packets (stored as skbs) */
547int ieee80211_tx_frame(struct ieee80211_device *ieee,
548 struct ieee80211_hdr *frame, int hdr_len, int total_len,
549 int encrypt_mpdu)
550{
551 struct ieee80211_txb *txb = NULL;
552 unsigned long flags;
553 struct net_device_stats *stats = &ieee->stats;
554 struct sk_buff *skb_frag;
555 int priority = -1;
556 int fraglen = total_len;
557 int headroom = ieee->tx_headroom;
558 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
559
560 spin_lock_irqsave(&ieee->lock, flags);
561
562 if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
563 encrypt_mpdu = 0;
564
565 /* If there is no driver handler to take the TXB, dont' bother
566 * creating it... */
567 if (!ieee->hard_start_xmit) {
568 printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
569 goto success;
570 }
571
572 if (unlikely(total_len < 24)) {
573 printk(KERN_WARNING "%s: skb too small (%d).\n",
574 ieee->dev->name, total_len);
575 goto success;
576 }
577
578 if (encrypt_mpdu) {
579 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
580 fraglen += crypt->ops->extra_mpdu_prefix_len +
581 crypt->ops->extra_mpdu_postfix_len;
582 headroom += crypt->ops->extra_mpdu_prefix_len;
583 }
584
585 /* When we allocate the TXB we allocate enough space for the reserve
586 * and full fragment bytes (bytes_per_frag doesn't include prefix,
587 * postfix, header, FCS, etc.) */
588 txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
589 if (unlikely(!txb)) {
590 printk(KERN_WARNING "%s: Could not allocate TXB\n",
591 ieee->dev->name);
592 goto failed;
593 }
594 txb->encrypted = 0;
595 txb->payload_size = fraglen;
596
597 skb_frag = txb->fragments[0];
598
599 memcpy(skb_put(skb_frag, total_len), frame, total_len);
600
601 if (ieee->config &
602 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
603 skb_put(skb_frag, 4);
604
605 /* To avoid overcomplicating things, we do the corner-case frame
606 * encryption in software. The only real situation where encryption is
607 * needed here is during software-based shared key authentication. */
608 if (encrypt_mpdu)
609 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
610
611 success:
612 spin_unlock_irqrestore(&ieee->lock, flags);
613
614 if (txb) {
615 if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
616 stats->tx_packets++;
617 stats->tx_bytes += txb->payload_size;
618 return 0;
619 }
620 ieee80211_txb_free(txb);
621 }
622 return 0;
623
624 failed:
625 spin_unlock_irqrestore(&ieee->lock, flags);
626 stats->tx_errors++;
627 return 1;
628}
629
630EXPORT_SYMBOL(ieee80211_tx_frame);
631EXPORT_SYMBOL(ieee80211_txb_free); 545EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 623489afa62c..822606b615ca 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -744,98 +744,9 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
744 return 0; 744 return 0;
745} 745}
746 746
747int ieee80211_wx_set_auth(struct net_device *dev,
748 struct iw_request_info *info,
749 union iwreq_data *wrqu,
750 char *extra)
751{
752 struct ieee80211_device *ieee = netdev_priv(dev);
753 unsigned long flags;
754 int err = 0;
755
756 spin_lock_irqsave(&ieee->lock, flags);
757
758 switch (wrqu->param.flags & IW_AUTH_INDEX) {
759 case IW_AUTH_WPA_VERSION:
760 case IW_AUTH_CIPHER_PAIRWISE:
761 case IW_AUTH_CIPHER_GROUP:
762 case IW_AUTH_KEY_MGMT:
763 /*
764 * Host AP driver does not use these parameters and allows
765 * wpa_supplicant to control them internally.
766 */
767 break;
768 case IW_AUTH_TKIP_COUNTERMEASURES:
769 break; /* FIXME */
770 case IW_AUTH_DROP_UNENCRYPTED:
771 ieee->drop_unencrypted = !!wrqu->param.value;
772 break;
773 case IW_AUTH_80211_AUTH_ALG:
774 break; /* FIXME */
775 case IW_AUTH_WPA_ENABLED:
776 ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value;
777 break;
778 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
779 ieee->ieee802_1x = !!wrqu->param.value;
780 break;
781 case IW_AUTH_PRIVACY_INVOKED:
782 ieee->privacy_invoked = !!wrqu->param.value;
783 break;
784 default:
785 err = -EOPNOTSUPP;
786 break;
787 }
788 spin_unlock_irqrestore(&ieee->lock, flags);
789 return err;
790}
791
792int ieee80211_wx_get_auth(struct net_device *dev,
793 struct iw_request_info *info,
794 union iwreq_data *wrqu,
795 char *extra)
796{
797 struct ieee80211_device *ieee = netdev_priv(dev);
798 unsigned long flags;
799 int err = 0;
800
801 spin_lock_irqsave(&ieee->lock, flags);
802
803 switch (wrqu->param.flags & IW_AUTH_INDEX) {
804 case IW_AUTH_WPA_VERSION:
805 case IW_AUTH_CIPHER_PAIRWISE:
806 case IW_AUTH_CIPHER_GROUP:
807 case IW_AUTH_KEY_MGMT:
808 case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */
809 case IW_AUTH_80211_AUTH_ALG: /* FIXME */
810 /*
811 * Host AP driver does not use these parameters and allows
812 * wpa_supplicant to control them internally.
813 */
814 err = -EOPNOTSUPP;
815 break;
816 case IW_AUTH_DROP_UNENCRYPTED:
817 wrqu->param.value = ieee->drop_unencrypted;
818 break;
819 case IW_AUTH_WPA_ENABLED:
820 wrqu->param.value = ieee->wpa_enabled;
821 break;
822 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
823 wrqu->param.value = ieee->ieee802_1x;
824 break;
825 default:
826 err = -EOPNOTSUPP;
827 break;
828 }
829 spin_unlock_irqrestore(&ieee->lock, flags);
830 return err;
831}
832
833EXPORT_SYMBOL(ieee80211_wx_set_encodeext); 747EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
834EXPORT_SYMBOL(ieee80211_wx_get_encodeext); 748EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
835 749
836EXPORT_SYMBOL(ieee80211_wx_get_scan); 750EXPORT_SYMBOL(ieee80211_wx_get_scan);
837EXPORT_SYMBOL(ieee80211_wx_set_encode); 751EXPORT_SYMBOL(ieee80211_wx_set_encode);
838EXPORT_SYMBOL(ieee80211_wx_get_encode); 752EXPORT_SYMBOL(ieee80211_wx_get_encode);
839
840EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth);
841EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24eca23c2db3..42bd24b64b57 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET protocol family socket handler. 6 * PF_INET protocol family socket handler.
7 * 7 *
8 * Version: $Id: af_inet.c,v 1.137 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de> 10 * Florian La Roche, <flla@stud.uni-sb.de>
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9b539fa9fe18..20c515a1be28 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,7 +1,5 @@
1/* linux/net/ipv4/arp.c 1/* linux/net/ipv4/arp.c
2 * 2 *
3 * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $
4 *
5 * Copyright (C) 1994 by Florian La Roche 3 * Copyright (C) 1994 by Florian La Roche
6 * 4 *
7 * This module implements the Address Resolution Protocol ARP (RFC 826), 5 * This module implements the Address Resolution Protocol ARP (RFC 826),
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 79a7ef6209ff..f8c0b0aea93a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * NET3 IP device support routines. 2 * NET3 IP device support routines.
3 * 3 *
4 * Version: $Id: devinet.c,v 1.44 2001/10/31 21:55:54 davem Exp $
5 *
6 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
@@ -1013,7 +1011,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1013 memcpy(old, ifa->ifa_label, IFNAMSIZ); 1011 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1014 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 1012 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1015 if (named++ == 0) 1013 if (named++ == 0)
1016 continue; 1014 goto skip;
1017 dot = strchr(old, ':'); 1015 dot = strchr(old, ':');
1018 if (dot == NULL) { 1016 if (dot == NULL) {
1019 sprintf(old, ":%d", named); 1017 sprintf(old, ":%d", named);
@@ -1024,6 +1022,8 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1024 } else { 1022 } else {
1025 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); 1023 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1026 } 1024 }
1025skip:
1026 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1027 } 1027 }
1028} 1028}
1029 1029
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0b2ac6a3d903..5ad01d63f83b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: FIB frontend. 6 * IPv4 Forwarding Information Base: FIB frontend.
7 * 7 *
8 * Version: $Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 2e2fc3376ac9..eeec4bf982b8 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 FIB: lookup engine and maintenance routines. 6 * IPv4 FIB: lookup engine and maintenance routines.
7 * 7 *
8 * Version: $Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0d4d72827e4b..ded2ae34eab1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: semantics. 6 * IPv4 Forwarding Information Base: semantics.
7 * 7 *
8 * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4b02d14e7ab9..394db9c941a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -22,8 +22,6 @@
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson 22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 * 24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 * 25 *
28 * Code from fib_hash has been reused which includes the following header: 26 * Code from fib_hash has been reused which includes the following header:
29 * 27 *
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 87397351ddac..aa7cf46853b7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Alan Cox, <alan@redhat.com> 4 * Alan Cox, <alan@redhat.com>
5 * 5 *
6 * Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2769dc4a4c84..68e84a933e90 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,8 +8,6 @@
8 * the older version didn't come out right using gcc 2.5.8, the newer one 8 * the older version didn't come out right using gcc 2.5.8, the newer one
9 * seems to fall out with gcc 2.6.2. 9 * seems to fall out with gcc 2.6.2.
10 * 10 *
11 * Version: $Id: igmp.c,v 1.47 2002/02/01 22:01:03 davem Exp $
12 *
13 * Authors: 11 * Authors:
14 * Alan Cox <Alan.Cox@linux.org> 12 * Alan Cox <Alan.Cox@linux.org>
15 * 13 *
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index da97695e7096..c10036e7a463 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * inet_diag.c Module for monitoring INET transport protocols sockets. 2 * inet_diag.c Module for monitoring INET transport protocols sockets.
3 * 3 *
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index af995198f643..a456ceeac3f2 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * This source is covered by the GNU GPL, the same as all kernel sources. 4 * This source is covered by the GNU GPL, the same as all kernel sources.
5 * 5 *
6 * Version: $Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
7 *
8 * Authors: Andrey V. Savochkin <saw@msu.ru> 6 * Authors: Andrey V. Savochkin <saw@msu.ru>
9 */ 7 */
10 8
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 4813c39b438b..37d36a3f33cd 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP forwarding functionality. 6 * The IP forwarding functionality.
7 * 7 *
8 * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index cd6ce6ac6358..91e321407313 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP fragmentation functionality. 6 * The IP fragmentation functionality.
7 * 7 *
8 * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> 8 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox <Alan.Cox@linux.org> 9 * Alan Cox <Alan.Cox@linux.org>
12 * 10 *
@@ -598,7 +596,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
598#ifdef CONFIG_SYSCTL 596#ifdef CONFIG_SYSCTL
599static int zero; 597static int zero;
600 598
601static struct ctl_table ip4_frags_ctl_table[] = { 599static struct ctl_table ip4_frags_ns_ctl_table[] = {
602 { 600 {
603 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, 601 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH,
604 .procname = "ipfrag_high_thresh", 602 .procname = "ipfrag_high_thresh",
@@ -624,6 +622,10 @@ static struct ctl_table ip4_frags_ctl_table[] = {
624 .proc_handler = &proc_dointvec_jiffies, 622 .proc_handler = &proc_dointvec_jiffies,
625 .strategy = &sysctl_jiffies 623 .strategy = &sysctl_jiffies
626 }, 624 },
625 { }
626};
627
628static struct ctl_table ip4_frags_ctl_table[] = {
627 { 629 {
628 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, 630 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL,
629 .procname = "ipfrag_secret_interval", 631 .procname = "ipfrag_secret_interval",
@@ -644,22 +646,20 @@ static struct ctl_table ip4_frags_ctl_table[] = {
644 { } 646 { }
645}; 647};
646 648
647static int ip4_frags_ctl_register(struct net *net) 649static int ip4_frags_ns_ctl_register(struct net *net)
648{ 650{
649 struct ctl_table *table; 651 struct ctl_table *table;
650 struct ctl_table_header *hdr; 652 struct ctl_table_header *hdr;
651 653
652 table = ip4_frags_ctl_table; 654 table = ip4_frags_ns_ctl_table;
653 if (net != &init_net) { 655 if (net != &init_net) {
654 table = kmemdup(table, sizeof(ip4_frags_ctl_table), GFP_KERNEL); 656 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
655 if (table == NULL) 657 if (table == NULL)
656 goto err_alloc; 658 goto err_alloc;
657 659
658 table[0].data = &net->ipv4.frags.high_thresh; 660 table[0].data = &net->ipv4.frags.high_thresh;
659 table[1].data = &net->ipv4.frags.low_thresh; 661 table[1].data = &net->ipv4.frags.low_thresh;
660 table[2].data = &net->ipv4.frags.timeout; 662 table[2].data = &net->ipv4.frags.timeout;
661 table[3].mode &= ~0222;
662 table[4].mode &= ~0222;
663 } 663 }
664 664
665 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); 665 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
@@ -676,7 +676,7 @@ err_alloc:
676 return -ENOMEM; 676 return -ENOMEM;
677} 677}
678 678
679static void ip4_frags_ctl_unregister(struct net *net) 679static void ip4_frags_ns_ctl_unregister(struct net *net)
680{ 680{
681 struct ctl_table *table; 681 struct ctl_table *table;
682 682
@@ -684,13 +684,22 @@ static void ip4_frags_ctl_unregister(struct net *net)
684 unregister_net_sysctl_table(net->ipv4.frags_hdr); 684 unregister_net_sysctl_table(net->ipv4.frags_hdr);
685 kfree(table); 685 kfree(table);
686} 686}
687
688static void ip4_frags_ctl_register(void)
689{
690 register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
691}
687#else 692#else
688static inline int ip4_frags_ctl_register(struct net *net) 693static inline int ip4_frags_ns_ctl_register(struct net *net)
689{ 694{
690 return 0; 695 return 0;
691} 696}
692 697
693static inline void ip4_frags_ctl_unregister(struct net *net) 698static inline void ip4_frags_ns_ctl_unregister(struct net *net)
699{
700}
701
702static inline void ip4_frags_ctl_register(void)
694{ 703{
695} 704}
696#endif 705#endif
@@ -714,12 +723,12 @@ static int ipv4_frags_init_net(struct net *net)
714 723
715 inet_frags_init_net(&net->ipv4.frags); 724 inet_frags_init_net(&net->ipv4.frags);
716 725
717 return ip4_frags_ctl_register(net); 726 return ip4_frags_ns_ctl_register(net);
718} 727}
719 728
720static void ipv4_frags_exit_net(struct net *net) 729static void ipv4_frags_exit_net(struct net *net)
721{ 730{
722 ip4_frags_ctl_unregister(net); 731 ip4_frags_ns_ctl_unregister(net);
723 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 732 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
724} 733}
725 734
@@ -730,6 +739,7 @@ static struct pernet_operations ip4_frags_ops = {
730 739
731void __init ipfrag_init(void) 740void __init ipfrag_init(void)
732{ 741{
742 ip4_frags_ctl_register();
733 register_pernet_subsys(&ip4_frags_ops); 743 register_pernet_subsys(&ip4_frags_ops);
734 ip4_frags.hashfn = ip4_hashfn; 744 ip4_frags.hashfn = ip4_hashfn;
735 ip4_frags.constructor = ip4_frag_init; 745 ip4_frags.constructor = ip4_frag_init;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4342cba4ff82..2a61158ea722 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -473,6 +473,8 @@ static int ipgre_rcv(struct sk_buff *skb)
473 read_lock(&ipgre_lock); 473 read_lock(&ipgre_lock);
474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), 474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
475 iph->saddr, iph->daddr, key)) != NULL) { 475 iph->saddr, iph->daddr, key)) != NULL) {
476 struct net_device_stats *stats = &tunnel->dev->stats;
477
476 secpath_reset(skb); 478 secpath_reset(skb);
477 479
478 skb->protocol = *(__be16*)(h + 2); 480 skb->protocol = *(__be16*)(h + 2);
@@ -497,28 +499,28 @@ static int ipgre_rcv(struct sk_buff *skb)
497 /* Looped back packet, drop it! */ 499 /* Looped back packet, drop it! */
498 if (skb->rtable->fl.iif == 0) 500 if (skb->rtable->fl.iif == 0)
499 goto drop; 501 goto drop;
500 tunnel->stat.multicast++; 502 stats->multicast++;
501 skb->pkt_type = PACKET_BROADCAST; 503 skb->pkt_type = PACKET_BROADCAST;
502 } 504 }
503#endif 505#endif
504 506
505 if (((flags&GRE_CSUM) && csum) || 507 if (((flags&GRE_CSUM) && csum) ||
506 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { 508 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
507 tunnel->stat.rx_crc_errors++; 509 stats->rx_crc_errors++;
508 tunnel->stat.rx_errors++; 510 stats->rx_errors++;
509 goto drop; 511 goto drop;
510 } 512 }
511 if (tunnel->parms.i_flags&GRE_SEQ) { 513 if (tunnel->parms.i_flags&GRE_SEQ) {
512 if (!(flags&GRE_SEQ) || 514 if (!(flags&GRE_SEQ) ||
513 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { 515 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
514 tunnel->stat.rx_fifo_errors++; 516 stats->rx_fifo_errors++;
515 tunnel->stat.rx_errors++; 517 stats->rx_errors++;
516 goto drop; 518 goto drop;
517 } 519 }
518 tunnel->i_seqno = seqno + 1; 520 tunnel->i_seqno = seqno + 1;
519 } 521 }
520 tunnel->stat.rx_packets++; 522 stats->rx_packets++;
521 tunnel->stat.rx_bytes += skb->len; 523 stats->rx_bytes += skb->len;
522 skb->dev = tunnel->dev; 524 skb->dev = tunnel->dev;
523 dst_release(skb->dst); 525 dst_release(skb->dst);
524 skb->dst = NULL; 526 skb->dst = NULL;
@@ -540,7 +542,7 @@ drop_nolock:
540static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 542static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
541{ 543{
542 struct ip_tunnel *tunnel = netdev_priv(dev); 544 struct ip_tunnel *tunnel = netdev_priv(dev);
543 struct net_device_stats *stats = &tunnel->stat; 545 struct net_device_stats *stats = &tunnel->dev->stats;
544 struct iphdr *old_iph = ip_hdr(skb); 546 struct iphdr *old_iph = ip_hdr(skb);
545 struct iphdr *tiph; 547 struct iphdr *tiph;
546 u8 tos; 548 u8 tos;
@@ -554,7 +556,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
554 int mtu; 556 int mtu;
555 557
556 if (tunnel->recursion++) { 558 if (tunnel->recursion++) {
557 tunnel->stat.collisions++; 559 stats->collisions++;
558 goto tx_error; 560 goto tx_error;
559 } 561 }
560 562
@@ -570,7 +572,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
570 /* NBMA tunnel */ 572 /* NBMA tunnel */
571 573
572 if (skb->dst == NULL) { 574 if (skb->dst == NULL) {
573 tunnel->stat.tx_fifo_errors++; 575 stats->tx_fifo_errors++;
574 goto tx_error; 576 goto tx_error;
575 } 577 }
576 578
@@ -621,7 +623,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
621 .tos = RT_TOS(tos) } }, 623 .tos = RT_TOS(tos) } },
622 .proto = IPPROTO_GRE }; 624 .proto = IPPROTO_GRE };
623 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 625 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
624 tunnel->stat.tx_carrier_errors++; 626 stats->tx_carrier_errors++;
625 goto tx_error; 627 goto tx_error;
626 } 628 }
627 } 629 }
@@ -629,7 +631,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
629 631
630 if (tdev == dev) { 632 if (tdev == dev) {
631 ip_rt_put(rt); 633 ip_rt_put(rt);
632 tunnel->stat.collisions++; 634 stats->collisions++;
633 goto tx_error; 635 goto tx_error;
634 } 636 }
635 637
@@ -954,11 +956,6 @@ done:
954 return err; 956 return err;
955} 957}
956 958
957static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
958{
959 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
960}
961
962static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 959static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
963{ 960{
964 struct ip_tunnel *tunnel = netdev_priv(dev); 961 struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1084,7 +1081,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1084 dev->uninit = ipgre_tunnel_uninit; 1081 dev->uninit = ipgre_tunnel_uninit;
1085 dev->destructor = free_netdev; 1082 dev->destructor = free_netdev;
1086 dev->hard_start_xmit = ipgre_tunnel_xmit; 1083 dev->hard_start_xmit = ipgre_tunnel_xmit;
1087 dev->get_stats = ipgre_tunnel_get_stats;
1088 dev->do_ioctl = ipgre_tunnel_ioctl; 1084 dev->do_ioctl = ipgre_tunnel_ioctl;
1089 dev->change_mtu = ipgre_tunnel_change_mtu; 1085 dev->change_mtu = ipgre_tunnel_change_mtu;
1090 1086
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ff77a4a7f9ec..7c26428ea67b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) module. 6 * The Internet Protocol (IP) module.
7 * 7 *
8 * Version: $Id: ip_input.c,v 1.55 2002/01/12 07:39:45 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 33126ad2cfdc..be3f18a7a40e 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The options processing module for ip.c 6 * The options processing module for ip.c
7 * 7 *
8 * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $
9 *
10 * Authors: A.N.Kuznetsov 8 * Authors: A.N.Kuznetsov
11 * 9 *
12 */ 10 */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e527628f56cf..f1278eecf56d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) output module. 6 * The Internet Protocol (IP) output module.
7 * 7 *
8 * Version: $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e0514e82308e..105d92a039b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP to API glue. 6 * The IP to API glue.
7 * 7 *
8 * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ed45037ce9be..b88aa9afa42e 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ipconfig.c,v 1.46 2002/02/01 22:01:04 davem Exp $
3 *
4 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or 2 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or
5 * user-supplied information to configure own IP address and routes. 3 * user-supplied information to configure own IP address and routes.
6 * 4 *
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index af5cb53da5cc..4c6d2caf9203 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Linux NET3: IP/IP protocol decoder. 2 * Linux NET3: IP/IP protocol decoder.
3 * 3 *
4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
5 *
6 * Authors: 4 * Authors:
7 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 5 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
8 * 6 *
@@ -368,8 +366,8 @@ static int ipip_rcv(struct sk_buff *skb)
368 skb->protocol = htons(ETH_P_IP); 366 skb->protocol = htons(ETH_P_IP);
369 skb->pkt_type = PACKET_HOST; 367 skb->pkt_type = PACKET_HOST;
370 368
371 tunnel->stat.rx_packets++; 369 tunnel->dev->stats.rx_packets++;
372 tunnel->stat.rx_bytes += skb->len; 370 tunnel->dev->stats.rx_bytes += skb->len;
373 skb->dev = tunnel->dev; 371 skb->dev = tunnel->dev;
374 dst_release(skb->dst); 372 dst_release(skb->dst);
375 skb->dst = NULL; 373 skb->dst = NULL;
@@ -392,7 +390,7 @@ static int ipip_rcv(struct sk_buff *skb)
392static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 390static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
393{ 391{
394 struct ip_tunnel *tunnel = netdev_priv(dev); 392 struct ip_tunnel *tunnel = netdev_priv(dev);
395 struct net_device_stats *stats = &tunnel->stat; 393 struct net_device_stats *stats = &tunnel->dev->stats;
396 struct iphdr *tiph = &tunnel->parms.iph; 394 struct iphdr *tiph = &tunnel->parms.iph;
397 u8 tos = tunnel->parms.iph.tos; 395 u8 tos = tunnel->parms.iph.tos;
398 __be16 df = tiph->frag_off; 396 __be16 df = tiph->frag_off;
@@ -405,7 +403,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
405 int mtu; 403 int mtu;
406 404
407 if (tunnel->recursion++) { 405 if (tunnel->recursion++) {
408 tunnel->stat.collisions++; 406 stats->collisions++;
409 goto tx_error; 407 goto tx_error;
410 } 408 }
411 409
@@ -418,7 +416,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
418 if (!dst) { 416 if (!dst) {
419 /* NBMA tunnel */ 417 /* NBMA tunnel */
420 if ((rt = skb->rtable) == NULL) { 418 if ((rt = skb->rtable) == NULL) {
421 tunnel->stat.tx_fifo_errors++; 419 stats->tx_fifo_errors++;
422 goto tx_error; 420 goto tx_error;
423 } 421 }
424 if ((dst = rt->rt_gateway) == 0) 422 if ((dst = rt->rt_gateway) == 0)
@@ -433,7 +431,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433 .tos = RT_TOS(tos) } }, 431 .tos = RT_TOS(tos) } },
434 .proto = IPPROTO_IPIP }; 432 .proto = IPPROTO_IPIP };
435 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 433 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
436 tunnel->stat.tx_carrier_errors++; 434 stats->tx_carrier_errors++;
437 goto tx_error_icmp; 435 goto tx_error_icmp;
438 } 436 }
439 } 437 }
@@ -441,7 +439,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
441 439
442 if (tdev == dev) { 440 if (tdev == dev) {
443 ip_rt_put(rt); 441 ip_rt_put(rt);
444 tunnel->stat.collisions++; 442 stats->collisions++;
445 goto tx_error; 443 goto tx_error;
446 } 444 }
447 445
@@ -451,7 +449,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
451 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 449 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
452 450
453 if (mtu < 68) { 451 if (mtu < 68) {
454 tunnel->stat.collisions++; 452 stats->collisions++;
455 ip_rt_put(rt); 453 ip_rt_put(rt);
456 goto tx_error; 454 goto tx_error;
457 } 455 }
@@ -685,11 +683,6 @@ done:
685 return err; 683 return err;
686} 684}
687 685
688static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
689{
690 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
691}
692
693static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) 686static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
694{ 687{
695 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 688 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -702,7 +695,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
702{ 695{
703 dev->uninit = ipip_tunnel_uninit; 696 dev->uninit = ipip_tunnel_uninit;
704 dev->hard_start_xmit = ipip_tunnel_xmit; 697 dev->hard_start_xmit = ipip_tunnel_xmit;
705 dev->get_stats = ipip_tunnel_get_stats;
706 dev->do_ioctl = ipip_tunnel_ioctl; 698 dev->do_ioctl = ipip_tunnel_ioctl;
707 dev->change_mtu = ipip_tunnel_change_mtu; 699 dev->change_mtu = ipip_tunnel_change_mtu;
708 dev->destructor = free_netdev; 700 dev->destructor = free_netdev;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 11700a4dcd95..300ab0c2919e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -9,8 +9,6 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
13 *
14 * Fixes: 12 * Fixes:
15 * Michael Chastain : Incorrect size of copying. 13 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code 14 * Alan Cox : Added the cache manager code
@@ -181,26 +179,20 @@ static int reg_vif_num = -1;
181static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 179static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
182{ 180{
183 read_lock(&mrt_lock); 181 read_lock(&mrt_lock);
184 ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; 182 dev->stats.tx_bytes += skb->len;
185 ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; 183 dev->stats.tx_packets++;
186 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 184 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
187 read_unlock(&mrt_lock); 185 read_unlock(&mrt_lock);
188 kfree_skb(skb); 186 kfree_skb(skb);
189 return 0; 187 return 0;
190} 188}
191 189
192static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
193{
194 return (struct net_device_stats*)netdev_priv(dev);
195}
196
197static void reg_vif_setup(struct net_device *dev) 190static void reg_vif_setup(struct net_device *dev)
198{ 191{
199 dev->type = ARPHRD_PIMREG; 192 dev->type = ARPHRD_PIMREG;
200 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 193 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
201 dev->flags = IFF_NOARP; 194 dev->flags = IFF_NOARP;
202 dev->hard_start_xmit = reg_vif_xmit; 195 dev->hard_start_xmit = reg_vif_xmit;
203 dev->get_stats = reg_vif_get_stats;
204 dev->destructor = free_netdev; 196 dev->destructor = free_netdev;
205} 197}
206 198
@@ -209,8 +201,7 @@ static struct net_device *ipmr_reg_vif(void)
209 struct net_device *dev; 201 struct net_device *dev;
210 struct in_device *in_dev; 202 struct in_device *in_dev;
211 203
212 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg", 204 dev = alloc_netdev(0, "pimreg", reg_vif_setup);
213 reg_vif_setup);
214 205
215 if (dev == NULL) 206 if (dev == NULL)
216 return NULL; 207 return NULL;
@@ -1170,8 +1161,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1170 if (vif->flags & VIFF_REGISTER) { 1161 if (vif->flags & VIFF_REGISTER) {
1171 vif->pkt_out++; 1162 vif->pkt_out++;
1172 vif->bytes_out+=skb->len; 1163 vif->bytes_out+=skb->len;
1173 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; 1164 vif->dev->stats.tx_bytes += skb->len;
1174 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; 1165 vif->dev->stats.tx_packets++;
1175 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1166 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1176 kfree_skb(skb); 1167 kfree_skb(skb);
1177 return; 1168 return;
@@ -1230,8 +1221,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1230 if (vif->flags & VIFF_TUNNEL) { 1221 if (vif->flags & VIFF_TUNNEL) {
1231 ip_encap(skb, vif->local, vif->remote); 1222 ip_encap(skb, vif->local, vif->remote);
1232 /* FIXME: extra output firewall step used to be here. --RR */ 1223 /* FIXME: extra output firewall step used to be here. --RR */
1233 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; 1224 vif->dev->stats.tx_packets++;
1234 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; 1225 vif->dev->stats.tx_bytes += skb->len;
1235 } 1226 }
1236 1227
1237 IPCB(skb)->flags |= IPSKB_FORWARDED; 1228 IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -1487,8 +1478,8 @@ int pim_rcv_v1(struct sk_buff * skb)
1487 skb->pkt_type = PACKET_HOST; 1478 skb->pkt_type = PACKET_HOST;
1488 dst_release(skb->dst); 1479 dst_release(skb->dst);
1489 skb->dst = NULL; 1480 skb->dst = NULL;
1490 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1481 reg_dev->stats.rx_bytes += skb->len;
1491 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1482 reg_dev->stats.rx_packets++;
1492 nf_reset(skb); 1483 nf_reset(skb);
1493 netif_rx(skb); 1484 netif_rx(skb);
1494 dev_put(reg_dev); 1485 dev_put(reg_dev);
@@ -1542,8 +1533,8 @@ static int pim_rcv(struct sk_buff * skb)
1542 skb->ip_summed = 0; 1533 skb->ip_summed = 0;
1543 skb->pkt_type = PACKET_HOST; 1534 skb->pkt_type = PACKET_HOST;
1544 dst_release(skb->dst); 1535 dst_release(skb->dst);
1545 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1536 reg_dev->stats.rx_bytes += skb->len;
1546 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1537 reg_dev->stats.rx_packets++;
1547 skb->dst = NULL; 1538 skb->dst = NULL;
1548 nf_reset(skb); 1539 nf_reset(skb);
1549 netif_rx(skb); 1540 netif_rx(skb);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 535abe0c45e7..1f1897a1a702 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_app.c: Application module support for IPVS 2 * ip_vs_app.c: Application module support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 65f1ba112752..f8bdae47a77f 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_conn.c,v 1.31 2003/04/18 09:03:16 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 963981a9d501..bcf6276ba4b2 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_core.c,v 1.34 2003/05/10 03:05:23 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 94c5767c8e01..9a5ace0b4dd6 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_ctl.c,v 1.36 2003/06/08 09:31:19 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index dcf5d46aaa5e..8afc1503ed20 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Destination Hashing scheduling module 2 * IPVS: Destination Hashing scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_dh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * Inspired by the consistent hashing scheduler patch from 6 * Inspired by the consistent hashing scheduler patch from
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index dfa0d713c801..bc04eedd6dbb 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_est.c: simple rate estimator for IPVS 2 * ip_vs_est.c: simple rate estimator for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 59aa166b7678..c1c758e4f733 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_ftp.c: IPVS ftp application module 2 * ip_vs_ftp.c: IPVS ftp application module
3 * 3 *
4 * Version: $Id: ip_vs_ftp.c,v 1.13 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 3888642706ad..0efa3db4b180 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection scheduling module 2 * IPVS: Locality-Based Least-Connection scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_lblc.c,v 1.10 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index daa260eb21cf..8e3bbeb45138 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection with Replication scheduler 2 * IPVS: Locality-Based Least-Connection with Replication scheduler
3 * 3 *
4 * Version: $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index d88fef90a641..ac9f08e065d5 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Least-Connection Scheduling module 2 * IPVS: Least-Connection Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_lc.c,v 1.10 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index bc2a9e5f2a7b..a46bf258d420 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Never Queue scheduling module 2 * IPVS: Never Queue scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 4b1c16cbb16b..876714f23d65 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto.c: transport protocol load balancing support for IPVS 2 * ip_vs_proto.c: transport protocol load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto.c,v 1.2 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
index 4bf835e1d86d..73e0ea87c1f5 100644
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ b/net/ipv4/ipvs/ip_vs_proto_ah.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS 2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_ah.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
5 *
6 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
7 * Wensong Zhang <wensong@linuxvirtualserver.org> 5 * Wensong Zhang <wensong@linuxvirtualserver.org>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
index db6a6b7b1a0b..21d70c8ffa54 100644
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_esp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS 2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_esp.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
5 *
6 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
7 * Wensong Zhang <wensong@linuxvirtualserver.org> 5 * Wensong Zhang <wensong@linuxvirtualserver.org>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index b83dc14b0a4d..d0ea467986a0 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS 2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_tcp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 75771cb3cd6f..c6be5d56823f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS 2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_udp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 433f8a947924..c8db12d39e61 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Round-Robin Scheduling module 2 * IPVS: Round-Robin Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_rr.c,v 1.9 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Peter Kese <peter.kese@ijs.si> 5 * Peter Kese <peter.kese@ijs.si>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index 121a32b1b756..b64767309855 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_sched.c,v 1.13 2003/05/10 03:05:23 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * 10 *
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index dd7c128f9db3..2a7d31358181 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Shortest Expected Delay scheduling module 2 * IPVS: Shortest Expected Delay scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_sed.c,v 1.1 2003/05/10 03:06:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 1b25b00ef1e1..b8fdfac65001 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Source Hashing scheduling module 2 * IPVS: Source Hashing scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_sh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index eff54efe0351..2d4a86f73325 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_sync.c,v 1.13 2003/06/08 09:31:19 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * 9 *
12 * ip_vs_sync: sync connection info from master load balancer to backups 10 * ip_vs_sync: sync connection info from master load balancer to backups
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 8a9d913261d8..772c3cb4eca1 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Weighted Least-Connection Scheduling module 2 * IPVS: Weighted Least-Connection Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Peter Kese <peter.kese@ijs.si> 5 * Peter Kese <peter.kese@ijs.si>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 85c680add6df..1d6932d7dc97 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Weighted Round-Robin Scheduling module 2 * IPVS: Weighted Round-Robin Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_wrr.c,v 1.12 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index f63006caea03..9892d4aca42e 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_xmit.c: various packet transmitters for IPVS 2 * ip_vs_xmit.c: various packet transmitters for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_xmit.c,v 1.2 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2767841a8cef..6e251402506e 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -365,6 +365,18 @@ config IP_NF_RAW
365 If you want to compile it as a module, say M here and read 365 If you want to compile it as a module, say M here and read
366 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 366 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
367 367
368# security table for MAC policy
369config IP_NF_SECURITY
370 tristate "Security table"
371 depends on IP_NF_IPTABLES
372 depends on SECURITY
373 default m if NETFILTER_ADVANCED=n
374 help
375 This option adds a `security' table to iptables, for use
376 with Mandatory Access Control (MAC) policy.
377
378 If unsure, say N.
379
368# ARP tables 380# ARP tables
369config IP_NF_ARPTABLES 381config IP_NF_ARPTABLES
370 tristate "ARP tables support" 382 tristate "ARP tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index d9b92fbf5579..3f31291f37ce 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
42obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o 42obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
43obj-$(CONFIG_NF_NAT) += iptable_nat.o 43obj-$(CONFIG_NF_NAT) += iptable_nat.o
44obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 44obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
45obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
45 46
46# matches 47# matches
47obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o 48obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 26a37cedcf2e..aa33a4a7a715 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -156,7 +156,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
156 case IPQ_COPY_META: 156 case IPQ_COPY_META:
157 case IPQ_COPY_NONE: 157 case IPQ_COPY_NONE:
158 size = NLMSG_SPACE(sizeof(*pmsg)); 158 size = NLMSG_SPACE(sizeof(*pmsg));
159 data_len = 0;
160 break; 159 break;
161 160
162 case IPQ_COPY_PACKET: 161 case IPQ_COPY_PACKET:
@@ -224,8 +223,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
224 return skb; 223 return skb;
225 224
226nlmsg_failure: 225nlmsg_failure:
227 if (skb)
228 kfree_skb(skb);
229 *errp = -EINVAL; 226 *errp = -EINVAL;
230 printk(KERN_ERR "ip_queue: error creating packet message\n"); 227 printk(KERN_ERR "ip_queue: error creating packet message\n");
231 return NULL; 228 return NULL;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
new file mode 100644
index 000000000000..2b472ac2263a
--- /dev/null
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -0,0 +1,180 @@
1/*
2 * "security" table
3 *
4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context
6 * to DAC.
7 *
8 * Based on iptable_mangle.c
9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18#include <linux/module.h>
19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <net/ip.h>
21
22MODULE_LICENSE("GPL");
23MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
24MODULE_DESCRIPTION("iptables security table, for MAC rules");
25
26#define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
27 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT)
29
30static struct
31{
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35} initial_table __initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58};
59
60static struct xt_table security_table = {
61 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS,
63 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
64 .me = THIS_MODULE,
65 .af = AF_INET,
66};
67
68static unsigned int
69ipt_local_in_hook(unsigned int hook,
70 struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{
75 return ipt_do_table(skb, hook, in, out,
76 nf_local_in_net(in, out)->ipv4.iptable_security);
77}
78
79static unsigned int
80ipt_forward_hook(unsigned int hook,
81 struct sk_buff *skb,
82 const struct net_device *in,
83 const struct net_device *out,
84 int (*okfn)(struct sk_buff *))
85{
86 return ipt_do_table(skb, hook, in, out,
87 nf_forward_net(in, out)->ipv4.iptable_security);
88}
89
90static unsigned int
91ipt_local_out_hook(unsigned int hook,
92 struct sk_buff *skb,
93 const struct net_device *in,
94 const struct net_device *out,
95 int (*okfn)(struct sk_buff *))
96{
97 /* Somebody is playing with raw sockets. */
98 if (skb->len < sizeof(struct iphdr)
99 || ip_hdrlen(skb) < sizeof(struct iphdr)) {
100 if (net_ratelimit())
101 printk(KERN_INFO "iptable_security: ignoring short "
102 "SOCK_RAW packet.\n");
103 return NF_ACCEPT;
104 }
105 return ipt_do_table(skb, hook, in, out,
106 nf_local_out_net(in, out)->ipv4.iptable_security);
107}
108
109static struct nf_hook_ops ipt_ops[] __read_mostly = {
110 {
111 .hook = ipt_local_in_hook,
112 .owner = THIS_MODULE,
113 .pf = PF_INET,
114 .hooknum = NF_INET_LOCAL_IN,
115 .priority = NF_IP_PRI_SECURITY,
116 },
117 {
118 .hook = ipt_forward_hook,
119 .owner = THIS_MODULE,
120 .pf = PF_INET,
121 .hooknum = NF_INET_FORWARD,
122 .priority = NF_IP_PRI_SECURITY,
123 },
124 {
125 .hook = ipt_local_out_hook,
126 .owner = THIS_MODULE,
127 .pf = PF_INET,
128 .hooknum = NF_INET_LOCAL_OUT,
129 .priority = NF_IP_PRI_SECURITY,
130 },
131};
132
133static int __net_init iptable_security_net_init(struct net *net)
134{
135 net->ipv4.iptable_security =
136 ipt_register_table(net, &security_table, &initial_table.repl);
137
138 if (IS_ERR(net->ipv4.iptable_security))
139 return PTR_ERR(net->ipv4.iptable_security);
140
141 return 0;
142}
143
144static void __net_exit iptable_security_net_exit(struct net *net)
145{
146 ipt_unregister_table(net->ipv4.iptable_security);
147}
148
149static struct pernet_operations iptable_security_net_ops = {
150 .init = iptable_security_net_init,
151 .exit = iptable_security_net_exit,
152};
153
154static int __init iptable_security_init(void)
155{
156 int ret;
157
158 ret = register_pernet_subsys(&iptable_security_net_ops);
159 if (ret < 0)
160 return ret;
161
162 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
163 if (ret < 0)
164 goto cleanup_table;
165
166 return ret;
167
168cleanup_table:
169 unregister_pernet_subsys(&iptable_security_net_ops);
170 return ret;
171}
172
173static void __exit iptable_security_fini(void)
174{
175 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
176 unregister_pernet_subsys(&iptable_security_net_ops);
177}
178
179module_init(iptable_security_init);
180module_exit(iptable_security_fini);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 78ab19accace..97791048fa9b 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -87,9 +87,8 @@ static int icmp_packet(struct nf_conn *ct,
87 means this will only run once even if count hits zero twice 87 means this will only run once even if count hits zero twice
88 (theoretically possible with SMP) */ 88 (theoretically possible with SMP) */
89 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 89 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
90 if (atomic_dec_and_test(&ct->proto.icmp.count) 90 if (atomic_dec_and_test(&ct->proto.icmp.count))
91 && del_timer(&ct->timeout)) 91 nf_ct_kill_acct(ct, ctinfo, skb);
92 ct->timeout.function((unsigned long)ct);
93 } else { 92 } else {
94 atomic_inc(&ct->proto.icmp.count); 93 atomic_inc(&ct->proto.icmp.count);
95 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); 94 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 552169b41b16..eb5cee279c5f 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,8 +7,6 @@
7 * PROC file system. It is mainly used for debugging and 7 * PROC file system. It is mainly used for debugging and
8 * statistics. 8 * statistics.
9 * 9 *
10 * Version: $Id: proc.c,v 1.45 2001/05/16 16:45:35 davem Exp $
11 *
12 * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> 11 * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
14 * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> 12 * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de>
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 971ab9356e51..ea50da0649fd 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * INET protocol dispatch tables. 6 * INET protocol dispatch tables.
7 * 7 *
8 * Version: $Id: protocol.c,v 1.14 2001/05/18 02:25:49 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * 10 *
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index e7e091d365ff..1d0c97c8712d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * RAW - implementation of IP "raw" sockets. 6 * RAW - implementation of IP "raw" sockets.
7 * 7 *
8 * Version: $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * 10 *
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 96be336064fb..fe3a02237286 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * ROUTE - implementation of the IP router. 6 * ROUTE - implementation of the IP router.
7 * 7 *
8 * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index d182a2a26291..fdde2ae07e24 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -8,8 +8,6 @@
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 *
12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
13 */ 11 */
14 12
15#include <linux/tcp.h> 13#include <linux/tcp.h>
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c437f804ee38..901607003205 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem. 2 * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem.
3 * 3 *
4 * $Id: sysctl_net_ipv4.c,v 1.50 2001/10/20 00:00:11 davem Exp $
5 *
6 * Begun April 1, 1996, Mike Shaver. 4 * Begun April 1, 1996, Mike Shaver.
7 * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS] 5 * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS]
8 */ 6 */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fc54a48fde1e..cf0850c068f5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -2463,6 +2461,76 @@ static unsigned long tcp_md5sig_users;
2463static struct tcp_md5sig_pool **tcp_md5sig_pool; 2461static struct tcp_md5sig_pool **tcp_md5sig_pool;
2464static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2462static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2465 2463
2464int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
2465 int bplen,
2466 struct tcphdr *th, unsigned int tcplen,
2467 struct tcp_md5sig_pool *hp)
2468{
2469 struct scatterlist sg[4];
2470 __u16 data_len;
2471 int block = 0;
2472 __sum16 cksum;
2473 struct hash_desc *desc = &hp->md5_desc;
2474 int err;
2475 unsigned int nbytes = 0;
2476
2477 sg_init_table(sg, 4);
2478
2479 /* 1. The TCP pseudo-header */
2480 sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
2481 nbytes += bplen;
2482
2483 /* 2. The TCP header, excluding options, and assuming a
2484 * checksum of zero
2485 */
2486 cksum = th->check;
2487 th->check = 0;
2488 sg_set_buf(&sg[block++], th, sizeof(*th));
2489 nbytes += sizeof(*th);
2490
2491 /* 3. The TCP segment data (if any) */
2492 data_len = tcplen - (th->doff << 2);
2493 if (data_len > 0) {
2494 u8 *data = (u8 *)th + (th->doff << 2);
2495 sg_set_buf(&sg[block++], data, data_len);
2496 nbytes += data_len;
2497 }
2498
2499 /* 4. an independently-specified key or password, known to both
2500 * TCPs and presumably connection-specific
2501 */
2502 sg_set_buf(&sg[block++], key->key, key->keylen);
2503 nbytes += key->keylen;
2504
2505 sg_mark_end(&sg[block - 1]);
2506
2507 /* Now store the hash into the packet */
2508 err = crypto_hash_init(desc);
2509 if (err) {
2510 if (net_ratelimit())
2511 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
2512 return -1;
2513 }
2514 err = crypto_hash_update(desc, sg, nbytes);
2515 if (err) {
2516 if (net_ratelimit())
2517 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
2518 return -1;
2519 }
2520 err = crypto_hash_final(desc, md5_hash);
2521 if (err) {
2522 if (net_ratelimit())
2523 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
2524 return -1;
2525 }
2526
2527 /* Reset header */
2528 th->check = cksum;
2529
2530 return 0;
2531}
2532EXPORT_SYMBOL(tcp_calc_md5_hash);
2533
2466static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2534static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2467{ 2535{
2468 int cpu; 2536 int cpu;
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 2fbcc7d1b1a0..838d491dfda7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * tcp_diag.c Module for monitoring TCP transport protocols sockets. 2 * tcp_diag.c Module for monitoring TCP transport protocols sockets.
3 * 3 *
4 * Version: $Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cad73b7dfef0..de30e70ff256 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -3450,6 +3448,43 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3450 return 1; 3448 return 1;
3451} 3449}
3452 3450
3451#ifdef CONFIG_TCP_MD5SIG
3452/*
3453 * Parse MD5 Signature option
3454 */
3455u8 *tcp_parse_md5sig_option(struct tcphdr *th)
3456{
3457 int length = (th->doff << 2) - sizeof (*th);
3458 u8 *ptr = (u8*)(th + 1);
3459
3460 /* If the TCP option is too short, we can short cut */
3461 if (length < TCPOLEN_MD5SIG)
3462 return NULL;
3463
3464 while (length > 0) {
3465 int opcode = *ptr++;
3466 int opsize;
3467
3468 switch(opcode) {
3469 case TCPOPT_EOL:
3470 return NULL;
3471 case TCPOPT_NOP:
3472 length--;
3473 continue;
3474 default:
3475 opsize = *ptr++;
3476 if (opsize < 2 || opsize > length)
3477 return NULL;
3478 if (opcode == TCPOPT_MD5SIG)
3479 return ptr;
3480 }
3481 ptr += opsize - 2;
3482 length -= opsize;
3483 }
3484 return NULL;
3485}
3486#endif
3487
3453static inline void tcp_store_ts_recent(struct tcp_sock *tp) 3488static inline void tcp_store_ts_recent(struct tcp_sock *tp)
3454{ 3489{
3455 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3490 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
@@ -5422,6 +5457,9 @@ EXPORT_SYMBOL(sysctl_tcp_ecn);
5422EXPORT_SYMBOL(sysctl_tcp_reordering); 5457EXPORT_SYMBOL(sysctl_tcp_reordering);
5423EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 5458EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5424EXPORT_SYMBOL(tcp_parse_options); 5459EXPORT_SYMBOL(tcp_parse_options);
5460#ifdef CONFIG_TCP_MD5SIG
5461EXPORT_SYMBOL(tcp_parse_md5sig_option);
5462#endif
5425EXPORT_SYMBOL(tcp_rcv_established); 5463EXPORT_SYMBOL(tcp_rcv_established);
5426EXPORT_SYMBOL(tcp_rcv_state_process); 5464EXPORT_SYMBOL(tcp_rcv_state_process);
5427EXPORT_SYMBOL(tcp_initialize_rcv_mss); 5465EXPORT_SYMBOL(tcp_initialize_rcv_mss);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 97a230026e13..b219a7a7cd08 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9 *
10 * IPv4 specific functions 8 * IPv4 specific functions
11 * 9 *
12 * 10 *
@@ -95,8 +93,13 @@ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
95 __be32 addr); 93 __be32 addr);
96static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 94static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
97 __be32 saddr, __be32 daddr, 95 __be32 saddr, __be32 daddr,
98 struct tcphdr *th, int protocol, 96 struct tcphdr *th, unsigned int tcplen);
99 unsigned int tcplen); 97#else
98static inline
99struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
100{
101 return NULL;
102}
100#endif 103#endif
101 104
102struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 105struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
@@ -586,8 +589,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
586 key, 589 key,
587 ip_hdr(skb)->daddr, 590 ip_hdr(skb)->daddr,
588 ip_hdr(skb)->saddr, 591 ip_hdr(skb)->saddr,
589 &rep.th, IPPROTO_TCP, 592 &rep.th, arg.iov[0].iov_len);
590 arg.iov[0].iov_len);
591 } 593 }
592#endif 594#endif
593 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 595 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -606,9 +608,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
606 outside socket context is ugly, certainly. What can I do? 608 outside socket context is ugly, certainly. What can I do?
607 */ 609 */
608 610
609static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, 611static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
610 struct sk_buff *skb, u32 seq, u32 ack, 612 u32 win, u32 ts, int oif,
611 u32 win, u32 ts) 613 struct tcp_md5sig_key *key)
612{ 614{
613 struct tcphdr *th = tcp_hdr(skb); 615 struct tcphdr *th = tcp_hdr(skb);
614 struct { 616 struct {
@@ -620,10 +622,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
620 ]; 622 ];
621 } rep; 623 } rep;
622 struct ip_reply_arg arg; 624 struct ip_reply_arg arg;
623#ifdef CONFIG_TCP_MD5SIG
624 struct tcp_md5sig_key *key;
625 struct tcp_md5sig_key tw_key;
626#endif
627 625
628 memset(&rep.th, 0, sizeof(struct tcphdr)); 626 memset(&rep.th, 0, sizeof(struct tcphdr));
629 memset(&arg, 0, sizeof(arg)); 627 memset(&arg, 0, sizeof(arg));
@@ -649,23 +647,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
649 rep.th.window = htons(win); 647 rep.th.window = htons(win);
650 648
651#ifdef CONFIG_TCP_MD5SIG 649#ifdef CONFIG_TCP_MD5SIG
652 /*
653 * The SKB holds an imcoming packet, but may not have a valid ->sk
654 * pointer. This is especially the case when we're dealing with a
655 * TIME_WAIT ack, because the sk structure is long gone, and only
656 * the tcp_timewait_sock remains. So the md5 key is stashed in that
657 * structure, and we use it in preference. I believe that (twsk ||
658 * skb->sk) holds true, but we program defensively.
659 */
660 if (!twsk && skb->sk) {
661 key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
662 } else if (twsk && twsk->tw_md5_keylen) {
663 tw_key.key = twsk->tw_md5_key;
664 tw_key.keylen = twsk->tw_md5_keylen;
665 key = &tw_key;
666 } else
667 key = NULL;
668
669 if (key) { 650 if (key) {
670 int offset = (ts) ? 3 : 0; 651 int offset = (ts) ? 3 : 0;
671 652
@@ -680,16 +661,15 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
680 key, 661 key,
681 ip_hdr(skb)->daddr, 662 ip_hdr(skb)->daddr,
682 ip_hdr(skb)->saddr, 663 ip_hdr(skb)->saddr,
683 &rep.th, IPPROTO_TCP, 664 &rep.th, arg.iov[0].iov_len);
684 arg.iov[0].iov_len);
685 } 665 }
686#endif 666#endif
687 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 667 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
688 ip_hdr(skb)->saddr, /* XXX */ 668 ip_hdr(skb)->saddr, /* XXX */
689 arg.iov[0].iov_len, IPPROTO_TCP, 0); 669 arg.iov[0].iov_len, IPPROTO_TCP, 0);
690 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 670 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
691 if (twsk) 671 if (oif)
692 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if; 672 arg.bound_dev_if = oif;
693 673
694 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb, 674 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
695 &arg, arg.iov[0].iov_len); 675 &arg, arg.iov[0].iov_len);
@@ -702,9 +682,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
702 struct inet_timewait_sock *tw = inet_twsk(sk); 682 struct inet_timewait_sock *tw = inet_twsk(sk);
703 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 683 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
704 684
705 tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 685 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
706 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 686 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
707 tcptw->tw_ts_recent); 687 tcptw->tw_ts_recent,
688 tw->tw_bound_dev_if,
689 tcp_twsk_md5_key(tcptw)
690 );
708 691
709 inet_twsk_put(tw); 692 inet_twsk_put(tw);
710} 693}
@@ -712,9 +695,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
712static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, 695static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
713 struct request_sock *req) 696 struct request_sock *req)
714{ 697{
715 tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, 698 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
716 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 699 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
717 req->ts_recent); 700 req->ts_recent,
701 0,
702 tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
718} 703}
719 704
720/* 705/*
@@ -1006,18 +991,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1006 991
1007static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 992static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1008 __be32 saddr, __be32 daddr, 993 __be32 saddr, __be32 daddr,
1009 struct tcphdr *th, int protocol, 994 struct tcphdr *th,
1010 unsigned int tcplen) 995 unsigned int tcplen)
1011{ 996{
1012 struct scatterlist sg[4];
1013 __u16 data_len;
1014 int block = 0;
1015 __sum16 old_checksum;
1016 struct tcp_md5sig_pool *hp; 997 struct tcp_md5sig_pool *hp;
1017 struct tcp4_pseudohdr *bp; 998 struct tcp4_pseudohdr *bp;
1018 struct hash_desc *desc;
1019 int err; 999 int err;
1020 unsigned int nbytes = 0;
1021 1000
1022 /* 1001 /*
1023 * Okay, so RFC2385 is turned on for this connection, 1002 * Okay, so RFC2385 is turned on for this connection,
@@ -1029,63 +1008,25 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1029 goto clear_hash_noput; 1008 goto clear_hash_noput;
1030 1009
1031 bp = &hp->md5_blk.ip4; 1010 bp = &hp->md5_blk.ip4;
1032 desc = &hp->md5_desc;
1033 1011
1034 /* 1012 /*
1035 * 1. the TCP pseudo-header (in the order: source IP address, 1013 * The TCP pseudo-header (in the order: source IP address,
1036 * destination IP address, zero-padded protocol number, and 1014 * destination IP address, zero-padded protocol number, and
1037 * segment length) 1015 * segment length)
1038 */ 1016 */
1039 bp->saddr = saddr; 1017 bp->saddr = saddr;
1040 bp->daddr = daddr; 1018 bp->daddr = daddr;
1041 bp->pad = 0; 1019 bp->pad = 0;
1042 bp->protocol = protocol; 1020 bp->protocol = IPPROTO_TCP;
1043 bp->len = htons(tcplen); 1021 bp->len = htons(tcplen);
1044 1022
1045 sg_init_table(sg, 4); 1023 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
1046 1024 th, tcplen, hp);
1047 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1048 nbytes += sizeof(*bp);
1049
1050 /* 2. the TCP header, excluding options, and assuming a
1051 * checksum of zero/
1052 */
1053 old_checksum = th->check;
1054 th->check = 0;
1055 sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
1056 nbytes += sizeof(struct tcphdr);
1057
1058 /* 3. the TCP segment data (if any) */
1059 data_len = tcplen - (th->doff << 2);
1060 if (data_len > 0) {
1061 unsigned char *data = (unsigned char *)th + (th->doff << 2);
1062 sg_set_buf(&sg[block++], data, data_len);
1063 nbytes += data_len;
1064 }
1065
1066 /* 4. an independently-specified key or password, known to both
1067 * TCPs and presumably connection-specific
1068 */
1069 sg_set_buf(&sg[block++], key->key, key->keylen);
1070 nbytes += key->keylen;
1071
1072 sg_mark_end(&sg[block - 1]);
1073
1074 /* Now store the Hash into the packet */
1075 err = crypto_hash_init(desc);
1076 if (err)
1077 goto clear_hash;
1078 err = crypto_hash_update(desc, sg, nbytes);
1079 if (err)
1080 goto clear_hash;
1081 err = crypto_hash_final(desc, md5_hash);
1082 if (err) 1025 if (err)
1083 goto clear_hash; 1026 goto clear_hash;
1084 1027
1085 /* Reset header, and free up the crypto */ 1028 /* Free up the crypto pool */
1086 tcp_put_md5sig_pool(); 1029 tcp_put_md5sig_pool();
1087 th->check = old_checksum;
1088
1089out: 1030out:
1090 return 0; 1031 return 0;
1091clear_hash: 1032clear_hash:
@@ -1099,7 +1040,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1099 struct sock *sk, 1040 struct sock *sk,
1100 struct dst_entry *dst, 1041 struct dst_entry *dst,
1101 struct request_sock *req, 1042 struct request_sock *req,
1102 struct tcphdr *th, int protocol, 1043 struct tcphdr *th,
1103 unsigned int tcplen) 1044 unsigned int tcplen)
1104{ 1045{
1105 __be32 saddr, daddr; 1046 __be32 saddr, daddr;
@@ -1115,7 +1056,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1115 } 1056 }
1116 return tcp_v4_do_calc_md5_hash(md5_hash, key, 1057 return tcp_v4_do_calc_md5_hash(md5_hash, key,
1117 saddr, daddr, 1058 saddr, daddr,
1118 th, protocol, tcplen); 1059 th, tcplen);
1119} 1060}
1120 1061
1121EXPORT_SYMBOL(tcp_v4_calc_md5_hash); 1062EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
@@ -1134,52 +1075,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1134 struct tcp_md5sig_key *hash_expected; 1075 struct tcp_md5sig_key *hash_expected;
1135 const struct iphdr *iph = ip_hdr(skb); 1076 const struct iphdr *iph = ip_hdr(skb);
1136 struct tcphdr *th = tcp_hdr(skb); 1077 struct tcphdr *th = tcp_hdr(skb);
1137 int length = (th->doff << 2) - sizeof(struct tcphdr);
1138 int genhash; 1078 int genhash;
1139 unsigned char *ptr;
1140 unsigned char newhash[16]; 1079 unsigned char newhash[16];
1141 1080
1142 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1081 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1082 hash_location = tcp_parse_md5sig_option(th);
1143 1083
1144 /*
1145 * If the TCP option length is less than the TCP_MD5SIG
1146 * option length, then we can shortcut
1147 */
1148 if (length < TCPOLEN_MD5SIG) {
1149 if (hash_expected)
1150 return 1;
1151 else
1152 return 0;
1153 }
1154
1155 /* Okay, we can't shortcut - we have to grub through the options */
1156 ptr = (unsigned char *)(th + 1);
1157 while (length > 0) {
1158 int opcode = *ptr++;
1159 int opsize;
1160
1161 switch (opcode) {
1162 case TCPOPT_EOL:
1163 goto done_opts;
1164 case TCPOPT_NOP:
1165 length--;
1166 continue;
1167 default:
1168 opsize = *ptr++;
1169 if (opsize < 2)
1170 goto done_opts;
1171 if (opsize > length)
1172 goto done_opts;
1173
1174 if (opcode == TCPOPT_MD5SIG) {
1175 hash_location = ptr;
1176 goto done_opts;
1177 }
1178 }
1179 ptr += opsize-2;
1180 length -= opsize;
1181 }
1182done_opts:
1183 /* We've parsed the options - do we have a hash? */ 1084 /* We've parsed the options - do we have a hash? */
1184 if (!hash_expected && !hash_location) 1085 if (!hash_expected && !hash_location)
1185 return 0; 1086 return 0;
@@ -1206,8 +1107,7 @@ done_opts:
1206 genhash = tcp_v4_do_calc_md5_hash(newhash, 1107 genhash = tcp_v4_do_calc_md5_hash(newhash,
1207 hash_expected, 1108 hash_expected,
1208 iph->saddr, iph->daddr, 1109 iph->saddr, iph->daddr,
1209 th, sk->sk_protocol, 1110 th, skb->len);
1210 skb->len);
1211 1111
1212 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1112 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1213 if (net_ratelimit()) { 1113 if (net_ratelimit()) {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8245247a6ceb..ea68a478fad6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ad993ecb4810..8f83ab432705 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -607,7 +605,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
607 md5, 605 md5,
608 sk, NULL, NULL, 606 sk, NULL, NULL,
609 tcp_hdr(skb), 607 tcp_hdr(skb),
610 sk->sk_protocol,
611 skb->len); 608 skb->len);
612 } 609 }
613#endif 610#endif
@@ -2266,7 +2263,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2266 tp->af_specific->calc_md5_hash(md5_hash_location, 2263 tp->af_specific->calc_md5_hash(md5_hash_location,
2267 md5, 2264 md5,
2268 NULL, dst, req, 2265 NULL, dst, req,
2269 tcp_hdr(skb), sk->sk_protocol, 2266 tcp_hdr(skb),
2270 skb->len); 2267 skb->len);
2271 } 2268 }
2272#endif 2269#endif
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 63ed9d6830e7..3e358cbb1247 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 56fcda3694ba..355e6d62d483 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The User Datagram Protocol (UDP). 6 * The User Datagram Protocol (UDP).
7 * 7 *
8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 72ce26b6c4d3..4ad16b6d5138 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). 2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828).
3 * 3 *
4 * Version: $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $
5 *
6 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 4 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 147588f4c7c0..9be6be3a7ff3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -231,6 +229,12 @@ static inline int addrconf_qdisc_ok(struct net_device *dev)
231 return (dev->qdisc != &noop_qdisc); 229 return (dev->qdisc != &noop_qdisc);
232} 230}
233 231
232/* Check if a route is valid prefix route */
233static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
234{
235 return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
236}
237
234static void addrconf_del_timer(struct inet6_ifaddr *ifp) 238static void addrconf_del_timer(struct inet6_ifaddr *ifp)
235{ 239{
236 if (del_timer(&ifp->timer)) 240 if (del_timer(&ifp->timer))
@@ -777,7 +781,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
777 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 781 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
778 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); 782 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
779 783
780 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 784 if (rt && addrconf_is_prefix_route(rt)) {
781 if (onlink == 0) { 785 if (onlink == 0) {
782 ip6_del_rt(rt); 786 ip6_del_rt(rt);
783 rt = NULL; 787 rt = NULL;
@@ -958,7 +962,8 @@ static inline int ipv6_saddr_preferred(int type)
958 return 0; 962 return 0;
959} 963}
960 964
961static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score, 965static int ipv6_get_saddr_eval(struct net *net,
966 struct ipv6_saddr_score *score,
962 struct ipv6_saddr_dst *dst, 967 struct ipv6_saddr_dst *dst,
963 int i) 968 int i)
964{ 969{
@@ -1037,7 +1042,8 @@ static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score,
1037 break; 1042 break;
1038 case IPV6_SADDR_RULE_LABEL: 1043 case IPV6_SADDR_RULE_LABEL:
1039 /* Rule 6: Prefer matching label */ 1044 /* Rule 6: Prefer matching label */
1040 ret = ipv6_addr_label(&score->ifa->addr, score->addr_type, 1045 ret = ipv6_addr_label(net,
1046 &score->ifa->addr, score->addr_type,
1041 score->ifa->idev->dev->ifindex) == dst->label; 1047 score->ifa->idev->dev->ifindex) == dst->label;
1042 break; 1048 break;
1043#ifdef CONFIG_IPV6_PRIVACY 1049#ifdef CONFIG_IPV6_PRIVACY
@@ -1091,7 +1097,7 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1091 dst.addr = daddr; 1097 dst.addr = daddr;
1092 dst.ifindex = dst_dev ? dst_dev->ifindex : 0; 1098 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1093 dst.scope = __ipv6_addr_src_scope(dst_type); 1099 dst.scope = __ipv6_addr_src_scope(dst_type);
1094 dst.label = ipv6_addr_label(daddr, dst_type, dst.ifindex); 1100 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1095 dst.prefs = prefs; 1101 dst.prefs = prefs;
1096 1102
1097 hiscore->rule = -1; 1103 hiscore->rule = -1;
@@ -1159,8 +1165,8 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1159 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { 1165 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1160 int minihiscore, miniscore; 1166 int minihiscore, miniscore;
1161 1167
1162 minihiscore = ipv6_get_saddr_eval(hiscore, &dst, i); 1168 minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
1163 miniscore = ipv6_get_saddr_eval(score, &dst, i); 1169 miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
1164 1170
1165 if (minihiscore > miniscore) { 1171 if (minihiscore > miniscore) {
1166 if (i == IPV6_SADDR_RULE_SCOPE && 1172 if (i == IPV6_SADDR_RULE_SCOPE &&
@@ -1788,7 +1794,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1788 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1794 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
1789 dev->ifindex, 1); 1795 dev->ifindex, 1);
1790 1796
1791 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1797 if (rt && addrconf_is_prefix_route(rt)) {
1792 /* Autoconf prefix route */ 1798 /* Autoconf prefix route */
1793 if (valid_lft == 0) { 1799 if (valid_lft == 0) {
1794 ip6_del_rt(rt); 1800 ip6_del_rt(rt);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 9bfa8846f262..08909039d87b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -29,6 +29,9 @@
29 */ 29 */
30struct ip6addrlbl_entry 30struct ip6addrlbl_entry
31{ 31{
32#ifdef CONFIG_NET_NS
33 struct net *lbl_net;
34#endif
32 struct in6_addr prefix; 35 struct in6_addr prefix;
33 int prefixlen; 36 int prefixlen;
34 int ifindex; 37 int ifindex;
@@ -46,6 +49,16 @@ static struct ip6addrlbl_table
46 u32 seq; 49 u32 seq;
47} ip6addrlbl_table; 50} ip6addrlbl_table;
48 51
52static inline
53struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
54{
55#ifdef CONFIG_NET_NS
56 return lbl->lbl_net;
57#else
58 return &init_net;
59#endif
60}
61
49/* 62/*
50 * Default policy table (RFC3484 + extensions) 63 * Default policy table (RFC3484 + extensions)
51 * 64 *
@@ -65,7 +78,7 @@ static struct ip6addrlbl_table
65 78
66#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL 79#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL
67 80
68static const __initdata struct ip6addrlbl_init_table 81static const __net_initdata struct ip6addrlbl_init_table
69{ 82{
70 const struct in6_addr *prefix; 83 const struct in6_addr *prefix;
71 int prefixlen; 84 int prefixlen;
@@ -108,6 +121,9 @@ static const __initdata struct ip6addrlbl_init_table
108/* Object management */ 121/* Object management */
109static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) 122static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p)
110{ 123{
124#ifdef CONFIG_NET_NS
125 release_net(p->lbl_net);
126#endif
111 kfree(p); 127 kfree(p);
112} 128}
113 129
@@ -128,10 +144,13 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
128} 144}
129 145
130/* Find label */ 146/* Find label */
131static int __ip6addrlbl_match(struct ip6addrlbl_entry *p, 147static int __ip6addrlbl_match(struct net *net,
148 struct ip6addrlbl_entry *p,
132 const struct in6_addr *addr, 149 const struct in6_addr *addr,
133 int addrtype, int ifindex) 150 int addrtype, int ifindex)
134{ 151{
152 if (!net_eq(ip6addrlbl_net(p), net))
153 return 0;
135 if (p->ifindex && p->ifindex != ifindex) 154 if (p->ifindex && p->ifindex != ifindex)
136 return 0; 155 return 0;
137 if (p->addrtype && p->addrtype != addrtype) 156 if (p->addrtype && p->addrtype != addrtype)
@@ -141,19 +160,21 @@ static int __ip6addrlbl_match(struct ip6addrlbl_entry *p,
141 return 1; 160 return 1;
142} 161}
143 162
144static struct ip6addrlbl_entry *__ipv6_addr_label(const struct in6_addr *addr, 163static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
164 const struct in6_addr *addr,
145 int type, int ifindex) 165 int type, int ifindex)
146{ 166{
147 struct hlist_node *pos; 167 struct hlist_node *pos;
148 struct ip6addrlbl_entry *p; 168 struct ip6addrlbl_entry *p;
149 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 169 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
150 if (__ip6addrlbl_match(p, addr, type, ifindex)) 170 if (__ip6addrlbl_match(net, p, addr, type, ifindex))
151 return p; 171 return p;
152 } 172 }
153 return NULL; 173 return NULL;
154} 174}
155 175
156u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) 176u32 ipv6_addr_label(struct net *net,
177 const struct in6_addr *addr, int type, int ifindex)
157{ 178{
158 u32 label; 179 u32 label;
159 struct ip6addrlbl_entry *p; 180 struct ip6addrlbl_entry *p;
@@ -161,7 +182,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
161 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; 182 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK;
162 183
163 rcu_read_lock(); 184 rcu_read_lock();
164 p = __ipv6_addr_label(addr, type, ifindex); 185 p = __ipv6_addr_label(net, addr, type, ifindex);
165 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; 186 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT;
166 rcu_read_unlock(); 187 rcu_read_unlock();
167 188
@@ -174,7 +195,8 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
174} 195}
175 196
176/* allocate one entry */ 197/* allocate one entry */
177static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, 198static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
199 const struct in6_addr *prefix,
178 int prefixlen, int ifindex, 200 int prefixlen, int ifindex,
179 u32 label) 201 u32 label)
180{ 202{
@@ -216,6 +238,9 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
216 newp->addrtype = addrtype; 238 newp->addrtype = addrtype;
217 newp->label = label; 239 newp->label = label;
218 INIT_HLIST_NODE(&newp->list); 240 INIT_HLIST_NODE(&newp->list);
241#ifdef CONFIG_NET_NS
242 newp->lbl_net = hold_net(net);
243#endif
219 atomic_set(&newp->refcnt, 1); 244 atomic_set(&newp->refcnt, 1);
220 return newp; 245 return newp;
221} 246}
@@ -237,6 +262,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
237 hlist_for_each_entry_safe(p, pos, n, 262 hlist_for_each_entry_safe(p, pos, n,
238 &ip6addrlbl_table.head, list) { 263 &ip6addrlbl_table.head, list) {
239 if (p->prefixlen == newp->prefixlen && 264 if (p->prefixlen == newp->prefixlen &&
265 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
240 p->ifindex == newp->ifindex && 266 p->ifindex == newp->ifindex &&
241 ipv6_addr_equal(&p->prefix, &newp->prefix)) { 267 ipv6_addr_equal(&p->prefix, &newp->prefix)) {
242 if (!replace) { 268 if (!replace) {
@@ -261,7 +287,8 @@ out:
261} 287}
262 288
263/* add a label */ 289/* add a label */
264static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, 290static int ip6addrlbl_add(struct net *net,
291 const struct in6_addr *prefix, int prefixlen,
265 int ifindex, u32 label, int replace) 292 int ifindex, u32 label, int replace)
266{ 293{
267 struct ip6addrlbl_entry *newp; 294 struct ip6addrlbl_entry *newp;
@@ -274,7 +301,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
274 (unsigned int)label, 301 (unsigned int)label,
275 replace); 302 replace);
276 303
277 newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label); 304 newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label);
278 if (IS_ERR(newp)) 305 if (IS_ERR(newp))
279 return PTR_ERR(newp); 306 return PTR_ERR(newp);
280 spin_lock(&ip6addrlbl_table.lock); 307 spin_lock(&ip6addrlbl_table.lock);
@@ -286,7 +313,8 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
286} 313}
287 314
288/* remove a label */ 315/* remove a label */
289static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 316static int __ip6addrlbl_del(struct net *net,
317 const struct in6_addr *prefix, int prefixlen,
290 int ifindex) 318 int ifindex)
291{ 319{
292 struct ip6addrlbl_entry *p = NULL; 320 struct ip6addrlbl_entry *p = NULL;
@@ -300,6 +328,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
300 328
301 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { 329 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
302 if (p->prefixlen == prefixlen && 330 if (p->prefixlen == prefixlen &&
331 net_eq(ip6addrlbl_net(p), net) &&
303 p->ifindex == ifindex && 332 p->ifindex == ifindex &&
304 ipv6_addr_equal(&p->prefix, prefix)) { 333 ipv6_addr_equal(&p->prefix, prefix)) {
305 hlist_del_rcu(&p->list); 334 hlist_del_rcu(&p->list);
@@ -311,7 +340,8 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
311 return ret; 340 return ret;
312} 341}
313 342
314static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 343static int ip6addrlbl_del(struct net *net,
344 const struct in6_addr *prefix, int prefixlen,
315 int ifindex) 345 int ifindex)
316{ 346{
317 struct in6_addr prefix_buf; 347 struct in6_addr prefix_buf;
@@ -324,13 +354,13 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
324 354
325 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); 355 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen);
326 spin_lock(&ip6addrlbl_table.lock); 356 spin_lock(&ip6addrlbl_table.lock);
327 ret = __ip6addrlbl_del(&prefix_buf, prefixlen, ifindex); 357 ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex);
328 spin_unlock(&ip6addrlbl_table.lock); 358 spin_unlock(&ip6addrlbl_table.lock);
329 return ret; 359 return ret;
330} 360}
331 361
332/* add default label */ 362/* add default label */
333static __init int ip6addrlbl_init(void) 363static int __net_init ip6addrlbl_net_init(struct net *net)
334{ 364{
335 int err = 0; 365 int err = 0;
336 int i; 366 int i;
@@ -338,7 +368,8 @@ static __init int ip6addrlbl_init(void)
338 ADDRLABEL(KERN_DEBUG "%s()\n", __func__); 368 ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
339 369
340 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { 370 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
341 int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, 371 int ret = ip6addrlbl_add(net,
372 ip6addrlbl_init_table[i].prefix,
342 ip6addrlbl_init_table[i].prefixlen, 373 ip6addrlbl_init_table[i].prefixlen,
343 0, 374 0,
344 ip6addrlbl_init_table[i].label, 0); 375 ip6addrlbl_init_table[i].label, 0);
@@ -349,11 +380,32 @@ static __init int ip6addrlbl_init(void)
349 return err; 380 return err;
350} 381}
351 382
383static void __net_exit ip6addrlbl_net_exit(struct net *net)
384{
385 struct ip6addrlbl_entry *p = NULL;
386 struct hlist_node *pos, *n;
387
388 /* Remove all labels belonging to the exiting net */
389 spin_lock(&ip6addrlbl_table.lock);
390 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
391 if (net_eq(ip6addrlbl_net(p), net)) {
392 hlist_del_rcu(&p->list);
393 ip6addrlbl_put(p);
394 }
395 }
396 spin_unlock(&ip6addrlbl_table.lock);
397}
398
399static struct pernet_operations ipv6_addr_label_ops = {
400 .init = ip6addrlbl_net_init,
401 .exit = ip6addrlbl_net_exit,
402};
403
352int __init ipv6_addr_label_init(void) 404int __init ipv6_addr_label_init(void)
353{ 405{
354 spin_lock_init(&ip6addrlbl_table.lock); 406 spin_lock_init(&ip6addrlbl_table.lock);
355 407
356 return ip6addrlbl_init(); 408 return register_pernet_subsys(&ipv6_addr_label_ops);
357} 409}
358 410
359static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 411static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
@@ -371,9 +423,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
371 u32 label; 423 u32 label;
372 int err = 0; 424 int err = 0;
373 425
374 if (net != &init_net)
375 return 0;
376
377 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 426 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
378 if (err < 0) 427 if (err < 0)
379 return err; 428 return err;
@@ -385,7 +434,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
385 return -EINVAL; 434 return -EINVAL;
386 435
387 if (ifal->ifal_index && 436 if (ifal->ifal_index &&
388 !__dev_get_by_index(&init_net, ifal->ifal_index)) 437 !__dev_get_by_index(net, ifal->ifal_index))
389 return -EINVAL; 438 return -EINVAL;
390 439
391 if (!tb[IFAL_ADDRESS]) 440 if (!tb[IFAL_ADDRESS])
@@ -403,12 +452,12 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
403 452
404 switch(nlh->nlmsg_type) { 453 switch(nlh->nlmsg_type) {
405 case RTM_NEWADDRLABEL: 454 case RTM_NEWADDRLABEL:
406 err = ip6addrlbl_add(pfx, ifal->ifal_prefixlen, 455 err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen,
407 ifal->ifal_index, label, 456 ifal->ifal_index, label,
408 nlh->nlmsg_flags & NLM_F_REPLACE); 457 nlh->nlmsg_flags & NLM_F_REPLACE);
409 break; 458 break;
410 case RTM_DELADDRLABEL: 459 case RTM_DELADDRLABEL:
411 err = ip6addrlbl_del(pfx, ifal->ifal_prefixlen, 460 err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen,
412 ifal->ifal_index); 461 ifal->ifal_index);
413 break; 462 break;
414 default: 463 default:
@@ -458,12 +507,10 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
458 int idx = 0, s_idx = cb->args[0]; 507 int idx = 0, s_idx = cb->args[0];
459 int err; 508 int err;
460 509
461 if (net != &init_net)
462 return 0;
463
464 rcu_read_lock(); 510 rcu_read_lock();
465 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 511 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
466 if (idx >= s_idx) { 512 if (idx >= s_idx &&
513 net_eq(ip6addrlbl_net(p), net)) {
467 if ((err = ip6addrlbl_fill(skb, p, 514 if ((err = ip6addrlbl_fill(skb, p,
468 ip6addrlbl_table.seq, 515 ip6addrlbl_table.seq,
469 NETLINK_CB(cb->skb).pid, 516 NETLINK_CB(cb->skb).pid,
@@ -499,9 +546,6 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
499 struct ip6addrlbl_entry *p; 546 struct ip6addrlbl_entry *p;
500 struct sk_buff *skb; 547 struct sk_buff *skb;
501 548
502 if (net != &init_net)
503 return 0;
504
505 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 549 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
506 if (err < 0) 550 if (err < 0)
507 return err; 551 return err;
@@ -513,7 +557,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
513 return -EINVAL; 557 return -EINVAL;
514 558
515 if (ifal->ifal_index && 559 if (ifal->ifal_index &&
516 !__dev_get_by_index(&init_net, ifal->ifal_index)) 560 !__dev_get_by_index(net, ifal->ifal_index))
517 return -EINVAL; 561 return -EINVAL;
518 562
519 if (!tb[IFAL_ADDRESS]) 563 if (!tb[IFAL_ADDRESS])
@@ -524,7 +568,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
524 return -EINVAL; 568 return -EINVAL;
525 569
526 rcu_read_lock(); 570 rcu_read_lock();
527 p = __ipv6_addr_label(addr, ipv6_addr_type(addr), ifal->ifal_index); 571 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
528 if (p && ip6addrlbl_hold(p)) 572 if (p && ip6addrlbl_hold(p))
529 p = NULL; 573 p = NULL;
530 lseq = ip6addrlbl_table.seq; 574 lseq = ip6addrlbl_table.seq;
@@ -552,7 +596,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
552 goto out; 596 goto out;
553 } 597 }
554 598
555 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 599 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
556out: 600out:
557 return err; 601 return err;
558} 602}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e84b3fd17fb4..350457c761e6 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/af_inet.c 8 * Adapted from linux/net/ipv4/af_inet.c
9 * 9 *
10 * $Id: af_inet6.c,v 1.66 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * piggy, Karl Knutson : Socket protocol table 11 * piggy, Karl Knutson : Socket protocol table
14 * Hideaki YOSHIFUJI : sin6_scope_id support 12 * Hideaki YOSHIFUJI : sin6_scope_id support
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0f0f94a40335..f7b535dec860 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3cd1c993d52b..602ea826f0a5 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -7,8 +7,6 @@
7 * Andi Kleen <ak@muc.de> 7 * Andi Kleen <ak@muc.de>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 * 9 *
10 * $Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index d42dd16d3487..399d41f65437 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on net/ipv4/icmp.c 8 * Based on net/ipv4/icmp.c
11 * 9 *
12 * RFC 1885 10 * RFC 1885
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1ee4fa17c129..4de2b9efcacb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 4e5c8615832c..f77a6011c302 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Ian P. Morris <I.P.Morris@soton.ac.uk> 7 * Ian P. Morris <I.P.Morris@soton.ac.uk>
8 * 8 *
9 * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $
10 *
11 * Based in linux/net/ipv4/ip_input.c 9 * Based in linux/net/ipv4/ip_input.c
12 * 10 *
13 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 48cdce9c696c..40a2813a63d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on linux/net/ipv4/ip_output.c 8 * Based on linux/net/ipv4/ip_output.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2bda3ba100b1..17c7b098cdb0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -6,8 +6,6 @@
6 * Ville Nuorvala <vnuorval@tcs.hut.fi> 6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org> 7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 * 8 *
9 * $Id$
10 *
11 * Based on: 9 * Based on:
12 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c 10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
13 * 11 *
@@ -711,7 +709,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
711 } 709 }
712 710
713 if (!ip6_tnl_rcv_ctl(t)) { 711 if (!ip6_tnl_rcv_ctl(t)) {
714 t->stat.rx_dropped++; 712 t->dev->stats.rx_dropped++;
715 read_unlock(&ip6_tnl_lock); 713 read_unlock(&ip6_tnl_lock);
716 goto discard; 714 goto discard;
717 } 715 }
@@ -728,8 +726,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 726
729 dscp_ecn_decapsulate(t, ipv6h, skb); 727 dscp_ecn_decapsulate(t, ipv6h, skb);
730 728
731 t->stat.rx_packets++; 729 t->dev->stats.rx_packets++;
732 t->stat.rx_bytes += skb->len; 730 t->dev->stats.rx_bytes += skb->len;
733 netif_rx(skb); 731 netif_rx(skb);
734 read_unlock(&ip6_tnl_lock); 732 read_unlock(&ip6_tnl_lock);
735 return 0; 733 return 0;
@@ -849,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
849 __u32 *pmtu) 847 __u32 *pmtu)
850{ 848{
851 struct ip6_tnl *t = netdev_priv(dev); 849 struct ip6_tnl *t = netdev_priv(dev);
852 struct net_device_stats *stats = &t->stat; 850 struct net_device_stats *stats = &t->dev->stats;
853 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 851 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
854 struct ipv6_tel_txoption opt; 852 struct ipv6_tel_txoption opt;
855 struct dst_entry *dst; 853 struct dst_entry *dst;
@@ -1043,11 +1041,11 @@ static int
1043ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1041ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1044{ 1042{
1045 struct ip6_tnl *t = netdev_priv(dev); 1043 struct ip6_tnl *t = netdev_priv(dev);
1046 struct net_device_stats *stats = &t->stat; 1044 struct net_device_stats *stats = &t->dev->stats;
1047 int ret; 1045 int ret;
1048 1046
1049 if (t->recursion++) { 1047 if (t->recursion++) {
1050 t->stat.collisions++; 1048 stats->collisions++;
1051 goto tx_err; 1049 goto tx_err;
1052 } 1050 }
1053 1051
@@ -1289,19 +1287,6 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1289} 1287}
1290 1288
1291/** 1289/**
1292 * ip6_tnl_get_stats - return the stats for tunnel device
1293 * @dev: virtual device associated with tunnel
1294 *
1295 * Return: stats for device
1296 **/
1297
1298static struct net_device_stats *
1299ip6_tnl_get_stats(struct net_device *dev)
1300{
1301 return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
1302}
1303
1304/**
1305 * ip6_tnl_change_mtu - change mtu manually for tunnel device 1290 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1306 * @dev: virtual device associated with tunnel 1291 * @dev: virtual device associated with tunnel
1307 * @new_mtu: the new mtu 1292 * @new_mtu: the new mtu
@@ -1334,7 +1319,6 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
1334 dev->uninit = ip6_tnl_dev_uninit; 1319 dev->uninit = ip6_tnl_dev_uninit;
1335 dev->destructor = free_netdev; 1320 dev->destructor = free_netdev;
1336 dev->hard_start_xmit = ip6_tnl_xmit; 1321 dev->hard_start_xmit = ip6_tnl_xmit;
1337 dev->get_stats = ip6_tnl_get_stats;
1338 dev->do_ioctl = ip6_tnl_ioctl; 1322 dev->do_ioctl = ip6_tnl_ioctl;
1339 dev->change_mtu = ip6_tnl_change_mtu; 1323 dev->change_mtu = ip6_tnl_change_mtu;
1340 1324
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 14796181e8b5..90e763073dc5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -388,8 +388,8 @@ static int pim6_rcv(struct sk_buff *skb)
388 skb->ip_summed = 0; 388 skb->ip_summed = 0;
389 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
390 dst_release(skb->dst); 390 dst_release(skb->dst);
391 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len; 391 reg_dev->stats.rx_bytes += skb->len;
392 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++; 392 reg_dev->stats.rx_packets++;
393 skb->dst = NULL; 393 skb->dst = NULL;
394 nf_reset(skb); 394 nf_reset(skb);
395 netif_rx(skb); 395 netif_rx(skb);
@@ -409,26 +409,20 @@ static struct inet6_protocol pim6_protocol = {
409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
410{ 410{
411 read_lock(&mrt_lock); 411 read_lock(&mrt_lock);
412 ((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len; 412 dev->stats.tx_bytes += skb->len;
413 ((struct net_device_stats *)netdev_priv(dev))->tx_packets++; 413 dev->stats.tx_packets++;
414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT); 414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
415 read_unlock(&mrt_lock); 415 read_unlock(&mrt_lock);
416 kfree_skb(skb); 416 kfree_skb(skb);
417 return 0; 417 return 0;
418} 418}
419 419
420static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
421{
422 return (struct net_device_stats *)netdev_priv(dev);
423}
424
425static void reg_vif_setup(struct net_device *dev) 420static void reg_vif_setup(struct net_device *dev)
426{ 421{
427 dev->type = ARPHRD_PIMREG; 422 dev->type = ARPHRD_PIMREG;
428 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; 423 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
429 dev->flags = IFF_NOARP; 424 dev->flags = IFF_NOARP;
430 dev->hard_start_xmit = reg_vif_xmit; 425 dev->hard_start_xmit = reg_vif_xmit;
431 dev->get_stats = reg_vif_get_stats;
432 dev->destructor = free_netdev; 426 dev->destructor = free_netdev;
433} 427}
434 428
@@ -436,9 +430,7 @@ static struct net_device *ip6mr_reg_vif(void)
436{ 430{
437 struct net_device *dev; 431 struct net_device *dev;
438 432
439 dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg", 433 dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
440 reg_vif_setup);
441
442 if (dev == NULL) 434 if (dev == NULL)
443 return NULL; 435 return NULL;
444 436
@@ -1248,7 +1240,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1248 1240
1249#endif 1241#endif
1250 /* 1242 /*
1251 * Spurious command, or MRT_VERSION which you cannot 1243 * Spurious command, or MRT6_VERSION which you cannot
1252 * set. 1244 * set.
1253 */ 1245 */
1254 default: 1246 default:
@@ -1377,8 +1369,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1377 if (vif->flags & MIFF_REGISTER) { 1369 if (vif->flags & MIFF_REGISTER) {
1378 vif->pkt_out++; 1370 vif->pkt_out++;
1379 vif->bytes_out += skb->len; 1371 vif->bytes_out += skb->len;
1380 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len; 1372 vif->dev->stats.tx_bytes += skb->len;
1381 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++; 1373 vif->dev->stats.tx_packets++;
1382 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT); 1374 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
1383 kfree_skb(skb); 1375 kfree_skb(skb);
1384 return 0; 1376 return 0;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c042ce19bd14..a9988841172a 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/net/ipv4/ip_sockglue.c 8 * Based on linux/net/ipv4/ip_sockglue.c
9 * 9 *
10 * $Id: ipv6_sockglue.c,v 1.41 2002/02/01 22:01:04 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fd632dd7f98d..bd2fe4cfafa7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: mcast.c,v 1.40 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c 8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -164,7 +162,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
164 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ 162 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
165 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) 163 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
166 164
167#define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value)
168#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) 165#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
169 166
170#define IPV6_MLD_MAX_MSF 64 167#define IPV6_MLD_MAX_MSF 64
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6cae5475737e..689dec899c57 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -208,5 +208,17 @@ config IP6_NF_RAW
208 If you want to compile it as a module, say M here and read 208 If you want to compile it as a module, say M here and read
209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
210 210
211# security table for MAC policy
212config IP6_NF_SECURITY
213 tristate "Security table"
214 depends on IP6_NF_IPTABLES
215 depends on SECURITY
216 default m if NETFILTER_ADVANCED=n
217 help
218 This option adds a `security' table to iptables, for use
219 with Mandatory Access Control (MAC) policy.
220
221 If unsure, say N.
222
211endmenu 223endmenu
212 224
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index fbf2c14ed887..3f17c948eefb 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
11 12
12# objects for l3 independent conntrack 13# objects for l3 independent conntrack
13nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 14nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 2eff3ae8977d..1b8815f6153d 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -159,7 +159,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
159 case IPQ_COPY_META: 159 case IPQ_COPY_META:
160 case IPQ_COPY_NONE: 160 case IPQ_COPY_NONE:
161 size = NLMSG_SPACE(sizeof(*pmsg)); 161 size = NLMSG_SPACE(sizeof(*pmsg));
162 data_len = 0;
163 break; 162 break;
164 163
165 case IPQ_COPY_PACKET: 164 case IPQ_COPY_PACKET:
@@ -226,8 +225,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
226 return skb; 225 return skb;
227 226
228nlmsg_failure: 227nlmsg_failure:
229 if (skb)
230 kfree_skb(skb);
231 *errp = -EINVAL; 228 *errp = -EINVAL;
232 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 229 printk(KERN_ERR "ip6_queue: error creating packet message\n");
233 return NULL; 230 return NULL;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
new file mode 100644
index 000000000000..063a3d9c3c67
--- /dev/null
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -0,0 +1,172 @@
1/*
2 * "security" table for IPv6
3 *
4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context
6 * to DAC.
7 *
8 * Based on iptable_mangle.c
9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18#include <linux/module.h>
19#include <linux/netfilter_ipv6/ip6_tables.h>
20
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
23MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
24
25#define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT)
28
29static struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static struct xt_table security_table = {
60 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS,
62 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
63 .me = THIS_MODULE,
64 .af = AF_INET6,
65};
66
67static unsigned int
68ip6t_local_in_hook(unsigned int hook,
69 struct sk_buff *skb,
70 const struct net_device *in,
71 const struct net_device *out,
72 int (*okfn)(struct sk_buff *))
73{
74 return ip6t_do_table(skb, hook, in, out,
75 init_net.ipv6.ip6table_security);
76}
77
78static unsigned int
79ip6t_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{
85 return ip6t_do_table(skb, hook, in, out,
86 init_net.ipv6.ip6table_security);
87}
88
89static unsigned int
90ip6t_local_out_hook(unsigned int hook,
91 struct sk_buff *skb,
92 const struct net_device *in,
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* TBD: handle short packets via raw socket */
97 return ip6t_do_table(skb, hook, in, out,
98 init_net.ipv6.ip6table_security);
99}
100
101static struct nf_hook_ops ip6t_ops[] __read_mostly = {
102 {
103 .hook = ip6t_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = PF_INET6,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP6_PRI_SECURITY,
108 },
109 {
110 .hook = ip6t_forward_hook,
111 .owner = THIS_MODULE,
112 .pf = PF_INET6,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP6_PRI_SECURITY,
115 },
116 {
117 .hook = ip6t_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = PF_INET6,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP6_PRI_SECURITY,
122 },
123};
124
125static int __net_init ip6table_security_net_init(struct net *net)
126{
127 net->ipv6.ip6table_security =
128 ip6t_register_table(net, &security_table, &initial_table.repl);
129
130 if (IS_ERR(net->ipv6.ip6table_security))
131 return PTR_ERR(net->ipv6.ip6table_security);
132
133 return 0;
134}
135
136static void __net_exit ip6table_security_net_exit(struct net *net)
137{
138 ip6t_unregister_table(net->ipv6.ip6table_security);
139}
140
141static struct pernet_operations ip6table_security_net_ops = {
142 .init = ip6table_security_net_init,
143 .exit = ip6table_security_net_exit,
144};
145
146static int __init ip6table_security_init(void)
147{
148 int ret;
149
150 ret = register_pernet_subsys(&ip6table_security_net_ops);
151 if (ret < 0)
152 return ret;
153
154 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
155 if (ret < 0)
156 goto cleanup_table;
157
158 return ret;
159
160cleanup_table:
161 unregister_pernet_subsys(&ip6table_security_net_ops);
162 return ret;
163}
164
165static void __exit ip6table_security_fini(void)
166{
167 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
168 unregister_pernet_subsys(&ip6table_security_net_ops);
169}
170
171module_init(ip6table_security_init);
172module_exit(ip6table_security_fini);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index ee713b03e9ec..14d47d833545 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -89,9 +89,8 @@ static int icmpv6_packet(struct nf_conn *ct,
89 means this will only run once even if count hits zero twice 89 means this will only run once even if count hits zero twice
90 (theoretically possible with SMP) */ 90 (theoretically possible with SMP) */
91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
92 if (atomic_dec_and_test(&ct->proto.icmp.count) 92 if (atomic_dec_and_test(&ct->proto.icmp.count))
93 && del_timer(&ct->timeout)) 93 nf_ct_kill_acct(ct, ctinfo, skb);
94 ct->timeout.function((unsigned long)ct);
95 } else { 94 } else {
96 atomic_inc(&ct->proto.icmp.count); 95 atomic_inc(&ct->proto.icmp.count);
97 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); 96 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index df0736a4cafa..cbc7e514d3ec 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -7,8 +7,6 @@
7 * PROC file system. This is very similar to the IPv4 version, 7 * PROC file system. This is very similar to the IPv4 version,
8 * except it reports the sockets in the INET6 address family. 8 * except it reports the sockets in the INET6 address family.
9 * 9 *
10 * Version: $Id: proc.c,v 1.17 2002/02/01 22:01:04 davem Exp $
11 *
12 * Authors: David S. Miller (davem@caip.rutgers.edu) 10 * Authors: David S. Miller (davem@caip.rutgers.edu)
13 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 11 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
14 * 12 *
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index f929f47b925e..9ab789159913 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET6 protocol dispatch tables. 6 * PF_INET6 protocol dispatch tables.
7 * 7 *
8 * Version: $Id: protocol.c,v 1.10 2001/05/18 02:25:49 davem Exp $
9 *
10 * Authors: Pedro Roque <roque@di.fc.ul.pt> 8 * Authors: Pedro Roque <roque@di.fc.ul.pt>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3aee12310d94..70a57e45bf0e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/raw.c 8 * Adapted from linux/net/ipv4/raw.c
9 * 9 *
10 * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) 12 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 798cabc7535b..13509f906d89 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
9 *
10 * Based on: net/ipv4/ip_fragment.c 8 * Based on: net/ipv4/ip_fragment.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -632,7 +630,7 @@ static struct inet6_protocol frag_protocol =
632}; 630};
633 631
634#ifdef CONFIG_SYSCTL 632#ifdef CONFIG_SYSCTL
635static struct ctl_table ip6_frags_ctl_table[] = { 633static struct ctl_table ip6_frags_ns_ctl_table[] = {
636 { 634 {
637 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, 635 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH,
638 .procname = "ip6frag_high_thresh", 636 .procname = "ip6frag_high_thresh",
@@ -658,6 +656,10 @@ static struct ctl_table ip6_frags_ctl_table[] = {
658 .proc_handler = &proc_dointvec_jiffies, 656 .proc_handler = &proc_dointvec_jiffies,
659 .strategy = &sysctl_jiffies, 657 .strategy = &sysctl_jiffies,
660 }, 658 },
659 { }
660};
661
662static struct ctl_table ip6_frags_ctl_table[] = {
661 { 663 {
662 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, 664 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL,
663 .procname = "ip6frag_secret_interval", 665 .procname = "ip6frag_secret_interval",
@@ -670,21 +672,20 @@ static struct ctl_table ip6_frags_ctl_table[] = {
670 { } 672 { }
671}; 673};
672 674
673static int ip6_frags_sysctl_register(struct net *net) 675static int ip6_frags_ns_sysctl_register(struct net *net)
674{ 676{
675 struct ctl_table *table; 677 struct ctl_table *table;
676 struct ctl_table_header *hdr; 678 struct ctl_table_header *hdr;
677 679
678 table = ip6_frags_ctl_table; 680 table = ip6_frags_ns_ctl_table;
679 if (net != &init_net) { 681 if (net != &init_net) {
680 table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL); 682 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
681 if (table == NULL) 683 if (table == NULL)
682 goto err_alloc; 684 goto err_alloc;
683 685
684 table[0].data = &net->ipv6.frags.high_thresh; 686 table[0].data = &net->ipv6.frags.high_thresh;
685 table[1].data = &net->ipv6.frags.low_thresh; 687 table[1].data = &net->ipv6.frags.low_thresh;
686 table[2].data = &net->ipv6.frags.timeout; 688 table[2].data = &net->ipv6.frags.timeout;
687 table[3].mode &= ~0222;
688 } 689 }
689 690
690 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); 691 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
@@ -701,7 +702,7 @@ err_alloc:
701 return -ENOMEM; 702 return -ENOMEM;
702} 703}
703 704
704static void ip6_frags_sysctl_unregister(struct net *net) 705static void ip6_frags_ns_sysctl_unregister(struct net *net)
705{ 706{
706 struct ctl_table *table; 707 struct ctl_table *table;
707 708
@@ -709,13 +710,36 @@ static void ip6_frags_sysctl_unregister(struct net *net)
709 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); 710 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
710 kfree(table); 711 kfree(table);
711} 712}
713
714static struct ctl_table_header *ip6_ctl_header;
715
716static int ip6_frags_sysctl_register(void)
717{
718 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
719 ip6_frags_ctl_table);
720 return ip6_ctl_header == NULL ? -ENOMEM : 0;
721}
722
723static void ip6_frags_sysctl_unregister(void)
724{
725 unregister_net_sysctl_table(ip6_ctl_header);
726}
712#else 727#else
713static inline int ip6_frags_sysctl_register(struct net *net) 728static inline int ip6_frags_ns_sysctl_register(struct net *net)
714{ 729{
715 return 0; 730 return 0;
716} 731}
717 732
718static inline void ip6_frags_sysctl_unregister(struct net *net) 733static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
734{
735}
736
737static inline int ip6_frags_sysctl_register(void)
738{
739 return 0;
740}
741
742static inline void ip6_frags_sysctl_unregister(void)
719{ 743{
720} 744}
721#endif 745#endif
@@ -728,12 +752,12 @@ static int ipv6_frags_init_net(struct net *net)
728 752
729 inet_frags_init_net(&net->ipv6.frags); 753 inet_frags_init_net(&net->ipv6.frags);
730 754
731 return ip6_frags_sysctl_register(net); 755 return ip6_frags_ns_sysctl_register(net);
732} 756}
733 757
734static void ipv6_frags_exit_net(struct net *net) 758static void ipv6_frags_exit_net(struct net *net)
735{ 759{
736 ip6_frags_sysctl_unregister(net); 760 ip6_frags_ns_sysctl_unregister(net);
737 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 761 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
738} 762}
739 763
@@ -750,7 +774,13 @@ int __init ipv6_frag_init(void)
750 if (ret) 774 if (ret)
751 goto out; 775 goto out;
752 776
753 register_pernet_subsys(&ip6_frags_ops); 777 ret = ip6_frags_sysctl_register();
778 if (ret)
779 goto err_sysctl;
780
781 ret = register_pernet_subsys(&ip6_frags_ops);
782 if (ret)
783 goto err_pernet;
754 784
755 ip6_frags.hashfn = ip6_hashfn; 785 ip6_frags.hashfn = ip6_hashfn;
756 ip6_frags.constructor = ip6_frag_init; 786 ip6_frags.constructor = ip6_frag_init;
@@ -763,11 +793,18 @@ int __init ipv6_frag_init(void)
763 inet_frags_init(&ip6_frags); 793 inet_frags_init(&ip6_frags);
764out: 794out:
765 return ret; 795 return ret;
796
797err_pernet:
798 ip6_frags_sysctl_unregister();
799err_sysctl:
800 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
801 goto out;
766} 802}
767 803
768void ipv6_frag_exit(void) 804void ipv6_frag_exit(void)
769{ 805{
770 inet_frags_fini(&ip6_frags); 806 inet_frags_fini(&ip6_frags);
807 ip6_frags_sysctl_unregister();
771 unregister_pernet_subsys(&ip6_frags_ops); 808 unregister_pernet_subsys(&ip6_frags_ops);
772 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); 809 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
773} 810}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d1f3e19b06c7..efe036aa3dd1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3de6ffdaedf2..b0c5080420a8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -491,13 +489,13 @@ static int ipip6_rcv(struct sk_buff *skb)
491 489
492 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 490 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
493 !isatap_chksrc(skb, iph, tunnel)) { 491 !isatap_chksrc(skb, iph, tunnel)) {
494 tunnel->stat.rx_errors++; 492 tunnel->dev->stats.rx_errors++;
495 read_unlock(&ipip6_lock); 493 read_unlock(&ipip6_lock);
496 kfree_skb(skb); 494 kfree_skb(skb);
497 return 0; 495 return 0;
498 } 496 }
499 tunnel->stat.rx_packets++; 497 tunnel->dev->stats.rx_packets++;
500 tunnel->stat.rx_bytes += skb->len; 498 tunnel->dev->stats.rx_bytes += skb->len;
501 skb->dev = tunnel->dev; 499 skb->dev = tunnel->dev;
502 dst_release(skb->dst); 500 dst_release(skb->dst);
503 skb->dst = NULL; 501 skb->dst = NULL;
@@ -537,7 +535,7 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
537static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 535static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
538{ 536{
539 struct ip_tunnel *tunnel = netdev_priv(dev); 537 struct ip_tunnel *tunnel = netdev_priv(dev);
540 struct net_device_stats *stats = &tunnel->stat; 538 struct net_device_stats *stats = &tunnel->dev->stats;
541 struct iphdr *tiph = &tunnel->parms.iph; 539 struct iphdr *tiph = &tunnel->parms.iph;
542 struct ipv6hdr *iph6 = ipv6_hdr(skb); 540 struct ipv6hdr *iph6 = ipv6_hdr(skb);
543 u8 tos = tunnel->parms.iph.tos; 541 u8 tos = tunnel->parms.iph.tos;
@@ -551,7 +549,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
551 int addr_type; 549 int addr_type;
552 550
553 if (tunnel->recursion++) { 551 if (tunnel->recursion++) {
554 tunnel->stat.collisions++; 552 stats->collisions++;
555 goto tx_error; 553 goto tx_error;
556 } 554 }
557 555
@@ -618,20 +616,20 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
618 .oif = tunnel->parms.link, 616 .oif = tunnel->parms.link,
619 .proto = IPPROTO_IPV6 }; 617 .proto = IPPROTO_IPV6 };
620 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 618 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
621 tunnel->stat.tx_carrier_errors++; 619 stats->tx_carrier_errors++;
622 goto tx_error_icmp; 620 goto tx_error_icmp;
623 } 621 }
624 } 622 }
625 if (rt->rt_type != RTN_UNICAST) { 623 if (rt->rt_type != RTN_UNICAST) {
626 ip_rt_put(rt); 624 ip_rt_put(rt);
627 tunnel->stat.tx_carrier_errors++; 625 stats->tx_carrier_errors++;
628 goto tx_error_icmp; 626 goto tx_error_icmp;
629 } 627 }
630 tdev = rt->u.dst.dev; 628 tdev = rt->u.dst.dev;
631 629
632 if (tdev == dev) { 630 if (tdev == dev) {
633 ip_rt_put(rt); 631 ip_rt_put(rt);
634 tunnel->stat.collisions++; 632 stats->collisions++;
635 goto tx_error; 633 goto tx_error;
636 } 634 }
637 635
@@ -641,7 +639,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
641 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 639 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
642 640
643 if (mtu < 68) { 641 if (mtu < 68) {
644 tunnel->stat.collisions++; 642 stats->collisions++;
645 ip_rt_put(rt); 643 ip_rt_put(rt);
646 goto tx_error; 644 goto tx_error;
647 } 645 }
@@ -916,11 +914,6 @@ done:
916 return err; 914 return err;
917} 915}
918 916
919static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
920{
921 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
922}
923
924static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) 917static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
925{ 918{
926 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 919 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -934,7 +927,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
934 dev->uninit = ipip6_tunnel_uninit; 927 dev->uninit = ipip6_tunnel_uninit;
935 dev->destructor = free_netdev; 928 dev->destructor = free_netdev;
936 dev->hard_start_xmit = ipip6_tunnel_xmit; 929 dev->hard_start_xmit = ipip6_tunnel_xmit;
937 dev->get_stats = ipip6_tunnel_get_stats;
938 dev->do_ioctl = ipip6_tunnel_ioctl; 930 dev->do_ioctl = ipip6_tunnel_ioctl;
939 dev->change_mtu = ipip6_tunnel_change_mtu; 931 dev->change_mtu = ipip6_tunnel_change_mtu;
940 932
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 3804dcbbfab0..5c99274558bf 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -37,6 +37,10 @@ static ctl_table ipv6_table_template[] = {
37 .mode = 0644, 37 .mode = 0644,
38 .proc_handler = &proc_dointvec 38 .proc_handler = &proc_dointvec
39 }, 39 },
40 { .ctl_name = 0 }
41};
42
43static ctl_table ipv6_table[] = {
40 { 44 {
41 .ctl_name = NET_IPV6_MLD_MAX_MSF, 45 .ctl_name = NET_IPV6_MLD_MAX_MSF,
42 .procname = "mld_max_msf", 46 .procname = "mld_max_msf",
@@ -80,12 +84,6 @@ static int ipv6_sysctl_net_init(struct net *net)
80 84
81 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 85 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
82 86
83 /* We don't want this value to be per namespace, it should be global
84 to all namespaces, so make it read-only when we are not in the
85 init network namespace */
86 if (net != &init_net)
87 ipv6_table[3].mode = 0444;
88
89 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, 87 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
90 ipv6_table); 88 ipv6_table);
91 if (!net->ipv6.sysctl.table) 89 if (!net->ipv6.sysctl.table)
@@ -126,12 +124,29 @@ static struct pernet_operations ipv6_sysctl_net_ops = {
126 .exit = ipv6_sysctl_net_exit, 124 .exit = ipv6_sysctl_net_exit,
127}; 125};
128 126
127static struct ctl_table_header *ip6_header;
128
129int ipv6_sysctl_register(void) 129int ipv6_sysctl_register(void)
130{ 130{
131 return register_pernet_subsys(&ipv6_sysctl_net_ops); 131 int err = -ENOMEM;
132
133 ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
134 if (ip6_header == NULL)
135 goto out;
136
137 err = register_pernet_subsys(&ipv6_sysctl_net_ops);
138 if (err)
139 goto err_pernet;
140out:
141 return err;
142
143err_pernet:
144 unregister_net_sysctl_table(ip6_header);
145 goto out;
132} 146}
133 147
134void ipv6_sysctl_unregister(void) 148void ipv6_sysctl_unregister(void)
135{ 149{
150 unregister_net_sysctl_table(ip6_header);
136 unregister_pernet_subsys(&ipv6_sysctl_net_ops); 151 unregister_pernet_subsys(&ipv6_sysctl_net_ops);
137} 152}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cb46749d4c32..ebed5d3adb82 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on: 8 * Based on:
11 * linux/net/ipv4/tcp.c 9 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c 10 * linux/net/ipv4/tcp_input.c
@@ -82,6 +80,12 @@ static struct inet_connection_sock_af_ops ipv6_specific;
82#ifdef CONFIG_TCP_MD5SIG 80#ifdef CONFIG_TCP_MD5SIG
83static struct tcp_sock_af_ops tcp_sock_ipv6_specific; 81static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 82static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83#else
84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
85 struct in6_addr *addr)
86{
87 return NULL;
88}
85#endif 89#endif
86 90
87static void tcp_v6_hash(struct sock *sk) 91static void tcp_v6_hash(struct sock *sk)
@@ -736,78 +740,34 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
736static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 740static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737 struct in6_addr *saddr, 741 struct in6_addr *saddr,
738 struct in6_addr *daddr, 742 struct in6_addr *daddr,
739 struct tcphdr *th, int protocol, 743 struct tcphdr *th, unsigned int tcplen)
740 unsigned int tcplen)
741{ 744{
742 struct scatterlist sg[4];
743 __u16 data_len;
744 int block = 0;
745 __sum16 cksum;
746 struct tcp_md5sig_pool *hp; 745 struct tcp_md5sig_pool *hp;
747 struct tcp6_pseudohdr *bp; 746 struct tcp6_pseudohdr *bp;
748 struct hash_desc *desc;
749 int err; 747 int err;
750 unsigned int nbytes = 0;
751 748
752 hp = tcp_get_md5sig_pool(); 749 hp = tcp_get_md5sig_pool();
753 if (!hp) { 750 if (!hp) {
754 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__); 751 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
755 goto clear_hash_noput; 752 goto clear_hash_noput;
756 } 753 }
754
757 bp = &hp->md5_blk.ip6; 755 bp = &hp->md5_blk.ip6;
758 desc = &hp->md5_desc;
759 756
760 /* 1. TCP pseudo-header (RFC2460) */ 757 /* 1. TCP pseudo-header (RFC2460) */
761 ipv6_addr_copy(&bp->saddr, saddr); 758 ipv6_addr_copy(&bp->saddr, saddr);
762 ipv6_addr_copy(&bp->daddr, daddr); 759 ipv6_addr_copy(&bp->daddr, daddr);
763 bp->len = htonl(tcplen); 760 bp->len = htonl(tcplen);
764 bp->protocol = htonl(protocol); 761 bp->protocol = htonl(IPPROTO_TCP);
765
766 sg_init_table(sg, 4);
767 762
768 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 763 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
769 nbytes += sizeof(*bp); 764 th, tcplen, hp);
770 765
771 /* 2. TCP header, excluding options */ 766 if (err)
772 cksum = th->check;
773 th->check = 0;
774 sg_set_buf(&sg[block++], th, sizeof(*th));
775 nbytes += sizeof(*th);
776
777 /* 3. TCP segment data (if any) */
778 data_len = tcplen - (th->doff << 2);
779 if (data_len > 0) {
780 u8 *data = (u8 *)th + (th->doff << 2);
781 sg_set_buf(&sg[block++], data, data_len);
782 nbytes += data_len;
783 }
784
785 /* 4. shared key */
786 sg_set_buf(&sg[block++], key->key, key->keylen);
787 nbytes += key->keylen;
788
789 sg_mark_end(&sg[block - 1]);
790
791 /* Now store the hash into the packet */
792 err = crypto_hash_init(desc);
793 if (err) {
794 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
795 goto clear_hash;
796 }
797 err = crypto_hash_update(desc, sg, nbytes);
798 if (err) {
799 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
800 goto clear_hash;
801 }
802 err = crypto_hash_final(desc, md5_hash);
803 if (err) {
804 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
805 goto clear_hash; 767 goto clear_hash;
806 }
807 768
808 /* Reset header, and free up the crypto */ 769 /* Free up the crypto pool */
809 tcp_put_md5sig_pool(); 770 tcp_put_md5sig_pool();
810 th->check = cksum;
811out: 771out:
812 return 0; 772 return 0;
813clear_hash: 773clear_hash:
@@ -821,8 +781,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
821 struct sock *sk, 781 struct sock *sk,
822 struct dst_entry *dst, 782 struct dst_entry *dst,
823 struct request_sock *req, 783 struct request_sock *req,
824 struct tcphdr *th, int protocol, 784 struct tcphdr *th, unsigned int tcplen)
825 unsigned int tcplen)
826{ 785{
827 struct in6_addr *saddr, *daddr; 786 struct in6_addr *saddr, *daddr;
828 787
@@ -835,7 +794,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
835 } 794 }
836 return tcp_v6_do_calc_md5_hash(md5_hash, key, 795 return tcp_v6_do_calc_md5_hash(md5_hash, key,
837 saddr, daddr, 796 saddr, daddr,
838 th, protocol, tcplen); 797 th, tcplen);
839} 798}
840 799
841static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) 800static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
@@ -844,43 +803,12 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
844 struct tcp_md5sig_key *hash_expected; 803 struct tcp_md5sig_key *hash_expected;
845 struct ipv6hdr *ip6h = ipv6_hdr(skb); 804 struct ipv6hdr *ip6h = ipv6_hdr(skb);
846 struct tcphdr *th = tcp_hdr(skb); 805 struct tcphdr *th = tcp_hdr(skb);
847 int length = (th->doff << 2) - sizeof (*th);
848 int genhash; 806 int genhash;
849 u8 *ptr;
850 u8 newhash[16]; 807 u8 newhash[16];
851 808
852 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); 809 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
810 hash_location = tcp_parse_md5sig_option(th);
853 811
854 /* If the TCP option is too short, we can short cut */
855 if (length < TCPOLEN_MD5SIG)
856 return hash_expected ? 1 : 0;
857
858 /* parse options */
859 ptr = (u8*)(th + 1);
860 while (length > 0) {
861 int opcode = *ptr++;
862 int opsize;
863
864 switch(opcode) {
865 case TCPOPT_EOL:
866 goto done_opts;
867 case TCPOPT_NOP:
868 length--;
869 continue;
870 default:
871 opsize = *ptr++;
872 if (opsize < 2 || opsize > length)
873 goto done_opts;
874 if (opcode == TCPOPT_MD5SIG) {
875 hash_location = ptr;
876 goto done_opts;
877 }
878 }
879 ptr += opsize - 2;
880 length -= opsize;
881 }
882
883done_opts:
884 /* do we have a hash as expected? */ 812 /* do we have a hash as expected? */
885 if (!hash_expected) { 813 if (!hash_expected) {
886 if (!hash_location) 814 if (!hash_location)
@@ -910,8 +838,7 @@ done_opts:
910 genhash = tcp_v6_do_calc_md5_hash(newhash, 838 genhash = tcp_v6_do_calc_md5_hash(newhash,
911 hash_expected, 839 hash_expected,
912 &ip6h->saddr, &ip6h->daddr, 840 &ip6h->saddr, &ip6h->daddr,
913 th, sk->sk_protocol, 841 th, skb->len);
914 skb->len);
915 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 842 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
916 if (net_ratelimit()) { 843 if (net_ratelimit()) {
917 printk(KERN_INFO "MD5 Hash %s for " 844 printk(KERN_INFO "MD5 Hash %s for "
@@ -1051,7 +978,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1051 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, 978 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1052 &ipv6_hdr(skb)->daddr, 979 &ipv6_hdr(skb)->daddr,
1053 &ipv6_hdr(skb)->saddr, 980 &ipv6_hdr(skb)->saddr,
1054 t1, IPPROTO_TCP, tot_len); 981 t1, tot_len);
1055 } 982 }
1056#endif 983#endif
1057 984
@@ -1088,8 +1015,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1088 kfree_skb(buff); 1015 kfree_skb(buff);
1089} 1016}
1090 1017
1091static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, 1018static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1092 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) 1019 struct tcp_md5sig_key *key)
1093{ 1020{
1094 struct tcphdr *th = tcp_hdr(skb), *t1; 1021 struct tcphdr *th = tcp_hdr(skb), *t1;
1095 struct sk_buff *buff; 1022 struct sk_buff *buff;
@@ -1098,22 +1025,6 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1098 struct sock *ctl_sk = net->ipv6.tcp_sk; 1025 struct sock *ctl_sk = net->ipv6.tcp_sk;
1099 unsigned int tot_len = sizeof(struct tcphdr); 1026 unsigned int tot_len = sizeof(struct tcphdr);
1100 __be32 *topt; 1027 __be32 *topt;
1101#ifdef CONFIG_TCP_MD5SIG
1102 struct tcp_md5sig_key *key;
1103 struct tcp_md5sig_key tw_key;
1104#endif
1105
1106#ifdef CONFIG_TCP_MD5SIG
1107 if (!tw && skb->sk) {
1108 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1109 } else if (tw && tw->tw_md5_keylen) {
1110 tw_key.key = tw->tw_md5_key;
1111 tw_key.keylen = tw->tw_md5_keylen;
1112 key = &tw_key;
1113 } else {
1114 key = NULL;
1115 }
1116#endif
1117 1028
1118 if (ts) 1029 if (ts)
1119 tot_len += TCPOLEN_TSTAMP_ALIGNED; 1030 tot_len += TCPOLEN_TSTAMP_ALIGNED;
@@ -1157,7 +1068,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1157 tcp_v6_do_calc_md5_hash((__u8 *)topt, key, 1068 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1158 &ipv6_hdr(skb)->daddr, 1069 &ipv6_hdr(skb)->daddr,
1159 &ipv6_hdr(skb)->saddr, 1070 &ipv6_hdr(skb)->saddr,
1160 t1, IPPROTO_TCP, tot_len); 1071 t1, tot_len);
1161 } 1072 }
1162#endif 1073#endif
1163 1074
@@ -1193,16 +1104,17 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1193 struct inet_timewait_sock *tw = inet_twsk(sk); 1104 struct inet_timewait_sock *tw = inet_twsk(sk);
1194 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 1105 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1195 1106
1196 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1107 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1197 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1108 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1198 tcptw->tw_ts_recent); 1109 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1199 1110
1200 inet_twsk_put(tw); 1111 inet_twsk_put(tw);
1201} 1112}
1202 1113
1203static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1114static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1204{ 1115{
1205 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); 1116 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1117 tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr));
1206} 1118}
1207 1119
1208 1120
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dd309626ae9a..e0693fffc9bd 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/ipv4/udp.c 8 * Based on linux/ipv4/udp.c
9 * 9 *
10 * $Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 12 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 491efd00a866..f6cdcb348e05 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -2,8 +2,6 @@
2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. 2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6.
3 * See also net/ipv4/udplite.c 3 * See also net/ipv4/udplite.c
4 * 4 *
5 * Version: $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $
6 *
7 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 5 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
8 * 6 *
9 * Changes: 7 * Changes:
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e0eab5927c4f..f6e54fa97f47 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -628,8 +628,8 @@ dev_irnet_poll(struct file * file,
628 * This is the way pppd configure us and control us while the PPP 628 * This is the way pppd configure us and control us while the PPP
629 * instance is active. 629 * instance is active.
630 */ 630 */
631static int 631static long
632dev_irnet_ioctl(struct inode * inode, 632dev_irnet_ioctl(
633 struct file * file, 633 struct file * file,
634 unsigned int cmd, 634 unsigned int cmd,
635 unsigned long arg) 635 unsigned long arg)
@@ -660,6 +660,7 @@ dev_irnet_ioctl(struct inode * inode,
660 { 660 {
661 DEBUG(FS_INFO, "Entering PPP discipline.\n"); 661 DEBUG(FS_INFO, "Entering PPP discipline.\n");
662 /* PPP channel setup (ap->chan is configured in dev_irnet_open())*/ 662 /* PPP channel setup (ap->chan is configured in dev_irnet_open())*/
663 lock_kernel();
663 err = ppp_register_channel(&ap->chan); 664 err = ppp_register_channel(&ap->chan);
664 if(err == 0) 665 if(err == 0)
665 { 666 {
@@ -672,12 +673,14 @@ dev_irnet_ioctl(struct inode * inode,
672 } 673 }
673 else 674 else
674 DERROR(FS_ERROR, "Can't setup PPP channel...\n"); 675 DERROR(FS_ERROR, "Can't setup PPP channel...\n");
676 unlock_kernel();
675 } 677 }
676 else 678 else
677 { 679 {
678 /* In theory, should be N_TTY */ 680 /* In theory, should be N_TTY */
679 DEBUG(FS_INFO, "Exiting PPP discipline.\n"); 681 DEBUG(FS_INFO, "Exiting PPP discipline.\n");
680 /* Disconnect from the generic PPP layer */ 682 /* Disconnect from the generic PPP layer */
683 lock_kernel();
681 if(ap->ppp_open) 684 if(ap->ppp_open)
682 { 685 {
683 ap->ppp_open = 0; 686 ap->ppp_open = 0;
@@ -686,24 +689,20 @@ dev_irnet_ioctl(struct inode * inode,
686 else 689 else
687 DERROR(FS_ERROR, "Channel not registered !\n"); 690 DERROR(FS_ERROR, "Channel not registered !\n");
688 err = 0; 691 err = 0;
692 unlock_kernel();
689 } 693 }
690 break; 694 break;
691 695
692 /* Query PPP channel and unit number */ 696 /* Query PPP channel and unit number */
693 case PPPIOCGCHAN: 697 case PPPIOCGCHAN:
694 if(!ap->ppp_open) 698 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
695 break; 699 (int __user *)argp))
696 if(put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) 700 err = 0;
697 break;
698 DEBUG(FS_INFO, "Query channel.\n");
699 err = 0;
700 break; 701 break;
701 case PPPIOCGUNIT: 702 case PPPIOCGUNIT:
702 if(!ap->ppp_open) 703 lock_kernel();
703 break; 704 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
704 if(put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) 705 (int __user *)argp))
705 break;
706 DEBUG(FS_INFO, "Query unit number.\n");
707 err = 0; 706 err = 0;
708 break; 707 break;
709 708
@@ -723,34 +722,39 @@ dev_irnet_ioctl(struct inode * inode,
723 DEBUG(FS_INFO, "Standard PPP ioctl.\n"); 722 DEBUG(FS_INFO, "Standard PPP ioctl.\n");
724 if(!capable(CAP_NET_ADMIN)) 723 if(!capable(CAP_NET_ADMIN))
725 err = -EPERM; 724 err = -EPERM;
726 else 725 else {
726 lock_kernel();
727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg); 727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg);
728 unlock_kernel();
729 }
728 break; 730 break;
729 731
730 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ 732 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */
731 /* Get termios */ 733 /* Get termios */
732 case TCGETS: 734 case TCGETS:
733 DEBUG(FS_INFO, "Get termios.\n"); 735 DEBUG(FS_INFO, "Get termios.\n");
736 lock_kernel();
734#ifndef TCGETS2 737#ifndef TCGETS2
735 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) 738 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
736 break; 739 err = 0;
737#else 740#else
738 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) 741 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
739 break; 742 err = 0;
740#endif 743#endif
741 err = 0; 744 unlock_kernel();
742 break; 745 break;
743 /* Set termios */ 746 /* Set termios */
744 case TCSETSF: 747 case TCSETSF:
745 DEBUG(FS_INFO, "Set termios.\n"); 748 DEBUG(FS_INFO, "Set termios.\n");
749 lock_kernel();
746#ifndef TCGETS2 750#ifndef TCGETS2
747 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) 751 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
748 break; 752 err = 0;
749#else 753#else
750 if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) 754 if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
751 break; 755 err = 0;
752#endif 756#endif
753 err = 0; 757 unlock_kernel();
754 break; 758 break;
755 759
756 /* Set DTR/RTS */ 760 /* Set DTR/RTS */
@@ -773,7 +777,9 @@ dev_irnet_ioctl(struct inode * inode,
773 * We should also worry that we don't accept junk here and that 777 * We should also worry that we don't accept junk here and that
774 * we get rid of our own buffers */ 778 * we get rid of our own buffers */
775#ifdef FLUSH_TO_PPP 779#ifdef FLUSH_TO_PPP
780 lock_kernel();
776 ppp_output_wakeup(&ap->chan); 781 ppp_output_wakeup(&ap->chan);
782 unlock_kernel();
777#endif /* FLUSH_TO_PPP */ 783#endif /* FLUSH_TO_PPP */
778 err = 0; 784 err = 0;
779 break; 785 break;
@@ -788,7 +794,7 @@ dev_irnet_ioctl(struct inode * inode,
788 794
789 default: 795 default:
790 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); 796 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd);
791 err = -ENOIOCTLCMD; 797 err = -ENOTTY;
792 } 798 }
793 799
794 DEXIT(FS_TRACE, " - err = 0x%X\n", err); 800 DEXIT(FS_TRACE, " - err = 0x%X\n", err);
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d2beb7df8f7f..d9f8bd4ebd05 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -76,9 +76,8 @@ static ssize_t
76static unsigned int 76static unsigned int
77 dev_irnet_poll(struct file *, 77 dev_irnet_poll(struct file *,
78 poll_table *); 78 poll_table *);
79static int 79static long
80 dev_irnet_ioctl(struct inode *, 80 dev_irnet_ioctl(struct file *,
81 struct file *,
82 unsigned int, 81 unsigned int,
83 unsigned long); 82 unsigned long);
84/* ------------------------ PPP INTERFACE ------------------------ */ 83/* ------------------------ PPP INTERFACE ------------------------ */
@@ -102,7 +101,7 @@ static struct file_operations irnet_device_fops =
102 .read = dev_irnet_read, 101 .read = dev_irnet_read,
103 .write = dev_irnet_write, 102 .write = dev_irnet_write,
104 .poll = dev_irnet_poll, 103 .poll = dev_irnet_poll,
105 .ioctl = dev_irnet_ioctl, 104 .unlocked_ioctl = dev_irnet_ioctl,
106 .open = dev_irnet_open, 105 .open = dev_irnet_open,
107 .release = dev_irnet_close 106 .release = dev_irnet_close
108 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ 107 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 7b0038f45b16..58e4aee3e696 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -644,6 +644,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
644 } 644 }
645 645
646 txmsg.class = 0; 646 txmsg.class = 0;
647 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
647 txmsg.tag = iucv->send_tag++; 648 txmsg.tag = iucv->send_tag++;
648 memcpy(skb->cb, &txmsg.tag, 4); 649 memcpy(skb->cb, &txmsg.tag, 4);
649 skb_queue_tail(&iucv->send_skb_q, skb); 650 skb_queue_tail(&iucv->send_skb_q, skb);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..531a206ce7a6 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -474,14 +474,14 @@ static void iucv_setmask_mp(void)
474{ 474{
475 int cpu; 475 int cpu;
476 476
477 preempt_disable(); 477 get_online_cpus();
478 for_each_online_cpu(cpu) 478 for_each_online_cpu(cpu)
479 /* Enable all cpus with a declared buffer. */ 479 /* Enable all cpus with a declared buffer. */
480 if (cpu_isset(cpu, iucv_buffer_cpumask) && 480 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
481 !cpu_isset(cpu, iucv_irq_cpumask)) 481 !cpu_isset(cpu, iucv_irq_cpumask))
482 smp_call_function_single(cpu, iucv_allow_cpu, 482 smp_call_function_single(cpu, iucv_allow_cpu,
483 NULL, 0, 1); 483 NULL, 0, 1);
484 preempt_enable(); 484 put_online_cpus();
485} 485}
486 486
487/** 487/**
@@ -521,16 +521,17 @@ static int iucv_enable(void)
521 goto out; 521 goto out;
522 /* Declare per cpu buffers. */ 522 /* Declare per cpu buffers. */
523 rc = -EIO; 523 rc = -EIO;
524 preempt_disable(); 524 get_online_cpus();
525 for_each_online_cpu(cpu) 525 for_each_online_cpu(cpu)
526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); 526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
527 preempt_enable();
528 if (cpus_empty(iucv_buffer_cpumask)) 527 if (cpus_empty(iucv_buffer_cpumask))
529 /* No cpu could declare an iucv buffer. */ 528 /* No cpu could declare an iucv buffer. */
530 goto out_path; 529 goto out_path;
530 put_online_cpus();
531 return 0; 531 return 0;
532 532
533out_path: 533out_path:
534 put_online_cpus();
534 kfree(iucv_path_table); 535 kfree(iucv_path_table);
535out: 536out:
536 return rc; 537 return rc;
@@ -545,7 +546,9 @@ out:
545 */ 546 */
546static void iucv_disable(void) 547static void iucv_disable(void)
547{ 548{
549 get_online_cpus();
548 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1); 550 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
551 put_online_cpus();
549 kfree(iucv_path_table); 552 kfree(iucv_path_table);
550} 553}
551 554
@@ -598,7 +601,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
598 return NOTIFY_OK; 601 return NOTIFY_OK;
599} 602}
600 603
601static struct notifier_block __cpuinitdata iucv_cpu_notifier = { 604static struct notifier_block __refdata iucv_cpu_notifier = {
602 .notifier_call = iucv_cpu_notify, 605 .notifier_call = iucv_cpu_notify,
603}; 606};
604 607
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7470e367272b..f0fc46c8038d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -579,25 +579,43 @@ static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
579 return (proto ? proto : IPSEC_PROTO_ANY); 579 return (proto ? proto : IPSEC_PROTO_ANY);
580} 580}
581 581
582static int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, 582static inline int pfkey_sockaddr_len(sa_family_t family)
583 xfrm_address_t *xaddr)
584{ 583{
585 switch (((struct sockaddr*)(addr + 1))->sa_family) { 584 switch (family) {
585 case AF_INET:
586 return sizeof(struct sockaddr_in);
587#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
588 case AF_INET6:
589 return sizeof(struct sockaddr_in6);
590#endif
591 }
592 return 0;
593}
594
595static
596int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
597{
598 switch (sa->sa_family) {
586 case AF_INET: 599 case AF_INET:
587 xaddr->a4 = 600 xaddr->a4 =
588 ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr; 601 ((struct sockaddr_in *)sa)->sin_addr.s_addr;
589 return AF_INET; 602 return AF_INET;
590#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 603#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
591 case AF_INET6: 604 case AF_INET6:
592 memcpy(xaddr->a6, 605 memcpy(xaddr->a6,
593 &((struct sockaddr_in6 *)(addr + 1))->sin6_addr, 606 &((struct sockaddr_in6 *)sa)->sin6_addr,
594 sizeof(struct in6_addr)); 607 sizeof(struct in6_addr));
595 return AF_INET6; 608 return AF_INET6;
596#endif 609#endif
597 default:
598 return 0;
599 } 610 }
600 /* NOTREACHED */ 611 return 0;
612}
613
614static
615int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
616{
617 return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
618 xaddr);
601} 619}
602 620
603static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs) 621static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs)
@@ -642,20 +660,11 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **
642} 660}
643 661
644#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 662#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
663
645static int 664static int
646pfkey_sockaddr_size(sa_family_t family) 665pfkey_sockaddr_size(sa_family_t family)
647{ 666{
648 switch (family) { 667 return PFKEY_ALIGN8(pfkey_sockaddr_len(family));
649 case AF_INET:
650 return PFKEY_ALIGN8(sizeof(struct sockaddr_in));
651#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
652 case AF_INET6:
653 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6));
654#endif
655 default:
656 return 0;
657 }
658 /* NOTREACHED */
659} 668}
660 669
661static inline int pfkey_mode_from_xfrm(int mode) 670static inline int pfkey_mode_from_xfrm(int mode)
@@ -687,6 +696,36 @@ static inline int pfkey_mode_to_xfrm(int mode)
687 } 696 }
688} 697}
689 698
699static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
700 struct sockaddr *sa,
701 unsigned short family)
702{
703 switch (family) {
704 case AF_INET:
705 {
706 struct sockaddr_in *sin = (struct sockaddr_in *)sa;
707 sin->sin_family = AF_INET;
708 sin->sin_port = port;
709 sin->sin_addr.s_addr = xaddr->a4;
710 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
711 return 32;
712 }
713#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
714 case AF_INET6:
715 {
716 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
717 sin6->sin6_family = AF_INET6;
718 sin6->sin6_port = port;
719 sin6->sin6_flowinfo = 0;
720 ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6);
721 sin6->sin6_scope_id = 0;
722 return 128;
723 }
724#endif
725 }
726 return 0;
727}
728
690static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, 729static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
691 int add_keys, int hsc) 730 int add_keys, int hsc)
692{ 731{
@@ -697,13 +736,9 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
697 struct sadb_address *addr; 736 struct sadb_address *addr;
698 struct sadb_key *key; 737 struct sadb_key *key;
699 struct sadb_x_sa2 *sa2; 738 struct sadb_x_sa2 *sa2;
700 struct sockaddr_in *sin;
701 struct sadb_x_sec_ctx *sec_ctx; 739 struct sadb_x_sec_ctx *sec_ctx;
702 struct xfrm_sec_ctx *xfrm_ctx; 740 struct xfrm_sec_ctx *xfrm_ctx;
703 int ctx_size = 0; 741 int ctx_size = 0;
704#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
705 struct sockaddr_in6 *sin6;
706#endif
707 int size; 742 int size;
708 int auth_key_size = 0; 743 int auth_key_size = 0;
709 int encrypt_key_size = 0; 744 int encrypt_key_size = 0;
@@ -732,14 +767,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
732 } 767 }
733 768
734 /* identity & sensitivity */ 769 /* identity & sensitivity */
735 770 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, x->props.family))
736 if ((x->props.family == AF_INET &&
737 x->sel.saddr.a4 != x->props.saddr.a4)
738#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
739 || (x->props.family == AF_INET6 &&
740 memcmp (x->sel.saddr.a6, x->props.saddr.a6, sizeof (struct in6_addr)))
741#endif
742 )
743 size += sizeof(struct sadb_address) + sockaddr_size; 771 size += sizeof(struct sadb_address) + sockaddr_size;
744 772
745 if (add_keys) { 773 if (add_keys) {
@@ -861,29 +889,12 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
861 protocol's number." - RFC2367 */ 889 protocol's number." - RFC2367 */
862 addr->sadb_address_proto = 0; 890 addr->sadb_address_proto = 0;
863 addr->sadb_address_reserved = 0; 891 addr->sadb_address_reserved = 0;
864 if (x->props.family == AF_INET) {
865 addr->sadb_address_prefixlen = 32;
866 892
867 sin = (struct sockaddr_in *) (addr + 1); 893 addr->sadb_address_prefixlen =
868 sin->sin_family = AF_INET; 894 pfkey_sockaddr_fill(&x->props.saddr, 0,
869 sin->sin_addr.s_addr = x->props.saddr.a4; 895 (struct sockaddr *) (addr + 1),
870 sin->sin_port = 0; 896 x->props.family);
871 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 897 if (!addr->sadb_address_prefixlen)
872 }
873#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
874 else if (x->props.family == AF_INET6) {
875 addr->sadb_address_prefixlen = 128;
876
877 sin6 = (struct sockaddr_in6 *) (addr + 1);
878 sin6->sin6_family = AF_INET6;
879 sin6->sin6_port = 0;
880 sin6->sin6_flowinfo = 0;
881 memcpy(&sin6->sin6_addr, x->props.saddr.a6,
882 sizeof(struct in6_addr));
883 sin6->sin6_scope_id = 0;
884 }
885#endif
886 else
887 BUG(); 898 BUG();
888 899
889 /* dst address */ 900 /* dst address */
@@ -894,70 +905,32 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
894 sizeof(uint64_t); 905 sizeof(uint64_t);
895 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 906 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
896 addr->sadb_address_proto = 0; 907 addr->sadb_address_proto = 0;
897 addr->sadb_address_prefixlen = 32; /* XXX */
898 addr->sadb_address_reserved = 0; 908 addr->sadb_address_reserved = 0;
899 if (x->props.family == AF_INET) {
900 sin = (struct sockaddr_in *) (addr + 1);
901 sin->sin_family = AF_INET;
902 sin->sin_addr.s_addr = x->id.daddr.a4;
903 sin->sin_port = 0;
904 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
905 909
906 if (x->sel.saddr.a4 != x->props.saddr.a4) { 910 addr->sadb_address_prefixlen =
907 addr = (struct sadb_address*) skb_put(skb, 911 pfkey_sockaddr_fill(&x->id.daddr, 0,
908 sizeof(struct sadb_address)+sockaddr_size); 912 (struct sockaddr *) (addr + 1),
909 addr->sadb_address_len = 913 x->props.family);
910 (sizeof(struct sadb_address)+sockaddr_size)/ 914 if (!addr->sadb_address_prefixlen)
911 sizeof(uint64_t); 915 BUG();
912 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
913 addr->sadb_address_proto =
914 pfkey_proto_from_xfrm(x->sel.proto);
915 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
916 addr->sadb_address_reserved = 0;
917
918 sin = (struct sockaddr_in *) (addr + 1);
919 sin->sin_family = AF_INET;
920 sin->sin_addr.s_addr = x->sel.saddr.a4;
921 sin->sin_port = x->sel.sport;
922 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
923 }
924 }
925#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
926 else if (x->props.family == AF_INET6) {
927 addr->sadb_address_prefixlen = 128;
928 916
929 sin6 = (struct sockaddr_in6 *) (addr + 1); 917 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr,
930 sin6->sin6_family = AF_INET6; 918 x->props.family)) {
931 sin6->sin6_port = 0; 919 addr = (struct sadb_address*) skb_put(skb,
932 sin6->sin6_flowinfo = 0; 920 sizeof(struct sadb_address)+sockaddr_size);
933 memcpy(&sin6->sin6_addr, x->id.daddr.a6, sizeof(struct in6_addr)); 921 addr->sadb_address_len =
934 sin6->sin6_scope_id = 0; 922 (sizeof(struct sadb_address)+sockaddr_size)/
923 sizeof(uint64_t);
924 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
925 addr->sadb_address_proto =
926 pfkey_proto_from_xfrm(x->sel.proto);
927 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
928 addr->sadb_address_reserved = 0;
935 929
936 if (memcmp (x->sel.saddr.a6, x->props.saddr.a6, 930 pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport,
937 sizeof(struct in6_addr))) { 931 (struct sockaddr *) (addr + 1),
938 addr = (struct sadb_address *) skb_put(skb, 932 x->props.family);
939 sizeof(struct sadb_address)+sockaddr_size);
940 addr->sadb_address_len =
941 (sizeof(struct sadb_address)+sockaddr_size)/
942 sizeof(uint64_t);
943 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
944 addr->sadb_address_proto =
945 pfkey_proto_from_xfrm(x->sel.proto);
946 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
947 addr->sadb_address_reserved = 0;
948
949 sin6 = (struct sockaddr_in6 *) (addr + 1);
950 sin6->sin6_family = AF_INET6;
951 sin6->sin6_port = x->sel.sport;
952 sin6->sin6_flowinfo = 0;
953 memcpy(&sin6->sin6_addr, x->sel.saddr.a6,
954 sizeof(struct in6_addr));
955 sin6->sin6_scope_id = 0;
956 }
957 } 933 }
958#endif
959 else
960 BUG();
961 934
962 /* auth key */ 935 /* auth key */
963 if (add_keys && auth_key_size) { 936 if (add_keys && auth_key_size) {
@@ -1853,10 +1826,6 @@ static int
1853parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) 1826parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1854{ 1827{
1855 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; 1828 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
1856 struct sockaddr_in *sin;
1857#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1858 struct sockaddr_in6 *sin6;
1859#endif
1860 int mode; 1829 int mode;
1861 1830
1862 if (xp->xfrm_nr >= XFRM_MAX_DEPTH) 1831 if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
@@ -1881,31 +1850,19 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1881 1850
1882 /* addresses present only in tunnel mode */ 1851 /* addresses present only in tunnel mode */
1883 if (t->mode == XFRM_MODE_TUNNEL) { 1852 if (t->mode == XFRM_MODE_TUNNEL) {
1884 struct sockaddr *sa; 1853 u8 *sa = (u8 *) (rq + 1);
1885 sa = (struct sockaddr *)(rq+1); 1854 int family, socklen;
1886 switch(sa->sa_family) { 1855
1887 case AF_INET: 1856 family = pfkey_sockaddr_extract((struct sockaddr *)sa,
1888 sin = (struct sockaddr_in*)sa; 1857 &t->saddr);
1889 t->saddr.a4 = sin->sin_addr.s_addr; 1858 if (!family)
1890 sin++;
1891 if (sin->sin_family != AF_INET)
1892 return -EINVAL;
1893 t->id.daddr.a4 = sin->sin_addr.s_addr;
1894 break;
1895#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1896 case AF_INET6:
1897 sin6 = (struct sockaddr_in6*)sa;
1898 memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1899 sin6++;
1900 if (sin6->sin6_family != AF_INET6)
1901 return -EINVAL;
1902 memcpy(t->id.daddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1903 break;
1904#endif
1905 default:
1906 return -EINVAL; 1859 return -EINVAL;
1907 } 1860
1908 t->encap_family = sa->sa_family; 1861 socklen = pfkey_sockaddr_len(family);
1862 if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
1863 &t->id.daddr) != family)
1864 return -EINVAL;
1865 t->encap_family = family;
1909 } else 1866 } else
1910 t->encap_family = xp->family; 1867 t->encap_family = xp->family;
1911 1868
@@ -1952,9 +1909,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
1952 1909
1953 for (i=0; i<xp->xfrm_nr; i++) { 1910 for (i=0; i<xp->xfrm_nr; i++) {
1954 t = xp->xfrm_vec + i; 1911 t = xp->xfrm_vec + i;
1955 socklen += (t->encap_family == AF_INET ? 1912 socklen += pfkey_sockaddr_len(t->encap_family);
1956 sizeof(struct sockaddr_in) :
1957 sizeof(struct sockaddr_in6));
1958 } 1913 }
1959 1914
1960 return sizeof(struct sadb_msg) + 1915 return sizeof(struct sadb_msg) +
@@ -1987,18 +1942,12 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
1987 struct sadb_address *addr; 1942 struct sadb_address *addr;
1988 struct sadb_lifetime *lifetime; 1943 struct sadb_lifetime *lifetime;
1989 struct sadb_x_policy *pol; 1944 struct sadb_x_policy *pol;
1990 struct sockaddr_in *sin;
1991 struct sadb_x_sec_ctx *sec_ctx; 1945 struct sadb_x_sec_ctx *sec_ctx;
1992 struct xfrm_sec_ctx *xfrm_ctx; 1946 struct xfrm_sec_ctx *xfrm_ctx;
1993#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1994 struct sockaddr_in6 *sin6;
1995#endif
1996 int i; 1947 int i;
1997 int size; 1948 int size;
1998 int sockaddr_size = pfkey_sockaddr_size(xp->family); 1949 int sockaddr_size = pfkey_sockaddr_size(xp->family);
1999 int socklen = (xp->family == AF_INET ? 1950 int socklen = pfkey_sockaddr_len(xp->family);
2000 sizeof(struct sockaddr_in) :
2001 sizeof(struct sockaddr_in6));
2002 1951
2003 size = pfkey_xfrm_policy2msg_size(xp); 1952 size = pfkey_xfrm_policy2msg_size(xp);
2004 1953
@@ -2016,26 +1965,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2016 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1965 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2017 addr->sadb_address_prefixlen = xp->selector.prefixlen_s; 1966 addr->sadb_address_prefixlen = xp->selector.prefixlen_s;
2018 addr->sadb_address_reserved = 0; 1967 addr->sadb_address_reserved = 0;
2019 /* src address */ 1968 if (!pfkey_sockaddr_fill(&xp->selector.saddr,
2020 if (xp->family == AF_INET) { 1969 xp->selector.sport,
2021 sin = (struct sockaddr_in *) (addr + 1); 1970 (struct sockaddr *) (addr + 1),
2022 sin->sin_family = AF_INET; 1971 xp->family))
2023 sin->sin_addr.s_addr = xp->selector.saddr.a4;
2024 sin->sin_port = xp->selector.sport;
2025 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2026 }
2027#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2028 else if (xp->family == AF_INET6) {
2029 sin6 = (struct sockaddr_in6 *) (addr + 1);
2030 sin6->sin6_family = AF_INET6;
2031 sin6->sin6_port = xp->selector.sport;
2032 sin6->sin6_flowinfo = 0;
2033 memcpy(&sin6->sin6_addr, xp->selector.saddr.a6,
2034 sizeof(struct in6_addr));
2035 sin6->sin6_scope_id = 0;
2036 }
2037#endif
2038 else
2039 BUG(); 1972 BUG();
2040 1973
2041 /* dst address */ 1974 /* dst address */
@@ -2048,26 +1981,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2048 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1981 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2049 addr->sadb_address_prefixlen = xp->selector.prefixlen_d; 1982 addr->sadb_address_prefixlen = xp->selector.prefixlen_d;
2050 addr->sadb_address_reserved = 0; 1983 addr->sadb_address_reserved = 0;
2051 if (xp->family == AF_INET) { 1984
2052 sin = (struct sockaddr_in *) (addr + 1); 1985 pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport,
2053 sin->sin_family = AF_INET; 1986 (struct sockaddr *) (addr + 1),
2054 sin->sin_addr.s_addr = xp->selector.daddr.a4; 1987 xp->family);
2055 sin->sin_port = xp->selector.dport;
2056 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2057 }
2058#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2059 else if (xp->family == AF_INET6) {
2060 sin6 = (struct sockaddr_in6 *) (addr + 1);
2061 sin6->sin6_family = AF_INET6;
2062 sin6->sin6_port = xp->selector.dport;
2063 sin6->sin6_flowinfo = 0;
2064 memcpy(&sin6->sin6_addr, xp->selector.daddr.a6,
2065 sizeof(struct in6_addr));
2066 sin6->sin6_scope_id = 0;
2067 }
2068#endif
2069 else
2070 BUG();
2071 1988
2072 /* hard time */ 1989 /* hard time */
2073 lifetime = (struct sadb_lifetime *) skb_put(skb, 1990 lifetime = (struct sadb_lifetime *) skb_put(skb,
@@ -2121,12 +2038,13 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2121 int mode; 2038 int mode;
2122 2039
2123 req_size = sizeof(struct sadb_x_ipsecrequest); 2040 req_size = sizeof(struct sadb_x_ipsecrequest);
2124 if (t->mode == XFRM_MODE_TUNNEL) 2041 if (t->mode == XFRM_MODE_TUNNEL) {
2125 req_size += ((t->encap_family == AF_INET ? 2042 socklen = pfkey_sockaddr_len(t->encap_family);
2126 sizeof(struct sockaddr_in) : 2043 req_size += socklen * 2;
2127 sizeof(struct sockaddr_in6)) * 2); 2044 } else {
2128 else
2129 size -= 2*socklen; 2045 size -= 2*socklen;
2046 socklen = 0;
2047 }
2130 rq = (void*)skb_put(skb, req_size); 2048 rq = (void*)skb_put(skb, req_size);
2131 pol->sadb_x_policy_len += req_size/8; 2049 pol->sadb_x_policy_len += req_size/8;
2132 memset(rq, 0, sizeof(*rq)); 2050 memset(rq, 0, sizeof(*rq));
@@ -2141,42 +2059,15 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2141 if (t->optional) 2059 if (t->optional)
2142 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; 2060 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
2143 rq->sadb_x_ipsecrequest_reqid = t->reqid; 2061 rq->sadb_x_ipsecrequest_reqid = t->reqid;
2062
2144 if (t->mode == XFRM_MODE_TUNNEL) { 2063 if (t->mode == XFRM_MODE_TUNNEL) {
2145 switch (t->encap_family) { 2064 u8 *sa = (void *)(rq + 1);
2146 case AF_INET: 2065 pfkey_sockaddr_fill(&t->saddr, 0,
2147 sin = (void*)(rq+1); 2066 (struct sockaddr *)sa,
2148 sin->sin_family = AF_INET; 2067 t->encap_family);
2149 sin->sin_addr.s_addr = t->saddr.a4; 2068 pfkey_sockaddr_fill(&t->id.daddr, 0,
2150 sin->sin_port = 0; 2069 (struct sockaddr *) (sa + socklen),
2151 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 2070 t->encap_family);
2152 sin++;
2153 sin->sin_family = AF_INET;
2154 sin->sin_addr.s_addr = t->id.daddr.a4;
2155 sin->sin_port = 0;
2156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2157 break;
2158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2159 case AF_INET6:
2160 sin6 = (void*)(rq+1);
2161 sin6->sin6_family = AF_INET6;
2162 sin6->sin6_port = 0;
2163 sin6->sin6_flowinfo = 0;
2164 memcpy(&sin6->sin6_addr, t->saddr.a6,
2165 sizeof(struct in6_addr));
2166 sin6->sin6_scope_id = 0;
2167
2168 sin6++;
2169 sin6->sin6_family = AF_INET6;
2170 sin6->sin6_port = 0;
2171 sin6->sin6_flowinfo = 0;
2172 memcpy(&sin6->sin6_addr, t->id.daddr.a6,
2173 sizeof(struct in6_addr));
2174 sin6->sin6_scope_id = 0;
2175 break;
2176#endif
2177 default:
2178 break;
2179 }
2180 } 2071 }
2181 } 2072 }
2182 2073
@@ -2459,61 +2350,31 @@ out:
2459#ifdef CONFIG_NET_KEY_MIGRATE 2350#ifdef CONFIG_NET_KEY_MIGRATE
2460static int pfkey_sockaddr_pair_size(sa_family_t family) 2351static int pfkey_sockaddr_pair_size(sa_family_t family)
2461{ 2352{
2462 switch (family) { 2353 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
2463 case AF_INET:
2464 return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
2465#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2466 case AF_INET6:
2467 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
2468#endif
2469 default:
2470 return 0;
2471 }
2472 /* NOTREACHED */
2473} 2354}
2474 2355
2475static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq, 2356static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
2476 xfrm_address_t *saddr, xfrm_address_t *daddr, 2357 xfrm_address_t *saddr, xfrm_address_t *daddr,
2477 u16 *family) 2358 u16 *family)
2478{ 2359{
2479 struct sockaddr *sa = (struct sockaddr *)(rq + 1); 2360 u8 *sa = (u8 *) (rq + 1);
2361 int af, socklen;
2362
2480 if (rq->sadb_x_ipsecrequest_len < 2363 if (rq->sadb_x_ipsecrequest_len <
2481 pfkey_sockaddr_pair_size(sa->sa_family)) 2364 pfkey_sockaddr_pair_size(((struct sockaddr *)sa)->sa_family))
2482 return -EINVAL; 2365 return -EINVAL;
2483 2366
2484 switch (sa->sa_family) { 2367 af = pfkey_sockaddr_extract((struct sockaddr *) sa,
2485 case AF_INET: 2368 saddr);
2486 { 2369 if (!af)
2487 struct sockaddr_in *sin;
2488 sin = (struct sockaddr_in *)sa;
2489 if ((sin+1)->sin_family != AF_INET)
2490 return -EINVAL;
2491 memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
2492 sin++;
2493 memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
2494 *family = AF_INET;
2495 break;
2496 }
2497#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2498 case AF_INET6:
2499 {
2500 struct sockaddr_in6 *sin6;
2501 sin6 = (struct sockaddr_in6 *)sa;
2502 if ((sin6+1)->sin6_family != AF_INET6)
2503 return -EINVAL;
2504 memcpy(&saddr->a6, &sin6->sin6_addr,
2505 sizeof(saddr->a6));
2506 sin6++;
2507 memcpy(&daddr->a6, &sin6->sin6_addr,
2508 sizeof(daddr->a6));
2509 *family = AF_INET6;
2510 break;
2511 }
2512#endif
2513 default:
2514 return -EINVAL; 2370 return -EINVAL;
2515 }
2516 2371
2372 socklen = pfkey_sockaddr_len(af);
2373 if (pfkey_sockaddr_extract((struct sockaddr *) (sa + socklen),
2374 daddr) != af)
2375 return -EINVAL;
2376
2377 *family = af;
2517 return 0; 2378 return 0;
2518} 2379}
2519 2380
@@ -3094,10 +2955,6 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3094 struct sadb_msg *hdr; 2955 struct sadb_msg *hdr;
3095 struct sadb_address *addr; 2956 struct sadb_address *addr;
3096 struct sadb_x_policy *pol; 2957 struct sadb_x_policy *pol;
3097 struct sockaddr_in *sin;
3098#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3099 struct sockaddr_in6 *sin6;
3100#endif
3101 int sockaddr_size; 2958 int sockaddr_size;
3102 int size; 2959 int size;
3103 struct sadb_x_sec_ctx *sec_ctx; 2960 struct sadb_x_sec_ctx *sec_ctx;
@@ -3146,29 +3003,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3146 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3003 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3147 addr->sadb_address_proto = 0; 3004 addr->sadb_address_proto = 0;
3148 addr->sadb_address_reserved = 0; 3005 addr->sadb_address_reserved = 0;
3149 if (x->props.family == AF_INET) { 3006 addr->sadb_address_prefixlen =
3150 addr->sadb_address_prefixlen = 32; 3007 pfkey_sockaddr_fill(&x->props.saddr, 0,
3151 3008 (struct sockaddr *) (addr + 1),
3152 sin = (struct sockaddr_in *) (addr + 1); 3009 x->props.family);
3153 sin->sin_family = AF_INET; 3010 if (!addr->sadb_address_prefixlen)
3154 sin->sin_addr.s_addr = x->props.saddr.a4;
3155 sin->sin_port = 0;
3156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3157 }
3158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3159 else if (x->props.family == AF_INET6) {
3160 addr->sadb_address_prefixlen = 128;
3161
3162 sin6 = (struct sockaddr_in6 *) (addr + 1);
3163 sin6->sin6_family = AF_INET6;
3164 sin6->sin6_port = 0;
3165 sin6->sin6_flowinfo = 0;
3166 memcpy(&sin6->sin6_addr,
3167 x->props.saddr.a6, sizeof(struct in6_addr));
3168 sin6->sin6_scope_id = 0;
3169 }
3170#endif
3171 else
3172 BUG(); 3011 BUG();
3173 3012
3174 /* dst address */ 3013 /* dst address */
@@ -3180,29 +3019,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3180 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3019 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3181 addr->sadb_address_proto = 0; 3020 addr->sadb_address_proto = 0;
3182 addr->sadb_address_reserved = 0; 3021 addr->sadb_address_reserved = 0;
3183 if (x->props.family == AF_INET) { 3022 addr->sadb_address_prefixlen =
3184 addr->sadb_address_prefixlen = 32; 3023 pfkey_sockaddr_fill(&x->id.daddr, 0,
3185 3024 (struct sockaddr *) (addr + 1),
3186 sin = (struct sockaddr_in *) (addr + 1); 3025 x->props.family);
3187 sin->sin_family = AF_INET; 3026 if (!addr->sadb_address_prefixlen)
3188 sin->sin_addr.s_addr = x->id.daddr.a4;
3189 sin->sin_port = 0;
3190 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3191 }
3192#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3193 else if (x->props.family == AF_INET6) {
3194 addr->sadb_address_prefixlen = 128;
3195
3196 sin6 = (struct sockaddr_in6 *) (addr + 1);
3197 sin6->sin6_family = AF_INET6;
3198 sin6->sin6_port = 0;
3199 sin6->sin6_flowinfo = 0;
3200 memcpy(&sin6->sin6_addr,
3201 x->id.daddr.a6, sizeof(struct in6_addr));
3202 sin6->sin6_scope_id = 0;
3203 }
3204#endif
3205 else
3206 BUG(); 3027 BUG();
3207 3028
3208 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); 3029 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy));
@@ -3328,10 +3149,6 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3328 struct sadb_sa *sa; 3149 struct sadb_sa *sa;
3329 struct sadb_address *addr; 3150 struct sadb_address *addr;
3330 struct sadb_x_nat_t_port *n_port; 3151 struct sadb_x_nat_t_port *n_port;
3331 struct sockaddr_in *sin;
3332#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3333 struct sockaddr_in6 *sin6;
3334#endif
3335 int sockaddr_size; 3152 int sockaddr_size;
3336 int size; 3153 int size;
3337 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0); 3154 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0);
@@ -3395,29 +3212,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3395 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3212 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3396 addr->sadb_address_proto = 0; 3213 addr->sadb_address_proto = 0;
3397 addr->sadb_address_reserved = 0; 3214 addr->sadb_address_reserved = 0;
3398 if (x->props.family == AF_INET) { 3215 addr->sadb_address_prefixlen =
3399 addr->sadb_address_prefixlen = 32; 3216 pfkey_sockaddr_fill(&x->props.saddr, 0,
3400 3217 (struct sockaddr *) (addr + 1),
3401 sin = (struct sockaddr_in *) (addr + 1); 3218 x->props.family);
3402 sin->sin_family = AF_INET; 3219 if (!addr->sadb_address_prefixlen)
3403 sin->sin_addr.s_addr = x->props.saddr.a4;
3404 sin->sin_port = 0;
3405 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3406 }
3407#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3408 else if (x->props.family == AF_INET6) {
3409 addr->sadb_address_prefixlen = 128;
3410
3411 sin6 = (struct sockaddr_in6 *) (addr + 1);
3412 sin6->sin6_family = AF_INET6;
3413 sin6->sin6_port = 0;
3414 sin6->sin6_flowinfo = 0;
3415 memcpy(&sin6->sin6_addr,
3416 x->props.saddr.a6, sizeof(struct in6_addr));
3417 sin6->sin6_scope_id = 0;
3418 }
3419#endif
3420 else
3421 BUG(); 3220 BUG();
3422 3221
3423 /* NAT_T_SPORT (old port) */ 3222 /* NAT_T_SPORT (old port) */
@@ -3436,28 +3235,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3436 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3235 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3437 addr->sadb_address_proto = 0; 3236 addr->sadb_address_proto = 0;
3438 addr->sadb_address_reserved = 0; 3237 addr->sadb_address_reserved = 0;
3439 if (x->props.family == AF_INET) { 3238 addr->sadb_address_prefixlen =
3440 addr->sadb_address_prefixlen = 32; 3239 pfkey_sockaddr_fill(ipaddr, 0,
3441 3240 (struct sockaddr *) (addr + 1),
3442 sin = (struct sockaddr_in *) (addr + 1); 3241 x->props.family);
3443 sin->sin_family = AF_INET; 3242 if (!addr->sadb_address_prefixlen)
3444 sin->sin_addr.s_addr = ipaddr->a4;
3445 sin->sin_port = 0;
3446 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3447 }
3448#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3449 else if (x->props.family == AF_INET6) {
3450 addr->sadb_address_prefixlen = 128;
3451
3452 sin6 = (struct sockaddr_in6 *) (addr + 1);
3453 sin6->sin6_family = AF_INET6;
3454 sin6->sin6_port = 0;
3455 sin6->sin6_flowinfo = 0;
3456 memcpy(&sin6->sin6_addr, &ipaddr->a6, sizeof(struct in6_addr));
3457 sin6->sin6_scope_id = 0;
3458 }
3459#endif
3460 else
3461 BUG(); 3243 BUG();
3462 3244
3463 /* NAT_T_DPORT (new port) */ 3245 /* NAT_T_DPORT (new port) */
@@ -3475,10 +3257,6 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3475 struct xfrm_selector *sel) 3257 struct xfrm_selector *sel)
3476{ 3258{
3477 struct sadb_address *addr; 3259 struct sadb_address *addr;
3478 struct sockaddr_in *sin;
3479#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3480 struct sockaddr_in6 *sin6;
3481#endif
3482 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize); 3260 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
3483 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8; 3261 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
3484 addr->sadb_address_exttype = type; 3262 addr->sadb_address_exttype = type;
@@ -3487,50 +3265,16 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3487 3265
3488 switch (type) { 3266 switch (type) {
3489 case SADB_EXT_ADDRESS_SRC: 3267 case SADB_EXT_ADDRESS_SRC:
3490 if (sel->family == AF_INET) { 3268 addr->sadb_address_prefixlen = sel->prefixlen_s;
3491 addr->sadb_address_prefixlen = sel->prefixlen_s; 3269 pfkey_sockaddr_fill(&sel->saddr, 0,
3492 sin = (struct sockaddr_in *)(addr + 1); 3270 (struct sockaddr *)(addr + 1),
3493 sin->sin_family = AF_INET; 3271 sel->family);
3494 memcpy(&sin->sin_addr.s_addr, &sel->saddr,
3495 sizeof(sin->sin_addr.s_addr));
3496 sin->sin_port = 0;
3497 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3498 }
3499#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3500 else if (sel->family == AF_INET6) {
3501 addr->sadb_address_prefixlen = sel->prefixlen_s;
3502 sin6 = (struct sockaddr_in6 *)(addr + 1);
3503 sin6->sin6_family = AF_INET6;
3504 sin6->sin6_port = 0;
3505 sin6->sin6_flowinfo = 0;
3506 sin6->sin6_scope_id = 0;
3507 memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
3508 sizeof(sin6->sin6_addr.s6_addr));
3509 }
3510#endif
3511 break; 3272 break;
3512 case SADB_EXT_ADDRESS_DST: 3273 case SADB_EXT_ADDRESS_DST:
3513 if (sel->family == AF_INET) { 3274 addr->sadb_address_prefixlen = sel->prefixlen_d;
3514 addr->sadb_address_prefixlen = sel->prefixlen_d; 3275 pfkey_sockaddr_fill(&sel->daddr, 0,
3515 sin = (struct sockaddr_in *)(addr + 1); 3276 (struct sockaddr *)(addr + 1),
3516 sin->sin_family = AF_INET; 3277 sel->family);
3517 memcpy(&sin->sin_addr.s_addr, &sel->daddr,
3518 sizeof(sin->sin_addr.s_addr));
3519 sin->sin_port = 0;
3520 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3521 }
3522#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3523 else if (sel->family == AF_INET6) {
3524 addr->sadb_address_prefixlen = sel->prefixlen_d;
3525 sin6 = (struct sockaddr_in6 *)(addr + 1);
3526 sin6->sin6_family = AF_INET6;
3527 sin6->sin6_port = 0;
3528 sin6->sin6_flowinfo = 0;
3529 sin6->sin6_scope_id = 0;
3530 memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
3531 sizeof(sin6->sin6_addr.s6_addr));
3532 }
3533#endif
3534 break; 3278 break;
3535 default: 3279 default:
3536 return -EINVAL; 3280 return -EINVAL;
@@ -3545,10 +3289,8 @@ static int set_ipsecrequest(struct sk_buff *skb,
3545 xfrm_address_t *src, xfrm_address_t *dst) 3289 xfrm_address_t *src, xfrm_address_t *dst)
3546{ 3290{
3547 struct sadb_x_ipsecrequest *rq; 3291 struct sadb_x_ipsecrequest *rq;
3548 struct sockaddr_in *sin; 3292 u8 *sa;
3549#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3293 int socklen = pfkey_sockaddr_len(family);
3550 struct sockaddr_in6 *sin6;
3551#endif
3552 int size_req; 3294 int size_req;
3553 3295
3554 size_req = sizeof(struct sadb_x_ipsecrequest) + 3296 size_req = sizeof(struct sadb_x_ipsecrequest) +
@@ -3562,38 +3304,10 @@ static int set_ipsecrequest(struct sk_buff *skb,
3562 rq->sadb_x_ipsecrequest_level = level; 3304 rq->sadb_x_ipsecrequest_level = level;
3563 rq->sadb_x_ipsecrequest_reqid = reqid; 3305 rq->sadb_x_ipsecrequest_reqid = reqid;
3564 3306
3565 switch (family) { 3307 sa = (u8 *) (rq + 1);
3566 case AF_INET: 3308 if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
3567 sin = (struct sockaddr_in *)(rq + 1); 3309 !pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family))
3568 sin->sin_family = AF_INET;
3569 memcpy(&sin->sin_addr.s_addr, src,
3570 sizeof(sin->sin_addr.s_addr));
3571 sin++;
3572 sin->sin_family = AF_INET;
3573 memcpy(&sin->sin_addr.s_addr, dst,
3574 sizeof(sin->sin_addr.s_addr));
3575 break;
3576#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3577 case AF_INET6:
3578 sin6 = (struct sockaddr_in6 *)(rq + 1);
3579 sin6->sin6_family = AF_INET6;
3580 sin6->sin6_port = 0;
3581 sin6->sin6_flowinfo = 0;
3582 sin6->sin6_scope_id = 0;
3583 memcpy(&sin6->sin6_addr.s6_addr, src,
3584 sizeof(sin6->sin6_addr.s6_addr));
3585 sin6++;
3586 sin6->sin6_family = AF_INET6;
3587 sin6->sin6_port = 0;
3588 sin6->sin6_flowinfo = 0;
3589 sin6->sin6_scope_id = 0;
3590 memcpy(&sin6->sin6_addr.s6_addr, dst,
3591 sizeof(sin6->sin6_addr.s6_addr));
3592 break;
3593#endif
3594 default:
3595 return -EINVAL; 3310 return -EINVAL;
3596 }
3597 3311
3598 return 0; 3312 return 0;
3599} 3313}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a24b459dd45a..590e00b2766c 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,11 +7,23 @@ config MAC80211
7 select CRC32 7 select CRC32
8 select WIRELESS_EXT 8 select WIRELESS_EXT
9 select CFG80211 9 select CFG80211
10 select NET_SCH_FIFO
11 ---help--- 10 ---help---
12 This option enables the hardware independent IEEE 802.11 11 This option enables the hardware independent IEEE 802.11
13 networking stack. 12 networking stack.
14 13
14config MAC80211_QOS
15 def_bool y
16 depends on MAC80211
17 depends on NET_SCHED
18 depends on NETDEVICES_MULTIQUEUE
19
20comment "QoS/HT support disabled"
21 depends on MAC80211 && !MAC80211_QOS
22comment "QoS/HT support needs CONFIG_NET_SCHED"
23 depends on MAC80211 && !NET_SCHED
24comment "QoS/HT support needs CONFIG_NETDEVICES_MULTIQUEUE"
25 depends on MAC80211 && !NETDEVICES_MULTIQUEUE
26
15menu "Rate control algorithm selection" 27menu "Rate control algorithm selection"
16 depends on MAC80211 != n 28 depends on MAC80211 != n
17 29
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4e5847fd316c..1d2a4e010e5c 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -29,7 +29,7 @@ mac80211-y := \
29 event.o 29 event.o
30 30
31mac80211-$(CONFIG_MAC80211_LEDS) += led.o 31mac80211-$(CONFIG_MAC80211_LEDS) += led.o
32mac80211-$(CONFIG_NET_SCHED) += wme.o 32mac80211-$(CONFIG_MAC80211_QOS) += wme.o
33mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 33mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
34 debugfs.o \ 34 debugfs.o \
35 debugfs_sta.o \ 35 debugfs_sta.o \
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 59f1691f62c8..4d4c2dfcf9a0 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -134,7 +134,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
134} 134}
135 135
136 136
137struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]) 137struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
138{ 138{
139 struct crypto_cipher *tfm; 139 struct crypto_cipher *tfm;
140 140
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 885f19030b29..8cd0f14aab4d 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -14,7 +14,7 @@
14 14
15#define AES_BLOCK_LEN 16 15#define AES_BLOCK_LEN 16
16 16
17struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]); 17struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, 18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
19 u8 *b_0, u8 *aad, u8 *data, size_t data_len, 19 u8 *b_0, u8 *aad, u8 *data, size_t data_len,
20 u8 *cdata, u8 *mic); 20 u8 *cdata, u8 *mic);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a9fce4afdf21..81087281b031 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -256,8 +256,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
256 case ALG_TKIP: 256 case ALG_TKIP:
257 params.cipher = WLAN_CIPHER_SUITE_TKIP; 257 params.cipher = WLAN_CIPHER_SUITE_TKIP;
258 258
259 iv32 = key->u.tkip.iv32; 259 iv32 = key->u.tkip.tx.iv32;
260 iv16 = key->u.tkip.iv16; 260 iv16 = key->u.tkip.tx.iv16;
261 261
262 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 262 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
263 sdata->local->ops->get_tkip_seq) 263 sdata->local->ops->get_tkip_seq)
@@ -602,6 +602,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
602 */ 602 */
603 603
604 if (params->station_flags & STATION_FLAG_CHANGED) { 604 if (params->station_flags & STATION_FLAG_CHANGED) {
605 spin_lock_bh(&sta->lock);
605 sta->flags &= ~WLAN_STA_AUTHORIZED; 606 sta->flags &= ~WLAN_STA_AUTHORIZED;
606 if (params->station_flags & STATION_FLAG_AUTHORIZED) 607 if (params->station_flags & STATION_FLAG_AUTHORIZED)
607 sta->flags |= WLAN_STA_AUTHORIZED; 608 sta->flags |= WLAN_STA_AUTHORIZED;
@@ -613,6 +614,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
613 sta->flags &= ~WLAN_STA_WME; 614 sta->flags &= ~WLAN_STA_WME;
614 if (params->station_flags & STATION_FLAG_WME) 615 if (params->station_flags & STATION_FLAG_WME)
615 sta->flags |= WLAN_STA_WME; 616 sta->flags |= WLAN_STA_WME;
617 spin_unlock_bh(&sta->lock);
616 } 618 }
617 619
618 /* 620 /*
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1cccbfd781f6..d20d90eead1f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -197,45 +197,6 @@ DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
197DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", 197DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
198 local->tx_status_drop); 198 local->tx_status_drop);
199 199
200static ssize_t stats_wme_rx_queue_read(struct file *file,
201 char __user *userbuf,
202 size_t count, loff_t *ppos)
203{
204 struct ieee80211_local *local = file->private_data;
205 char buf[NUM_RX_DATA_QUEUES*15], *p = buf;
206 int i;
207
208 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
209 p += scnprintf(p, sizeof(buf)+buf-p,
210 "%u\n", local->wme_rx_queue[i]);
211
212 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
213}
214
215static const struct file_operations stats_wme_rx_queue_ops = {
216 .read = stats_wme_rx_queue_read,
217 .open = mac80211_open_file_generic,
218};
219
220static ssize_t stats_wme_tx_queue_read(struct file *file,
221 char __user *userbuf,
222 size_t count, loff_t *ppos)
223{
224 struct ieee80211_local *local = file->private_data;
225 char buf[NUM_TX_DATA_QUEUES*15], *p = buf;
226 int i;
227
228 for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
229 p += scnprintf(p, sizeof(buf)+buf-p,
230 "%u\n", local->wme_tx_queue[i]);
231
232 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
233}
234
235static const struct file_operations stats_wme_tx_queue_ops = {
236 .read = stats_wme_tx_queue_read,
237 .open = mac80211_open_file_generic,
238};
239#endif 200#endif
240 201
241DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); 202DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
@@ -303,8 +264,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
303 DEBUGFS_STATS_ADD(rx_expand_skb_head2); 264 DEBUGFS_STATS_ADD(rx_expand_skb_head2);
304 DEBUGFS_STATS_ADD(rx_handlers_fragments); 265 DEBUGFS_STATS_ADD(rx_handlers_fragments);
305 DEBUGFS_STATS_ADD(tx_status_drop); 266 DEBUGFS_STATS_ADD(tx_status_drop);
306 DEBUGFS_STATS_ADD(wme_tx_queue);
307 DEBUGFS_STATS_ADD(wme_rx_queue);
308#endif 267#endif
309 DEBUGFS_STATS_ADD(dot11ACKFailureCount); 268 DEBUGFS_STATS_ADD(dot11ACKFailureCount);
310 DEBUGFS_STATS_ADD(dot11RTSFailureCount); 269 DEBUGFS_STATS_ADD(dot11RTSFailureCount);
@@ -356,8 +315,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
356 DEBUGFS_STATS_DEL(rx_expand_skb_head2); 315 DEBUGFS_STATS_DEL(rx_expand_skb_head2);
357 DEBUGFS_STATS_DEL(rx_handlers_fragments); 316 DEBUGFS_STATS_DEL(rx_handlers_fragments);
358 DEBUGFS_STATS_DEL(tx_status_drop); 317 DEBUGFS_STATS_DEL(tx_status_drop);
359 DEBUGFS_STATS_DEL(wme_tx_queue);
360 DEBUGFS_STATS_DEL(wme_rx_queue);
361#endif 318#endif
362 DEBUGFS_STATS_DEL(dot11ACKFailureCount); 319 DEBUGFS_STATS_DEL(dot11ACKFailureCount);
363 DEBUGFS_STATS_DEL(dot11RTSFailureCount); 320 DEBUGFS_STATS_DEL(dot11RTSFailureCount);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 19efc3a6a932..7439b63df5d0 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -97,8 +97,8 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
97 break; 97 break;
98 case ALG_TKIP: 98 case ALG_TKIP:
99 len = scnprintf(buf, sizeof(buf), "%08x %04x\n", 99 len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
100 key->u.tkip.iv32, 100 key->u.tkip.tx.iv32,
101 key->u.tkip.iv16); 101 key->u.tkip.tx.iv16);
102 break; 102 break;
103 case ALG_CCMP: 103 case ALG_CCMP:
104 tpn = key->u.ccmp.tx_pn; 104 tpn = key->u.ccmp.tx_pn;
@@ -128,8 +128,8 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
128 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 128 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
129 p += scnprintf(p, sizeof(buf)+buf-p, 129 p += scnprintf(p, sizeof(buf)+buf-p,
130 "%08x %04x\n", 130 "%08x %04x\n",
131 key->u.tkip.iv32_rx[i], 131 key->u.tkip.rx[i].iv32,
132 key->u.tkip.iv16_rx[i]); 132 key->u.tkip.rx[i].iv16);
133 len = p - buf; 133 len = p - buf;
134 break; 134 break;
135 case ALG_CCMP: 135 case ALG_CCMP:
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e3326d046944..b2089b2da48a 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -155,7 +155,6 @@ static const struct file_operations name##_ops = { \
155 __IEEE80211_IF_WFILE(name) 155 __IEEE80211_IF_WFILE(name)
156 156
157/* common attributes */ 157/* common attributes */
158IEEE80211_IF_FILE(channel_use, channel_use, DEC);
159IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 158IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
160 159
161/* STA/IBSS attributes */ 160/* STA/IBSS attributes */
@@ -248,7 +247,6 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
248 247
249static void add_sta_files(struct ieee80211_sub_if_data *sdata) 248static void add_sta_files(struct ieee80211_sub_if_data *sdata)
250{ 249{
251 DEBUGFS_ADD(channel_use, sta);
252 DEBUGFS_ADD(drop_unencrypted, sta); 250 DEBUGFS_ADD(drop_unencrypted, sta);
253 DEBUGFS_ADD(state, sta); 251 DEBUGFS_ADD(state, sta);
254 DEBUGFS_ADD(bssid, sta); 252 DEBUGFS_ADD(bssid, sta);
@@ -269,7 +267,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
269 267
270static void add_ap_files(struct ieee80211_sub_if_data *sdata) 268static void add_ap_files(struct ieee80211_sub_if_data *sdata)
271{ 269{
272 DEBUGFS_ADD(channel_use, ap);
273 DEBUGFS_ADD(drop_unencrypted, ap); 270 DEBUGFS_ADD(drop_unencrypted, ap);
274 DEBUGFS_ADD(num_sta_ps, ap); 271 DEBUGFS_ADD(num_sta_ps, ap);
275 DEBUGFS_ADD(dtim_count, ap); 272 DEBUGFS_ADD(dtim_count, ap);
@@ -281,14 +278,12 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
281 278
282static void add_wds_files(struct ieee80211_sub_if_data *sdata) 279static void add_wds_files(struct ieee80211_sub_if_data *sdata)
283{ 280{
284 DEBUGFS_ADD(channel_use, wds);
285 DEBUGFS_ADD(drop_unencrypted, wds); 281 DEBUGFS_ADD(drop_unencrypted, wds);
286 DEBUGFS_ADD(peer, wds); 282 DEBUGFS_ADD(peer, wds);
287} 283}
288 284
289static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 285static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
290{ 286{
291 DEBUGFS_ADD(channel_use, vlan);
292 DEBUGFS_ADD(drop_unencrypted, vlan); 287 DEBUGFS_ADD(drop_unencrypted, vlan);
293} 288}
294 289
@@ -376,7 +371,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
376 371
377static void del_sta_files(struct ieee80211_sub_if_data *sdata) 372static void del_sta_files(struct ieee80211_sub_if_data *sdata)
378{ 373{
379 DEBUGFS_DEL(channel_use, sta);
380 DEBUGFS_DEL(drop_unencrypted, sta); 374 DEBUGFS_DEL(drop_unencrypted, sta);
381 DEBUGFS_DEL(state, sta); 375 DEBUGFS_DEL(state, sta);
382 DEBUGFS_DEL(bssid, sta); 376 DEBUGFS_DEL(bssid, sta);
@@ -397,7 +391,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
397 391
398static void del_ap_files(struct ieee80211_sub_if_data *sdata) 392static void del_ap_files(struct ieee80211_sub_if_data *sdata)
399{ 393{
400 DEBUGFS_DEL(channel_use, ap);
401 DEBUGFS_DEL(drop_unencrypted, ap); 394 DEBUGFS_DEL(drop_unencrypted, ap);
402 DEBUGFS_DEL(num_sta_ps, ap); 395 DEBUGFS_DEL(num_sta_ps, ap);
403 DEBUGFS_DEL(dtim_count, ap); 396 DEBUGFS_DEL(dtim_count, ap);
@@ -409,14 +402,12 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
409 402
410static void del_wds_files(struct ieee80211_sub_if_data *sdata) 403static void del_wds_files(struct ieee80211_sub_if_data *sdata)
411{ 404{
412 DEBUGFS_DEL(channel_use, wds);
413 DEBUGFS_DEL(drop_unencrypted, wds); 405 DEBUGFS_DEL(drop_unencrypted, wds);
414 DEBUGFS_DEL(peer, wds); 406 DEBUGFS_DEL(peer, wds);
415} 407}
416 408
417static void del_vlan_files(struct ieee80211_sub_if_data *sdata) 409static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
418{ 410{
419 DEBUGFS_DEL(channel_use, vlan);
420 DEBUGFS_DEL(drop_unencrypted, vlan); 411 DEBUGFS_DEL(drop_unencrypted, vlan);
421} 412}
422 413
@@ -528,7 +519,7 @@ void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata,
528 add_files(sdata); 519 add_files(sdata);
529} 520}
530 521
531static int netdev_notify(struct notifier_block * nb, 522static int netdev_notify(struct notifier_block *nb,
532 unsigned long state, 523 unsigned long state,
533 void *ndev) 524 void *ndev)
534{ 525{
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6d47a1d31b37..79a062782d52 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,9 @@ STA_FILE(tx_fragments, tx_fragments, LU);
63STA_FILE(tx_filtered, tx_filtered_count, LU); 63STA_FILE(tx_filtered, tx_filtered_count, LU);
64STA_FILE(tx_retry_failed, tx_retry_failed, LU); 64STA_FILE(tx_retry_failed, tx_retry_failed, LU);
65STA_FILE(tx_retry_count, tx_retry_count, LU); 65STA_FILE(tx_retry_count, tx_retry_count, LU);
66STA_FILE(last_rssi, last_rssi, D);
67STA_FILE(last_signal, last_signal, D); 66STA_FILE(last_signal, last_signal, D);
67STA_FILE(last_qual, last_qual, D);
68STA_FILE(last_noise, last_noise, D); 68STA_FILE(last_noise, last_noise, D);
69STA_FILE(channel_use, channel_use, D);
70STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); 69STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
71 70
72static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 71static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -74,14 +73,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
74{ 73{
75 char buf[100]; 74 char buf[100];
76 struct sta_info *sta = file->private_data; 75 struct sta_info *sta = file->private_data;
76 u32 staflags = get_sta_flags(sta);
77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", 77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
78 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", 78 staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
79 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 79 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
80 sta->flags & WLAN_STA_PS ? "PS\n" : "", 80 staflags & WLAN_STA_PS ? "PS\n" : "",
81 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 81 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
82 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 82 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
83 sta->flags & WLAN_STA_WME ? "WME\n" : "", 83 staflags & WLAN_STA_WME ? "WME\n" : "",
84 sta->flags & WLAN_STA_WDS ? "WDS\n" : ""); 84 staflags & WLAN_STA_WDS ? "WDS\n" : "");
85 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 85 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
86} 86}
87STA_OPS(flags); 87STA_OPS(flags);
@@ -123,36 +123,6 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
123} 123}
124STA_OPS(last_seq_ctrl); 124STA_OPS(last_seq_ctrl);
125 125
126#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
127static ssize_t sta_wme_rx_queue_read(struct file *file, char __user *userbuf,
128 size_t count, loff_t *ppos)
129{
130 char buf[15*NUM_RX_DATA_QUEUES], *p = buf;
131 int i;
132 struct sta_info *sta = file->private_data;
133 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
134 p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
135 sta->wme_rx_queue[i]);
136 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
137 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
138}
139STA_OPS(wme_rx_queue);
140
141static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf,
142 size_t count, loff_t *ppos)
143{
144 char buf[15*NUM_TX_DATA_QUEUES], *p = buf;
145 int i;
146 struct sta_info *sta = file->private_data;
147 for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
148 p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
149 sta->wme_tx_queue[i]);
150 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
151 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
152}
153STA_OPS(wme_tx_queue);
154#endif
155
156static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 126static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
157 size_t count, loff_t *ppos) 127 size_t count, loff_t *ppos)
158{ 128{
@@ -293,10 +263,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
293 DEBUGFS_ADD(num_ps_buf_frames); 263 DEBUGFS_ADD(num_ps_buf_frames);
294 DEBUGFS_ADD(inactive_ms); 264 DEBUGFS_ADD(inactive_ms);
295 DEBUGFS_ADD(last_seq_ctrl); 265 DEBUGFS_ADD(last_seq_ctrl);
296#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
297 DEBUGFS_ADD(wme_rx_queue);
298 DEBUGFS_ADD(wme_tx_queue);
299#endif
300 DEBUGFS_ADD(agg_status); 266 DEBUGFS_ADD(agg_status);
301} 267}
302 268
@@ -306,10 +272,6 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
306 DEBUGFS_DEL(num_ps_buf_frames); 272 DEBUGFS_DEL(num_ps_buf_frames);
307 DEBUGFS_DEL(inactive_ms); 273 DEBUGFS_DEL(inactive_ms);
308 DEBUGFS_DEL(last_seq_ctrl); 274 DEBUGFS_DEL(last_seq_ctrl);
309#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
310 DEBUGFS_DEL(wme_rx_queue);
311 DEBUGFS_DEL(wme_tx_queue);
312#endif
313 DEBUGFS_DEL(agg_status); 275 DEBUGFS_DEL(agg_status);
314 276
315 debugfs_remove(sta->debugfs.dir); 277 debugfs_remove(sta->debugfs.dir);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 006486b26726..b19bd16703b2 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,6 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -82,7 +83,7 @@ struct ieee80211_sta_bss {
82 u16 capability; /* host byte order */ 83 u16 capability; /* host byte order */
83 enum ieee80211_band band; 84 enum ieee80211_band band;
84 int freq; 85 int freq;
85 int rssi, signal, noise; 86 int signal, noise, qual;
86 u8 *wpa_ie; 87 u8 *wpa_ie;
87 size_t wpa_ie_len; 88 size_t wpa_ie_len;
88 u8 *rsn_ie; 89 u8 *rsn_ie;
@@ -91,6 +92,8 @@ struct ieee80211_sta_bss {
91 size_t wmm_ie_len; 92 size_t wmm_ie_len;
92 u8 *ht_ie; 93 u8 *ht_ie;
93 size_t ht_ie_len; 94 size_t ht_ie_len;
95 u8 *ht_add_ie;
96 size_t ht_add_ie_len;
94#ifdef CONFIG_MAC80211_MESH 97#ifdef CONFIG_MAC80211_MESH
95 u8 *mesh_id; 98 u8 *mesh_id;
96 size_t mesh_id_len; 99 size_t mesh_id_len;
@@ -147,7 +150,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
147#define IEEE80211_TX_UNICAST BIT(1) 150#define IEEE80211_TX_UNICAST BIT(1)
148#define IEEE80211_TX_PS_BUFFERED BIT(2) 151#define IEEE80211_TX_PS_BUFFERED BIT(2)
149#define IEEE80211_TX_PROBE_LAST_FRAG BIT(3) 152#define IEEE80211_TX_PROBE_LAST_FRAG BIT(3)
150#define IEEE80211_TX_INJECTED BIT(4)
151 153
152struct ieee80211_tx_data { 154struct ieee80211_tx_data {
153 struct sk_buff *skb; 155 struct sk_buff *skb;
@@ -157,13 +159,12 @@ struct ieee80211_tx_data {
157 struct sta_info *sta; 159 struct sta_info *sta;
158 struct ieee80211_key *key; 160 struct ieee80211_key *key;
159 161
160 struct ieee80211_tx_control *control;
161 struct ieee80211_channel *channel; 162 struct ieee80211_channel *channel;
162 struct ieee80211_rate *rate; 163 s8 rate_idx;
163 /* use this rate (if set) for last fragment; rate can 164 /* use this rate (if set) for last fragment; rate can
164 * be set to lower rate for the first fragments, e.g., 165 * be set to lower rate for the first fragments, e.g.,
165 * when using CTS protection with IEEE 802.11g. */ 166 * when using CTS protection with IEEE 802.11g. */
166 struct ieee80211_rate *last_frag_rate; 167 s8 last_frag_rate_idx;
167 168
168 /* Extra fragments (in addition to the first fragment 169 /* Extra fragments (in addition to the first fragment
169 * in skb) */ 170 * in skb) */
@@ -202,32 +203,16 @@ struct ieee80211_rx_data {
202 unsigned int flags; 203 unsigned int flags;
203 int sent_ps_buffered; 204 int sent_ps_buffered;
204 int queue; 205 int queue;
205 int load;
206 u32 tkip_iv32; 206 u32 tkip_iv32;
207 u16 tkip_iv16; 207 u16 tkip_iv16;
208}; 208};
209 209
210/* flags used in struct ieee80211_tx_packet_data.flags */
211#define IEEE80211_TXPD_REQ_TX_STATUS BIT(0)
212#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1)
213#define IEEE80211_TXPD_REQUEUE BIT(2)
214#define IEEE80211_TXPD_EAPOL_FRAME BIT(3)
215#define IEEE80211_TXPD_AMPDU BIT(4)
216/* Stored in sk_buff->cb */
217struct ieee80211_tx_packet_data {
218 int ifindex;
219 unsigned long jiffies;
220 unsigned int flags;
221 u8 queue;
222};
223
224struct ieee80211_tx_stored_packet { 210struct ieee80211_tx_stored_packet {
225 struct ieee80211_tx_control control;
226 struct sk_buff *skb; 211 struct sk_buff *skb;
227 struct sk_buff **extra_frag; 212 struct sk_buff **extra_frag;
228 struct ieee80211_rate *last_frag_rate; 213 s8 last_frag_rate_idx;
229 int num_extra_frag; 214 int num_extra_frag;
230 unsigned int last_frag_rate_ctrl_probe; 215 bool last_frag_rate_ctrl_probe;
231}; 216};
232 217
233struct beacon_data { 218struct beacon_data {
@@ -464,14 +449,11 @@ struct ieee80211_sub_if_data {
464 struct ieee80211_if_sta sta; 449 struct ieee80211_if_sta sta;
465 u32 mntr_flags; 450 u32 mntr_flags;
466 } u; 451 } u;
467 int channel_use;
468 int channel_use_raw;
469 452
470#ifdef CONFIG_MAC80211_DEBUGFS 453#ifdef CONFIG_MAC80211_DEBUGFS
471 struct dentry *debugfsdir; 454 struct dentry *debugfsdir;
472 union { 455 union {
473 struct { 456 struct {
474 struct dentry *channel_use;
475 struct dentry *drop_unencrypted; 457 struct dentry *drop_unencrypted;
476 struct dentry *state; 458 struct dentry *state;
477 struct dentry *bssid; 459 struct dentry *bssid;
@@ -490,7 +472,6 @@ struct ieee80211_sub_if_data {
490 struct dentry *num_beacons_sta; 472 struct dentry *num_beacons_sta;
491 } sta; 473 } sta;
492 struct { 474 struct {
493 struct dentry *channel_use;
494 struct dentry *drop_unencrypted; 475 struct dentry *drop_unencrypted;
495 struct dentry *num_sta_ps; 476 struct dentry *num_sta_ps;
496 struct dentry *dtim_count; 477 struct dentry *dtim_count;
@@ -500,12 +481,10 @@ struct ieee80211_sub_if_data {
500 struct dentry *num_buffered_multicast; 481 struct dentry *num_buffered_multicast;
501 } ap; 482 } ap;
502 struct { 483 struct {
503 struct dentry *channel_use;
504 struct dentry *drop_unencrypted; 484 struct dentry *drop_unencrypted;
505 struct dentry *peer; 485 struct dentry *peer;
506 } wds; 486 } wds;
507 struct { 487 struct {
508 struct dentry *channel_use;
509 struct dentry *drop_unencrypted; 488 struct dentry *drop_unencrypted;
510 } vlan; 489 } vlan;
511 struct { 490 struct {
@@ -610,8 +589,8 @@ struct ieee80211_local {
610 struct sta_info *sta_hash[STA_HASH_SIZE]; 589 struct sta_info *sta_hash[STA_HASH_SIZE];
611 struct timer_list sta_cleanup; 590 struct timer_list sta_cleanup;
612 591
613 unsigned long state[NUM_TX_DATA_QUEUES_AMPDU]; 592 unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
614 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU]; 593 struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
615 struct tasklet_struct tx_pending_tasklet; 594 struct tasklet_struct tx_pending_tasklet;
616 595
617 /* number of interfaces with corresponding IFF_ flags */ 596 /* number of interfaces with corresponding IFF_ flags */
@@ -677,9 +656,6 @@ struct ieee80211_local {
677 assoc_led_name[32], radio_led_name[32]; 656 assoc_led_name[32], radio_led_name[32];
678#endif 657#endif
679 658
680 u32 channel_use;
681 u32 channel_use_raw;
682
683#ifdef CONFIG_MAC80211_DEBUGFS 659#ifdef CONFIG_MAC80211_DEBUGFS
684 struct work_struct sta_debugfs_add; 660 struct work_struct sta_debugfs_add;
685#endif 661#endif
@@ -705,8 +681,6 @@ struct ieee80211_local {
705 unsigned int rx_expand_skb_head2; 681 unsigned int rx_expand_skb_head2;
706 unsigned int rx_handlers_fragments; 682 unsigned int rx_handlers_fragments;
707 unsigned int tx_status_drop; 683 unsigned int tx_status_drop;
708 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
709 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
710#define I802_DEBUG_INC(c) (c)++ 684#define I802_DEBUG_INC(c) (c)++
711#else /* CONFIG_MAC80211_DEBUG_COUNTERS */ 685#else /* CONFIG_MAC80211_DEBUG_COUNTERS */
712#define I802_DEBUG_INC(c) do { } while (0) 686#define I802_DEBUG_INC(c) do { } while (0)
@@ -764,8 +738,6 @@ struct ieee80211_local {
764 struct dentry *rx_expand_skb_head2; 738 struct dentry *rx_expand_skb_head2;
765 struct dentry *rx_handlers_fragments; 739 struct dentry *rx_handlers_fragments;
766 struct dentry *tx_status_drop; 740 struct dentry *tx_status_drop;
767 struct dentry *wme_tx_queue;
768 struct dentry *wme_rx_queue;
769#endif 741#endif
770 struct dentry *dot11ACKFailureCount; 742 struct dentry *dot11ACKFailureCount;
771 struct dentry *dot11RTSFailureCount; 743 struct dentry *dot11RTSFailureCount;
@@ -778,6 +750,15 @@ struct ieee80211_local {
778#endif 750#endif
779}; 751};
780 752
753static inline int ieee80211_is_multiqueue(struct ieee80211_local *local)
754{
755#ifdef CONFIG_MAC80211_QOS
756 return netif_is_multiqueue(local->mdev);
757#else
758 return 0;
759#endif
760}
761
781/* this struct represents 802.11n's RA/TID combination */ 762/* this struct represents 802.11n's RA/TID combination */
782struct ieee80211_ra_tid { 763struct ieee80211_ra_tid {
783 u8 ra[ETH_ALEN]; 764 u8 ra[ETH_ALEN];
@@ -847,11 +828,6 @@ static inline struct ieee80211_hw *local_to_hw(
847 return &local->hw; 828 return &local->hw;
848} 829}
849 830
850enum ieee80211_link_state_t {
851 IEEE80211_LINK_STATE_XOFF = 0,
852 IEEE80211_LINK_STATE_PENDING,
853};
854
855struct sta_attribute { 831struct sta_attribute {
856 struct attribute attr; 832 struct attribute attr;
857 ssize_t (*show)(const struct sta_info *, char *buf); 833 ssize_t (*show)(const struct sta_info *, char *buf);
@@ -877,29 +853,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
877 853
878/* ieee80211_ioctl.c */ 854/* ieee80211_ioctl.c */
879extern const struct iw_handler_def ieee80211_iw_handler_def; 855extern const struct iw_handler_def ieee80211_iw_handler_def;
880
881
882/* Least common multiple of the used rates (in 100 kbps). This is used to
883 * calculate rate_inv values for each rate so that only integers are needed. */
884#define CHAN_UTIL_RATE_LCM 95040
885/* 1 usec is 1/8 * (95040/10) = 1188 */
886#define CHAN_UTIL_PER_USEC 1188
887/* Amount of bits to shift the result right to scale the total utilization
888 * to values that will not wrap around 32-bit integers. */
889#define CHAN_UTIL_SHIFT 9
890/* Theoretical maximum of channel utilization counter in 10 ms (stat_time=1):
891 * (CHAN_UTIL_PER_USEC * 10000) >> CHAN_UTIL_SHIFT = 23203. So dividing the
892 * raw value with about 23 should give utilization in 10th of a percentage
893 * (1/1000). However, utilization is only estimated and not all intervals
894 * between frames etc. are calculated. 18 seems to give numbers that are closer
895 * to the real maximum. */
896#define CHAN_UTIL_PER_10MS 18
897#define CHAN_UTIL_HDR_LONG (202 * CHAN_UTIL_PER_USEC)
898#define CHAN_UTIL_HDR_SHORT (40 * CHAN_UTIL_PER_USEC)
899
900
901/* ieee80211_ioctl.c */
902int ieee80211_set_freq(struct net_device *dev, int freq); 856int ieee80211_set_freq(struct net_device *dev, int freq);
857
903/* ieee80211_sta.c */ 858/* ieee80211_sta.c */
904void ieee80211_sta_timer(unsigned long data); 859void ieee80211_sta_timer(unsigned long data);
905void ieee80211_sta_work(struct work_struct *work); 860void ieee80211_sta_work(struct work_struct *work);
@@ -919,9 +874,9 @@ ieee80211_rx_result ieee80211_sta_rx_scan(
919void ieee80211_rx_bss_list_init(struct net_device *dev); 874void ieee80211_rx_bss_list_init(struct net_device *dev);
920void ieee80211_rx_bss_list_deinit(struct net_device *dev); 875void ieee80211_rx_bss_list_deinit(struct net_device *dev);
921int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); 876int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
922struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, 877struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
923 struct sk_buff *skb, u8 *bssid, 878 struct sk_buff *skb, u8 *bssid,
924 u8 *addr); 879 u8 *addr);
925int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); 880int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason);
926int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); 881int ieee80211_sta_disassociate(struct net_device *dev, u16 reason);
927void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 882void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
@@ -940,7 +895,6 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
940 895
941void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, 896void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
942 u16 tid, u16 initiator, u16 reason); 897 u16 tid, u16 initiator, u16 reason);
943void sta_rx_agg_session_timer_expired(unsigned long data);
944void sta_addba_resp_timer_expired(unsigned long data); 898void sta_addba_resp_timer_expired(unsigned long data);
945void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); 899void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
946u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 900u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06e88a5a036d..984472702381 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -33,9 +33,8 @@ static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata)
33{ 33{
34 int i; 34 int i;
35 35
36 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 36 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
37 __skb_queue_purge(&sdata->fragments[i].skb_list); 37 __skb_queue_purge(&sdata->fragments[i].skb_list);
38 }
39} 38}
40 39
41/* Must be called with rtnl lock held. */ 40/* Must be called with rtnl lock held. */
@@ -167,9 +166,10 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
167 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | 166 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
168 IEEE80211_AUTH_ALG_SHARED_KEY; 167 IEEE80211_AUTH_ALG_SHARED_KEY;
169 ifsta->flags |= IEEE80211_STA_CREATE_IBSS | 168 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
170 IEEE80211_STA_WMM_ENABLED |
171 IEEE80211_STA_AUTO_BSSID_SEL | 169 IEEE80211_STA_AUTO_BSSID_SEL |
172 IEEE80211_STA_AUTO_CHANNEL_SEL; 170 IEEE80211_STA_AUTO_CHANNEL_SEL;
171 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
172 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
173 173
174 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); 174 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev);
175 sdata->bss = &msdata->u.ap; 175 sdata->bss = &msdata->u.ap;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 150d66dbda9d..d4893bd17754 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -321,8 +321,15 @@ void ieee80211_key_link(struct ieee80211_key *key,
321 * some hardware cannot handle TKIP with QoS, so 321 * some hardware cannot handle TKIP with QoS, so
322 * we indicate whether QoS could be in use. 322 * we indicate whether QoS could be in use.
323 */ 323 */
324 if (sta->flags & WLAN_STA_WME) 324 if (test_sta_flags(sta, WLAN_STA_WME))
325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; 325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
326
327 /*
328 * This key is for a specific sta interface,
329 * inform the driver that it should try to store
330 * this key as pairwise key.
331 */
332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
326 } else { 333 } else {
327 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 334 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
328 struct sta_info *ap; 335 struct sta_info *ap;
@@ -335,7 +342,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
335 /* same here, the AP could be using QoS */ 342 /* same here, the AP could be using QoS */
336 ap = sta_info_get(key->local, key->sdata->u.sta.bssid); 343 ap = sta_info_get(key->local, key->sdata->u.sta.bssid);
337 if (ap) { 344 if (ap) {
338 if (ap->flags & WLAN_STA_WME) 345 if (test_sta_flags(ap, WLAN_STA_WME))
339 key->conf.flags |= 346 key->conf.flags |=
340 IEEE80211_KEY_FLAG_WMM_STA; 347 IEEE80211_KEY_FLAG_WMM_STA;
341 } 348 }
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index f52c3df1fe9a..a0f774aafa45 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -69,6 +69,13 @@ enum ieee80211_internal_key_flags {
69 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), 69 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
70}; 70};
71 71
72struct tkip_ctx {
73 u32 iv32;
74 u16 iv16;
75 u16 p1k[5];
76 int initialized;
77};
78
72struct ieee80211_key { 79struct ieee80211_key {
73 struct ieee80211_local *local; 80 struct ieee80211_local *local;
74 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata;
@@ -85,16 +92,10 @@ struct ieee80211_key {
85 union { 92 union {
86 struct { 93 struct {
87 /* last used TSC */ 94 /* last used TSC */
88 u32 iv32; 95 struct tkip_ctx tx;
89 u16 iv16;
90 u16 p1k[5];
91 int tx_initialized;
92 96
93 /* last received RSC */ 97 /* last received RSC */
94 u32 iv32_rx[NUM_RX_DATA_QUEUES]; 98 struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
95 u16 iv16_rx[NUM_RX_DATA_QUEUES];
96 u16 p1k_rx[NUM_RX_DATA_QUEUES][5];
97 int rx_initialized[NUM_RX_DATA_QUEUES];
98 } tkip; 99 } tkip;
99 struct { 100 struct {
100 u8 tx_pn[6]; 101 u8 tx_pn[6];
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 98c0b5e56ecc..b182f018a187 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -35,8 +35,6 @@
35#include "debugfs.h" 35#include "debugfs.h"
36#include "debugfs_netdev.h" 36#include "debugfs_netdev.h"
37 37
38#define SUPP_MCS_SET_LEN 16
39
40/* 38/*
41 * For seeing transmitted packets on monitor interfaces 39 * For seeing transmitted packets on monitor interfaces
42 * we have a radiotap header too. 40 * we have a radiotap header too.
@@ -112,7 +110,13 @@ static int ieee80211_master_open(struct net_device *dev)
112 break; 110 break;
113 } 111 }
114 } 112 }
115 return res; 113
114 if (res)
115 return res;
116
117 netif_start_queue(local->mdev);
118
119 return 0;
116} 120}
117 121
118static int ieee80211_master_stop(struct net_device *dev) 122static int ieee80211_master_stop(struct net_device *dev)
@@ -346,6 +350,7 @@ static int ieee80211_open(struct net_device *dev)
346 goto err_del_interface; 350 goto err_del_interface;
347 } 351 }
348 352
353 /* no locking required since STA is not live yet */
349 sta->flags |= WLAN_STA_AUTHORIZED; 354 sta->flags |= WLAN_STA_AUTHORIZED;
350 355
351 res = sta_info_insert(sta); 356 res = sta_info_insert(sta);
@@ -385,8 +390,8 @@ static int ieee80211_open(struct net_device *dev)
385 * yet be effective. Trigger execution of ieee80211_sta_work 390 * yet be effective. Trigger execution of ieee80211_sta_work
386 * to fix this. 391 * to fix this.
387 */ 392 */
388 if(sdata->vif.type == IEEE80211_IF_TYPE_STA || 393 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
389 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 394 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
390 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 395 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
391 queue_work(local->hw.workqueue, &ifsta->work); 396 queue_work(local->hw.workqueue, &ifsta->work);
392 } 397 }
@@ -585,16 +590,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
585 sta = sta_info_get(local, ra); 590 sta = sta_info_get(local, ra);
586 if (!sta) { 591 if (!sta) {
587 printk(KERN_DEBUG "Could not find the station\n"); 592 printk(KERN_DEBUG "Could not find the station\n");
588 rcu_read_unlock(); 593 ret = -ENOENT;
589 return -ENOENT; 594 goto exit;
590 } 595 }
591 596
592 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 597 spin_lock_bh(&sta->lock);
593 598
594 /* we have tried too many times, receiver does not want A-MPDU */ 599 /* we have tried too many times, receiver does not want A-MPDU */
595 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { 600 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
596 ret = -EBUSY; 601 ret = -EBUSY;
597 goto start_ba_exit; 602 goto err_unlock_sta;
598 } 603 }
599 604
600 state = &sta->ampdu_mlme.tid_state_tx[tid]; 605 state = &sta->ampdu_mlme.tid_state_tx[tid];
@@ -605,7 +610,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
605 "idle on tid %u\n", tid); 610 "idle on tid %u\n", tid);
606#endif /* CONFIG_MAC80211_HT_DEBUG */ 611#endif /* CONFIG_MAC80211_HT_DEBUG */
607 ret = -EAGAIN; 612 ret = -EAGAIN;
608 goto start_ba_exit; 613 goto err_unlock_sta;
609 } 614 }
610 615
611 /* prepare A-MPDU MLME for Tx aggregation */ 616 /* prepare A-MPDU MLME for Tx aggregation */
@@ -616,7 +621,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
616 printk(KERN_ERR "allocate tx mlme to tid %d failed\n", 621 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
617 tid); 622 tid);
618 ret = -ENOMEM; 623 ret = -ENOMEM;
619 goto start_ba_exit; 624 goto err_unlock_sta;
620 } 625 }
621 /* Tx timer */ 626 /* Tx timer */
622 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 627 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
@@ -639,7 +644,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
639 printk(KERN_DEBUG "BA request denied - queue unavailable for" 644 printk(KERN_DEBUG "BA request denied - queue unavailable for"
640 " tid %d\n", tid); 645 " tid %d\n", tid);
641#endif /* CONFIG_MAC80211_HT_DEBUG */ 646#endif /* CONFIG_MAC80211_HT_DEBUG */
642 goto start_ba_err; 647 goto err_unlock_queue;
643 } 648 }
644 sdata = sta->sdata; 649 sdata = sta->sdata;
645 650
@@ -661,12 +666,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
661 " tid %d\n", tid); 666 " tid %d\n", tid);
662#endif /* CONFIG_MAC80211_HT_DEBUG */ 667#endif /* CONFIG_MAC80211_HT_DEBUG */
663 *state = HT_AGG_STATE_IDLE; 668 *state = HT_AGG_STATE_IDLE;
664 goto start_ba_err; 669 goto err_unlock_queue;
665 } 670 }
666 671
667 /* Will put all the packets in the new SW queue */ 672 /* Will put all the packets in the new SW queue */
668 ieee80211_requeue(local, ieee802_1d_to_ac[tid]); 673 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
669 spin_unlock_bh(&local->mdev->queue_lock); 674 spin_unlock_bh(&local->mdev->queue_lock);
675 spin_unlock_bh(&sta->lock);
670 676
671 /* send an addBA request */ 677 /* send an addBA request */
672 sta->ampdu_mlme.dialog_token_allocator++; 678 sta->ampdu_mlme.dialog_token_allocator++;
@@ -674,25 +680,26 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
674 sta->ampdu_mlme.dialog_token_allocator; 680 sta->ampdu_mlme.dialog_token_allocator;
675 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 681 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
676 682
683
677 ieee80211_send_addba_request(sta->sdata->dev, ra, tid, 684 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
678 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 685 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
679 sta->ampdu_mlme.tid_tx[tid]->ssn, 686 sta->ampdu_mlme.tid_tx[tid]->ssn,
680 0x40, 5000); 687 0x40, 5000);
681
682 /* activate the timer for the recipient's addBA response */ 688 /* activate the timer for the recipient's addBA response */
683 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = 689 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
684 jiffies + ADDBA_RESP_INTERVAL; 690 jiffies + ADDBA_RESP_INTERVAL;
685 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 691 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
686 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); 692 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
687 goto start_ba_exit; 693 goto exit;
688 694
689start_ba_err: 695err_unlock_queue:
690 kfree(sta->ampdu_mlme.tid_tx[tid]); 696 kfree(sta->ampdu_mlme.tid_tx[tid]);
691 sta->ampdu_mlme.tid_tx[tid] = NULL; 697 sta->ampdu_mlme.tid_tx[tid] = NULL;
692 spin_unlock_bh(&local->mdev->queue_lock); 698 spin_unlock_bh(&local->mdev->queue_lock);
693 ret = -EBUSY; 699 ret = -EBUSY;
694start_ba_exit: 700err_unlock_sta:
695 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 701 spin_unlock_bh(&sta->lock);
702exit:
696 rcu_read_unlock(); 703 rcu_read_unlock();
697 return ret; 704 return ret;
698} 705}
@@ -720,7 +727,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
720 727
721 /* check if the TID is in aggregation */ 728 /* check if the TID is in aggregation */
722 state = &sta->ampdu_mlme.tid_state_tx[tid]; 729 state = &sta->ampdu_mlme.tid_state_tx[tid];
723 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 730 spin_lock_bh(&sta->lock);
724 731
725 if (*state != HT_AGG_STATE_OPERATIONAL) { 732 if (*state != HT_AGG_STATE_OPERATIONAL) {
726 ret = -ENOENT; 733 ret = -ENOENT;
@@ -750,7 +757,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
750 } 757 }
751 758
752stop_BA_exit: 759stop_BA_exit:
753 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 760 spin_unlock_bh(&sta->lock);
754 rcu_read_unlock(); 761 rcu_read_unlock();
755 return ret; 762 return ret;
756} 763}
@@ -779,12 +786,12 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
779 } 786 }
780 787
781 state = &sta->ampdu_mlme.tid_state_tx[tid]; 788 state = &sta->ampdu_mlme.tid_state_tx[tid];
782 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 789 spin_lock_bh(&sta->lock);
783 790
784 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 791 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
785 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", 792 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
786 *state); 793 *state);
787 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 794 spin_unlock_bh(&sta->lock);
788 rcu_read_unlock(); 795 rcu_read_unlock();
789 return; 796 return;
790 } 797 }
@@ -797,7 +804,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
797 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); 804 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
798 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 805 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
799 } 806 }
800 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 807 spin_unlock_bh(&sta->lock);
801 rcu_read_unlock(); 808 rcu_read_unlock();
802} 809}
803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); 810EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
@@ -831,10 +838,11 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
831 } 838 }
832 state = &sta->ampdu_mlme.tid_state_tx[tid]; 839 state = &sta->ampdu_mlme.tid_state_tx[tid];
833 840
834 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 841 /* NOTE: no need to use sta->lock in this state check, as
842 * ieee80211_stop_tx_ba_session will let only
843 * one stop call to pass through per sta/tid */
835 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { 844 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
836 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); 845 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
837 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
838 rcu_read_unlock(); 846 rcu_read_unlock();
839 return; 847 return;
840 } 848 }
@@ -857,11 +865,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
857 * ieee80211_wake_queue is not used here as this queue is not 865 * ieee80211_wake_queue is not used here as this queue is not
858 * necessarily stopped */ 866 * necessarily stopped */
859 netif_schedule(local->mdev); 867 netif_schedule(local->mdev);
868 spin_lock_bh(&sta->lock);
860 *state = HT_AGG_STATE_IDLE; 869 *state = HT_AGG_STATE_IDLE;
861 sta->ampdu_mlme.addba_req_num[tid] = 0; 870 sta->ampdu_mlme.addba_req_num[tid] = 0;
862 kfree(sta->ampdu_mlme.tid_tx[tid]); 871 kfree(sta->ampdu_mlme.tid_tx[tid]);
863 sta->ampdu_mlme.tid_tx[tid] = NULL; 872 sta->ampdu_mlme.tid_tx[tid] = NULL;
864 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 873 spin_unlock_bh(&sta->lock);
865 874
866 rcu_read_unlock(); 875 rcu_read_unlock();
867} 876}
@@ -967,8 +976,7 @@ void ieee80211_if_setup(struct net_device *dev)
967/* everything else */ 976/* everything else */
968 977
969static int __ieee80211_if_config(struct net_device *dev, 978static int __ieee80211_if_config(struct net_device *dev,
970 struct sk_buff *beacon, 979 struct sk_buff *beacon)
971 struct ieee80211_tx_control *control)
972{ 980{
973 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 981 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
974 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 982 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -986,13 +994,11 @@ static int __ieee80211_if_config(struct net_device *dev,
986 conf.ssid_len = sdata->u.sta.ssid_len; 994 conf.ssid_len = sdata->u.sta.ssid_len;
987 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 995 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
988 conf.beacon = beacon; 996 conf.beacon = beacon;
989 conf.beacon_control = control;
990 ieee80211_start_mesh(dev); 997 ieee80211_start_mesh(dev);
991 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 998 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
992 conf.ssid = sdata->u.ap.ssid; 999 conf.ssid = sdata->u.ap.ssid;
993 conf.ssid_len = sdata->u.ap.ssid_len; 1000 conf.ssid_len = sdata->u.ap.ssid_len;
994 conf.beacon = beacon; 1001 conf.beacon = beacon;
995 conf.beacon_control = control;
996 } 1002 }
997 return local->ops->config_interface(local_to_hw(local), 1003 return local->ops->config_interface(local_to_hw(local),
998 &sdata->vif, &conf); 1004 &sdata->vif, &conf);
@@ -1005,23 +1011,21 @@ int ieee80211_if_config(struct net_device *dev)
1005 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT && 1011 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
1006 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) 1012 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1007 return ieee80211_if_config_beacon(dev); 1013 return ieee80211_if_config_beacon(dev);
1008 return __ieee80211_if_config(dev, NULL, NULL); 1014 return __ieee80211_if_config(dev, NULL);
1009} 1015}
1010 1016
1011int ieee80211_if_config_beacon(struct net_device *dev) 1017int ieee80211_if_config_beacon(struct net_device *dev)
1012{ 1018{
1013 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1019 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1014 struct ieee80211_tx_control control;
1015 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1020 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1016 struct sk_buff *skb; 1021 struct sk_buff *skb;
1017 1022
1018 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) 1023 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1019 return 0; 1024 return 0;
1020 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif, 1025 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
1021 &control);
1022 if (!skb) 1026 if (!skb)
1023 return -ENOMEM; 1027 return -ENOMEM;
1024 return __ieee80211_if_config(dev, skb, &control); 1028 return __ieee80211_if_config(dev, skb);
1025} 1029}
1026 1030
1027int ieee80211_hw_config(struct ieee80211_local *local) 1031int ieee80211_hw_config(struct ieee80211_local *local)
@@ -1068,56 +1072,84 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1068 struct ieee80211_supported_band *sband; 1072 struct ieee80211_supported_band *sband;
1069 struct ieee80211_ht_info ht_conf; 1073 struct ieee80211_ht_info ht_conf;
1070 struct ieee80211_ht_bss_info ht_bss_conf; 1074 struct ieee80211_ht_bss_info ht_bss_conf;
1071 int i;
1072 u32 changed = 0; 1075 u32 changed = 0;
1076 int i;
1077 u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
1078 u8 tx_mcs_set_cap;
1073 1079
1074 sband = local->hw.wiphy->bands[conf->channel->band]; 1080 sband = local->hw.wiphy->bands[conf->channel->band];
1075 1081
1082 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
1083 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
1084
1076 /* HT is not supported */ 1085 /* HT is not supported */
1077 if (!sband->ht_info.ht_supported) { 1086 if (!sband->ht_info.ht_supported) {
1078 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1087 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1079 return 0; 1088 goto out;
1080 } 1089 }
1081 1090
1082 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info)); 1091 /* disable HT */
1083 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info)); 1092 if (!enable_ht) {
1084 1093 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1085 if (enable_ht) {
1086 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1087 changed |= BSS_CHANGED_HT; 1094 changed |= BSS_CHANGED_HT;
1095 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1096 conf->ht_conf.ht_supported = 0;
1097 goto out;
1098 }
1088 1099
1089 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1090 ht_conf.ht_supported = 1;
1091 1100
1092 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; 1101 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1093 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 1102 changed |= BSS_CHANGED_HT;
1094 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1095 1103
1096 for (i = 0; i < SUPP_MCS_SET_LEN; i++) 1104 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1097 ht_conf.supp_mcs_set[i] = 1105 ht_conf.ht_supported = 1;
1098 sband->ht_info.supp_mcs_set[i] &
1099 req_ht_cap->supp_mcs_set[i];
1100 1106
1101 ht_bss_conf.primary_channel = req_bss_cap->primary_channel; 1107 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1102 ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 1108 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
1103 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 1109 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1110 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1111 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1112 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
1104 1113
1105 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; 1114 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1106 ht_conf.ampdu_density = req_ht_cap->ampdu_density; 1115 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1107 1116
1108 /* if bss configuration changed store the new one */ 1117 /* Bits 96-100 */
1109 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) || 1118 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
1110 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) { 1119
1111 changed |= BSS_CHANGED_HT; 1120 /* configure suppoerted Tx MCS according to requested MCS
1112 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf)); 1121 * (based in most cases on Rx capabilities of peer) and self
1113 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf)); 1122 * Tx MCS capabilities (as defined by low level driver HW
1114 } 1123 * Tx capabilities) */
1115 } else { 1124 if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
1116 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) 1125 goto check_changed;
1117 changed |= BSS_CHANGED_HT;
1118 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1119 }
1120 1126
1127 /* Counting from 0 therfore + 1 */
1128 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
1129 max_tx_streams = ((tx_mcs_set_cap &
1130 IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
1131
1132 for (i = 0; i < max_tx_streams; i++)
1133 ht_conf.supp_mcs_set[i] =
1134 sband->ht_info.supp_mcs_set[i] &
1135 req_ht_cap->supp_mcs_set[i];
1136
1137 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
1138 for (i = IEEE80211_SUPP_MCS_SET_UEQM;
1139 i < IEEE80211_SUPP_MCS_SET_LEN; i++)
1140 ht_conf.supp_mcs_set[i] =
1141 sband->ht_info.supp_mcs_set[i] &
1142 req_ht_cap->supp_mcs_set[i];
1143
1144check_changed:
1145 /* if bss configuration changed store the new one */
1146 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1147 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1148 changed |= BSS_CHANGED_HT;
1149 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1150 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1151 }
1152out:
1121 return changed; 1153 return changed;
1122} 1154}
1123 1155
@@ -1148,38 +1180,20 @@ void ieee80211_reset_erp_info(struct net_device *dev)
1148} 1180}
1149 1181
1150void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 1182void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1151 struct sk_buff *skb, 1183 struct sk_buff *skb)
1152 struct ieee80211_tx_status *status)
1153{ 1184{
1154 struct ieee80211_local *local = hw_to_local(hw); 1185 struct ieee80211_local *local = hw_to_local(hw);
1155 struct ieee80211_tx_status *saved; 1186 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1156 int tmp; 1187 int tmp;
1157 1188
1158 skb->dev = local->mdev; 1189 skb->dev = local->mdev;
1159 saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC);
1160 if (unlikely(!saved)) {
1161 if (net_ratelimit())
1162 printk(KERN_WARNING "%s: Not enough memory, "
1163 "dropping tx status", skb->dev->name);
1164 /* should be dev_kfree_skb_irq, but due to this function being
1165 * named _irqsafe instead of just _irq we can't be sure that
1166 * people won't call it from non-irq contexts */
1167 dev_kfree_skb_any(skb);
1168 return;
1169 }
1170 memcpy(saved, status, sizeof(struct ieee80211_tx_status));
1171 /* copy pointer to saved status into skb->cb for use by tasklet */
1172 memcpy(skb->cb, &saved, sizeof(saved));
1173
1174 skb->pkt_type = IEEE80211_TX_STATUS_MSG; 1190 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
1175 skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ? 1191 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
1176 &local->skb_queue : &local->skb_queue_unreliable, skb); 1192 &local->skb_queue : &local->skb_queue_unreliable, skb);
1177 tmp = skb_queue_len(&local->skb_queue) + 1193 tmp = skb_queue_len(&local->skb_queue) +
1178 skb_queue_len(&local->skb_queue_unreliable); 1194 skb_queue_len(&local->skb_queue_unreliable);
1179 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && 1195 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
1180 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 1196 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1181 memcpy(&saved, skb->cb, sizeof(saved));
1182 kfree(saved);
1183 dev_kfree_skb_irq(skb); 1197 dev_kfree_skb_irq(skb);
1184 tmp--; 1198 tmp--;
1185 I802_DEBUG_INC(local->tx_status_drop); 1199 I802_DEBUG_INC(local->tx_status_drop);
@@ -1193,7 +1207,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
1193 struct ieee80211_local *local = (struct ieee80211_local *) data; 1207 struct ieee80211_local *local = (struct ieee80211_local *) data;
1194 struct sk_buff *skb; 1208 struct sk_buff *skb;
1195 struct ieee80211_rx_status rx_status; 1209 struct ieee80211_rx_status rx_status;
1196 struct ieee80211_tx_status *tx_status;
1197 struct ieee80211_ra_tid *ra_tid; 1210 struct ieee80211_ra_tid *ra_tid;
1198 1211
1199 while ((skb = skb_dequeue(&local->skb_queue)) || 1212 while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -1208,12 +1221,8 @@ static void ieee80211_tasklet_handler(unsigned long data)
1208 __ieee80211_rx(local_to_hw(local), skb, &rx_status); 1221 __ieee80211_rx(local_to_hw(local), skb, &rx_status);
1209 break; 1222 break;
1210 case IEEE80211_TX_STATUS_MSG: 1223 case IEEE80211_TX_STATUS_MSG:
1211 /* get pointer to saved status out of skb->cb */
1212 memcpy(&tx_status, skb->cb, sizeof(tx_status));
1213 skb->pkt_type = 0; 1224 skb->pkt_type = 0;
1214 ieee80211_tx_status(local_to_hw(local), 1225 ieee80211_tx_status(local_to_hw(local), skb);
1215 skb, tx_status);
1216 kfree(tx_status);
1217 break; 1226 break;
1218 case IEEE80211_DELBA_MSG: 1227 case IEEE80211_DELBA_MSG:
1219 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 1228 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -1242,24 +1251,15 @@ static void ieee80211_tasklet_handler(unsigned long data)
1242 * Also, tx_packet_data in cb is restored from tx_control. */ 1251 * Also, tx_packet_data in cb is restored from tx_control. */
1243static void ieee80211_remove_tx_extra(struct ieee80211_local *local, 1252static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 1253 struct ieee80211_key *key,
1245 struct sk_buff *skb, 1254 struct sk_buff *skb)
1246 struct ieee80211_tx_control *control)
1247{ 1255{
1248 int hdrlen, iv_len, mic_len; 1256 int hdrlen, iv_len, mic_len;
1249 struct ieee80211_tx_packet_data *pkt_data; 1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1250 1258
1251 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1259 info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
1252 pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex; 1260 IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
1253 pkt_data->flags = 0; 1261 IEEE80211_TX_CTL_REQUEUE |
1254 if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS) 1262 IEEE80211_TX_CTL_EAPOL_FRAME;
1255 pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
1256 if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)
1257 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
1258 if (control->flags & IEEE80211_TXCTL_REQUEUE)
1259 pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
1260 if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME)
1261 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME;
1262 pkt_data->queue = control->queue;
1263 1263
1264 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1264 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1265 1265
@@ -1306,9 +1306,10 @@ no_key:
1306 1306
1307static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, 1307static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1308 struct sta_info *sta, 1308 struct sta_info *sta,
1309 struct sk_buff *skb, 1309 struct sk_buff *skb)
1310 struct ieee80211_tx_status *status)
1311{ 1310{
1311 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1312
1312 sta->tx_filtered_count++; 1313 sta->tx_filtered_count++;
1313 1314
1314 /* 1315 /*
@@ -1316,7 +1317,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1316 * packet. If the STA went to power save mode, this will happen 1317 * packet. If the STA went to power save mode, this will happen
1317 * when it wakes up for the next time. 1318 * when it wakes up for the next time.
1318 */ 1319 */
1319 sta->flags |= WLAN_STA_CLEAR_PS_FILT; 1320 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
1320 1321
1321 /* 1322 /*
1322 * This code races in the following way: 1323 * This code races in the following way:
@@ -1348,20 +1349,18 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1348 * can be unknown, for example with different interrupt status 1349 * can be unknown, for example with different interrupt status
1349 * bits. 1350 * bits.
1350 */ 1351 */
1351 if (sta->flags & WLAN_STA_PS && 1352 if (test_sta_flags(sta, WLAN_STA_PS) &&
1352 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 1353 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1353 ieee80211_remove_tx_extra(local, sta->key, skb, 1354 ieee80211_remove_tx_extra(local, sta->key, skb);
1354 &status->control);
1355 skb_queue_tail(&sta->tx_filtered, skb); 1355 skb_queue_tail(&sta->tx_filtered, skb);
1356 return; 1356 return;
1357 } 1357 }
1358 1358
1359 if (!(sta->flags & WLAN_STA_PS) && 1359 if (!test_sta_flags(sta, WLAN_STA_PS) &&
1360 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { 1360 !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
1361 /* Software retry the packet once */ 1361 /* Software retry the packet once */
1362 status->control.flags |= IEEE80211_TXCTL_REQUEUE; 1362 info->flags |= IEEE80211_TX_CTL_REQUEUE;
1363 ieee80211_remove_tx_extra(local, sta->key, skb, 1363 ieee80211_remove_tx_extra(local, sta->key, skb);
1364 &status->control);
1365 dev_queue_xmit(skb); 1364 dev_queue_xmit(skb);
1366 return; 1365 return;
1367 } 1366 }
@@ -1371,61 +1370,49 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1371 "queue_len=%d PS=%d @%lu\n", 1370 "queue_len=%d PS=%d @%lu\n",
1372 wiphy_name(local->hw.wiphy), 1371 wiphy_name(local->hw.wiphy),
1373 skb_queue_len(&sta->tx_filtered), 1372 skb_queue_len(&sta->tx_filtered),
1374 !!(sta->flags & WLAN_STA_PS), jiffies); 1373 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
1375 dev_kfree_skb(skb); 1374 dev_kfree_skb(skb);
1376} 1375}
1377 1376
1378void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 1377void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1379 struct ieee80211_tx_status *status)
1380{ 1378{
1381 struct sk_buff *skb2; 1379 struct sk_buff *skb2;
1382 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1383 struct ieee80211_local *local = hw_to_local(hw); 1381 struct ieee80211_local *local = hw_to_local(hw);
1382 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1384 u16 frag, type; 1383 u16 frag, type;
1385 struct ieee80211_tx_status_rtap_hdr *rthdr; 1384 struct ieee80211_tx_status_rtap_hdr *rthdr;
1386 struct ieee80211_sub_if_data *sdata; 1385 struct ieee80211_sub_if_data *sdata;
1387 struct net_device *prev_dev = NULL; 1386 struct net_device *prev_dev = NULL;
1388 1387
1389 if (!status) {
1390 printk(KERN_ERR
1391 "%s: ieee80211_tx_status called with NULL status\n",
1392 wiphy_name(local->hw.wiphy));
1393 dev_kfree_skb(skb);
1394 return;
1395 }
1396
1397 rcu_read_lock(); 1388 rcu_read_lock();
1398 1389
1399 if (status->excessive_retries) { 1390 if (info->status.excessive_retries) {
1400 struct sta_info *sta; 1391 struct sta_info *sta;
1401 sta = sta_info_get(local, hdr->addr1); 1392 sta = sta_info_get(local, hdr->addr1);
1402 if (sta) { 1393 if (sta) {
1403 if (sta->flags & WLAN_STA_PS) { 1394 if (test_sta_flags(sta, WLAN_STA_PS)) {
1404 /* 1395 /*
1405 * The STA is in power save mode, so assume 1396 * The STA is in power save mode, so assume
1406 * that this TX packet failed because of that. 1397 * that this TX packet failed because of that.
1407 */ 1398 */
1408 status->excessive_retries = 0; 1399 ieee80211_handle_filtered_frame(local, sta, skb);
1409 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1410 ieee80211_handle_filtered_frame(local, sta,
1411 skb, status);
1412 rcu_read_unlock(); 1400 rcu_read_unlock();
1413 return; 1401 return;
1414 } 1402 }
1415 } 1403 }
1416 } 1404 }
1417 1405
1418 if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) { 1406 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1419 struct sta_info *sta; 1407 struct sta_info *sta;
1420 sta = sta_info_get(local, hdr->addr1); 1408 sta = sta_info_get(local, hdr->addr1);
1421 if (sta) { 1409 if (sta) {
1422 ieee80211_handle_filtered_frame(local, sta, skb, 1410 ieee80211_handle_filtered_frame(local, sta, skb);
1423 status);
1424 rcu_read_unlock(); 1411 rcu_read_unlock();
1425 return; 1412 return;
1426 } 1413 }
1427 } else 1414 } else
1428 rate_control_tx_status(local->mdev, skb, status); 1415 rate_control_tx_status(local->mdev, skb);
1429 1416
1430 rcu_read_unlock(); 1417 rcu_read_unlock();
1431 1418
@@ -1439,14 +1426,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1439 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1426 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1440 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; 1427 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
1441 1428
1442 if (status->flags & IEEE80211_TX_STATUS_ACK) { 1429 if (info->flags & IEEE80211_TX_STAT_ACK) {
1443 if (frag == 0) { 1430 if (frag == 0) {
1444 local->dot11TransmittedFrameCount++; 1431 local->dot11TransmittedFrameCount++;
1445 if (is_multicast_ether_addr(hdr->addr1)) 1432 if (is_multicast_ether_addr(hdr->addr1))
1446 local->dot11MulticastTransmittedFrameCount++; 1433 local->dot11MulticastTransmittedFrameCount++;
1447 if (status->retry_count > 0) 1434 if (info->status.retry_count > 0)
1448 local->dot11RetryCount++; 1435 local->dot11RetryCount++;
1449 if (status->retry_count > 1) 1436 if (info->status.retry_count > 1)
1450 local->dot11MultipleRetryCount++; 1437 local->dot11MultipleRetryCount++;
1451 } 1438 }
1452 1439
@@ -1483,7 +1470,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1483 return; 1470 return;
1484 } 1471 }
1485 1472
1486 rthdr = (struct ieee80211_tx_status_rtap_hdr*) 1473 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
1487 skb_push(skb, sizeof(*rthdr)); 1474 skb_push(skb, sizeof(*rthdr));
1488 1475
1489 memset(rthdr, 0, sizeof(*rthdr)); 1476 memset(rthdr, 0, sizeof(*rthdr));
@@ -1492,17 +1479,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1492 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | 1479 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
1493 (1 << IEEE80211_RADIOTAP_DATA_RETRIES)); 1480 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
1494 1481
1495 if (!(status->flags & IEEE80211_TX_STATUS_ACK) && 1482 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
1496 !is_multicast_ether_addr(hdr->addr1)) 1483 !is_multicast_ether_addr(hdr->addr1))
1497 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); 1484 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
1498 1485
1499 if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) && 1486 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
1500 (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) 1487 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
1501 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); 1488 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
1502 else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) 1489 else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
1503 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); 1490 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
1504 1491
1505 rthdr->data_retries = status->retry_count; 1492 rthdr->data_retries = info->status.retry_count;
1506 1493
1507 /* XXX: is this sufficient for BPF? */ 1494 /* XXX: is this sufficient for BPF? */
1508 skb_set_mac_header(skb, 0); 1495 skb_set_mac_header(skb, 0);
@@ -1652,12 +1639,32 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1652 if (result < 0) 1639 if (result < 0)
1653 return result; 1640 return result;
1654 1641
1642 /*
1643 * We use the number of queues for feature tests (QoS, HT) internally
1644 * so restrict them appropriately.
1645 */
1646#ifdef CONFIG_MAC80211_QOS
1647 if (hw->queues > IEEE80211_MAX_QUEUES)
1648 hw->queues = IEEE80211_MAX_QUEUES;
1649 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
1650 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
1651 if (hw->queues < 4)
1652 hw->ampdu_queues = 0;
1653#else
1654 hw->queues = 1;
1655 hw->ampdu_queues = 0;
1656#endif
1657
1655 /* for now, mdev needs sub_if_data :/ */ 1658 /* for now, mdev needs sub_if_data :/ */
1656 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), 1659 mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
1657 "wmaster%d", ether_setup); 1660 "wmaster%d", ether_setup,
1661 ieee80211_num_queues(hw));
1658 if (!mdev) 1662 if (!mdev)
1659 goto fail_mdev_alloc; 1663 goto fail_mdev_alloc;
1660 1664
1665 if (ieee80211_num_queues(hw) > 1)
1666 mdev->features |= NETIF_F_MULTI_QUEUE;
1667
1661 sdata = IEEE80211_DEV_TO_SUB_IF(mdev); 1668 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1662 mdev->ieee80211_ptr = &sdata->wdev; 1669 mdev->ieee80211_ptr = &sdata->wdev;
1663 sdata->wdev.wiphy = local->hw.wiphy; 1670 sdata->wdev.wiphy = local->hw.wiphy;
@@ -1702,13 +1709,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1702 1709
1703 local->hw.conf.beacon_int = 1000; 1710 local->hw.conf.beacon_int = 1000;
1704 1711
1705 local->wstats_flags |= local->hw.max_rssi ? 1712 local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
1706 IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID; 1713 IEEE80211_HW_SIGNAL_DB |
1707 local->wstats_flags |= local->hw.max_signal ? 1714 IEEE80211_HW_SIGNAL_DBM) ?
1708 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID; 1715 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
1709 local->wstats_flags |= local->hw.max_noise ? 1716 local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
1710 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID; 1717 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
1711 if (local->hw.max_rssi < 0 || local->hw.max_noise < 0) 1718 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
1712 local->wstats_flags |= IW_QUAL_DBM; 1719 local->wstats_flags |= IW_QUAL_DBM;
1713 1720
1714 result = sta_info_start(local); 1721 result = sta_info_start(local);
@@ -1858,7 +1865,9 @@ static int __init ieee80211_init(void)
1858 struct sk_buff *skb; 1865 struct sk_buff *skb;
1859 int ret; 1866 int ret;
1860 1867
1861 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); 1868 BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
1869 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
1870 IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
1862 1871
1863 ret = rc80211_pid_init(); 1872 ret = rc80211_pid_init();
1864 if (ret) 1873 if (ret)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 697ef67f96b6..b5933b271491 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -315,6 +315,13 @@ struct mesh_table *mesh_table_alloc(int size_order)
315 return newtbl; 315 return newtbl;
316} 316}
317 317
318static void __mesh_table_free(struct mesh_table *tbl)
319{
320 kfree(tbl->hash_buckets);
321 kfree(tbl->hashwlock);
322 kfree(tbl);
323}
324
318void mesh_table_free(struct mesh_table *tbl, bool free_leafs) 325void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
319{ 326{
320 struct hlist_head *mesh_hash; 327 struct hlist_head *mesh_hash;
@@ -330,9 +337,7 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
330 } 337 }
331 spin_unlock(&tbl->hashwlock[i]); 338 spin_unlock(&tbl->hashwlock[i]);
332 } 339 }
333 kfree(tbl->hash_buckets); 340 __mesh_table_free(tbl);
334 kfree(tbl->hashwlock);
335 kfree(tbl);
336} 341}
337 342
338static void ieee80211_mesh_path_timer(unsigned long data) 343static void ieee80211_mesh_path_timer(unsigned long data)
@@ -349,21 +354,16 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
349{ 354{
350 struct mesh_table *newtbl; 355 struct mesh_table *newtbl;
351 struct hlist_head *oldhash; 356 struct hlist_head *oldhash;
352 struct hlist_node *p; 357 struct hlist_node *p, *q;
353 int err = 0;
354 int i; 358 int i;
355 359
356 if (atomic_read(&tbl->entries) 360 if (atomic_read(&tbl->entries)
357 < tbl->mean_chain_len * (tbl->hash_mask + 1)) { 361 < tbl->mean_chain_len * (tbl->hash_mask + 1))
358 err = -EPERM;
359 goto endgrow; 362 goto endgrow;
360 }
361 363
362 newtbl = mesh_table_alloc(tbl->size_order + 1); 364 newtbl = mesh_table_alloc(tbl->size_order + 1);
363 if (!newtbl) { 365 if (!newtbl)
364 err = -ENOMEM;
365 goto endgrow; 366 goto endgrow;
366 }
367 367
368 newtbl->free_node = tbl->free_node; 368 newtbl->free_node = tbl->free_node;
369 newtbl->mean_chain_len = tbl->mean_chain_len; 369 newtbl->mean_chain_len = tbl->mean_chain_len;
@@ -373,13 +373,19 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
373 oldhash = tbl->hash_buckets; 373 oldhash = tbl->hash_buckets;
374 for (i = 0; i <= tbl->hash_mask; i++) 374 for (i = 0; i <= tbl->hash_mask; i++)
375 hlist_for_each(p, &oldhash[i]) 375 hlist_for_each(p, &oldhash[i])
376 tbl->copy_node(p, newtbl); 376 if (tbl->copy_node(p, newtbl) < 0)
377 goto errcopy;
377 378
379 return newtbl;
380
381errcopy:
382 for (i = 0; i <= newtbl->hash_mask; i++) {
383 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
384 tbl->free_node(p, 0);
385 }
386 __mesh_table_free(tbl);
378endgrow: 387endgrow:
379 if (err) 388 return NULL;
380 return NULL;
381 else
382 return newtbl;
383} 389}
384 390
385/** 391/**
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2e161f6d8288..669eafafe497 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -109,7 +109,7 @@ struct mesh_table {
109 __u32 hash_rnd; /* Used for hash generation */ 109 __u32 hash_rnd; /* Used for hash generation */
110 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ 110 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
111 void (*free_node) (struct hlist_node *p, bool free_leafs); 111 void (*free_node) (struct hlist_node *p, bool free_leafs);
112 void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 112 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
113 int size_order; 113 int size_order;
114 int mean_chain_len; 114 int mean_chain_len;
115}; 115};
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index af0cd1e3e213..7fa149e230e6 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -26,7 +26,7 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
26{ 26{
27 if (ae) 27 if (ae)
28 offset += 6; 28 offset += 6;
29 return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset))); 29 return get_unaligned_le32(preq_elem + offset);
30} 30}
31 31
32/* HWMP IE processing macros */ 32/* HWMP IE processing macros */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 99c2d360888e..947b13b40726 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
159 return -ENOSPC; 159 return -ENOSPC;
160 160
161 err = -ENOMEM;
161 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 162 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
162 if (!new_mpath) { 163 if (!new_mpath)
163 atomic_dec(&sdata->u.sta.mpaths); 164 goto err_path_alloc;
164 err = -ENOMEM; 165
165 goto endadd2;
166 }
167 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 166 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
168 if (!new_node) { 167 if (!new_node)
169 kfree(new_mpath); 168 goto err_node_alloc;
170 atomic_dec(&sdata->u.sta.mpaths);
171 err = -ENOMEM;
172 goto endadd2;
173 }
174 169
175 read_lock(&pathtbl_resize_lock); 170 read_lock(&pathtbl_resize_lock);
176 memcpy(new_mpath->dst, dst, ETH_ALEN); 171 memcpy(new_mpath->dst, dst, ETH_ALEN);
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
189 184
190 spin_lock(&mesh_paths->hashwlock[hash_idx]); 185 spin_lock(&mesh_paths->hashwlock[hash_idx]);
191 186
187 err = -EEXIST;
192 hlist_for_each_entry(node, n, bucket, list) { 188 hlist_for_each_entry(node, n, bucket, list) {
193 mpath = node->mpath; 189 mpath = node->mpath;
194 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) 190 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
195 == 0) { 191 goto err_exists;
196 err = -EEXIST;
197 atomic_dec(&sdata->u.sta.mpaths);
198 kfree(new_node);
199 kfree(new_mpath);
200 goto endadd;
201 }
202 } 192 }
203 193
204 hlist_add_head_rcu(&new_node->list, bucket); 194 hlist_add_head_rcu(&new_node->list, bucket);
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
206 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 196 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
207 grow = 1; 197 grow = 1;
208 198
209endadd:
210 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 199 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
211 read_unlock(&pathtbl_resize_lock); 200 read_unlock(&pathtbl_resize_lock);
212 if (!err && grow) { 201 if (grow) {
213 struct mesh_table *oldtbl, *newtbl; 202 struct mesh_table *oldtbl, *newtbl;
214 203
215 write_lock(&pathtbl_resize_lock); 204 write_lock(&pathtbl_resize_lock);
@@ -217,7 +206,7 @@ endadd:
217 newtbl = mesh_table_grow(mesh_paths); 206 newtbl = mesh_table_grow(mesh_paths);
218 if (!newtbl) { 207 if (!newtbl) {
219 write_unlock(&pathtbl_resize_lock); 208 write_unlock(&pathtbl_resize_lock);
220 return -ENOMEM; 209 return 0;
221 } 210 }
222 rcu_assign_pointer(mesh_paths, newtbl); 211 rcu_assign_pointer(mesh_paths, newtbl);
223 write_unlock(&pathtbl_resize_lock); 212 write_unlock(&pathtbl_resize_lock);
@@ -225,7 +214,16 @@ endadd:
225 synchronize_rcu(); 214 synchronize_rcu();
226 mesh_table_free(oldtbl, false); 215 mesh_table_free(oldtbl, false);
227 } 216 }
228endadd2: 217 return 0;
218
219err_exists:
220 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
221 read_unlock(&pathtbl_resize_lock);
222 kfree(new_node);
223err_node_alloc:
224 kfree(new_mpath);
225err_path_alloc:
226 atomic_dec(&sdata->u.sta.mpaths);
229 return err; 227 return err;
230} 228}
231 229
@@ -460,25 +458,28 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
460 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 458 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
461 mpath = node->mpath; 459 mpath = node->mpath;
462 hlist_del_rcu(p); 460 hlist_del_rcu(p);
463 synchronize_rcu();
464 if (free_leafs) 461 if (free_leafs)
465 kfree(mpath); 462 kfree(mpath);
466 kfree(node); 463 kfree(node);
467} 464}
468 465
469static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) 466static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
470{ 467{
471 struct mesh_path *mpath; 468 struct mesh_path *mpath;
472 struct mpath_node *node, *new_node; 469 struct mpath_node *node, *new_node;
473 u32 hash_idx; 470 u32 hash_idx;
474 471
472 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
473 if (new_node == NULL)
474 return -ENOMEM;
475
475 node = hlist_entry(p, struct mpath_node, list); 476 node = hlist_entry(p, struct mpath_node, list);
476 mpath = node->mpath; 477 mpath = node->mpath;
477 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
478 new_node->mpath = mpath; 478 new_node->mpath = mpath;
479 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); 479 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
480 hlist_add_head(&new_node->list, 480 hlist_add_head(&new_node->list,
481 &newtbl->hash_buckets[hash_idx]); 481 &newtbl->hash_buckets[hash_idx]);
482 return 0;
482} 483}
483 484
484int mesh_pathtbl_init(void) 485int mesh_pathtbl_init(void)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 37f0c2b94ae7..9efeb1f07025 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -79,7 +79,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
79 * 79 *
80 * @sta: mes peer link to restart 80 * @sta: mes peer link to restart
81 * 81 *
82 * Locking: this function must be called holding sta->plink_lock 82 * Locking: this function must be called holding sta->lock
83 */ 83 */
84static inline void mesh_plink_fsm_restart(struct sta_info *sta) 84static inline void mesh_plink_fsm_restart(struct sta_info *sta)
85{ 85{
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags |= WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED;
109 sta->supp_rates[local->hw.conf.channel->band] = rates; 109 sta->supp_rates[local->hw.conf.channel->band] = rates;
110 110
111 return sta; 111 return sta;
@@ -118,7 +118,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
118 * 118 *
119 * All mesh paths with this peer as next hop will be flushed 119 * All mesh paths with this peer as next hop will be flushed
120 * 120 *
121 * Locking: the caller must hold sta->plink_lock 121 * Locking: the caller must hold sta->lock
122 */ 122 */
123static void __mesh_plink_deactivate(struct sta_info *sta) 123static void __mesh_plink_deactivate(struct sta_info *sta)
124{ 124{
@@ -139,9 +139,9 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
139 */ 139 */
140void mesh_plink_deactivate(struct sta_info *sta) 140void mesh_plink_deactivate(struct sta_info *sta)
141{ 141{
142 spin_lock_bh(&sta->plink_lock); 142 spin_lock_bh(&sta->lock);
143 __mesh_plink_deactivate(sta); 143 __mesh_plink_deactivate(sta);
144 spin_unlock_bh(&sta->plink_lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct net_device *dev,
@@ -270,10 +270,10 @@ static void mesh_plink_timer(unsigned long data)
270 */ 270 */
271 sta = (struct sta_info *) data; 271 sta = (struct sta_info *) data;
272 272
273 spin_lock_bh(&sta->plink_lock); 273 spin_lock_bh(&sta->lock);
274 if (sta->ignore_plink_timer) { 274 if (sta->ignore_plink_timer) {
275 sta->ignore_plink_timer = false; 275 sta->ignore_plink_timer = false;
276 spin_unlock_bh(&sta->plink_lock); 276 spin_unlock_bh(&sta->lock);
277 return; 277 return;
278 } 278 }
279 mpl_dbg("Mesh plink timer for %s fired on state %d\n", 279 mpl_dbg("Mesh plink timer for %s fired on state %d\n",
@@ -298,7 +298,7 @@ static void mesh_plink_timer(unsigned long data)
298 rand % sta->plink_timeout; 298 rand % sta->plink_timeout;
299 ++sta->plink_retries; 299 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 300 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->plink_lock); 301 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
303 0, 0); 303 0, 0);
304 break; 304 break;
@@ -311,7 +311,7 @@ static void mesh_plink_timer(unsigned long data)
311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
312 sta->plink_state = PLINK_HOLDING; 312 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->plink_lock); 314 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid,
316 reason); 316 reason);
317 break; 317 break;
@@ -319,10 +319,10 @@ static void mesh_plink_timer(unsigned long data)
319 /* holding timer */ 319 /* holding timer */
320 del_timer(&sta->plink_timer); 320 del_timer(&sta->plink_timer);
321 mesh_plink_fsm_restart(sta); 321 mesh_plink_fsm_restart(sta);
322 spin_unlock_bh(&sta->plink_lock); 322 spin_unlock_bh(&sta->lock);
323 break; 323 break;
324 default: 324 default:
325 spin_unlock_bh(&sta->plink_lock); 325 spin_unlock_bh(&sta->lock);
326 break; 326 break;
327 } 327 }
328} 328}
@@ -344,16 +344,16 @@ int mesh_plink_open(struct sta_info *sta)
344 DECLARE_MAC_BUF(mac); 344 DECLARE_MAC_BUF(mac);
345#endif 345#endif
346 346
347 spin_lock_bh(&sta->plink_lock); 347 spin_lock_bh(&sta->lock);
348 get_random_bytes(&llid, 2); 348 get_random_bytes(&llid, 2);
349 sta->llid = llid; 349 sta->llid = llid;
350 if (sta->plink_state != PLINK_LISTEN) { 350 if (sta->plink_state != PLINK_LISTEN) {
351 spin_unlock_bh(&sta->plink_lock); 351 spin_unlock_bh(&sta->lock);
352 return -EBUSY; 352 return -EBUSY;
353 } 353 }
354 sta->plink_state = PLINK_OPN_SNT; 354 sta->plink_state = PLINK_OPN_SNT;
355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
356 spin_unlock_bh(&sta->plink_lock); 356 spin_unlock_bh(&sta->lock);
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 357 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 358 print_mac(mac, sta->addr));
359 359
@@ -367,10 +367,10 @@ void mesh_plink_block(struct sta_info *sta)
367 DECLARE_MAC_BUF(mac); 367 DECLARE_MAC_BUF(mac);
368#endif 368#endif
369 369
370 spin_lock_bh(&sta->plink_lock); 370 spin_lock_bh(&sta->lock);
371 __mesh_plink_deactivate(sta); 371 __mesh_plink_deactivate(sta);
372 sta->plink_state = PLINK_BLOCKED; 372 sta->plink_state = PLINK_BLOCKED;
373 spin_unlock_bh(&sta->plink_lock); 373 spin_unlock_bh(&sta->lock);
374} 374}
375 375
376int mesh_plink_close(struct sta_info *sta) 376int mesh_plink_close(struct sta_info *sta)
@@ -383,14 +383,14 @@ int mesh_plink_close(struct sta_info *sta)
383 383
384 mpl_dbg("Mesh plink: closing link with %s\n", 384 mpl_dbg("Mesh plink: closing link with %s\n",
385 print_mac(mac, sta->addr)); 385 print_mac(mac, sta->addr));
386 spin_lock_bh(&sta->plink_lock); 386 spin_lock_bh(&sta->lock);
387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); 387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
388 reason = sta->reason; 388 reason = sta->reason;
389 389
390 if (sta->plink_state == PLINK_LISTEN || 390 if (sta->plink_state == PLINK_LISTEN ||
391 sta->plink_state == PLINK_BLOCKED) { 391 sta->plink_state == PLINK_BLOCKED) {
392 mesh_plink_fsm_restart(sta); 392 mesh_plink_fsm_restart(sta);
393 spin_unlock_bh(&sta->plink_lock); 393 spin_unlock_bh(&sta->lock);
394 return 0; 394 return 0;
395 } else if (sta->plink_state == PLINK_ESTAB) { 395 } else if (sta->plink_state == PLINK_ESTAB) {
396 __mesh_plink_deactivate(sta); 396 __mesh_plink_deactivate(sta);
@@ -402,7 +402,7 @@ int mesh_plink_close(struct sta_info *sta)
402 sta->plink_state = PLINK_HOLDING; 402 sta->plink_state = PLINK_HOLDING;
403 llid = sta->llid; 403 llid = sta->llid;
404 plid = sta->plid; 404 plid = sta->plid;
405 spin_unlock_bh(&sta->plink_lock); 405 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid,
407 plid, reason); 407 plid, reason);
408 return 0; 408 return 0;
@@ -490,7 +490,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
490 /* avoid warning */ 490 /* avoid warning */
491 break; 491 break;
492 } 492 }
493 spin_lock_bh(&sta->plink_lock); 493 spin_lock_bh(&sta->lock);
494 } else if (!sta) { 494 } else if (!sta) {
495 /* ftype == PLINK_OPEN */ 495 /* ftype == PLINK_OPEN */
496 u64 rates; 496 u64 rates;
@@ -512,9 +512,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
512 return; 512 return;
513 } 513 }
514 event = OPN_ACPT; 514 event = OPN_ACPT;
515 spin_lock_bh(&sta->plink_lock); 515 spin_lock_bh(&sta->lock);
516 } else { 516 } else {
517 spin_lock_bh(&sta->plink_lock); 517 spin_lock_bh(&sta->lock);
518 switch (ftype) { 518 switch (ftype) {
519 case PLINK_OPEN: 519 case PLINK_OPEN:
520 if (!mesh_plink_free_count(sdata) || 520 if (!mesh_plink_free_count(sdata) ||
@@ -551,7 +551,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
551 break; 551 break;
552 default: 552 default:
553 mpl_dbg("Mesh plink: unknown frame subtype\n"); 553 mpl_dbg("Mesh plink: unknown frame subtype\n");
554 spin_unlock_bh(&sta->plink_lock); 554 spin_unlock_bh(&sta->lock);
555 rcu_read_unlock(); 555 rcu_read_unlock();
556 return; 556 return;
557 } 557 }
@@ -568,7 +568,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
568 switch (event) { 568 switch (event) {
569 case CLS_ACPT: 569 case CLS_ACPT:
570 mesh_plink_fsm_restart(sta); 570 mesh_plink_fsm_restart(sta);
571 spin_unlock_bh(&sta->plink_lock); 571 spin_unlock_bh(&sta->lock);
572 break; 572 break;
573 case OPN_ACPT: 573 case OPN_ACPT:
574 sta->plink_state = PLINK_OPN_RCVD; 574 sta->plink_state = PLINK_OPN_RCVD;
@@ -576,14 +576,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
576 get_random_bytes(&llid, 2); 576 get_random_bytes(&llid, 2);
577 sta->llid = llid; 577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->plink_lock); 579 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
581 0, 0); 581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr,
583 llid, plid, 0); 583 llid, plid, 0);
584 break; 584 break;
585 default: 585 default:
586 spin_unlock_bh(&sta->plink_lock); 586 spin_unlock_bh(&sta->lock);
587 break; 587 break;
588 } 588 }
589 break; 589 break;
@@ -603,7 +603,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
603 sta->ignore_plink_timer = true; 603 sta->ignore_plink_timer = true;
604 604
605 llid = sta->llid; 605 llid = sta->llid;
606 spin_unlock_bh(&sta->plink_lock); 606 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
608 plid, reason); 608 plid, reason);
609 break; 609 break;
@@ -612,7 +612,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
612 sta->plink_state = PLINK_OPN_RCVD; 612 sta->plink_state = PLINK_OPN_RCVD;
613 sta->plid = plid; 613 sta->plid = plid;
614 llid = sta->llid; 614 llid = sta->llid;
615 spin_unlock_bh(&sta->plink_lock); 615 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
617 plid, 0); 617 plid, 0);
618 break; 618 break;
@@ -622,10 +622,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
622 dot11MeshConfirmTimeout(sdata))) 622 dot11MeshConfirmTimeout(sdata)))
623 sta->ignore_plink_timer = true; 623 sta->ignore_plink_timer = true;
624 624
625 spin_unlock_bh(&sta->plink_lock); 625 spin_unlock_bh(&sta->lock);
626 break; 626 break;
627 default: 627 default:
628 spin_unlock_bh(&sta->plink_lock); 628 spin_unlock_bh(&sta->lock);
629 break; 629 break;
630 } 630 }
631 break; 631 break;
@@ -645,13 +645,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
645 sta->ignore_plink_timer = true; 645 sta->ignore_plink_timer = true;
646 646
647 llid = sta->llid; 647 llid = sta->llid;
648 spin_unlock_bh(&sta->plink_lock); 648 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
650 plid, reason); 650 plid, reason);
651 break; 651 break;
652 case OPN_ACPT: 652 case OPN_ACPT:
653 llid = sta->llid; 653 llid = sta->llid;
654 spin_unlock_bh(&sta->plink_lock); 654 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
656 plid, 0); 656 plid, 0);
657 break; 657 break;
@@ -659,12 +659,12 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
659 del_timer(&sta->plink_timer); 659 del_timer(&sta->plink_timer);
660 sta->plink_state = PLINK_ESTAB; 660 sta->plink_state = PLINK_ESTAB;
661 mesh_plink_inc_estab_count(sdata); 661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->plink_lock); 662 spin_unlock_bh(&sta->lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr)); 664 print_mac(mac, sta->addr));
665 break; 665 break;
666 default: 666 default:
667 spin_unlock_bh(&sta->plink_lock); 667 spin_unlock_bh(&sta->lock);
668 break; 668 break;
669 } 669 }
670 break; 670 break;
@@ -684,7 +684,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
684 sta->ignore_plink_timer = true; 684 sta->ignore_plink_timer = true;
685 685
686 llid = sta->llid; 686 llid = sta->llid;
687 spin_unlock_bh(&sta->plink_lock); 687 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
689 plid, reason); 689 plid, reason);
690 break; 690 break;
@@ -692,14 +692,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
692 del_timer(&sta->plink_timer); 692 del_timer(&sta->plink_timer);
693 sta->plink_state = PLINK_ESTAB; 693 sta->plink_state = PLINK_ESTAB;
694 mesh_plink_inc_estab_count(sdata); 694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->plink_lock); 695 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 697 print_mac(mac, sta->addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
699 plid, 0); 699 plid, 0);
700 break; 700 break;
701 default: 701 default:
702 spin_unlock_bh(&sta->plink_lock); 702 spin_unlock_bh(&sta->lock);
703 break; 703 break;
704 } 704 }
705 break; 705 break;
@@ -713,18 +713,18 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
713 sta->plink_state = PLINK_HOLDING; 713 sta->plink_state = PLINK_HOLDING;
714 llid = sta->llid; 714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->plink_lock); 716 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
718 plid, reason); 718 plid, reason);
719 break; 719 break;
720 case OPN_ACPT: 720 case OPN_ACPT:
721 llid = sta->llid; 721 llid = sta->llid;
722 spin_unlock_bh(&sta->plink_lock); 722 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
724 plid, 0); 724 plid, 0);
725 break; 725 break;
726 default: 726 default:
727 spin_unlock_bh(&sta->plink_lock); 727 spin_unlock_bh(&sta->lock);
728 break; 728 break;
729 } 729 }
730 break; 730 break;
@@ -734,7 +734,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
734 if (del_timer(&sta->plink_timer)) 734 if (del_timer(&sta->plink_timer))
735 sta->ignore_plink_timer = 1; 735 sta->ignore_plink_timer = 1;
736 mesh_plink_fsm_restart(sta); 736 mesh_plink_fsm_restart(sta);
737 spin_unlock_bh(&sta->plink_lock); 737 spin_unlock_bh(&sta->lock);
738 break; 738 break;
739 case OPN_ACPT: 739 case OPN_ACPT:
740 case CNF_ACPT: 740 case CNF_ACPT:
@@ -742,19 +742,19 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
742 case CNF_RJCT: 742 case CNF_RJCT:
743 llid = sta->llid; 743 llid = sta->llid;
744 reason = sta->reason; 744 reason = sta->reason;
745 spin_unlock_bh(&sta->plink_lock); 745 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
747 plid, reason); 747 plid, reason);
748 break; 748 break;
749 default: 749 default:
750 spin_unlock_bh(&sta->plink_lock); 750 spin_unlock_bh(&sta->lock);
751 } 751 }
752 break; 752 break;
753 default: 753 default:
754 /* should not get here, PLINK_BLOCKED is dealt with at the 754 /* should not get here, PLINK_BLOCKED is dealt with at the
755 * beggining of the function 755 * beggining of the function
756 */ 756 */
757 spin_unlock_bh(&sta->plink_lock); 757 spin_unlock_bh(&sta->lock);
758 break; 758 break;
759 } 759 }
760 760
diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c
index 0f844f7895f1..1fcdf38cf60c 100644
--- a/net/mac80211/michael.c
+++ b/net/mac80211/michael.c
@@ -6,85 +6,58 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9
10#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/bitops.h>
11#include <asm/unaligned.h>
11 12
12#include "michael.h" 13#include "michael.h"
13 14
14static inline u32 rotr(u32 val, int bits) 15static void michael_block(struct michael_mic_ctx *mctx, u32 val)
15{
16 return (val >> bits) | (val << (32 - bits));
17}
18
19
20static inline u32 rotl(u32 val, int bits)
21{
22 return (val << bits) | (val >> (32 - bits));
23}
24
25
26static inline u32 xswap(u32 val)
27{
28 return ((val & 0xff00ff00) >> 8) | ((val & 0x00ff00ff) << 8);
29}
30
31
32#define michael_block(l, r) \
33do { \
34 r ^= rotl(l, 17); \
35 l += r; \
36 r ^= xswap(l); \
37 l += r; \
38 r ^= rotl(l, 3); \
39 l += r; \
40 r ^= rotr(l, 2); \
41 l += r; \
42} while (0)
43
44
45static inline u32 michael_get32(u8 *data)
46{ 16{
47 return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); 17 mctx->l ^= val;
18 mctx->r ^= rol32(mctx->l, 17);
19 mctx->l += mctx->r;
20 mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
21 ((mctx->l & 0x00ff00ff) << 8);
22 mctx->l += mctx->r;
23 mctx->r ^= rol32(mctx->l, 3);
24 mctx->l += mctx->r;
25 mctx->r ^= ror32(mctx->l, 2);
26 mctx->l += mctx->r;
48} 27}
49 28
50 29static void michael_mic_hdr(struct michael_mic_ctx *mctx,
51static inline void michael_put32(u32 val, u8 *data) 30 const u8 *key, const u8 *da, const u8 *sa, u8 priority)
52{ 31{
53 data[0] = val & 0xff; 32 mctx->l = get_unaligned_le32(key);
54 data[1] = (val >> 8) & 0xff; 33 mctx->r = get_unaligned_le32(key + 4);
55 data[2] = (val >> 16) & 0xff; 34
56 data[3] = (val >> 24) & 0xff; 35 /*
36 * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
37 * calculation, but it is _not_ transmitted
38 */
39 michael_block(mctx, get_unaligned_le32(da));
40 michael_block(mctx, get_unaligned_le16(&da[4]) |
41 (get_unaligned_le16(sa) << 16));
42 michael_block(mctx, get_unaligned_le32(&sa[2]));
43 michael_block(mctx, priority);
57} 44}
58 45
59 46void michael_mic(const u8 *key, const u8 *da, const u8 *sa, u8 priority,
60void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, 47 const u8 *data, size_t data_len, u8 *mic)
61 u8 *data, size_t data_len, u8 *mic)
62{ 48{
63 u32 l, r, val; 49 u32 val;
64 size_t block, blocks, left; 50 size_t block, blocks, left;
51 struct michael_mic_ctx mctx;
65 52
66 l = michael_get32(key); 53 michael_mic_hdr(&mctx, key, da, sa, priority);
67 r = michael_get32(key + 4);
68
69 /* A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
70 * calculation, but it is _not_ transmitted */
71 l ^= michael_get32(da);
72 michael_block(l, r);
73 l ^= da[4] | (da[5] << 8) | (sa[0] << 16) | (sa[1] << 24);
74 michael_block(l, r);
75 l ^= michael_get32(&sa[2]);
76 michael_block(l, r);
77 l ^= priority;
78 michael_block(l, r);
79 54
80 /* Real data */ 55 /* Real data */
81 blocks = data_len / 4; 56 blocks = data_len / 4;
82 left = data_len % 4; 57 left = data_len % 4;
83 58
84 for (block = 0; block < blocks; block++) { 59 for (block = 0; block < blocks; block++)
85 l ^= michael_get32(&data[block * 4]); 60 michael_block(&mctx, get_unaligned_le32(&data[block * 4]));
86 michael_block(l, r);
87 }
88 61
89 /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make 62 /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make
90 * total length a multiple of 4. */ 63 * total length a multiple of 4. */
@@ -94,11 +67,10 @@ void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority,
94 left--; 67 left--;
95 val |= data[blocks * 4 + left]; 68 val |= data[blocks * 4 + left];
96 } 69 }
97 l ^= val;
98 michael_block(l, r);
99 /* last block is zero, so l ^ 0 = l */
100 michael_block(l, r);
101 70
102 michael_put32(l, mic); 71 michael_block(&mctx, val);
103 michael_put32(r, mic + 4); 72 michael_block(&mctx, 0);
73
74 put_unaligned_le32(mctx.l, mic);
75 put_unaligned_le32(mctx.r, mic + 4);
104} 76}
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h
index 2e6aebabeea1..69b4501f13ba 100644
--- a/net/mac80211/michael.h
+++ b/net/mac80211/michael.h
@@ -14,7 +14,11 @@
14 14
15#define MICHAEL_MIC_LEN 8 15#define MICHAEL_MIC_LEN 8
16 16
17void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, 17struct michael_mic_ctx {
18 u8 *data, size_t data_len, u8 *mic); 18 u32 l, r;
19};
20
21void michael_mic(const u8 *key, const u8 *da, const u8 *sa, u8 priority,
22 const u8 *data, size_t data_len, u8 *mic);
19 23
20#endif /* MICHAEL_H */ 24#endif /* MICHAEL_H */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4d2b582dd055..7f05820dc629 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -87,6 +87,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
87 u8 *ssid, size_t ssid_len); 87 u8 *ssid, size_t ssid_len);
88static int ieee80211_sta_config_auth(struct net_device *dev, 88static int ieee80211_sta_config_auth(struct net_device *dev,
89 struct ieee80211_if_sta *ifsta); 89 struct ieee80211_if_sta *ifsta);
90static void sta_rx_agg_session_timer_expired(unsigned long data);
90 91
91 92
92void ieee802_11_parse_elems(u8 *start, size_t len, 93void ieee802_11_parse_elems(u8 *start, size_t len,
@@ -256,19 +257,8 @@ static void ieee80211_sta_def_wmm_params(struct net_device *dev,
256 qparam.cw_max = 1023; 257 qparam.cw_max = 1023;
257 qparam.txop = 0; 258 qparam.txop = 0;
258 259
259 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) 260 for (i = 0; i < local_to_hw(local)->queues; i++)
260 local->ops->conf_tx(local_to_hw(local), 261 local->ops->conf_tx(local_to_hw(local), i, &qparam);
261 i + IEEE80211_TX_QUEUE_DATA0,
262 &qparam);
263
264 if (ibss) {
265 /* IBSS uses different parameters for Beacon sending */
266 qparam.cw_min++;
267 qparam.cw_min *= 2;
268 qparam.cw_min--;
269 local->ops->conf_tx(local_to_hw(local),
270 IEEE80211_TX_QUEUE_BEACON, &qparam);
271 }
272 } 262 }
273} 263}
274 264
@@ -282,6 +272,12 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
282 int count; 272 int count;
283 u8 *pos; 273 u8 *pos;
284 274
275 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
276 return;
277
278 if (!wmm_param)
279 return;
280
285 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 281 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
286 return; 282 return;
287 count = wmm_param[6] & 0x0f; 283 count = wmm_param[6] & 0x0f;
@@ -305,29 +301,25 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
305 301
306 switch (aci) { 302 switch (aci) {
307 case 1: 303 case 1:
308 queue = IEEE80211_TX_QUEUE_DATA3; 304 queue = 3;
309 if (acm) { 305 if (acm)
310 local->wmm_acm |= BIT(0) | BIT(3); 306 local->wmm_acm |= BIT(0) | BIT(3);
311 }
312 break; 307 break;
313 case 2: 308 case 2:
314 queue = IEEE80211_TX_QUEUE_DATA1; 309 queue = 1;
315 if (acm) { 310 if (acm)
316 local->wmm_acm |= BIT(4) | BIT(5); 311 local->wmm_acm |= BIT(4) | BIT(5);
317 }
318 break; 312 break;
319 case 3: 313 case 3:
320 queue = IEEE80211_TX_QUEUE_DATA0; 314 queue = 0;
321 if (acm) { 315 if (acm)
322 local->wmm_acm |= BIT(6) | BIT(7); 316 local->wmm_acm |= BIT(6) | BIT(7);
323 }
324 break; 317 break;
325 case 0: 318 case 0:
326 default: 319 default:
327 queue = IEEE80211_TX_QUEUE_DATA2; 320 queue = 2;
328 if (acm) { 321 if (acm)
329 local->wmm_acm |= BIT(1) | BIT(2); 322 local->wmm_acm |= BIT(1) | BIT(2);
330 }
331 break; 323 break;
332 } 324 }
333 325
@@ -586,7 +578,7 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
586 int encrypt) 578 int encrypt)
587{ 579{
588 struct ieee80211_sub_if_data *sdata; 580 struct ieee80211_sub_if_data *sdata;
589 struct ieee80211_tx_packet_data *pkt_data; 581 struct ieee80211_tx_info *info;
590 582
591 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 583 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
592 skb->dev = sdata->local->mdev; 584 skb->dev = sdata->local->mdev;
@@ -594,11 +586,11 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
594 skb_set_network_header(skb, 0); 586 skb_set_network_header(skb, 0);
595 skb_set_transport_header(skb, 0); 587 skb_set_transport_header(skb, 0);
596 588
597 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 589 info = IEEE80211_SKB_CB(skb);
598 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 590 memset(info, 0, sizeof(struct ieee80211_tx_info));
599 pkt_data->ifindex = sdata->dev->ifindex; 591 info->control.ifindex = sdata->dev->ifindex;
600 if (!encrypt) 592 if (!encrypt)
601 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; 593 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
602 594
603 dev_queue_xmit(skb); 595 dev_queue_xmit(skb);
604} 596}
@@ -727,9 +719,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
727 if (bss) { 719 if (bss) {
728 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 720 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
729 capab |= WLAN_CAPABILITY_PRIVACY; 721 capab |= WLAN_CAPABILITY_PRIVACY;
730 if (bss->wmm_ie) { 722 if (bss->wmm_ie)
731 wmm = 1; 723 wmm = 1;
732 }
733 724
734 /* get all rates supported by the device and the AP as 725 /* get all rates supported by the device and the AP as
735 * some APs don't like getting a superset of their rates 726 * some APs don't like getting a superset of their rates
@@ -821,9 +812,32 @@ static void ieee80211_send_assoc(struct net_device *dev,
821 *pos++ = 1; /* WME ver */ 812 *pos++ = 1; /* WME ver */
822 *pos++ = 0; 813 *pos++ = 0;
823 } 814 }
815
824 /* wmm support is a must to HT */ 816 /* wmm support is a must to HT */
825 if (wmm && sband->ht_info.ht_supported) { 817 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
826 __le16 tmp = cpu_to_le16(sband->ht_info.cap); 818 sband->ht_info.ht_supported && bss->ht_add_ie) {
819 struct ieee80211_ht_addt_info *ht_add_info =
820 (struct ieee80211_ht_addt_info *)bss->ht_add_ie;
821 u16 cap = sband->ht_info.cap;
822 __le16 tmp;
823 u32 flags = local->hw.conf.channel->flags;
824
825 switch (ht_add_info->ht_param & IEEE80211_HT_IE_CHA_SEC_OFFSET) {
826 case IEEE80211_HT_IE_CHA_SEC_ABOVE:
827 if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) {
828 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
829 cap &= ~IEEE80211_HT_CAP_SGI_40;
830 }
831 break;
832 case IEEE80211_HT_IE_CHA_SEC_BELOW:
833 if (flags & IEEE80211_CHAN_NO_FAT_BELOW) {
834 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
835 cap &= ~IEEE80211_HT_CAP_SGI_40;
836 }
837 break;
838 }
839
840 tmp = cpu_to_le16(cap);
827 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); 841 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
828 *pos++ = WLAN_EID_HT_CAPABILITY; 842 *pos++ = WLAN_EID_HT_CAPABILITY;
829 *pos++ = sizeof(struct ieee80211_ht_cap); 843 *pos++ = sizeof(struct ieee80211_ht_cap);
@@ -1141,8 +1155,8 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1141 struct ieee80211_mgmt *mgmt; 1155 struct ieee80211_mgmt *mgmt;
1142 u16 capab; 1156 u16 capab;
1143 1157
1144 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1158 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1145 sizeof(mgmt->u.action.u.addba_resp)); 1159
1146 if (!skb) { 1160 if (!skb) {
1147 printk(KERN_DEBUG "%s: failed to allocate buffer " 1161 printk(KERN_DEBUG "%s: failed to allocate buffer "
1148 "for addba resp frame\n", dev->name); 1162 "for addba resp frame\n", dev->name);
@@ -1190,9 +1204,7 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1190 struct ieee80211_mgmt *mgmt; 1204 struct ieee80211_mgmt *mgmt;
1191 u16 capab; 1205 u16 capab;
1192 1206
1193 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1207 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1194 sizeof(mgmt->u.action.u.addba_req));
1195
1196 1208
1197 if (!skb) { 1209 if (!skb) {
1198 printk(KERN_ERR "%s: failed to allocate buffer " 1210 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1293,7 +1305,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1293 1305
1294 1306
1295 /* examine state machine */ 1307 /* examine state machine */
1296 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1308 spin_lock_bh(&sta->lock);
1297 1309
1298 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 1310 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
1299#ifdef CONFIG_MAC80211_HT_DEBUG 1311#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -1360,7 +1372,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1360 tid_agg_rx->stored_mpdu_num = 0; 1372 tid_agg_rx->stored_mpdu_num = 0;
1361 status = WLAN_STATUS_SUCCESS; 1373 status = WLAN_STATUS_SUCCESS;
1362end: 1374end:
1363 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1375 spin_unlock_bh(&sta->lock);
1364 1376
1365end_no_lock: 1377end_no_lock:
1366 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, 1378 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
@@ -1392,10 +1404,10 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1392 1404
1393 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1405 state = &sta->ampdu_mlme.tid_state_tx[tid];
1394 1406
1395 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1407 spin_lock_bh(&sta->lock);
1396 1408
1397 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1409 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1398 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1410 spin_unlock_bh(&sta->lock);
1399 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:" 1411 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
1400 "%d\n", *state); 1412 "%d\n", *state);
1401 goto addba_resp_exit; 1413 goto addba_resp_exit;
@@ -1403,7 +1415,7 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1403 1415
1404 if (mgmt->u.action.u.addba_resp.dialog_token != 1416 if (mgmt->u.action.u.addba_resp.dialog_token !=
1405 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 1417 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
1406 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1418 spin_unlock_bh(&sta->lock);
1407#ifdef CONFIG_MAC80211_HT_DEBUG 1419#ifdef CONFIG_MAC80211_HT_DEBUG
1408 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 1420 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1409#endif /* CONFIG_MAC80211_HT_DEBUG */ 1421#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -1427,7 +1439,7 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1427 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 1439 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1428 } 1440 }
1429 1441
1430 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1442 spin_unlock_bh(&sta->lock);
1431 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid); 1443 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
1432 } else { 1444 } else {
1433 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid); 1445 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
@@ -1435,7 +1447,7 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1435 sta->ampdu_mlme.addba_req_num[tid]++; 1447 sta->ampdu_mlme.addba_req_num[tid]++;
1436 /* this will allow the state check in stop_BA_session */ 1448 /* this will allow the state check in stop_BA_session */
1437 *state = HT_AGG_STATE_OPERATIONAL; 1449 *state = HT_AGG_STATE_OPERATIONAL;
1438 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1450 spin_unlock_bh(&sta->lock);
1439 ieee80211_stop_tx_ba_session(hw, sta->addr, tid, 1451 ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
1440 WLAN_BACK_INITIATOR); 1452 WLAN_BACK_INITIATOR);
1441 } 1453 }
@@ -1454,8 +1466,7 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1454 struct ieee80211_mgmt *mgmt; 1466 struct ieee80211_mgmt *mgmt;
1455 u16 params; 1467 u16 params;
1456 1468
1457 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1469 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1458 sizeof(mgmt->u.action.u.delba));
1459 1470
1460 if (!skb) { 1471 if (!skb) {
1461 printk(KERN_ERR "%s: failed to allocate buffer " 1472 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1506,17 +1517,17 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1506 } 1517 }
1507 1518
1508 /* check if TID is in operational state */ 1519 /* check if TID is in operational state */
1509 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1520 spin_lock_bh(&sta->lock);
1510 if (sta->ampdu_mlme.tid_state_rx[tid] 1521 if (sta->ampdu_mlme.tid_state_rx[tid]
1511 != HT_AGG_STATE_OPERATIONAL) { 1522 != HT_AGG_STATE_OPERATIONAL) {
1512 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1523 spin_unlock_bh(&sta->lock);
1513 rcu_read_unlock(); 1524 rcu_read_unlock();
1514 return; 1525 return;
1515 } 1526 }
1516 sta->ampdu_mlme.tid_state_rx[tid] = 1527 sta->ampdu_mlme.tid_state_rx[tid] =
1517 HT_AGG_STATE_REQ_STOP_BA_MSK | 1528 HT_AGG_STATE_REQ_STOP_BA_MSK |
1518 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 1529 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1519 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1530 spin_unlock_bh(&sta->lock);
1520 1531
1521 /* stop HW Rx aggregation. ampdu_action existence 1532 /* stop HW Rx aggregation. ampdu_action existence
1522 * already verified in session init so we add the BUG_ON */ 1533 * already verified in session init so we add the BUG_ON */
@@ -1593,10 +1604,10 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1593 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1604 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1594 WLAN_BACK_INITIATOR, 0); 1605 WLAN_BACK_INITIATOR, 0);
1595 else { /* WLAN_BACK_RECIPIENT */ 1606 else { /* WLAN_BACK_RECIPIENT */
1596 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1607 spin_lock_bh(&sta->lock);
1597 sta->ampdu_mlme.tid_state_tx[tid] = 1608 sta->ampdu_mlme.tid_state_tx[tid] =
1598 HT_AGG_STATE_OPERATIONAL; 1609 HT_AGG_STATE_OPERATIONAL;
1599 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1610 spin_unlock_bh(&sta->lock);
1600 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, 1611 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1601 WLAN_BACK_RECIPIENT); 1612 WLAN_BACK_RECIPIENT);
1602 } 1613 }
@@ -1633,9 +1644,9 @@ void sta_addba_resp_timer_expired(unsigned long data)
1633 1644
1634 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1645 state = &sta->ampdu_mlme.tid_state_tx[tid];
1635 /* check if the TID waits for addBA response */ 1646 /* check if the TID waits for addBA response */
1636 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1647 spin_lock_bh(&sta->lock);
1637 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1648 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1638 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1649 spin_unlock_bh(&sta->lock);
1639 *state = HT_AGG_STATE_IDLE; 1650 *state = HT_AGG_STATE_IDLE;
1640 printk(KERN_DEBUG "timer expired on tid %d but we are not " 1651 printk(KERN_DEBUG "timer expired on tid %d but we are not "
1641 "expecting addBA response there", tid); 1652 "expecting addBA response there", tid);
@@ -1646,7 +1657,7 @@ void sta_addba_resp_timer_expired(unsigned long data)
1646 1657
1647 /* go through the state check in stop_BA_session */ 1658 /* go through the state check in stop_BA_session */
1648 *state = HT_AGG_STATE_OPERATIONAL; 1659 *state = HT_AGG_STATE_OPERATIONAL;
1649 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1660 spin_unlock_bh(&sta->lock);
1650 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, 1661 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1651 WLAN_BACK_INITIATOR); 1662 WLAN_BACK_INITIATOR);
1652 1663
@@ -1659,7 +1670,7 @@ timer_expired_exit:
1659 * resetting it after each frame that arrives from the originator. 1670 * resetting it after each frame that arrives from the originator.
1660 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 1671 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
1661 */ 1672 */
1662void sta_rx_agg_session_timer_expired(unsigned long data) 1673static void sta_rx_agg_session_timer_expired(unsigned long data)
1663{ 1674{
1664 /* not an elegant detour, but there is no choice as the timer passes 1675 /* not an elegant detour, but there is no choice as the timer passes
1665 * only one argument, and various sta_info are needed here, so init 1676 * only one argument, and various sta_info are needed here, so init
@@ -1848,9 +1859,8 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1848 " (reason=%d)\n", 1859 " (reason=%d)\n",
1849 dev->name, print_mac(mac, mgmt->sa), reason_code); 1860 dev->name, print_mac(mac, mgmt->sa), reason_code);
1850 1861
1851 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) { 1862 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1852 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1863 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name);
1853 }
1854 1864
1855 if (ifsta->state == IEEE80211_AUTHENTICATE || 1865 if (ifsta->state == IEEE80211_AUTHENTICATE ||
1856 ifsta->state == IEEE80211_ASSOCIATE || 1866 ifsta->state == IEEE80211_ASSOCIATE ||
@@ -2013,8 +2023,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2013 local->hw.conf.channel->center_freq, 2023 local->hw.conf.channel->center_freq,
2014 ifsta->ssid, ifsta->ssid_len); 2024 ifsta->ssid, ifsta->ssid_len);
2015 if (bss) { 2025 if (bss) {
2016 sta->last_rssi = bss->rssi;
2017 sta->last_signal = bss->signal; 2026 sta->last_signal = bss->signal;
2027 sta->last_qual = bss->qual;
2018 sta->last_noise = bss->noise; 2028 sta->last_noise = bss->noise;
2019 ieee80211_rx_bss_put(dev, bss); 2029 ieee80211_rx_bss_put(dev, bss);
2020 } 2030 }
@@ -2038,8 +2048,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2038 * to between the sta_info_alloc() and sta_info_insert() above. 2048 * to between the sta_info_alloc() and sta_info_insert() above.
2039 */ 2049 */
2040 2050
2041 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | 2051 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
2042 WLAN_STA_AUTHORIZED; 2052 WLAN_STA_AUTHORIZED);
2043 2053
2044 rates = 0; 2054 rates = 0;
2045 basic_rates = 0; 2055 basic_rates = 0;
@@ -2083,7 +2093,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2083 else 2093 else
2084 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 2094 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2085 2095
2086 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) { 2096 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
2097 (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2087 struct ieee80211_ht_bss_info bss_info; 2098 struct ieee80211_ht_bss_info bss_info;
2088 ieee80211_ht_cap_ie_to_ht_info( 2099 ieee80211_ht_cap_ie_to_ht_info(
2089 (struct ieee80211_ht_cap *) 2100 (struct ieee80211_ht_cap *)
@@ -2096,8 +2107,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2096 2107
2097 rate_control_rate_init(sta, local); 2108 rate_control_rate_init(sta, local);
2098 2109
2099 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { 2110 if (elems.wmm_param) {
2100 sta->flags |= WLAN_STA_WME; 2111 set_sta_flags(sta, WLAN_STA_WME);
2101 rcu_read_unlock(); 2112 rcu_read_unlock();
2102 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2113 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2103 elems.wmm_param_len); 2114 elems.wmm_param_len);
@@ -2281,6 +2292,7 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
2281 kfree(bss->rsn_ie); 2292 kfree(bss->rsn_ie);
2282 kfree(bss->wmm_ie); 2293 kfree(bss->wmm_ie);
2283 kfree(bss->ht_ie); 2294 kfree(bss->ht_ie);
2295 kfree(bss->ht_add_ie);
2284 kfree(bss_mesh_id(bss)); 2296 kfree(bss_mesh_id(bss));
2285 kfree(bss_mesh_cfg(bss)); 2297 kfree(bss_mesh_cfg(bss));
2286 kfree(bss); 2298 kfree(bss);
@@ -2331,7 +2343,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2331 int res, rates, i, j; 2343 int res, rates, i, j;
2332 struct sk_buff *skb; 2344 struct sk_buff *skb;
2333 struct ieee80211_mgmt *mgmt; 2345 struct ieee80211_mgmt *mgmt;
2334 struct ieee80211_tx_control control; 2346 struct ieee80211_tx_info *control;
2335 struct rate_selection ratesel; 2347 struct rate_selection ratesel;
2336 u8 *pos; 2348 u8 *pos;
2337 struct ieee80211_sub_if_data *sdata; 2349 struct ieee80211_sub_if_data *sdata;
@@ -2419,21 +2431,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2419 memcpy(pos, &bss->supp_rates[8], rates); 2431 memcpy(pos, &bss->supp_rates[8], rates);
2420 } 2432 }
2421 2433
2422 memset(&control, 0, sizeof(control)); 2434 control = IEEE80211_SKB_CB(skb);
2435
2423 rate_control_get_rate(dev, sband, skb, &ratesel); 2436 rate_control_get_rate(dev, sband, skb, &ratesel);
2424 if (!ratesel.rate) { 2437 if (ratesel.rate_idx < 0) {
2425 printk(KERN_DEBUG "%s: Failed to determine TX rate " 2438 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2426 "for IBSS beacon\n", dev->name); 2439 "for IBSS beacon\n", dev->name);
2427 break; 2440 break;
2428 } 2441 }
2429 control.vif = &sdata->vif; 2442 control->control.vif = &sdata->vif;
2430 control.tx_rate = ratesel.rate; 2443 control->tx_rate_idx = ratesel.rate_idx;
2431 if (sdata->bss_conf.use_short_preamble && 2444 if (sdata->bss_conf.use_short_preamble &&
2432 ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 2445 sband->bitrates[ratesel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE)
2433 control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 2446 control->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
2434 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; 2447 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2435 control.flags |= IEEE80211_TXCTL_NO_ACK; 2448 control->flags |= IEEE80211_TX_CTL_NO_ACK;
2436 control.retry_limit = 1; 2449 control->control.retry_limit = 1;
2437 2450
2438 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC); 2451 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2439 if (ifsta->probe_resp) { 2452 if (ifsta->probe_resp) {
@@ -2448,8 +2461,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2448 } 2461 }
2449 2462
2450 if (local->ops->beacon_update && 2463 if (local->ops->beacon_update &&
2451 local->ops->beacon_update(local_to_hw(local), 2464 local->ops->beacon_update(local_to_hw(local), skb) == 0) {
2452 skb, &control) == 0) {
2453 printk(KERN_DEBUG "%s: Configured IBSS beacon " 2465 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2454 "template\n", dev->name); 2466 "template\n", dev->name);
2455 skb = NULL; 2467 skb = NULL;
@@ -2657,6 +2669,26 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2657 bss->ht_ie_len = 0; 2669 bss->ht_ie_len = 0;
2658 } 2670 }
2659 2671
2672 if (elems.ht_info_elem &&
2673 (!bss->ht_add_ie ||
2674 bss->ht_add_ie_len != elems.ht_info_elem_len ||
2675 memcmp(bss->ht_add_ie, elems.ht_info_elem,
2676 elems.ht_info_elem_len))) {
2677 kfree(bss->ht_add_ie);
2678 bss->ht_add_ie =
2679 kmalloc(elems.ht_info_elem_len + 2, GFP_ATOMIC);
2680 if (bss->ht_add_ie) {
2681 memcpy(bss->ht_add_ie, elems.ht_info_elem - 2,
2682 elems.ht_info_elem_len + 2);
2683 bss->ht_add_ie_len = elems.ht_info_elem_len + 2;
2684 } else
2685 bss->ht_add_ie_len = 0;
2686 } else if (!elems.ht_info_elem && bss->ht_add_ie) {
2687 kfree(bss->ht_add_ie);
2688 bss->ht_add_ie = NULL;
2689 bss->ht_add_ie_len = 0;
2690 }
2691
2660 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 2692 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
2661 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 2693 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
2662 2694
@@ -2682,9 +2714,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2682 2714
2683 bss->timestamp = beacon_timestamp; 2715 bss->timestamp = beacon_timestamp;
2684 bss->last_update = jiffies; 2716 bss->last_update = jiffies;
2685 bss->rssi = rx_status->ssi;
2686 bss->signal = rx_status->signal; 2717 bss->signal = rx_status->signal;
2687 bss->noise = rx_status->noise; 2718 bss->noise = rx_status->noise;
2719 bss->qual = rx_status->qual;
2688 if (!beacon && !bss->probe_resp) 2720 if (!beacon && !bss->probe_resp)
2689 bss->probe_resp = true; 2721 bss->probe_resp = true;
2690 2722
@@ -2879,10 +2911,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2879 2911
2880 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 2912 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2881 2913
2882 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { 2914 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2883 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2915 elems.wmm_param_len);
2884 elems.wmm_param_len);
2885 }
2886 2916
2887 /* Do not send changes to driver if we are scanning. This removes 2917 /* Do not send changes to driver if we are scanning. This removes
2888 * requirement that driver's bss_info_changed function needs to be 2918 * requirement that driver's bss_info_changed function needs to be
@@ -3478,9 +3508,9 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
3478 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) 3508 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3479 continue; 3509 continue;
3480 3510
3481 if (!selected || top_rssi < bss->rssi) { 3511 if (!selected || top_rssi < bss->signal) {
3482 selected = bss; 3512 selected = bss;
3483 top_rssi = bss->rssi; 3513 top_rssi = bss->signal;
3484 } 3514 }
3485 } 3515 }
3486 if (selected) 3516 if (selected)
@@ -3557,10 +3587,12 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3557 bss->beacon_int = local->hw.conf.beacon_int; 3587 bss->beacon_int = local->hw.conf.beacon_int;
3558 bss->last_update = jiffies; 3588 bss->last_update = jiffies;
3559 bss->capability = WLAN_CAPABILITY_IBSS; 3589 bss->capability = WLAN_CAPABILITY_IBSS;
3560 if (sdata->default_key) { 3590
3591 if (sdata->default_key)
3561 bss->capability |= WLAN_CAPABILITY_PRIVACY; 3592 bss->capability |= WLAN_CAPABILITY_PRIVACY;
3562 } else 3593 else
3563 sdata->drop_unencrypted = 0; 3594 sdata->drop_unencrypted = 0;
3595
3564 bss->supp_rates_len = sband->n_bitrates; 3596 bss->supp_rates_len = sband->n_bitrates;
3565 pos = bss->supp_rates; 3597 pos = bss->supp_rates;
3566 for (i = 0; i < sband->n_bitrates; i++) { 3598 for (i = 0; i < sband->n_bitrates; i++) {
@@ -4114,8 +4146,8 @@ ieee80211_sta_scan_result(struct net_device *dev,
4114 IW_EV_FREQ_LEN); 4146 IW_EV_FREQ_LEN);
4115 memset(&iwe, 0, sizeof(iwe)); 4147 memset(&iwe, 0, sizeof(iwe));
4116 iwe.cmd = IWEVQUAL; 4148 iwe.cmd = IWEVQUAL;
4117 iwe.u.qual.qual = bss->signal; 4149 iwe.u.qual.qual = bss->qual;
4118 iwe.u.qual.level = bss->rssi; 4150 iwe.u.qual.level = bss->signal;
4119 iwe.u.qual.noise = bss->noise; 4151 iwe.u.qual.noise = bss->noise;
4120 iwe.u.qual.updated = local->wstats_flags; 4152 iwe.u.qual.updated = local->wstats_flags;
4121 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4153 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
@@ -4146,6 +4178,14 @@ ieee80211_sta_scan_result(struct net_device *dev,
4146 bss->rsn_ie); 4178 bss->rsn_ie);
4147 } 4179 }
4148 4180
4181 if (bss && bss->ht_ie) {
4182 memset(&iwe, 0, sizeof(iwe));
4183 iwe.cmd = IWEVGENIE;
4184 iwe.u.data.length = bss->ht_ie_len;
4185 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
4186 bss->ht_ie);
4187 }
4188
4149 if (bss && bss->supp_rates_len > 0) { 4189 if (bss && bss->supp_rates_len > 0) {
4150 /* display all supported rates in readable format */ 4190 /* display all supported rates in readable format */
4151 char *p = current_ev + IW_EV_LCP_LEN; 4191 char *p = current_ev + IW_EV_LCP_LEN;
@@ -4247,6 +4287,7 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4247{ 4287{
4248 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 4288 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4249 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4289 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4290
4250 kfree(ifsta->extra_ie); 4291 kfree(ifsta->extra_ie);
4251 if (len == 0) { 4292 if (len == 0) {
4252 ifsta->extra_ie = NULL; 4293 ifsta->extra_ie = NULL;
@@ -4264,9 +4305,9 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4264} 4305}
4265 4306
4266 4307
4267struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, 4308struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4268 struct sk_buff *skb, u8 *bssid, 4309 struct sk_buff *skb, u8 *bssid,
4269 u8 *addr) 4310 u8 *addr)
4270{ 4311{
4271 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 4312 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4272 struct sta_info *sta; 4313 struct sta_info *sta;
@@ -4290,7 +4331,7 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
4290 if (!sta) 4331 if (!sta)
4291 return NULL; 4332 return NULL;
4292 4333
4293 sta->flags |= WLAN_STA_AUTHORIZED; 4334 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
4294 4335
4295 sta->supp_rates[local->hw.conf.channel->band] = 4336 sta->supp_rates[local->hw.conf.channel->band] =
4296 sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band]; 4337 sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band];
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 841df93807fc..0388c090dfe9 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -176,20 +176,24 @@ void rate_control_get_rate(struct net_device *dev,
176 rcu_read_lock(); 176 rcu_read_lock();
177 sta = sta_info_get(local, hdr->addr1); 177 sta = sta_info_get(local, hdr->addr1);
178 178
179 memset(sel, 0, sizeof(struct rate_selection)); 179 sel->rate_idx = -1;
180 sel->nonerp_idx = -1;
181 sel->probe_idx = -1;
180 182
181 ref->ops->get_rate(ref->priv, dev, sband, skb, sel); 183 ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
182 184
185 BUG_ON(sel->rate_idx < 0);
186
183 /* Select a non-ERP backup rate. */ 187 /* Select a non-ERP backup rate. */
184 if (!sel->nonerp) { 188 if (sel->nonerp_idx < 0) {
185 for (i = 0; i < sband->n_bitrates; i++) { 189 for (i = 0; i < sband->n_bitrates; i++) {
186 struct ieee80211_rate *rate = &sband->bitrates[i]; 190 struct ieee80211_rate *rate = &sband->bitrates[i];
187 if (sel->rate->bitrate < rate->bitrate) 191 if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate)
188 break; 192 break;
189 193
190 if (rate_supported(sta, sband->band, i) && 194 if (rate_supported(sta, sband->band, i) &&
191 !(rate->flags & IEEE80211_RATE_ERP_G)) 195 !(rate->flags & IEEE80211_RATE_ERP_G))
192 sel->nonerp = rate; 196 sel->nonerp_idx = i;
193 } 197 }
194 } 198 }
195 199
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 5b45f33cb766..0ed9c8a2f56f 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -19,22 +19,22 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "sta_info.h" 20#include "sta_info.h"
21 21
22/* TODO: kdoc */ 22/**
23 * struct rate_selection - rate selection for rate control algos
24 * @rate: selected transmission rate index
25 * @nonerp: Non-ERP rate to use instead if ERP cannot be used
26 * @probe: rate for probing (or -1)
27 *
28 */
23struct rate_selection { 29struct rate_selection {
24 /* Selected transmission rate */ 30 s8 rate_idx, nonerp_idx, probe_idx;
25 struct ieee80211_rate *rate;
26 /* Non-ERP rate to use if mac80211 decides it cannot use an ERP rate */
27 struct ieee80211_rate *nonerp;
28 /* probe with this rate, or NULL for no probing */
29 struct ieee80211_rate *probe;
30}; 31};
31 32
32struct rate_control_ops { 33struct rate_control_ops {
33 struct module *module; 34 struct module *module;
34 const char *name; 35 const char *name;
35 void (*tx_status)(void *priv, struct net_device *dev, 36 void (*tx_status)(void *priv, struct net_device *dev,
36 struct sk_buff *skb, 37 struct sk_buff *skb);
37 struct ieee80211_tx_status *status);
38 void (*get_rate)(void *priv, struct net_device *dev, 38 void (*get_rate)(void *priv, struct net_device *dev,
39 struct ieee80211_supported_band *band, 39 struct ieee80211_supported_band *band,
40 struct sk_buff *skb, 40 struct sk_buff *skb,
@@ -76,13 +76,12 @@ struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
76void rate_control_put(struct rate_control_ref *ref); 76void rate_control_put(struct rate_control_ref *ref);
77 77
78static inline void rate_control_tx_status(struct net_device *dev, 78static inline void rate_control_tx_status(struct net_device *dev,
79 struct sk_buff *skb, 79 struct sk_buff *skb)
80 struct ieee80211_tx_status *status)
81{ 80{
82 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 81 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
83 struct rate_control_ref *ref = local->rate_ctrl; 82 struct rate_control_ref *ref = local->rate_ctrl;
84 83
85 ref->ops->tx_status(ref->priv, dev, skb, status); 84 ref->ops->tx_status(ref->priv, dev, skb);
86} 85}
87 86
88 87
@@ -138,7 +137,7 @@ static inline int rate_supported(struct sta_info *sta,
138 return (sta == NULL || sta->supp_rates[band] & BIT(index)); 137 return (sta == NULL || sta->supp_rates[band] & BIT(index));
139} 138}
140 139
141static inline int 140static inline s8
142rate_lowest_index(struct ieee80211_local *local, 141rate_lowest_index(struct ieee80211_local *local,
143 struct ieee80211_supported_band *sband, 142 struct ieee80211_supported_band *sband,
144 struct sta_info *sta) 143 struct sta_info *sta)
@@ -155,14 +154,6 @@ rate_lowest_index(struct ieee80211_local *local,
155 return 0; 154 return 0;
156} 155}
157 156
158static inline struct ieee80211_rate *
159rate_lowest(struct ieee80211_local *local,
160 struct ieee80211_supported_band *sband,
161 struct sta_info *sta)
162{
163 return &sband->bitrates[rate_lowest_index(local, sband, sta)];
164}
165
166 157
167/* functions for rate control related to a device */ 158/* functions for rate control related to a device */
168int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 159int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 04afc13ed825..2078803d3581 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -61,7 +61,7 @@ enum rc_pid_event_type {
61union rc_pid_event_data { 61union rc_pid_event_data {
62 /* RC_PID_EVENT_TX_STATUS */ 62 /* RC_PID_EVENT_TX_STATUS */
63 struct { 63 struct {
64 struct ieee80211_tx_status tx_status; 64 struct ieee80211_tx_info tx_status;
65 }; 65 };
66 /* RC_PID_EVENT_TYPE_RATE_CHANGE */ 66 /* RC_PID_EVENT_TYPE_RATE_CHANGE */
67 /* RC_PID_EVENT_TYPE_TX_RATE */ 67 /* RC_PID_EVENT_TYPE_TX_RATE */
@@ -158,7 +158,7 @@ struct rc_pid_debugfs_entries {
158}; 158};
159 159
160void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, 160void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
161 struct ieee80211_tx_status *stat); 161 struct ieee80211_tx_info *stat);
162 162
163void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, 163void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
164 int index, int rate); 164 int index, int rate);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a849b745bdb5..e8945413e4a2 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -237,8 +237,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
237} 237}
238 238
239static void rate_control_pid_tx_status(void *priv, struct net_device *dev, 239static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
240 struct sk_buff *skb, 240 struct sk_buff *skb)
241 struct ieee80211_tx_status *status)
242{ 241{
243 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 242 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
244 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 243 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -248,6 +247,7 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
248 struct rc_pid_sta_info *spinfo; 247 struct rc_pid_sta_info *spinfo;
249 unsigned long period; 248 unsigned long period;
250 struct ieee80211_supported_band *sband; 249 struct ieee80211_supported_band *sband;
250 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
251 251
252 rcu_read_lock(); 252 rcu_read_lock();
253 253
@@ -266,28 +266,28 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
266 266
267 /* Ignore all frames that were sent with a different rate than the rate 267 /* Ignore all frames that were sent with a different rate than the rate
268 * we currently advise mac80211 to use. */ 268 * we currently advise mac80211 to use. */
269 if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx]) 269 if (info->tx_rate_idx != sta->txrate_idx)
270 goto unlock; 270 goto unlock;
271 271
272 spinfo = sta->rate_ctrl_priv; 272 spinfo = sta->rate_ctrl_priv;
273 spinfo->tx_num_xmit++; 273 spinfo->tx_num_xmit++;
274 274
275#ifdef CONFIG_MAC80211_DEBUGFS 275#ifdef CONFIG_MAC80211_DEBUGFS
276 rate_control_pid_event_tx_status(&spinfo->events, status); 276 rate_control_pid_event_tx_status(&spinfo->events, info);
277#endif 277#endif
278 278
279 /* We count frames that totally failed to be transmitted as two bad 279 /* We count frames that totally failed to be transmitted as two bad
280 * frames, those that made it out but had some retries as one good and 280 * frames, those that made it out but had some retries as one good and
281 * one bad frame. */ 281 * one bad frame. */
282 if (status->excessive_retries) { 282 if (info->status.excessive_retries) {
283 spinfo->tx_num_failed += 2; 283 spinfo->tx_num_failed += 2;
284 spinfo->tx_num_xmit++; 284 spinfo->tx_num_xmit++;
285 } else if (status->retry_count) { 285 } else if (info->status.retry_count) {
286 spinfo->tx_num_failed++; 286 spinfo->tx_num_failed++;
287 spinfo->tx_num_xmit++; 287 spinfo->tx_num_xmit++;
288 } 288 }
289 289
290 if (status->excessive_retries) { 290 if (info->status.excessive_retries) {
291 sta->tx_retry_failed++; 291 sta->tx_retry_failed++;
292 sta->tx_num_consecutive_failures++; 292 sta->tx_num_consecutive_failures++;
293 sta->tx_num_mpdu_fail++; 293 sta->tx_num_mpdu_fail++;
@@ -295,8 +295,8 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
295 sta->tx_num_consecutive_failures = 0; 295 sta->tx_num_consecutive_failures = 0;
296 sta->tx_num_mpdu_ok++; 296 sta->tx_num_mpdu_ok++;
297 } 297 }
298 sta->tx_retry_count += status->retry_count; 298 sta->tx_retry_count += info->status.retry_count;
299 sta->tx_num_mpdu_fail += status->retry_count; 299 sta->tx_num_mpdu_fail += info->status.retry_count;
300 300
301 /* Update PID controller state. */ 301 /* Update PID controller state. */
302 period = (HZ * pinfo->sampling_period + 500) / 1000; 302 period = (HZ * pinfo->sampling_period + 500) / 1000;
@@ -330,7 +330,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
330 fc = le16_to_cpu(hdr->frame_control); 330 fc = le16_to_cpu(hdr->frame_control);
331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
332 is_multicast_ether_addr(hdr->addr1) || !sta) { 332 is_multicast_ether_addr(hdr->addr1) || !sta) {
333 sel->rate = rate_lowest(local, sband, sta); 333 sel->rate_idx = rate_lowest_index(local, sband, sta);
334 rcu_read_unlock(); 334 rcu_read_unlock();
335 return; 335 return;
336 } 336 }
@@ -349,7 +349,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
349 349
350 rcu_read_unlock(); 350 rcu_read_unlock();
351 351
352 sel->rate = &sband->bitrates[rateidx]; 352 sel->rate_idx = rateidx;
353 353
354#ifdef CONFIG_MAC80211_DEBUGFS 354#ifdef CONFIG_MAC80211_DEBUGFS
355 rate_control_pid_event_tx_rate( 355 rate_control_pid_event_tx_rate(
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index ff5c380f3c13..8121d3bc6835 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -39,11 +39,11 @@ static void rate_control_pid_event(struct rc_pid_event_buffer *buf,
39} 39}
40 40
41void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, 41void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
42 struct ieee80211_tx_status *stat) 42 struct ieee80211_tx_info *stat)
43{ 43{
44 union rc_pid_event_data evd; 44 union rc_pid_event_data evd;
45 45
46 memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_status)); 46 memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info));
47 rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd); 47 rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd);
48} 48}
49 49
@@ -167,8 +167,8 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
167 switch (ev->type) { 167 switch (ev->type) {
168 case RC_PID_EVENT_TYPE_TX_STATUS: 168 case RC_PID_EVENT_TYPE_TX_STATUS:
169 p += snprintf(pb + p, length - p, "tx_status %u %u", 169 p += snprintf(pb + p, length - p, "tx_status %u %u",
170 ev->data.tx_status.excessive_retries, 170 ev->data.tx_status.status.excessive_retries,
171 ev->data.tx_status.retry_count); 171 ev->data.tx_status.status.retry_count);
172 break; 172 break;
173 case RC_PID_EVENT_TYPE_RATE_CHANGE: 173 case RC_PID_EVENT_TYPE_RATE_CHANGE:
174 p += snprintf(pb + p, length - p, "rate_change %d %d", 174 p += snprintf(pb + p, length - p, "rate_change %d %d",
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0941e5d6a522..a3643fd86af9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -77,6 +77,134 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status,
77 return 0; 77 return 0;
78} 78}
79 79
80static int
81ieee80211_rx_radiotap_len(struct ieee80211_local *local,
82 struct ieee80211_rx_status *status)
83{
84 int len;
85
86 /* always present fields */
87 len = sizeof(struct ieee80211_radiotap_header) + 9;
88
89 if (status->flag & RX_FLAG_TSFT)
90 len += 8;
91 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
92 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
93 len += 1;
94 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
95 len += 1;
96
97 if (len & 1) /* padding for RX_FLAGS if necessary */
98 len++;
99
100 /* make sure radiotap starts at a naturally aligned address */
101 if (len % 8)
102 len = roundup(len, 8);
103
104 return len;
105}
106
107/**
108 * ieee80211_add_rx_radiotap_header - add radiotap header
109 *
110 * add a radiotap header containing all the fields which the hardware provided.
111 */
112static void
113ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
114 struct sk_buff *skb,
115 struct ieee80211_rx_status *status,
116 struct ieee80211_rate *rate,
117 int rtap_len)
118{
119 struct ieee80211_radiotap_header *rthdr;
120 unsigned char *pos;
121
122 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
123 memset(rthdr, 0, rtap_len);
124
125 /* radiotap header, set always present flags */
126 rthdr->it_present =
127 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
128 (1 << IEEE80211_RADIOTAP_RATE) |
129 (1 << IEEE80211_RADIOTAP_CHANNEL) |
130 (1 << IEEE80211_RADIOTAP_ANTENNA) |
131 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
132 rthdr->it_len = cpu_to_le16(rtap_len);
133
134 pos = (unsigned char *)(rthdr+1);
135
136 /* the order of the following fields is important */
137
138 /* IEEE80211_RADIOTAP_TSFT */
139 if (status->flag & RX_FLAG_TSFT) {
140 *(__le64 *)pos = cpu_to_le64(status->mactime);
141 rthdr->it_present |=
142 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
143 pos += 8;
144 }
145
146 /* IEEE80211_RADIOTAP_FLAGS */
147 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
148 *pos |= IEEE80211_RADIOTAP_F_FCS;
149 pos++;
150
151 /* IEEE80211_RADIOTAP_RATE */
152 *pos = rate->bitrate / 5;
153 pos++;
154
155 /* IEEE80211_RADIOTAP_CHANNEL */
156 *(__le16 *)pos = cpu_to_le16(status->freq);
157 pos += 2;
158 if (status->band == IEEE80211_BAND_5GHZ)
159 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
160 IEEE80211_CHAN_5GHZ);
161 else
162 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN |
163 IEEE80211_CHAN_2GHZ);
164 pos += 2;
165
166 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
167 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
168 *pos = status->signal;
169 rthdr->it_present |=
170 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
171 pos++;
172 }
173
174 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
175 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
176 *pos = status->noise;
177 rthdr->it_present |=
178 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
179 pos++;
180 }
181
182 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
183
184 /* IEEE80211_RADIOTAP_ANTENNA */
185 *pos = status->antenna;
186 pos++;
187
188 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
189 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
190 *pos = status->signal;
191 rthdr->it_present |=
192 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
193 pos++;
194 }
195
196 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
197
198 /* IEEE80211_RADIOTAP_RX_FLAGS */
199 /* ensure 2 byte alignment for the 2 byte field as required */
200 if ((pos - (unsigned char *)rthdr) & 1)
201 pos++;
202 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
203 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
204 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
205 pos += 2;
206}
207
80/* 208/*
81 * This function copies a received frame to all monitor interfaces and 209 * This function copies a received frame to all monitor interfaces and
82 * returns a cleaned-up SKB that no longer includes the FCS nor the 210 * returns a cleaned-up SKB that no longer includes the FCS nor the
@@ -89,17 +217,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
89{ 217{
90 struct ieee80211_sub_if_data *sdata; 218 struct ieee80211_sub_if_data *sdata;
91 int needed_headroom = 0; 219 int needed_headroom = 0;
92 struct ieee80211_radiotap_header *rthdr;
93 __le64 *rttsft = NULL;
94 struct ieee80211_rtap_fixed_data {
95 u8 flags;
96 u8 rate;
97 __le16 chan_freq;
98 __le16 chan_flags;
99 u8 antsignal;
100 u8 padding_for_rxflags;
101 __le16 rx_flags;
102 } __attribute__ ((packed)) *rtfixed;
103 struct sk_buff *skb, *skb2; 220 struct sk_buff *skb, *skb2;
104 struct net_device *prev_dev = NULL; 221 struct net_device *prev_dev = NULL;
105 int present_fcs_len = 0; 222 int present_fcs_len = 0;
@@ -116,8 +233,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
116 if (status->flag & RX_FLAG_RADIOTAP) 233 if (status->flag & RX_FLAG_RADIOTAP)
117 rtap_len = ieee80211_get_radiotap_len(origskb->data); 234 rtap_len = ieee80211_get_radiotap_len(origskb->data);
118 else 235 else
119 /* room for radiotap header, always present fields and TSFT */ 236 /* room for the radiotap header based on driver features */
120 needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8; 237 needed_headroom = ieee80211_rx_radiotap_len(local, status);
121 238
122 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 239 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
123 present_fcs_len = FCS_LEN; 240 present_fcs_len = FCS_LEN;
@@ -163,55 +280,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
163 } 280 }
164 281
165 /* if necessary, prepend radiotap information */ 282 /* if necessary, prepend radiotap information */
166 if (!(status->flag & RX_FLAG_RADIOTAP)) { 283 if (!(status->flag & RX_FLAG_RADIOTAP))
167 rtfixed = (void *) skb_push(skb, sizeof(*rtfixed)); 284 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
168 rtap_len = sizeof(*rthdr) + sizeof(*rtfixed); 285 needed_headroom);
169 if (status->flag & RX_FLAG_TSFT) {
170 rttsft = (void *) skb_push(skb, sizeof(*rttsft));
171 rtap_len += 8;
172 }
173 rthdr = (void *) skb_push(skb, sizeof(*rthdr));
174 memset(rthdr, 0, sizeof(*rthdr));
175 memset(rtfixed, 0, sizeof(*rtfixed));
176 rthdr->it_present =
177 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
178 (1 << IEEE80211_RADIOTAP_RATE) |
179 (1 << IEEE80211_RADIOTAP_CHANNEL) |
180 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |
181 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
182 rtfixed->flags = 0;
183 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
184 rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS;
185
186 if (rttsft) {
187 *rttsft = cpu_to_le64(status->mactime);
188 rthdr->it_present |=
189 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
190 }
191
192 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
193 rtfixed->rx_flags = 0;
194 if (status->flag &
195 (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
196 rtfixed->rx_flags |=
197 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
198
199 rtfixed->rate = rate->bitrate / 5;
200
201 rtfixed->chan_freq = cpu_to_le16(status->freq);
202
203 if (status->band == IEEE80211_BAND_5GHZ)
204 rtfixed->chan_flags =
205 cpu_to_le16(IEEE80211_CHAN_OFDM |
206 IEEE80211_CHAN_5GHZ);
207 else
208 rtfixed->chan_flags =
209 cpu_to_le16(IEEE80211_CHAN_DYN |
210 IEEE80211_CHAN_2GHZ);
211
212 rtfixed->antsignal = status->ssi;
213 rthdr->it_len = cpu_to_le16(rtap_len);
214 }
215 286
216 skb_reset_mac_header(skb); 287 skb_reset_mac_header(skb);
217 skb->ip_summed = CHECKSUM_UNNECESSARY; 288 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -275,11 +346,6 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
275 } 346 }
276 } 347 }
277 348
278 I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
279 /* only a debug counter, sta might not be assigned properly yet */
280 if (rx->sta)
281 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
282
283 rx->queue = tid; 349 rx->queue = tid;
284 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 350 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
285 * For now, set skb->priority to 0 for other cases. */ 351 * For now, set skb->priority to 0 for other cases. */
@@ -321,51 +387,9 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
321} 387}
322 388
323 389
324static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
325 struct sk_buff *skb,
326 struct ieee80211_rx_status *status,
327 struct ieee80211_rate *rate)
328{
329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
330 u32 load = 0, hdrtime;
331
332 /* Estimate total channel use caused by this frame */
333
334 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
335 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
336
337 if (status->band == IEEE80211_BAND_5GHZ ||
338 (status->band == IEEE80211_BAND_5GHZ &&
339 rate->flags & IEEE80211_RATE_ERP_G))
340 hdrtime = CHAN_UTIL_HDR_SHORT;
341 else
342 hdrtime = CHAN_UTIL_HDR_LONG;
343
344 load = hdrtime;
345 if (!is_multicast_ether_addr(hdr->addr1))
346 load += hdrtime;
347
348 /* TODO: optimise again */
349 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
350
351 /* Divide channel_use by 8 to avoid wrapping around the counter */
352 load >>= CHAN_UTIL_SHIFT;
353
354 return load;
355}
356
357/* rx handlers */ 390/* rx handlers */
358 391
359static ieee80211_rx_result 392static ieee80211_rx_result
360ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx)
361{
362 if (rx->sta)
363 rx->sta->channel_use_raw += rx->load;
364 rx->sdata->channel_use_raw += rx->load;
365 return RX_CONTINUE;
366}
367
368static ieee80211_rx_result
369ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) 393ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
370{ 394{
371 struct ieee80211_local *local = rx->local; 395 struct ieee80211_local *local = rx->local;
@@ -484,7 +508,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
484 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && 508 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL &&
485 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && 509 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
486 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 510 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
487 (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { 511 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
488 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && 512 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) &&
489 !(rx->fc & IEEE80211_FCTL_TODS) && 513 !(rx->fc & IEEE80211_FCTL_TODS) &&
490 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 514 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
@@ -635,8 +659,7 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
635 659
636 if (sdata->bss) 660 if (sdata->bss)
637 atomic_inc(&sdata->bss->num_sta_ps); 661 atomic_inc(&sdata->bss->num_sta_ps);
638 sta->flags |= WLAN_STA_PS; 662 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
639 sta->flags &= ~WLAN_STA_PSPOLL;
640#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 663#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
641 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 664 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
642 dev->name, print_mac(mac, sta->addr), sta->aid); 665 dev->name, print_mac(mac, sta->addr), sta->aid);
@@ -649,7 +672,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
649 struct sk_buff *skb; 672 struct sk_buff *skb;
650 int sent = 0; 673 int sent = 0;
651 struct ieee80211_sub_if_data *sdata; 674 struct ieee80211_sub_if_data *sdata;
652 struct ieee80211_tx_packet_data *pkt_data; 675 struct ieee80211_tx_info *info;
653 DECLARE_MAC_BUF(mac); 676 DECLARE_MAC_BUF(mac);
654 677
655 sdata = sta->sdata; 678 sdata = sta->sdata;
@@ -657,7 +680,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
657 if (sdata->bss) 680 if (sdata->bss)
658 atomic_dec(&sdata->bss->num_sta_ps); 681 atomic_dec(&sdata->bss->num_sta_ps);
659 682
660 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL); 683 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
661 684
662 if (!skb_queue_empty(&sta->ps_tx_buf)) 685 if (!skb_queue_empty(&sta->ps_tx_buf))
663 sta_info_clear_tim_bit(sta); 686 sta_info_clear_tim_bit(sta);
@@ -669,13 +692,13 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
669 692
670 /* Send all buffered frames to the station */ 693 /* Send all buffered frames to the station */
671 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 694 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
672 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 695 info = IEEE80211_SKB_CB(skb);
673 sent++; 696 sent++;
674 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 697 info->flags |= IEEE80211_TX_CTL_REQUEUE;
675 dev_queue_xmit(skb); 698 dev_queue_xmit(skb);
676 } 699 }
677 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 700 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
678 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 701 info = IEEE80211_SKB_CB(skb);
679 local->total_ps_buffered--; 702 local->total_ps_buffered--;
680 sent++; 703 sent++;
681#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 704#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -683,7 +706,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
683 "since STA not sleeping anymore\n", dev->name, 706 "since STA not sleeping anymore\n", dev->name,
684 print_mac(mac, sta->addr), sta->aid); 707 print_mac(mac, sta->addr), sta->aid);
685#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 708#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
686 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 709 info->flags |= IEEE80211_TX_CTL_REQUEUE;
687 dev_queue_xmit(skb); 710 dev_queue_xmit(skb);
688 } 711 }
689 712
@@ -725,16 +748,17 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
725 748
726 sta->rx_fragments++; 749 sta->rx_fragments++;
727 sta->rx_bytes += rx->skb->len; 750 sta->rx_bytes += rx->skb->len;
728 sta->last_rssi = rx->status->ssi;
729 sta->last_signal = rx->status->signal; 751 sta->last_signal = rx->status->signal;
752 sta->last_qual = rx->status->qual;
730 sta->last_noise = rx->status->noise; 753 sta->last_noise = rx->status->noise;
731 754
732 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { 755 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) {
733 /* Change STA power saving mode only in the end of a frame 756 /* Change STA power saving mode only in the end of a frame
734 * exchange sequence */ 757 * exchange sequence */
735 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) 758 if (test_sta_flags(sta, WLAN_STA_PS) &&
759 !(rx->fc & IEEE80211_FCTL_PM))
736 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); 760 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
737 else if (!(sta->flags & WLAN_STA_PS) && 761 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
738 (rx->fc & IEEE80211_FCTL_PM)) 762 (rx->fc & IEEE80211_FCTL_PM))
739 ap_sta_ps_start(dev, sta); 763 ap_sta_ps_start(dev, sta);
740 } 764 }
@@ -988,7 +1012,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
988 * Tell TX path to send one frame even though the STA may 1012 * Tell TX path to send one frame even though the STA may
989 * still remain is PS mode after this frame exchange. 1013 * still remain is PS mode after this frame exchange.
990 */ 1014 */
991 rx->sta->flags |= WLAN_STA_PSPOLL; 1015 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
992 1016
993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1017#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
994 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 1018 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
@@ -1051,7 +1075,8 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1051static int 1075static int
1052ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1076ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1053{ 1077{
1054 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) { 1078 if (unlikely(!rx->sta ||
1079 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED))) {
1055#ifdef CONFIG_MAC80211_DEBUG 1080#ifdef CONFIG_MAC80211_DEBUG
1056 if (net_ratelimit()) 1081 if (net_ratelimit())
1057 printk(KERN_DEBUG "%s: dropped frame " 1082 printk(KERN_DEBUG "%s: dropped frame "
@@ -1713,7 +1738,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1713typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *); 1738typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *);
1714static ieee80211_rx_handler ieee80211_rx_handlers[] = 1739static ieee80211_rx_handler ieee80211_rx_handlers[] =
1715{ 1740{
1716 ieee80211_rx_h_if_stats,
1717 ieee80211_rx_h_passive_scan, 1741 ieee80211_rx_h_passive_scan,
1718 ieee80211_rx_h_check, 1742 ieee80211_rx_h_check,
1719 ieee80211_rx_h_decrypt, 1743 ieee80211_rx_h_decrypt,
@@ -1872,7 +1896,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1872static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1896static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1873 struct sk_buff *skb, 1897 struct sk_buff *skb,
1874 struct ieee80211_rx_status *status, 1898 struct ieee80211_rx_status *status,
1875 u32 load,
1876 struct ieee80211_rate *rate) 1899 struct ieee80211_rate *rate)
1877{ 1900{
1878 struct ieee80211_local *local = hw_to_local(hw); 1901 struct ieee80211_local *local = hw_to_local(hw);
@@ -1891,7 +1914,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1891 rx.local = local; 1914 rx.local = local;
1892 1915
1893 rx.status = status; 1916 rx.status = status;
1894 rx.load = load;
1895 rx.rate = rate; 1917 rx.rate = rate;
1896 rx.fc = le16_to_cpu(hdr->frame_control); 1918 rx.fc = le16_to_cpu(hdr->frame_control);
1897 type = rx.fc & IEEE80211_FCTL_FTYPE; 1919 type = rx.fc & IEEE80211_FCTL_FTYPE;
@@ -2000,7 +2022,6 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2000 struct ieee80211_rx_status status; 2022 struct ieee80211_rx_status status;
2001 u16 head_seq_num, buf_size; 2023 u16 head_seq_num, buf_size;
2002 int index; 2024 int index;
2003 u32 pkt_load;
2004 struct ieee80211_supported_band *sband; 2025 struct ieee80211_supported_band *sband;
2005 struct ieee80211_rate *rate; 2026 struct ieee80211_rate *rate;
2006 2027
@@ -2035,12 +2056,9 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2035 sizeof(status)); 2056 sizeof(status));
2036 sband = local->hw.wiphy->bands[status.band]; 2057 sband = local->hw.wiphy->bands[status.band];
2037 rate = &sband->bitrates[status.rate_idx]; 2058 rate = &sband->bitrates[status.rate_idx];
2038 pkt_load = ieee80211_rx_load_stats(local,
2039 tid_agg_rx->reorder_buf[index],
2040 &status, rate);
2041 __ieee80211_rx_handle_packet(hw, 2059 __ieee80211_rx_handle_packet(hw,
2042 tid_agg_rx->reorder_buf[index], 2060 tid_agg_rx->reorder_buf[index],
2043 &status, pkt_load, rate); 2061 &status, rate);
2044 tid_agg_rx->stored_mpdu_num--; 2062 tid_agg_rx->stored_mpdu_num--;
2045 tid_agg_rx->reorder_buf[index] = NULL; 2063 tid_agg_rx->reorder_buf[index] = NULL;
2046 } 2064 }
@@ -2082,11 +2100,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2082 sizeof(status)); 2100 sizeof(status));
2083 sband = local->hw.wiphy->bands[status.band]; 2101 sband = local->hw.wiphy->bands[status.band];
2084 rate = &sband->bitrates[status.rate_idx]; 2102 rate = &sband->bitrates[status.rate_idx];
2085 pkt_load = ieee80211_rx_load_stats(local,
2086 tid_agg_rx->reorder_buf[index],
2087 &status, rate);
2088 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2103 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2089 &status, pkt_load, rate); 2104 &status, rate);
2090 tid_agg_rx->stored_mpdu_num--; 2105 tid_agg_rx->stored_mpdu_num--;
2091 tid_agg_rx->reorder_buf[index] = NULL; 2106 tid_agg_rx->reorder_buf[index] = NULL;
2092 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2107 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -2165,7 +2180,6 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2165 struct ieee80211_rx_status *status) 2180 struct ieee80211_rx_status *status)
2166{ 2181{
2167 struct ieee80211_local *local = hw_to_local(hw); 2182 struct ieee80211_local *local = hw_to_local(hw);
2168 u32 pkt_load;
2169 struct ieee80211_rate *rate = NULL; 2183 struct ieee80211_rate *rate = NULL;
2170 struct ieee80211_supported_band *sband; 2184 struct ieee80211_supported_band *sband;
2171 2185
@@ -2205,11 +2219,8 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2205 return; 2219 return;
2206 } 2220 }
2207 2221
2208 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2209 local->channel_use_raw += pkt_load;
2210
2211 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2222 if (!ieee80211_rx_reorder_ampdu(local, skb))
2212 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate); 2223 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2213 2224
2214 rcu_read_unlock(); 2225 rcu_read_unlock();
2215} 2226}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7d4fe4a52929..c24770cb02c5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -202,14 +202,12 @@ void sta_info_destroy(struct sta_info *sta)
202 dev_kfree_skb_any(skb); 202 dev_kfree_skb_any(skb);
203 203
204 for (i = 0; i < STA_TID_NUM; i++) { 204 for (i = 0; i < STA_TID_NUM; i++) {
205 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 205 spin_lock_bh(&sta->lock);
206 if (sta->ampdu_mlme.tid_rx[i]) 206 if (sta->ampdu_mlme.tid_rx[i])
207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); 207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
208 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
209 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
210 if (sta->ampdu_mlme.tid_tx[i]) 208 if (sta->ampdu_mlme.tid_tx[i])
211 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); 209 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
212 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 210 spin_unlock_bh(&sta->lock);
213 } 211 }
214 212
215 __sta_info_free(local, sta); 213 __sta_info_free(local, sta);
@@ -236,6 +234,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
236 if (!sta) 234 if (!sta)
237 return NULL; 235 return NULL;
238 236
237 spin_lock_init(&sta->lock);
238
239 memcpy(sta->addr, addr, ETH_ALEN); 239 memcpy(sta->addr, addr, ETH_ALEN);
240 sta->local = local; 240 sta->local = local;
241 sta->sdata = sdata; 241 sta->sdata = sdata;
@@ -249,15 +249,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
249 return NULL; 249 return NULL;
250 } 250 }
251 251
252 spin_lock_init(&sta->ampdu_mlme.ampdu_rx);
253 spin_lock_init(&sta->ampdu_mlme.ampdu_tx);
254 for (i = 0; i < STA_TID_NUM; i++) { 252 for (i = 0; i < STA_TID_NUM; i++) {
255 /* timer_to_tid must be initialized with identity mapping to 253 /* timer_to_tid must be initialized with identity mapping to
256 * enable session_timer's data differentiation. refer to 254 * enable session_timer's data differentiation. refer to
257 * sta_rx_agg_session_timer_expired for useage */ 255 * sta_rx_agg_session_timer_expired for useage */
258 sta->timer_to_tid[i] = i; 256 sta->timer_to_tid[i] = i;
259 /* tid to tx queue: initialize according to HW (0 is valid) */ 257 /* tid to tx queue: initialize according to HW (0 is valid) */
260 sta->tid_to_tx_q[i] = local->hw.queues; 258 sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw);
261 /* rx */ 259 /* rx */
262 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; 260 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
263 sta->ampdu_mlme.tid_rx[i] = NULL; 261 sta->ampdu_mlme.tid_rx[i] = NULL;
@@ -276,7 +274,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
276 274
277#ifdef CONFIG_MAC80211_MESH 275#ifdef CONFIG_MAC80211_MESH
278 sta->plink_state = PLINK_LISTEN; 276 sta->plink_state = PLINK_LISTEN;
279 spin_lock_init(&sta->plink_lock);
280 init_timer(&sta->plink_timer); 277 init_timer(&sta->plink_timer);
281#endif 278#endif
282 279
@@ -437,8 +434,7 @@ void __sta_info_unlink(struct sta_info **sta)
437 434
438 list_del(&(*sta)->list); 435 list_del(&(*sta)->list);
439 436
440 if ((*sta)->flags & WLAN_STA_PS) { 437 if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) {
441 (*sta)->flags &= ~WLAN_STA_PS;
442 if (sdata->bss) 438 if (sdata->bss)
443 atomic_dec(&sdata->bss->num_sta_ps); 439 atomic_dec(&sdata->bss->num_sta_ps);
444 __sta_info_clear_tim_bit(sdata->bss, *sta); 440 __sta_info_clear_tim_bit(sdata->bss, *sta);
@@ -515,20 +511,20 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local,
515 struct sta_info *sta, 511 struct sta_info *sta,
516 struct sk_buff *skb) 512 struct sk_buff *skb)
517{ 513{
518 struct ieee80211_tx_packet_data *pkt_data; 514 struct ieee80211_tx_info *info;
519 int timeout; 515 int timeout;
520 516
521 if (!skb) 517 if (!skb)
522 return 0; 518 return 0;
523 519
524 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 520 info = IEEE80211_SKB_CB(skb);
525 521
526 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 522 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
527 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / 523 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 /
528 15625) * HZ; 524 15625) * HZ;
529 if (timeout < STA_TX_BUFFER_EXPIRE) 525 if (timeout < STA_TX_BUFFER_EXPIRE)
530 timeout = STA_TX_BUFFER_EXPIRE; 526 timeout = STA_TX_BUFFER_EXPIRE;
531 return time_after(jiffies, pkt_data->jiffies + timeout); 527 return time_after(jiffies, info->control.jiffies + timeout);
532} 528}
533 529
534 530
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index f8c95bc9659c..95753f860acf 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -32,7 +32,7 @@
32 * @WLAN_STA_WDS: Station is one of our WDS peers. 32 * @WLAN_STA_WDS: Station is one of our WDS peers.
33 * @WLAN_STA_PSPOLL: Station has just PS-polled us. 33 * @WLAN_STA_PSPOLL: Station has just PS-polled us.
34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the 34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
35 * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 */ 37 */
38enum ieee80211_sta_info_flags { 38enum ieee80211_sta_info_flags {
@@ -129,23 +129,19 @@ enum plink_state {
129 * 129 *
130 * @tid_state_rx: TID's state in Rx session state machine. 130 * @tid_state_rx: TID's state in Rx session state machine.
131 * @tid_rx: aggregation info for Rx per TID 131 * @tid_rx: aggregation info for Rx per TID
132 * @ampdu_rx: for locking sections in aggregation Rx flow
133 * @tid_state_tx: TID's state in Tx session state machine. 132 * @tid_state_tx: TID's state in Tx session state machine.
134 * @tid_tx: aggregation info for Tx per TID 133 * @tid_tx: aggregation info for Tx per TID
135 * @addba_req_num: number of times addBA request has been sent. 134 * @addba_req_num: number of times addBA request has been sent.
136 * @ampdu_tx: for locking sectionsi in aggregation Tx flow
137 * @dialog_token_allocator: dialog token enumerator for each new session; 135 * @dialog_token_allocator: dialog token enumerator for each new session;
138 */ 136 */
139struct sta_ampdu_mlme { 137struct sta_ampdu_mlme {
140 /* rx */ 138 /* rx */
141 u8 tid_state_rx[STA_TID_NUM]; 139 u8 tid_state_rx[STA_TID_NUM];
142 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 140 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
143 spinlock_t ampdu_rx;
144 /* tx */ 141 /* tx */
145 u8 tid_state_tx[STA_TID_NUM]; 142 u8 tid_state_tx[STA_TID_NUM];
146 struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; 143 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
147 u8 addba_req_num[STA_TID_NUM]; 144 u8 addba_req_num[STA_TID_NUM];
148 spinlock_t ampdu_tx;
149 u8 dialog_token_allocator; 145 u8 dialog_token_allocator;
150}; 146};
151 147
@@ -177,6 +173,8 @@ struct sta_ampdu_mlme {
177 * @rx_bytes: Number of bytes received from this STA 173 * @rx_bytes: Number of bytes received from this STA
178 * @supp_rates: Bitmap of supported rates (per band) 174 * @supp_rates: Bitmap of supported rates (per band)
179 * @ht_info: HT capabilities of this STA 175 * @ht_info: HT capabilities of this STA
176 * @lock: used for locking all fields that require locking, see comments
177 * in the header file.
180 */ 178 */
181struct sta_info { 179struct sta_info {
182 /* General information, mostly static */ 180 /* General information, mostly static */
@@ -187,6 +185,7 @@ struct sta_info {
187 struct ieee80211_key *key; 185 struct ieee80211_key *key;
188 struct rate_control_ref *rate_ctrl; 186 struct rate_control_ref *rate_ctrl;
189 void *rate_ctrl_priv; 187 void *rate_ctrl_priv;
188 spinlock_t lock;
190 struct ieee80211_ht_info ht_info; 189 struct ieee80211_ht_info ht_info;
191 u64 supp_rates[IEEE80211_NUM_BANDS]; 190 u64 supp_rates[IEEE80211_NUM_BANDS];
192 u8 addr[ETH_ALEN]; 191 u8 addr[ETH_ALEN];
@@ -199,7 +198,7 @@ struct sta_info {
199 */ 198 */
200 u8 pin_status; 199 u8 pin_status;
201 200
202 /* frequently updated information, needs locking? */ 201 /* frequently updated information, locked with lock spinlock */
203 u32 flags; 202 u32 flags;
204 203
205 /* 204 /*
@@ -217,8 +216,8 @@ struct sta_info {
217 * from this STA */ 216 * from this STA */
218 unsigned long rx_fragments; /* number of received MPDUs */ 217 unsigned long rx_fragments; /* number of received MPDUs */
219 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ 218 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */
220 int last_rssi; /* RSSI of last received frame from this STA */
221 int last_signal; /* signal of last received frame from this STA */ 219 int last_signal; /* signal of last received frame from this STA */
220 int last_qual; /* qual of last received frame from this STA */
222 int last_noise; /* noise of last received frame from this STA */ 221 int last_noise; /* noise of last received frame from this STA */
223 /* last received seq/frag number from this STA (per RX queue) */ 222 /* last received seq/frag number from this STA (per RX queue) */
224 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 223 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
@@ -246,12 +245,8 @@ struct sta_info {
246 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; 245 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
247#endif 246#endif
248 247
249 /* Debug counters, no locking doesn't matter */
250 int channel_use;
251 int channel_use_raw;
252
253 /* 248 /*
254 * Aggregation information, comes with own locking. 249 * Aggregation information, locked with lock.
255 */ 250 */
256 struct sta_ampdu_mlme ampdu_mlme; 251 struct sta_ampdu_mlme ampdu_mlme;
257 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */ 252 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */
@@ -270,9 +265,6 @@ struct sta_info {
270 enum plink_state plink_state; 265 enum plink_state plink_state;
271 u32 plink_timeout; 266 u32 plink_timeout;
272 struct timer_list plink_timer; 267 struct timer_list plink_timer;
273 spinlock_t plink_lock; /* For peer_state reads / updates and other
274 updates in the structure. Ensures robust
275 transitions for the peerlink FSM */
276#endif 268#endif
277 269
278#ifdef CONFIG_MAC80211_DEBUGFS 270#ifdef CONFIG_MAC80211_DEBUGFS
@@ -299,6 +291,64 @@ static inline enum plink_state sta_plink_state(struct sta_info *sta)
299 return PLINK_LISTEN; 291 return PLINK_LISTEN;
300} 292}
301 293
294static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
295{
296 spin_lock_bh(&sta->lock);
297 sta->flags |= flags;
298 spin_unlock_bh(&sta->lock);
299}
300
301static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
302{
303 spin_lock_bh(&sta->lock);
304 sta->flags &= ~flags;
305 spin_unlock_bh(&sta->lock);
306}
307
308static inline void set_and_clear_sta_flags(struct sta_info *sta,
309 const u32 set, const u32 clear)
310{
311 spin_lock_bh(&sta->lock);
312 sta->flags |= set;
313 sta->flags &= ~clear;
314 spin_unlock_bh(&sta->lock);
315}
316
317static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
318{
319 u32 ret;
320
321 spin_lock_bh(&sta->lock);
322 ret = sta->flags & flags;
323 spin_unlock_bh(&sta->lock);
324
325 return ret;
326}
327
328static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
329 const u32 flags)
330{
331 u32 ret;
332
333 spin_lock_bh(&sta->lock);
334 ret = sta->flags & flags;
335 sta->flags &= ~flags;
336 spin_unlock_bh(&sta->lock);
337
338 return ret;
339}
340
341static inline u32 get_sta_flags(struct sta_info *sta)
342{
343 u32 ret;
344
345 spin_lock_bh(&sta->lock);
346 ret = sta->flags;
347 spin_unlock_bh(&sta->lock);
348
349 return ret;
350}
351
302 352
303/* Maximum number of concurrently registered stations */ 353/* Maximum number of concurrently registered stations */
304#define MAX_STA_COUNT 2007 354#define MAX_STA_COUNT 2007
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 09093da24af6..a00cf1ea7719 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -6,25 +6,23 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9
10#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/bitops.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <asm/unaligned.h>
13 14
14#include <net/mac80211.h> 15#include <net/mac80211.h>
15#include "key.h" 16#include "key.h"
16#include "tkip.h" 17#include "tkip.h"
17#include "wep.h" 18#include "wep.h"
18 19
19
20/* TKIP key mixing functions */
21
22
23#define PHASE1_LOOP_COUNT 8 20#define PHASE1_LOOP_COUNT 8
24 21
25 22/*
26/* 2-byte by 2-byte subset of the full AES S-box table; second part of this 23 * 2-byte by 2-byte subset of the full AES S-box table; second part of this
27 * table is identical to first part but byte-swapped */ 24 * table is identical to first part but byte-swapped
25 */
28static const u16 tkip_sbox[256] = 26static const u16 tkip_sbox[256] =
29{ 27{
30 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 28 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
@@ -61,84 +59,48 @@ static const u16 tkip_sbox[256] =
61 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, 59 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
62}; 60};
63 61
64 62static u16 tkipS(u16 val)
65static inline u16 Mk16(u8 x, u8 y)
66{
67 return ((u16) x << 8) | (u16) y;
68}
69
70
71static inline u8 Hi8(u16 v)
72{
73 return v >> 8;
74}
75
76
77static inline u8 Lo8(u16 v)
78{
79 return v & 0xff;
80}
81
82
83static inline u16 Hi16(u32 v)
84{ 63{
85 return v >> 16; 64 return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]);
86} 65}
87 66
88 67/*
89static inline u16 Lo16(u32 v) 68 * P1K := Phase1(TA, TK, TSC)
90{
91 return v & 0xffff;
92}
93
94
95static inline u16 RotR1(u16 v)
96{
97 return (v >> 1) | ((v & 0x0001) << 15);
98}
99
100
101static inline u16 tkip_S(u16 val)
102{
103 u16 a = tkip_sbox[Hi8(val)];
104
105 return tkip_sbox[Lo8(val)] ^ Hi8(a) ^ (Lo8(a) << 8);
106}
107
108
109
110/* P1K := Phase1(TA, TK, TSC)
111 * TA = transmitter address (48 bits) 69 * TA = transmitter address (48 bits)
112 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) 70 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits)
113 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) 71 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used)
114 * P1K: 80 bits 72 * P1K: 80 bits
115 */ 73 */
116static void tkip_mixing_phase1(const u8 *ta, const u8 *tk, u32 tsc_IV32, 74static void tkip_mixing_phase1(struct ieee80211_key *key, const u8 *ta,
117 u16 *p1k) 75 struct tkip_ctx *ctx, u32 tsc_IV32)
118{ 76{
119 int i, j; 77 int i, j;
78 const u8 *tk = &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY];
79 u16 *p1k = ctx->p1k;
120 80
121 p1k[0] = Lo16(tsc_IV32); 81 p1k[0] = tsc_IV32 & 0xFFFF;
122 p1k[1] = Hi16(tsc_IV32); 82 p1k[1] = tsc_IV32 >> 16;
123 p1k[2] = Mk16(ta[1], ta[0]); 83 p1k[2] = get_unaligned_le16(ta + 0);
124 p1k[3] = Mk16(ta[3], ta[2]); 84 p1k[3] = get_unaligned_le16(ta + 2);
125 p1k[4] = Mk16(ta[5], ta[4]); 85 p1k[4] = get_unaligned_le16(ta + 4);
126 86
127 for (i = 0; i < PHASE1_LOOP_COUNT; i++) { 87 for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
128 j = 2 * (i & 1); 88 j = 2 * (i & 1);
129 p1k[0] += tkip_S(p1k[4] ^ Mk16(tk[ 1 + j], tk[ 0 + j])); 89 p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
130 p1k[1] += tkip_S(p1k[0] ^ Mk16(tk[ 5 + j], tk[ 4 + j])); 90 p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
131 p1k[2] += tkip_S(p1k[1] ^ Mk16(tk[ 9 + j], tk[ 8 + j])); 91 p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
132 p1k[3] += tkip_S(p1k[2] ^ Mk16(tk[13 + j], tk[12 + j])); 92 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
133 p1k[4] += tkip_S(p1k[3] ^ Mk16(tk[ 1 + j], tk[ 0 + j])) + i; 93 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
134 } 94 }
95 ctx->initialized = 1;
135} 96}
136 97
137 98static void tkip_mixing_phase2(struct ieee80211_key *key, struct tkip_ctx *ctx,
138static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16, 99 u16 tsc_IV16, u8 *rc4key)
139 u8 *rc4key)
140{ 100{
141 u16 ppk[6]; 101 u16 ppk[6];
102 const u16 *p1k = ctx->p1k;
103 const u8 *tk = &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY];
142 int i; 104 int i;
143 105
144 ppk[0] = p1k[0]; 106 ppk[0] = p1k[0];
@@ -148,70 +110,51 @@ static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16,
148 ppk[4] = p1k[4]; 110 ppk[4] = p1k[4];
149 ppk[5] = p1k[4] + tsc_IV16; 111 ppk[5] = p1k[4] + tsc_IV16;
150 112
151 ppk[0] += tkip_S(ppk[5] ^ Mk16(tk[ 1], tk[ 0])); 113 ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
152 ppk[1] += tkip_S(ppk[0] ^ Mk16(tk[ 3], tk[ 2])); 114 ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
153 ppk[2] += tkip_S(ppk[1] ^ Mk16(tk[ 5], tk[ 4])); 115 ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
154 ppk[3] += tkip_S(ppk[2] ^ Mk16(tk[ 7], tk[ 6])); 116 ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
155 ppk[4] += tkip_S(ppk[3] ^ Mk16(tk[ 9], tk[ 8])); 117 ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
156 ppk[5] += tkip_S(ppk[4] ^ Mk16(tk[11], tk[10])); 118 ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
157 ppk[0] += RotR1(ppk[5] ^ Mk16(tk[13], tk[12])); 119 ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
158 ppk[1] += RotR1(ppk[0] ^ Mk16(tk[15], tk[14])); 120 ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
159 ppk[2] += RotR1(ppk[1]); 121 ppk[2] += ror16(ppk[1], 1);
160 ppk[3] += RotR1(ppk[2]); 122 ppk[3] += ror16(ppk[2], 1);
161 ppk[4] += RotR1(ppk[3]); 123 ppk[4] += ror16(ppk[3], 1);
162 ppk[5] += RotR1(ppk[4]); 124 ppk[5] += ror16(ppk[4], 1);
163 125
164 rc4key[0] = Hi8(tsc_IV16); 126 rc4key[0] = tsc_IV16 >> 8;
165 rc4key[1] = (Hi8(tsc_IV16) | 0x20) & 0x7f; 127 rc4key[1] = ((tsc_IV16 >> 8) | 0x20) & 0x7f;
166 rc4key[2] = Lo8(tsc_IV16); 128 rc4key[2] = tsc_IV16 & 0xFF;
167 rc4key[3] = Lo8((ppk[5] ^ Mk16(tk[1], tk[0])) >> 1); 129 rc4key[3] = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
168 130
169 for (i = 0; i < 6; i++) { 131 rc4key += 4;
170 rc4key[4 + 2 * i] = Lo8(ppk[i]); 132 for (i = 0; i < 6; i++)
171 rc4key[5 + 2 * i] = Hi8(ppk[i]); 133 put_unaligned_le16(ppk[i], rc4key + 2 * i);
172 }
173} 134}
174 135
175
176/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets 136/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
177 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of 137 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of
178 * the packet payload). */ 138 * the packet payload). */
179u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 139u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key,
180 u8 iv0, u8 iv1, u8 iv2) 140 u8 iv0, u8 iv1, u8 iv2)
181{ 141{
182 *pos++ = iv0; 142 *pos++ = iv0;
183 *pos++ = iv1; 143 *pos++ = iv1;
184 *pos++ = iv2; 144 *pos++ = iv2;
185 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; 145 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
186 *pos++ = key->u.tkip.iv32 & 0xff; 146 put_unaligned_le32(key->u.tkip.tx.iv32, pos);
187 *pos++ = (key->u.tkip.iv32 >> 8) & 0xff; 147 return pos + 4;
188 *pos++ = (key->u.tkip.iv32 >> 16) & 0xff;
189 *pos++ = (key->u.tkip.iv32 >> 24) & 0xff;
190 return pos;
191}
192
193
194void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
195 u16 *phase1key)
196{
197 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
198 key->u.tkip.iv32, phase1key);
199} 148}
200 149
201void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta, 150static void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
202 u8 *rc4key) 151 u8 *rc4key)
203{ 152{
204 /* Calculate per-packet key */ 153 /* Calculate per-packet key */
205 if (key->u.tkip.iv16 == 0 || !key->u.tkip.tx_initialized) { 154 if (key->u.tkip.tx.iv16 == 0 || !key->u.tkip.tx.initialized)
206 /* IV16 wrapped around - perform TKIP phase 1 */ 155 tkip_mixing_phase1(key, ta, &key->u.tkip.tx, key->u.tkip.tx.iv32);
207 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
208 key->u.tkip.iv32, key->u.tkip.p1k);
209 key->u.tkip.tx_initialized = 1;
210 }
211 156
212 tkip_mixing_phase2(key->u.tkip.p1k, 157 tkip_mixing_phase2(key, &key->u.tkip.tx, key->u.tkip.tx.iv16, rc4key);
213 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
214 key->u.tkip.iv16, rc4key);
215} 158}
216 159
217void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, 160void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
@@ -228,18 +171,16 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
228 u16 iv16; 171 u16 iv16;
229 u32 iv32; 172 u32 iv32;
230 173
231 iv16 = data[hdr_len] << 8; 174 iv16 = data[hdr_len + 2] | (data[hdr_len] << 8);
232 iv16 += data[hdr_len + 2]; 175 iv32 = get_unaligned_le32(data + hdr_len + 4);
233 iv32 = data[hdr_len + 4] | (data[hdr_len + 5] << 8) |
234 (data[hdr_len + 6] << 16) | (data[hdr_len + 7] << 24);
235 176
236#ifdef CONFIG_TKIP_DEBUG 177#ifdef CONFIG_TKIP_DEBUG
237 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", 178 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n",
238 iv16, iv32); 179 iv16, iv32);
239 180
240 if (iv32 != key->u.tkip.iv32) { 181 if (iv32 != key->u.tkip.tx.iv32) {
241 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", 182 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n",
242 iv32, key->u.tkip.iv32); 183 iv32, key->u.tkip.tx.iv32);
243 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " 184 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a "
244 "fragmented packet\n"); 185 "fragmented packet\n");
245 } 186 }
@@ -248,20 +189,15 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
248 /* Update the p1k only when the iv16 in the packet wraps around, this 189 /* Update the p1k only when the iv16 in the packet wraps around, this
249 * might occur after the wrap around of iv16 in the key in case of 190 * might occur after the wrap around of iv16 in the key in case of
250 * fragmented packets. */ 191 * fragmented packets. */
251 if (iv16 == 0 || !key->u.tkip.tx_initialized) { 192 if (iv16 == 0 || !key->u.tkip.tx.initialized)
252 /* IV16 wrapped around - perform TKIP phase 1 */ 193 tkip_mixing_phase1(key, ta, &key->u.tkip.tx, iv32);
253 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
254 iv32, key->u.tkip.p1k);
255 key->u.tkip.tx_initialized = 1;
256 }
257 194
258 if (type == IEEE80211_TKIP_P1_KEY) { 195 if (type == IEEE80211_TKIP_P1_KEY) {
259 memcpy(outkey, key->u.tkip.p1k, sizeof(u16) * 5); 196 memcpy(outkey, key->u.tkip.tx.p1k, sizeof(u16) * 5);
260 return; 197 return;
261 } 198 }
262 199
263 tkip_mixing_phase2(key->u.tkip.p1k, 200 tkip_mixing_phase2(key, &key->u.tkip.tx, iv16, outkey);
264 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, outkey);
265} 201}
266EXPORT_SYMBOL(ieee80211_get_tkip_key); 202EXPORT_SYMBOL(ieee80211_get_tkip_key);
267 203
@@ -281,7 +217,6 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
281 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 217 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
282} 218}
283 219
284
285/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the 220/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
286 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 221 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
287 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 222 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
@@ -302,7 +237,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
302 237
303 iv16 = (pos[0] << 8) | pos[2]; 238 iv16 = (pos[0] << 8) | pos[2];
304 keyid = pos[3]; 239 keyid = pos[3];
305 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 240 iv32 = get_unaligned_le32(pos + 4);
306 pos += 8; 241 pos += 8;
307#ifdef CONFIG_TKIP_DEBUG 242#ifdef CONFIG_TKIP_DEBUG
308 { 243 {
@@ -322,33 +257,31 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
322 if ((keyid >> 6) != key->conf.keyidx) 257 if ((keyid >> 6) != key->conf.keyidx)
323 return TKIP_DECRYPT_INVALID_KEYIDX; 258 return TKIP_DECRYPT_INVALID_KEYIDX;
324 259
325 if (key->u.tkip.rx_initialized[queue] && 260 if (key->u.tkip.rx[queue].initialized &&
326 (iv32 < key->u.tkip.iv32_rx[queue] || 261 (iv32 < key->u.tkip.rx[queue].iv32 ||
327 (iv32 == key->u.tkip.iv32_rx[queue] && 262 (iv32 == key->u.tkip.rx[queue].iv32 &&
328 iv16 <= key->u.tkip.iv16_rx[queue]))) { 263 iv16 <= key->u.tkip.rx[queue].iv16))) {
329#ifdef CONFIG_TKIP_DEBUG 264#ifdef CONFIG_TKIP_DEBUG
330 DECLARE_MAC_BUF(mac); 265 DECLARE_MAC_BUF(mac);
331 printk(KERN_DEBUG "TKIP replay detected for RX frame from " 266 printk(KERN_DEBUG "TKIP replay detected for RX frame from "
332 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", 267 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
333 print_mac(mac, ta), 268 print_mac(mac, ta),
334 iv32, iv16, key->u.tkip.iv32_rx[queue], 269 iv32, iv16, key->u.tkip.rx[queue].iv32,
335 key->u.tkip.iv16_rx[queue]); 270 key->u.tkip.rx[queue].iv16);
336#endif /* CONFIG_TKIP_DEBUG */ 271#endif /* CONFIG_TKIP_DEBUG */
337 return TKIP_DECRYPT_REPLAY; 272 return TKIP_DECRYPT_REPLAY;
338 } 273 }
339 274
340 if (only_iv) { 275 if (only_iv) {
341 res = TKIP_DECRYPT_OK; 276 res = TKIP_DECRYPT_OK;
342 key->u.tkip.rx_initialized[queue] = 1; 277 key->u.tkip.rx[queue].initialized = 1;
343 goto done; 278 goto done;
344 } 279 }
345 280
346 if (!key->u.tkip.rx_initialized[queue] || 281 if (!key->u.tkip.rx[queue].initialized ||
347 key->u.tkip.iv32_rx[queue] != iv32) { 282 key->u.tkip.rx[queue].iv32 != iv32) {
348 key->u.tkip.rx_initialized[queue] = 1;
349 /* IV16 wrapped around - perform TKIP phase 1 */ 283 /* IV16 wrapped around - perform TKIP phase 1 */
350 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], 284 tkip_mixing_phase1(key, ta, &key->u.tkip.rx[queue], iv32);
351 iv32, key->u.tkip.p1k_rx[queue]);
352#ifdef CONFIG_TKIP_DEBUG 285#ifdef CONFIG_TKIP_DEBUG
353 { 286 {
354 int i; 287 int i;
@@ -362,7 +295,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
362 printk("\n"); 295 printk("\n");
363 printk(KERN_DEBUG "TKIP decrypt: P1K="); 296 printk(KERN_DEBUG "TKIP decrypt: P1K=");
364 for (i = 0; i < 5; i++) 297 for (i = 0; i < 5; i++)
365 printk("%04x ", key->u.tkip.p1k_rx[queue][i]); 298 printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
366 printk("\n"); 299 printk("\n");
367 } 300 }
368#endif /* CONFIG_TKIP_DEBUG */ 301#endif /* CONFIG_TKIP_DEBUG */
@@ -377,13 +310,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
377 310
378 key->local->ops->update_tkip_key( 311 key->local->ops->update_tkip_key(
379 local_to_hw(key->local), &key->conf, 312 local_to_hw(key->local), &key->conf,
380 sta_addr, iv32, key->u.tkip.p1k_rx[queue]); 313 sta_addr, iv32, key->u.tkip.rx[queue].p1k);
381 } 314 }
382 } 315 }
383 316
384 tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], 317 tkip_mixing_phase2(key, &key->u.tkip.rx[queue], iv16, rc4key);
385 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
386 iv16, rc4key);
387#ifdef CONFIG_TKIP_DEBUG 318#ifdef CONFIG_TKIP_DEBUG
388 { 319 {
389 int i; 320 int i;
@@ -409,5 +340,3 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
409 340
410 return res; 341 return res;
411} 342}
412
413
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index b7c2ee763d9d..b890427fc959 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,12 +13,8 @@
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include "key.h" 14#include "key.h"
15 15
16u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key,
17 u8 iv0, u8 iv1, u8 iv2); 17 u8 iv0, u8 iv1, u8 iv2);
18void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
19 u16 *phase1key);
20void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
21 u8 *rc4key);
22void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
23 struct ieee80211_key *key, 19 struct ieee80211_key *key,
24 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1d7dd54aacef..1ad9e664f287 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -91,11 +91,12 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
91 int next_frag_len) 91 int next_frag_len)
92{ 92{
93 int rate, mrate, erp, dur, i; 93 int rate, mrate, erp, dur, i;
94 struct ieee80211_rate *txrate = tx->rate; 94 struct ieee80211_rate *txrate;
95 struct ieee80211_local *local = tx->local; 95 struct ieee80211_local *local = tx->local;
96 struct ieee80211_supported_band *sband; 96 struct ieee80211_supported_band *sband;
97 97
98 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 98 sband = local->hw.wiphy->bands[tx->channel->band];
99 txrate = &sband->bitrates[tx->rate_idx];
99 100
100 erp = 0; 101 erp = 0;
101 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 102 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -212,18 +213,6 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
212 return dur; 213 return dur;
213} 214}
214 215
215static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local,
216 int queue)
217{
218 return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
219}
220
221static inline int __ieee80211_queue_pending(const struct ieee80211_local *local,
222 int queue)
223{
224 return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]);
225}
226
227static int inline is_ieee80211_device(struct net_device *dev, 216static int inline is_ieee80211_device(struct net_device *dev,
228 struct net_device *master) 217 struct net_device *master)
229{ 218{
@@ -237,12 +226,12 @@ static ieee80211_tx_result
237ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 226ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
238{ 227{
239#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 228#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
240 struct sk_buff *skb = tx->skb; 229 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
241 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
242#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 230#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
231 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
243 u32 sta_flags; 232 u32 sta_flags;
244 233
245 if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) 234 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
246 return TX_CONTINUE; 235 return TX_CONTINUE;
247 236
248 if (unlikely(tx->local->sta_sw_scanning) && 237 if (unlikely(tx->local->sta_sw_scanning) &&
@@ -256,7 +245,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
256 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 245 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
257 return TX_CONTINUE; 246 return TX_CONTINUE;
258 247
259 sta_flags = tx->sta ? tx->sta->flags : 0; 248 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
260 249
261 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 250 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
262 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 251 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
@@ -347,6 +336,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
347static ieee80211_tx_result 336static ieee80211_tx_result
348ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 337ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
349{ 338{
339 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
340
350 /* 341 /*
351 * broadcast/multicast frame 342 * broadcast/multicast frame
352 * 343 *
@@ -382,7 +373,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
382 } 373 }
383 374
384 /* buffered in hardware */ 375 /* buffered in hardware */
385 tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; 376 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
386 377
387 return TX_CONTINUE; 378 return TX_CONTINUE;
388} 379}
@@ -391,6 +382,8 @@ static ieee80211_tx_result
391ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) 382ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
392{ 383{
393 struct sta_info *sta = tx->sta; 384 struct sta_info *sta = tx->sta;
385 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
386 u32 staflags;
394 DECLARE_MAC_BUF(mac); 387 DECLARE_MAC_BUF(mac);
395 388
396 if (unlikely(!sta || 389 if (unlikely(!sta ||
@@ -398,9 +391,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
398 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) 391 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
399 return TX_CONTINUE; 392 return TX_CONTINUE;
400 393
401 if (unlikely((sta->flags & WLAN_STA_PS) && 394 staflags = get_sta_flags(sta);
402 !(sta->flags & WLAN_STA_PSPOLL))) { 395
403 struct ieee80211_tx_packet_data *pkt_data; 396 if (unlikely((staflags & WLAN_STA_PS) &&
397 !(staflags & WLAN_STA_PSPOLL))) {
404#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 398#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
405 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 399 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
406 "before %d)\n", 400 "before %d)\n",
@@ -424,19 +418,18 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
424 if (skb_queue_empty(&sta->ps_tx_buf)) 418 if (skb_queue_empty(&sta->ps_tx_buf))
425 sta_info_set_tim_bit(sta); 419 sta_info_set_tim_bit(sta);
426 420
427 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; 421 info->control.jiffies = jiffies;
428 pkt_data->jiffies = jiffies;
429 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 422 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
430 return TX_QUEUED; 423 return TX_QUEUED;
431 } 424 }
432#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 425#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
433 else if (unlikely(sta->flags & WLAN_STA_PS)) { 426 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
434 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " 427 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
435 "set -> send frame\n", tx->dev->name, 428 "set -> send frame\n", tx->dev->name,
436 print_mac(mac, sta->addr)); 429 print_mac(mac, sta->addr));
437 } 430 }
438#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 431#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
439 sta->flags &= ~WLAN_STA_PSPOLL; 432 clear_sta_flags(sta, WLAN_STA_PSPOLL);
440 433
441 return TX_CONTINUE; 434 return TX_CONTINUE;
442} 435}
@@ -457,17 +450,18 @@ static ieee80211_tx_result
457ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 450ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
458{ 451{
459 struct ieee80211_key *key; 452 struct ieee80211_key *key;
453 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
460 u16 fc = tx->fc; 454 u16 fc = tx->fc;
461 455
462 if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 456 if (unlikely(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
463 tx->key = NULL; 457 tx->key = NULL;
464 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 458 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
465 tx->key = key; 459 tx->key = key;
466 else if ((key = rcu_dereference(tx->sdata->default_key))) 460 else if ((key = rcu_dereference(tx->sdata->default_key)))
467 tx->key = key; 461 tx->key = key;
468 else if (tx->sdata->drop_unencrypted && 462 else if (tx->sdata->drop_unencrypted &&
469 !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && 463 !(info->flags & IEEE80211_TX_CTL_EAPOL_FRAME) &&
470 !(tx->flags & IEEE80211_TX_INJECTED)) { 464 !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
471 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 465 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
472 return TX_DROP; 466 return TX_DROP;
473 } else 467 } else
@@ -496,7 +490,156 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
496 } 490 }
497 491
498 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 492 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
499 tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 493 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
494
495 return TX_CONTINUE;
496}
497
498static ieee80211_tx_result
499ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
500{
501 struct rate_selection rsel;
502 struct ieee80211_supported_band *sband;
503 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
504
505 sband = tx->local->hw.wiphy->bands[tx->channel->band];
506
507 if (likely(tx->rate_idx < 0)) {
508 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
509 tx->rate_idx = rsel.rate_idx;
510 if (unlikely(rsel.probe_idx >= 0)) {
511 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
512 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
513 info->control.alt_retry_rate_idx = tx->rate_idx;
514 tx->rate_idx = rsel.probe_idx;
515 } else
516 info->control.alt_retry_rate_idx = -1;
517
518 if (unlikely(tx->rate_idx < 0))
519 return TX_DROP;
520 } else
521 info->control.alt_retry_rate_idx = -1;
522
523 if (tx->sdata->bss_conf.use_cts_prot &&
524 (tx->flags & IEEE80211_TX_FRAGMENTED) && (rsel.nonerp_idx >= 0)) {
525 tx->last_frag_rate_idx = tx->rate_idx;
526 if (rsel.probe_idx >= 0)
527 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
528 else
529 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
530 tx->rate_idx = rsel.nonerp_idx;
531 info->tx_rate_idx = rsel.nonerp_idx;
532 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
533 } else {
534 tx->last_frag_rate_idx = tx->rate_idx;
535 info->tx_rate_idx = tx->rate_idx;
536 }
537 info->tx_rate_idx = tx->rate_idx;
538
539 return TX_CONTINUE;
540}
541
542static ieee80211_tx_result
543ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
544{
545 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
546 u16 fc = le16_to_cpu(hdr->frame_control);
547 u16 dur;
548 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
549 struct ieee80211_supported_band *sband;
550
551 sband = tx->local->hw.wiphy->bands[tx->channel->band];
552
553 if (tx->sta)
554 info->control.aid = tx->sta->aid;
555
556 if (!info->control.retry_limit) {
557 if (!is_multicast_ether_addr(hdr->addr1)) {
558 int len = min_t(int, tx->skb->len + FCS_LEN,
559 tx->local->fragmentation_threshold);
560 if (len > tx->local->rts_threshold
561 && tx->local->rts_threshold <
562 IEEE80211_MAX_RTS_THRESHOLD) {
563 info->flags |= IEEE80211_TX_CTL_USE_RTS_CTS;
564 info->flags |=
565 IEEE80211_TX_CTL_LONG_RETRY_LIMIT;
566 info->control.retry_limit =
567 tx->local->long_retry_limit;
568 } else {
569 info->control.retry_limit =
570 tx->local->short_retry_limit;
571 }
572 } else {
573 info->control.retry_limit = 1;
574 }
575 }
576
577 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
578 /* Do not use multiple retry rates when sending fragmented
579 * frames.
580 * TODO: The last fragment could still use multiple retry
581 * rates. */
582 info->control.alt_retry_rate_idx = -1;
583 }
584
585 /* Use CTS protection for unicast frames sent using extended rates if
586 * there are associated non-ERP stations and RTS/CTS is not configured
587 * for the frame. */
588 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
589 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_ERP_G) &&
590 (tx->flags & IEEE80211_TX_UNICAST) &&
591 tx->sdata->bss_conf.use_cts_prot &&
592 !(info->flags & IEEE80211_TX_CTL_USE_RTS_CTS))
593 info->flags |= IEEE80211_TX_CTL_USE_CTS_PROTECT;
594
595 /* Transmit data frames using short preambles if the driver supports
596 * short preambles at the selected rate and short preambles are
597 * available on the network at the current point in time. */
598 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
599 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
600 tx->sdata->bss_conf.use_short_preamble &&
601 (!tx->sta || test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))) {
602 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
603 }
604
605 /* Setup duration field for the first fragment of the frame. Duration
606 * for remaining fragments will be updated when they are being sent
607 * to low-level driver in ieee80211_tx(). */
608 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
609 (tx->flags & IEEE80211_TX_FRAGMENTED) ?
610 tx->extra_frag[0]->len : 0);
611 hdr->duration_id = cpu_to_le16(dur);
612
613 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
614 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
615 struct ieee80211_rate *rate;
616 s8 baserate = -1;
617 int idx;
618
619 /* Do not use multiple retry rates when using RTS/CTS */
620 info->control.alt_retry_rate_idx = -1;
621
622 /* Use min(data rate, max base rate) as CTS/RTS rate */
623 rate = &sband->bitrates[tx->rate_idx];
624
625 for (idx = 0; idx < sband->n_bitrates; idx++) {
626 if (sband->bitrates[idx].bitrate > rate->bitrate)
627 continue;
628 if (tx->sdata->basic_rates & BIT(idx) &&
629 (baserate < 0 ||
630 (sband->bitrates[baserate].bitrate
631 < sband->bitrates[idx].bitrate)))
632 baserate = idx;
633 }
634
635 if (baserate >= 0)
636 info->control.rts_cts_rate_idx = baserate;
637 else
638 info->control.rts_cts_rate_idx = 0;
639 }
640
641 if (tx->sta)
642 info->control.aid = tx->sta->aid;
500 643
501 return TX_CONTINUE; 644 return TX_CONTINUE;
502} 645}
@@ -515,6 +658,17 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
515 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 658 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
516 return TX_CONTINUE; 659 return TX_CONTINUE;
517 660
661 /*
662 * Warn when submitting a fragmented A-MPDU frame and drop it.
663 * This is an error and needs to be fixed elsewhere, but when
664 * done needs to take care of monitor interfaces (injection)
665 * etc.
666 */
667 if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU ||
668 skb_get_queue_mapping(tx->skb) >=
669 ieee80211_num_regular_queues(&tx->local->hw)))
670 return TX_DROP;
671
518 first = tx->skb; 672 first = tx->skb;
519 673
520 hdrlen = ieee80211_get_hdrlen(tx->fc); 674 hdrlen = ieee80211_get_hdrlen(tx->fc);
@@ -602,215 +756,22 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
602} 756}
603 757
604static ieee80211_tx_result 758static ieee80211_tx_result
605ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) 759ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
606{ 760{
607 struct rate_selection rsel; 761 int i;
608 struct ieee80211_supported_band *sband;
609
610 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
611
612 if (likely(!tx->rate)) {
613 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
614 tx->rate = rsel.rate;
615 if (unlikely(rsel.probe)) {
616 tx->control->flags |=
617 IEEE80211_TXCTL_RATE_CTRL_PROBE;
618 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
619 tx->control->alt_retry_rate = tx->rate;
620 tx->rate = rsel.probe;
621 } else
622 tx->control->alt_retry_rate = NULL;
623
624 if (!tx->rate)
625 return TX_DROP;
626 } else
627 tx->control->alt_retry_rate = NULL;
628
629 if (tx->sdata->bss_conf.use_cts_prot &&
630 (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) {
631 tx->last_frag_rate = tx->rate;
632 if (rsel.probe)
633 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
634 else
635 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
636 tx->rate = rsel.nonerp;
637 tx->control->tx_rate = rsel.nonerp;
638 tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
639 } else {
640 tx->last_frag_rate = tx->rate;
641 tx->control->tx_rate = tx->rate;
642 }
643 tx->control->tx_rate = tx->rate;
644
645 return TX_CONTINUE;
646}
647
648static ieee80211_tx_result
649ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
650{
651 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
652 u16 fc = le16_to_cpu(hdr->frame_control);
653 u16 dur;
654 struct ieee80211_tx_control *control = tx->control;
655
656 if (!control->retry_limit) {
657 if (!is_multicast_ether_addr(hdr->addr1)) {
658 if (tx->skb->len + FCS_LEN > tx->local->rts_threshold
659 && tx->local->rts_threshold <
660 IEEE80211_MAX_RTS_THRESHOLD) {
661 control->flags |=
662 IEEE80211_TXCTL_USE_RTS_CTS;
663 control->flags |=
664 IEEE80211_TXCTL_LONG_RETRY_LIMIT;
665 control->retry_limit =
666 tx->local->long_retry_limit;
667 } else {
668 control->retry_limit =
669 tx->local->short_retry_limit;
670 }
671 } else {
672 control->retry_limit = 1;
673 }
674 }
675
676 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
677 /* Do not use multiple retry rates when sending fragmented
678 * frames.
679 * TODO: The last fragment could still use multiple retry
680 * rates. */
681 control->alt_retry_rate = NULL;
682 }
683
684 /* Use CTS protection for unicast frames sent using extended rates if
685 * there are associated non-ERP stations and RTS/CTS is not configured
686 * for the frame. */
687 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
688 (tx->rate->flags & IEEE80211_RATE_ERP_G) &&
689 (tx->flags & IEEE80211_TX_UNICAST) &&
690 tx->sdata->bss_conf.use_cts_prot &&
691 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
692 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
693
694 /* Transmit data frames using short preambles if the driver supports
695 * short preambles at the selected rate and short preambles are
696 * available on the network at the current point in time. */
697 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
698 (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
699 tx->sdata->bss_conf.use_short_preamble &&
700 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
701 tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
702 }
703
704 /* Setup duration field for the first fragment of the frame. Duration
705 * for remaining fragments will be updated when they are being sent
706 * to low-level driver in ieee80211_tx(). */
707 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
708 (tx->flags & IEEE80211_TX_FRAGMENTED) ?
709 tx->extra_frag[0]->len : 0);
710 hdr->duration_id = cpu_to_le16(dur);
711
712 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
713 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
714 struct ieee80211_supported_band *sband;
715 struct ieee80211_rate *rate, *baserate;
716 int idx;
717
718 sband = tx->local->hw.wiphy->bands[
719 tx->local->hw.conf.channel->band];
720
721 /* Do not use multiple retry rates when using RTS/CTS */
722 control->alt_retry_rate = NULL;
723
724 /* Use min(data rate, max base rate) as CTS/RTS rate */
725 rate = tx->rate;
726 baserate = NULL;
727
728 for (idx = 0; idx < sband->n_bitrates; idx++) {
729 if (sband->bitrates[idx].bitrate > rate->bitrate)
730 continue;
731 if (tx->sdata->basic_rates & BIT(idx) &&
732 (!baserate ||
733 (baserate->bitrate < sband->bitrates[idx].bitrate)))
734 baserate = &sband->bitrates[idx];
735 }
736
737 if (baserate)
738 control->rts_cts_rate = baserate;
739 else
740 control->rts_cts_rate = &sband->bitrates[0];
741 }
742
743 if (tx->sta) {
744 control->aid = tx->sta->aid;
745 tx->sta->tx_packets++;
746 tx->sta->tx_fragments++;
747 tx->sta->tx_bytes += tx->skb->len;
748 if (tx->extra_frag) {
749 int i;
750 tx->sta->tx_fragments += tx->num_extra_frag;
751 for (i = 0; i < tx->num_extra_frag; i++) {
752 tx->sta->tx_bytes +=
753 tx->extra_frag[i]->len;
754 }
755 }
756 }
757
758 return TX_CONTINUE;
759}
760
761static ieee80211_tx_result
762ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx)
763{
764 struct ieee80211_local *local = tx->local;
765 struct sk_buff *skb = tx->skb;
766 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
767 u32 load = 0, hdrtime;
768 struct ieee80211_rate *rate = tx->rate;
769
770 /* TODO: this could be part of tx_status handling, so that the number
771 * of retries would be known; TX rate should in that case be stored
772 * somewhere with the packet */
773
774 /* Estimate total channel use caused by this frame */
775
776 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
777 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
778
779 if (tx->channel->band == IEEE80211_BAND_5GHZ ||
780 (tx->channel->band == IEEE80211_BAND_2GHZ &&
781 rate->flags & IEEE80211_RATE_ERP_G))
782 hdrtime = CHAN_UTIL_HDR_SHORT;
783 else
784 hdrtime = CHAN_UTIL_HDR_LONG;
785
786 load = hdrtime;
787 if (!is_multicast_ether_addr(hdr->addr1))
788 load += hdrtime;
789
790 if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
791 load += 2 * hdrtime;
792 else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
793 load += hdrtime;
794 762
795 /* TODO: optimise again */ 763 if (!tx->sta)
796 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; 764 return TX_CONTINUE;
797 765
766 tx->sta->tx_packets++;
767 tx->sta->tx_fragments++;
768 tx->sta->tx_bytes += tx->skb->len;
798 if (tx->extra_frag) { 769 if (tx->extra_frag) {
799 int i; 770 tx->sta->tx_fragments += tx->num_extra_frag;
800 for (i = 0; i < tx->num_extra_frag; i++) { 771 for (i = 0; i < tx->num_extra_frag; i++)
801 load += 2 * hdrtime; 772 tx->sta->tx_bytes += tx->extra_frag[i]->len;
802 load += tx->extra_frag[i]->len *
803 tx->rate->bitrate;
804 }
805 } 773 }
806 774
807 /* Divide channel_use by 8 to avoid wrapping around the counter */
808 load >>= CHAN_UTIL_SHIFT;
809 local->channel_use_raw += load;
810 if (tx->sta)
811 tx->sta->channel_use_raw += load;
812 tx->sdata->channel_use_raw += load;
813
814 return TX_CONTINUE; 775 return TX_CONTINUE;
815} 776}
816 777
@@ -823,11 +784,12 @@ static ieee80211_tx_handler ieee80211_tx_handlers[] =
823 ieee80211_tx_h_ps_buf, 784 ieee80211_tx_h_ps_buf,
824 ieee80211_tx_h_select_key, 785 ieee80211_tx_h_select_key,
825 ieee80211_tx_h_michael_mic_add, 786 ieee80211_tx_h_michael_mic_add,
826 ieee80211_tx_h_fragment,
827 ieee80211_tx_h_encrypt,
828 ieee80211_tx_h_rate_ctrl, 787 ieee80211_tx_h_rate_ctrl,
829 ieee80211_tx_h_misc, 788 ieee80211_tx_h_misc,
830 ieee80211_tx_h_load_stats, 789 ieee80211_tx_h_fragment,
790 /* handlers after fragment must be aware of tx info fragmentation! */
791 ieee80211_tx_h_encrypt,
792 ieee80211_tx_h_stats,
831 NULL 793 NULL
832}; 794};
833 795
@@ -854,12 +816,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 (struct ieee80211_radiotap_header *) skb->data; 816 (struct ieee80211_radiotap_header *) skb->data;
855 struct ieee80211_supported_band *sband; 817 struct ieee80211_supported_band *sband;
856 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 818 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
857 struct ieee80211_tx_control *control = tx->control; 819 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
858 820
859 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; 821 sband = tx->local->hw.wiphy->bands[tx->channel->band];
860 822
861 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 823 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
862 tx->flags |= IEEE80211_TX_INJECTED; 824 info->flags |= IEEE80211_TX_CTL_INJECTED;
863 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 825 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
864 826
865 /* 827 /*
@@ -896,7 +858,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
896 r = &sband->bitrates[i]; 858 r = &sband->bitrates[i];
897 859
898 if (r->bitrate == target_rate) { 860 if (r->bitrate == target_rate) {
899 tx->rate = r; 861 tx->rate_idx = i;
900 break; 862 break;
901 } 863 }
902 } 864 }
@@ -907,7 +869,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
907 * radiotap uses 0 for 1st ant, mac80211 is 1 for 869 * radiotap uses 0 for 1st ant, mac80211 is 1 for
908 * 1st ant 870 * 1st ant
909 */ 871 */
910 control->antenna_sel_tx = (*iterator.this_arg) + 1; 872 info->antenna_sel_tx = (*iterator.this_arg) + 1;
911 break; 873 break;
912 874
913#if 0 875#if 0
@@ -931,8 +893,8 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
931 skb_trim(skb, skb->len - FCS_LEN); 893 skb_trim(skb, skb->len - FCS_LEN);
932 } 894 }
933 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) 895 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
934 control->flags &= 896 info->flags &=
935 ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; 897 ~IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
936 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) 898 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
937 tx->flags |= IEEE80211_TX_FRAGMENTED; 899 tx->flags |= IEEE80211_TX_FRAGMENTED;
938 break; 900 break;
@@ -967,12 +929,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
967static ieee80211_tx_result 929static ieee80211_tx_result
968__ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 930__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
969 struct sk_buff *skb, 931 struct sk_buff *skb,
970 struct net_device *dev, 932 struct net_device *dev)
971 struct ieee80211_tx_control *control)
972{ 933{
973 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 934 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
974 struct ieee80211_hdr *hdr; 935 struct ieee80211_hdr *hdr;
975 struct ieee80211_sub_if_data *sdata; 936 struct ieee80211_sub_if_data *sdata;
937 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
976 938
977 int hdrlen; 939 int hdrlen;
978 940
@@ -981,7 +943,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
981 tx->dev = dev; /* use original interface */ 943 tx->dev = dev; /* use original interface */
982 tx->local = local; 944 tx->local = local;
983 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); 945 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
984 tx->control = control; 946 tx->channel = local->hw.conf.channel;
947 tx->rate_idx = -1;
948 tx->last_frag_rate_idx = -1;
985 /* 949 /*
986 * Set this flag (used below to indicate "automatic fragmentation"), 950 * Set this flag (used below to indicate "automatic fragmentation"),
987 * it will be cleared/left by radiotap as desired. 951 * it will be cleared/left by radiotap as desired.
@@ -1008,10 +972,10 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1008 972
1009 if (is_multicast_ether_addr(hdr->addr1)) { 973 if (is_multicast_ether_addr(hdr->addr1)) {
1010 tx->flags &= ~IEEE80211_TX_UNICAST; 974 tx->flags &= ~IEEE80211_TX_UNICAST;
1011 control->flags |= IEEE80211_TXCTL_NO_ACK; 975 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1012 } else { 976 } else {
1013 tx->flags |= IEEE80211_TX_UNICAST; 977 tx->flags |= IEEE80211_TX_UNICAST;
1014 control->flags &= ~IEEE80211_TXCTL_NO_ACK; 978 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1015 } 979 }
1016 980
1017 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 981 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
@@ -1024,18 +988,16 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1024 } 988 }
1025 989
1026 if (!tx->sta) 990 if (!tx->sta)
1027 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 991 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1028 else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { 992 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1029 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 993 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1030 tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT;
1031 }
1032 994
1033 hdrlen = ieee80211_get_hdrlen(tx->fc); 995 hdrlen = ieee80211_get_hdrlen(tx->fc);
1034 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 996 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1035 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 997 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1036 tx->ethertype = (pos[0] << 8) | pos[1]; 998 tx->ethertype = (pos[0] << 8) | pos[1];
1037 } 999 }
1038 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1000 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1039 1001
1040 return TX_CONTINUE; 1002 return TX_CONTINUE;
1041} 1003}
@@ -1045,14 +1007,12 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1045 */ 1007 */
1046static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 1008static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1047 struct sk_buff *skb, 1009 struct sk_buff *skb,
1048 struct net_device *mdev, 1010 struct net_device *mdev)
1049 struct ieee80211_tx_control *control)
1050{ 1011{
1051 struct ieee80211_tx_packet_data *pkt_data; 1012 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1052 struct net_device *dev; 1013 struct net_device *dev;
1053 1014
1054 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1015 dev = dev_get_by_index(&init_net, info->control.ifindex);
1055 dev = dev_get_by_index(&init_net, pkt_data->ifindex);
1056 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { 1016 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) {
1057 dev_put(dev); 1017 dev_put(dev);
1058 dev = NULL; 1018 dev = NULL;
@@ -1060,7 +1020,7 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1060 if (unlikely(!dev)) 1020 if (unlikely(!dev))
1061 return -ENODEV; 1021 return -ENODEV;
1062 /* initialises tx with control */ 1022 /* initialises tx with control */
1063 __ieee80211_tx_prepare(tx, skb, dev, control); 1023 __ieee80211_tx_prepare(tx, skb, dev);
1064 dev_put(dev); 1024 dev_put(dev);
1065 return 0; 1025 return 0;
1066} 1026}
@@ -1068,50 +1028,49 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1068static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1028static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1069 struct ieee80211_tx_data *tx) 1029 struct ieee80211_tx_data *tx)
1070{ 1030{
1071 struct ieee80211_tx_control *control = tx->control; 1031 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1072 int ret, i; 1032 int ret, i;
1073 1033
1074 if (!ieee80211_qdisc_installed(local->mdev) && 1034 if (netif_subqueue_stopped(local->mdev, skb))
1075 __ieee80211_queue_stopped(local, 0)) {
1076 netif_stop_queue(local->mdev);
1077 return IEEE80211_TX_AGAIN; 1035 return IEEE80211_TX_AGAIN;
1078 } 1036
1079 if (skb) { 1037 if (skb) {
1080 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1038 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1081 "TX to low-level driver", skb); 1039 "TX to low-level driver", skb);
1082 ret = local->ops->tx(local_to_hw(local), skb, control); 1040 ret = local->ops->tx(local_to_hw(local), skb);
1083 if (ret) 1041 if (ret)
1084 return IEEE80211_TX_AGAIN; 1042 return IEEE80211_TX_AGAIN;
1085 local->mdev->trans_start = jiffies; 1043 local->mdev->trans_start = jiffies;
1086 ieee80211_led_tx(local, 1); 1044 ieee80211_led_tx(local, 1);
1087 } 1045 }
1088 if (tx->extra_frag) { 1046 if (tx->extra_frag) {
1089 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1090 IEEE80211_TXCTL_USE_CTS_PROTECT |
1091 IEEE80211_TXCTL_CLEAR_PS_FILT |
1092 IEEE80211_TXCTL_FIRST_FRAGMENT);
1093 for (i = 0; i < tx->num_extra_frag; i++) { 1047 for (i = 0; i < tx->num_extra_frag; i++) {
1094 if (!tx->extra_frag[i]) 1048 if (!tx->extra_frag[i])
1095 continue; 1049 continue;
1096 if (__ieee80211_queue_stopped(local, control->queue)) 1050 info = IEEE80211_SKB_CB(tx->extra_frag[i]);
1051 info->flags &= ~(IEEE80211_TX_CTL_USE_RTS_CTS |
1052 IEEE80211_TX_CTL_USE_CTS_PROTECT |
1053 IEEE80211_TX_CTL_CLEAR_PS_FILT |
1054 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1055 if (netif_subqueue_stopped(local->mdev,
1056 tx->extra_frag[i]))
1097 return IEEE80211_TX_FRAG_AGAIN; 1057 return IEEE80211_TX_FRAG_AGAIN;
1098 if (i == tx->num_extra_frag) { 1058 if (i == tx->num_extra_frag) {
1099 control->tx_rate = tx->last_frag_rate; 1059 info->tx_rate_idx = tx->last_frag_rate_idx;
1100 1060
1101 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) 1061 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG)
1102 control->flags |= 1062 info->flags |=
1103 IEEE80211_TXCTL_RATE_CTRL_PROBE; 1063 IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1104 else 1064 else
1105 control->flags &= 1065 info->flags &=
1106 ~IEEE80211_TXCTL_RATE_CTRL_PROBE; 1066 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1107 } 1067 }
1108 1068
1109 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1069 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1110 "TX to low-level driver", 1070 "TX to low-level driver",
1111 tx->extra_frag[i]); 1071 tx->extra_frag[i]);
1112 ret = local->ops->tx(local_to_hw(local), 1072 ret = local->ops->tx(local_to_hw(local),
1113 tx->extra_frag[i], 1073 tx->extra_frag[i]);
1114 control);
1115 if (ret) 1074 if (ret)
1116 return IEEE80211_TX_FRAG_AGAIN; 1075 return IEEE80211_TX_FRAG_AGAIN;
1117 local->mdev->trans_start = jiffies; 1076 local->mdev->trans_start = jiffies;
@@ -1124,17 +1083,20 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1124 return IEEE80211_TX_OK; 1083 return IEEE80211_TX_OK;
1125} 1084}
1126 1085
1127static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, 1086static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1128 struct ieee80211_tx_control *control)
1129{ 1087{
1130 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1088 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1131 struct sta_info *sta; 1089 struct sta_info *sta;
1132 ieee80211_tx_handler *handler; 1090 ieee80211_tx_handler *handler;
1133 struct ieee80211_tx_data tx; 1091 struct ieee80211_tx_data tx;
1134 ieee80211_tx_result res = TX_DROP, res_prepare; 1092 ieee80211_tx_result res = TX_DROP, res_prepare;
1093 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1135 int ret, i; 1094 int ret, i;
1095 u16 queue;
1096
1097 queue = skb_get_queue_mapping(skb);
1136 1098
1137 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1099 WARN_ON(test_bit(queue, local->queues_pending));
1138 1100
1139 if (unlikely(skb->len < 10)) { 1101 if (unlikely(skb->len < 10)) {
1140 dev_kfree_skb(skb); 1102 dev_kfree_skb(skb);
@@ -1144,7 +1106,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1144 rcu_read_lock(); 1106 rcu_read_lock();
1145 1107
1146 /* initialises tx */ 1108 /* initialises tx */
1147 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); 1109 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
1148 1110
1149 if (res_prepare == TX_DROP) { 1111 if (res_prepare == TX_DROP) {
1150 dev_kfree_skb(skb); 1112 dev_kfree_skb(skb);
@@ -1154,6 +1116,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1154 1116
1155 sta = tx.sta; 1117 sta = tx.sta;
1156 tx.channel = local->hw.conf.channel; 1118 tx.channel = local->hw.conf.channel;
1119 info->band = tx.channel->band;
1157 1120
1158 for (handler = ieee80211_tx_handlers; *handler != NULL; 1121 for (handler = ieee80211_tx_handlers; *handler != NULL;
1159 handler++) { 1122 handler++) {
@@ -1162,7 +1125,8 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1162 break; 1125 break;
1163 } 1126 }
1164 1127
1165 skb = tx.skb; /* handlers are allowed to change skb */ 1128 if (WARN_ON(tx.skb != skb))
1129 goto drop;
1166 1130
1167 if (unlikely(res == TX_DROP)) { 1131 if (unlikely(res == TX_DROP)) {
1168 I802_DEBUG_INC(local->tx_handlers_drop); 1132 I802_DEBUG_INC(local->tx_handlers_drop);
@@ -1186,7 +1150,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1186 next_len = tx.extra_frag[i + 1]->len; 1150 next_len = tx.extra_frag[i + 1]->len;
1187 } else { 1151 } else {
1188 next_len = 0; 1152 next_len = 0;
1189 tx.rate = tx.last_frag_rate; 1153 tx.rate_idx = tx.last_frag_rate_idx;
1190 } 1154 }
1191 dur = ieee80211_duration(&tx, 0, next_len); 1155 dur = ieee80211_duration(&tx, 0, next_len);
1192 hdr->duration_id = cpu_to_le16(dur); 1156 hdr->duration_id = cpu_to_le16(dur);
@@ -1196,34 +1160,41 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1196retry: 1160retry:
1197 ret = __ieee80211_tx(local, skb, &tx); 1161 ret = __ieee80211_tx(local, skb, &tx);
1198 if (ret) { 1162 if (ret) {
1199 struct ieee80211_tx_stored_packet *store = 1163 struct ieee80211_tx_stored_packet *store;
1200 &local->pending_packet[control->queue]; 1164
1165 /*
1166 * Since there are no fragmented frames on A-MPDU
1167 * queues, there's no reason for a driver to reject
1168 * a frame there, warn and drop it.
1169 */
1170 if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw)))
1171 goto drop;
1172
1173 store = &local->pending_packet[queue];
1201 1174
1202 if (ret == IEEE80211_TX_FRAG_AGAIN) 1175 if (ret == IEEE80211_TX_FRAG_AGAIN)
1203 skb = NULL; 1176 skb = NULL;
1204 set_bit(IEEE80211_LINK_STATE_PENDING, 1177 set_bit(queue, local->queues_pending);
1205 &local->state[control->queue]);
1206 smp_mb(); 1178 smp_mb();
1207 /* When the driver gets out of buffers during sending of 1179 /*
1208 * fragments and calls ieee80211_stop_queue, there is 1180 * When the driver gets out of buffers during sending of
1209 * a small window between IEEE80211_LINK_STATE_XOFF and 1181 * fragments and calls ieee80211_stop_queue, the netif
1210 * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer 1182 * subqueue is stopped. There is, however, a small window
1183 * in which the PENDING bit is not yet set. If a buffer
1211 * gets available in that window (i.e. driver calls 1184 * gets available in that window (i.e. driver calls
1212 * ieee80211_wake_queue), we would end up with ieee80211_tx 1185 * ieee80211_wake_queue), we would end up with ieee80211_tx
1213 * called with IEEE80211_LINK_STATE_PENDING. Prevent this by 1186 * called with the PENDING bit still set. Prevent this by
1214 * continuing transmitting here when that situation is 1187 * continuing transmitting here when that situation is
1215 * possible to have happened. */ 1188 * possible to have happened.
1216 if (!__ieee80211_queue_stopped(local, control->queue)) { 1189 */
1217 clear_bit(IEEE80211_LINK_STATE_PENDING, 1190 if (!__netif_subqueue_stopped(local->mdev, queue)) {
1218 &local->state[control->queue]); 1191 clear_bit(queue, local->queues_pending);
1219 goto retry; 1192 goto retry;
1220 } 1193 }
1221 memcpy(&store->control, control,
1222 sizeof(struct ieee80211_tx_control));
1223 store->skb = skb; 1194 store->skb = skb;
1224 store->extra_frag = tx.extra_frag; 1195 store->extra_frag = tx.extra_frag;
1225 store->num_extra_frag = tx.num_extra_frag; 1196 store->num_extra_frag = tx.num_extra_frag;
1226 store->last_frag_rate = tx.last_frag_rate; 1197 store->last_frag_rate_idx = tx.last_frag_rate_idx;
1227 store->last_frag_rate_ctrl_probe = 1198 store->last_frag_rate_ctrl_probe =
1228 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); 1199 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG);
1229 } 1200 }
@@ -1243,24 +1214,57 @@ retry:
1243 1214
1244/* device xmit handlers */ 1215/* device xmit handlers */
1245 1216
1217static int ieee80211_skb_resize(struct ieee80211_local *local,
1218 struct sk_buff *skb,
1219 int head_need, bool may_encrypt)
1220{
1221 int tail_need = 0;
1222
1223 /*
1224 * This could be optimised, devices that do full hardware
1225 * crypto (including TKIP MMIC) need no tailroom... But we
1226 * have no drivers for such devices currently.
1227 */
1228 if (may_encrypt) {
1229 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1230 tail_need -= skb_tailroom(skb);
1231 tail_need = max_t(int, tail_need, 0);
1232 }
1233
1234 if (head_need || tail_need) {
1235 /* Sorry. Can't account for this any more */
1236 skb_orphan(skb);
1237 }
1238
1239 if (skb_header_cloned(skb))
1240 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1241 else
1242 I802_DEBUG_INC(local->tx_expand_skb_head);
1243
1244 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1245 printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
1246 wiphy_name(local->hw.wiphy));
1247 return -ENOMEM;
1248 }
1249
1250 /* update truesize too */
1251 skb->truesize += head_need + tail_need;
1252
1253 return 0;
1254}
1255
1246int ieee80211_master_start_xmit(struct sk_buff *skb, 1256int ieee80211_master_start_xmit(struct sk_buff *skb,
1247 struct net_device *dev) 1257 struct net_device *dev)
1248{ 1258{
1249 struct ieee80211_tx_control control; 1259 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1250 struct ieee80211_tx_packet_data *pkt_data;
1251 struct net_device *odev = NULL; 1260 struct net_device *odev = NULL;
1252 struct ieee80211_sub_if_data *osdata; 1261 struct ieee80211_sub_if_data *osdata;
1253 int headroom; 1262 int headroom;
1263 bool may_encrypt;
1254 int ret; 1264 int ret;
1255 1265
1256 /* 1266 if (info->control.ifindex)
1257 * copy control out of the skb so other people can use skb->cb 1267 odev = dev_get_by_index(&init_net, info->control.ifindex);
1258 */
1259 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1260 memset(&control, 0, sizeof(struct ieee80211_tx_control));
1261
1262 if (pkt_data->ifindex)
1263 odev = dev_get_by_index(&init_net, pkt_data->ifindex);
1264 if (unlikely(odev && !is_ieee80211_device(odev, dev))) { 1268 if (unlikely(odev && !is_ieee80211_device(odev, dev))) {
1265 dev_put(odev); 1269 dev_put(odev);
1266 odev = NULL; 1270 odev = NULL;
@@ -1273,32 +1277,25 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1273 dev_kfree_skb(skb); 1277 dev_kfree_skb(skb);
1274 return 0; 1278 return 0;
1275 } 1279 }
1280
1276 osdata = IEEE80211_DEV_TO_SUB_IF(odev); 1281 osdata = IEEE80211_DEV_TO_SUB_IF(odev);
1277 1282
1278 headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; 1283 may_encrypt = !(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT);
1279 if (skb_headroom(skb) < headroom) { 1284
1280 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { 1285 headroom = osdata->local->tx_headroom;
1281 dev_kfree_skb(skb); 1286 if (may_encrypt)
1282 dev_put(odev); 1287 headroom += IEEE80211_ENCRYPT_HEADROOM;
1283 return 0; 1288 headroom -= skb_headroom(skb);
1284 } 1289 headroom = max_t(int, 0, headroom);
1290
1291 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
1292 dev_kfree_skb(skb);
1293 dev_put(odev);
1294 return 0;
1285 } 1295 }
1286 1296
1287 control.vif = &osdata->vif; 1297 info->control.vif = &osdata->vif;
1288 control.type = osdata->vif.type; 1298 ret = ieee80211_tx(odev, skb);
1289 if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS)
1290 control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS;
1291 if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT)
1292 control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
1293 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE)
1294 control.flags |= IEEE80211_TXCTL_REQUEUE;
1295 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME)
1296 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME;
1297 if (pkt_data->flags & IEEE80211_TXPD_AMPDU)
1298 control.flags |= IEEE80211_TXCTL_AMPDU;
1299 control.queue = pkt_data->queue;
1300
1301 ret = ieee80211_tx(odev, skb, &control);
1302 dev_put(odev); 1299 dev_put(odev);
1303 1300
1304 return ret; 1301 return ret;
@@ -1308,7 +1305,7 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1308 struct net_device *dev) 1305 struct net_device *dev)
1309{ 1306{
1310 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1307 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1311 struct ieee80211_tx_packet_data *pkt_data; 1308 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1312 struct ieee80211_radiotap_header *prthdr = 1309 struct ieee80211_radiotap_header *prthdr =
1313 (struct ieee80211_radiotap_header *)skb->data; 1310 (struct ieee80211_radiotap_header *)skb->data;
1314 u16 len_rthdr; 1311 u16 len_rthdr;
@@ -1330,12 +1327,12 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1330 1327
1331 skb->dev = local->mdev; 1328 skb->dev = local->mdev;
1332 1329
1333 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1334 memset(pkt_data, 0, sizeof(*pkt_data));
1335 /* needed because we set skb device to master */ 1330 /* needed because we set skb device to master */
1336 pkt_data->ifindex = dev->ifindex; 1331 info->control.ifindex = dev->ifindex;
1337 1332
1338 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; 1333 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1334 /* Interfaces should always request a status report */
1335 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1339 1336
1340 /* 1337 /*
1341 * fix up the pointers accounting for the radiotap 1338 * fix up the pointers accounting for the radiotap
@@ -1379,7 +1376,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1379 struct net_device *dev) 1376 struct net_device *dev)
1380{ 1377{
1381 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1378 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1382 struct ieee80211_tx_packet_data *pkt_data; 1379 struct ieee80211_tx_info *info;
1383 struct ieee80211_sub_if_data *sdata; 1380 struct ieee80211_sub_if_data *sdata;
1384 int ret = 1, head_need; 1381 int ret = 1, head_need;
1385 u16 ethertype, hdrlen, meshhdrlen = 0, fc; 1382 u16 ethertype, hdrlen, meshhdrlen = 0, fc;
@@ -1486,12 +1483,13 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1486 rcu_read_lock(); 1483 rcu_read_lock();
1487 sta = sta_info_get(local, hdr.addr1); 1484 sta = sta_info_get(local, hdr.addr1);
1488 if (sta) 1485 if (sta)
1489 sta_flags = sta->flags; 1486 sta_flags = get_sta_flags(sta);
1490 rcu_read_unlock(); 1487 rcu_read_unlock();
1491 } 1488 }
1492 1489
1493 /* receiver is QoS enabled, use a QoS type frame */ 1490 /* receiver and we are QoS enabled, use a QoS type frame */
1494 if (sta_flags & WLAN_STA_WME) { 1491 if (sta_flags & WLAN_STA_WME &&
1492 ieee80211_num_regular_queues(&local->hw) >= 4) {
1495 fc |= IEEE80211_STYPE_QOS_DATA; 1493 fc |= IEEE80211_STYPE_QOS_DATA;
1496 hdrlen += 2; 1494 hdrlen += 2;
1497 } 1495 }
@@ -1555,32 +1553,26 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1555 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and 1553 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1556 * alloc_skb() (net/core/skbuff.c) 1554 * alloc_skb() (net/core/skbuff.c)
1557 */ 1555 */
1558 head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; 1556 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1559 head_need -= skb_headroom(skb);
1560 1557
1561 /* We are going to modify skb data, so make a copy of it if happens to 1558 /*
1562 * be cloned. This could happen, e.g., with Linux bridge code passing 1559 * So we need to modify the skb header and hence need a copy of
1563 * us broadcast frames. */ 1560 * that. The head_need variable above doesn't, so far, include
1561 * the needed header space that we don't need right away. If we
1562 * can, then we don't reallocate right now but only after the
1563 * frame arrives at the master device (if it does...)
1564 *
1565 * If we cannot, however, then we will reallocate to include all
1566 * the ever needed space. Also, if we need to reallocate it anyway,
1567 * make it big enough for everything we may ever need.
1568 */
1564 1569
1565 if (head_need > 0 || skb_header_cloned(skb)) { 1570 if (head_need > 0 || skb_header_cloned(skb)) {
1566#if 0 1571 head_need += IEEE80211_ENCRYPT_HEADROOM;
1567 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1572 head_need += local->tx_headroom;
1568 "of headroom\n", dev->name, head_need); 1573 head_need = max_t(int, 0, head_need);
1569#endif 1574 if (ieee80211_skb_resize(local, skb, head_need, true))
1570
1571 if (skb_header_cloned(skb))
1572 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1573 else
1574 I802_DEBUG_INC(local->tx_expand_skb_head);
1575 /* Since we have to reallocate the buffer, make sure that there
1576 * is enough room for possible WEP IV/ICV and TKIP (8 bytes
1577 * before payload and 12 after). */
1578 if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8),
1579 12, GFP_ATOMIC)) {
1580 printk(KERN_DEBUG "%s: failed to reallocate TX buffer"
1581 "\n", dev->name);
1582 goto fail; 1575 goto fail;
1583 }
1584 } 1576 }
1585 1577
1586 if (encaps_data) { 1578 if (encaps_data) {
@@ -1611,11 +1603,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1611 nh_pos += hdrlen; 1603 nh_pos += hdrlen;
1612 h_pos += hdrlen; 1604 h_pos += hdrlen;
1613 1605
1614 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1606 info = IEEE80211_SKB_CB(skb);
1615 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 1607 memset(info, 0, sizeof(*info));
1616 pkt_data->ifindex = dev->ifindex; 1608 info->control.ifindex = dev->ifindex;
1617 if (ethertype == ETH_P_PAE) 1609 if (ethertype == ETH_P_PAE)
1618 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; 1610 info->flags |= IEEE80211_TX_CTL_EAPOL_FRAME;
1611
1612 /* Interfaces should always request a status report */
1613 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1619 1614
1620 skb->dev = local->mdev; 1615 skb->dev = local->mdev;
1621 dev->stats.tx_packets++; 1616 dev->stats.tx_packets++;
@@ -1640,46 +1635,55 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1640 return ret; 1635 return ret;
1641} 1636}
1642 1637
1643/* helper functions for pending packets for when queues are stopped */
1644 1638
1639/*
1640 * ieee80211_clear_tx_pending may not be called in a context where
1641 * it is possible that it packets could come in again.
1642 */
1645void ieee80211_clear_tx_pending(struct ieee80211_local *local) 1643void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1646{ 1644{
1647 int i, j; 1645 int i, j;
1648 struct ieee80211_tx_stored_packet *store; 1646 struct ieee80211_tx_stored_packet *store;
1649 1647
1650 for (i = 0; i < local->hw.queues; i++) { 1648 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1651 if (!__ieee80211_queue_pending(local, i)) 1649 if (!test_bit(i, local->queues_pending))
1652 continue; 1650 continue;
1653 store = &local->pending_packet[i]; 1651 store = &local->pending_packet[i];
1654 kfree_skb(store->skb); 1652 kfree_skb(store->skb);
1655 for (j = 0; j < store->num_extra_frag; j++) 1653 for (j = 0; j < store->num_extra_frag; j++)
1656 kfree_skb(store->extra_frag[j]); 1654 kfree_skb(store->extra_frag[j]);
1657 kfree(store->extra_frag); 1655 kfree(store->extra_frag);
1658 clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); 1656 clear_bit(i, local->queues_pending);
1659 } 1657 }
1660} 1658}
1661 1659
1660/*
1661 * Transmit all pending packets. Called from tasklet, locks master device
1662 * TX lock so that no new packets can come in.
1663 */
1662void ieee80211_tx_pending(unsigned long data) 1664void ieee80211_tx_pending(unsigned long data)
1663{ 1665{
1664 struct ieee80211_local *local = (struct ieee80211_local *)data; 1666 struct ieee80211_local *local = (struct ieee80211_local *)data;
1665 struct net_device *dev = local->mdev; 1667 struct net_device *dev = local->mdev;
1666 struct ieee80211_tx_stored_packet *store; 1668 struct ieee80211_tx_stored_packet *store;
1667 struct ieee80211_tx_data tx; 1669 struct ieee80211_tx_data tx;
1668 int i, ret, reschedule = 0; 1670 int i, ret;
1669 1671
1670 netif_tx_lock_bh(dev); 1672 netif_tx_lock_bh(dev);
1671 for (i = 0; i < local->hw.queues; i++) { 1673 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1672 if (__ieee80211_queue_stopped(local, i)) 1674 /* Check that this queue is ok */
1675 if (__netif_subqueue_stopped(local->mdev, i))
1673 continue; 1676 continue;
1674 if (!__ieee80211_queue_pending(local, i)) { 1677
1675 reschedule = 1; 1678 if (!test_bit(i, local->queues_pending)) {
1679 ieee80211_wake_queue(&local->hw, i);
1676 continue; 1680 continue;
1677 } 1681 }
1682
1678 store = &local->pending_packet[i]; 1683 store = &local->pending_packet[i];
1679 tx.control = &store->control;
1680 tx.extra_frag = store->extra_frag; 1684 tx.extra_frag = store->extra_frag;
1681 tx.num_extra_frag = store->num_extra_frag; 1685 tx.num_extra_frag = store->num_extra_frag;
1682 tx.last_frag_rate = store->last_frag_rate; 1686 tx.last_frag_rate_idx = store->last_frag_rate_idx;
1683 tx.flags = 0; 1687 tx.flags = 0;
1684 if (store->last_frag_rate_ctrl_probe) 1688 if (store->last_frag_rate_ctrl_probe)
1685 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; 1689 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG;
@@ -1688,19 +1692,11 @@ void ieee80211_tx_pending(unsigned long data)
1688 if (ret == IEEE80211_TX_FRAG_AGAIN) 1692 if (ret == IEEE80211_TX_FRAG_AGAIN)
1689 store->skb = NULL; 1693 store->skb = NULL;
1690 } else { 1694 } else {
1691 clear_bit(IEEE80211_LINK_STATE_PENDING, 1695 clear_bit(i, local->queues_pending);
1692 &local->state[i]); 1696 ieee80211_wake_queue(&local->hw, i);
1693 reschedule = 1;
1694 } 1697 }
1695 } 1698 }
1696 netif_tx_unlock_bh(dev); 1699 netif_tx_unlock_bh(dev);
1697 if (reschedule) {
1698 if (!ieee80211_qdisc_installed(dev)) {
1699 if (!__ieee80211_queue_stopped(local, 0))
1700 netif_wake_queue(dev);
1701 } else
1702 netif_schedule(dev);
1703 }
1704} 1700}
1705 1701
1706/* functions for drivers to get certain frames */ 1702/* functions for drivers to get certain frames */
@@ -1769,11 +1765,11 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1769} 1765}
1770 1766
1771struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 1767struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1772 struct ieee80211_vif *vif, 1768 struct ieee80211_vif *vif)
1773 struct ieee80211_tx_control *control)
1774{ 1769{
1775 struct ieee80211_local *local = hw_to_local(hw); 1770 struct ieee80211_local *local = hw_to_local(hw);
1776 struct sk_buff *skb; 1771 struct sk_buff *skb;
1772 struct ieee80211_tx_info *info;
1777 struct net_device *bdev; 1773 struct net_device *bdev;
1778 struct ieee80211_sub_if_data *sdata = NULL; 1774 struct ieee80211_sub_if_data *sdata = NULL;
1779 struct ieee80211_if_ap *ap = NULL; 1775 struct ieee80211_if_ap *ap = NULL;
@@ -1783,9 +1779,10 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1783 struct ieee80211_mgmt *mgmt; 1779 struct ieee80211_mgmt *mgmt;
1784 int *num_beacons; 1780 int *num_beacons;
1785 bool err = true; 1781 bool err = true;
1782 enum ieee80211_band band = local->hw.conf.channel->band;
1786 u8 *pos; 1783 u8 *pos;
1787 1784
1788 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1785 sband = local->hw.wiphy->bands[band];
1789 1786
1790 rcu_read_lock(); 1787 rcu_read_lock();
1791 1788
@@ -1878,30 +1875,32 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1878 goto out; 1875 goto out;
1879 } 1876 }
1880 1877
1881 if (control) { 1878 info = IEEE80211_SKB_CB(skb);
1882 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1883 if (!rsel.rate) {
1884 if (net_ratelimit()) {
1885 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
1886 "no rate found\n",
1887 wiphy_name(local->hw.wiphy));
1888 }
1889 dev_kfree_skb(skb);
1890 skb = NULL;
1891 goto out;
1892 }
1893 1879
1894 control->vif = vif; 1880 info->band = band;
1895 control->tx_rate = rsel.rate; 1881 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1896 if (sdata->bss_conf.use_short_preamble && 1882
1897 rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 1883 if (unlikely(rsel.rate_idx < 0)) {
1898 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 1884 if (net_ratelimit()) {
1899 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1885 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
1900 control->flags |= IEEE80211_TXCTL_NO_ACK; 1886 "no rate found\n",
1901 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 1887 wiphy_name(local->hw.wiphy));
1902 control->retry_limit = 1; 1888 }
1903 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1889 dev_kfree_skb(skb);
1890 skb = NULL;
1891 goto out;
1904 } 1892 }
1893
1894 info->control.vif = vif;
1895 info->tx_rate_idx = rsel.rate_idx;
1896 if (sdata->bss_conf.use_short_preamble &&
1897 sband->bitrates[rsel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE)
1898 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
1899 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1900 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1901 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1902 info->control.retry_limit = 1;
1903 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1905 (*num_beacons)++; 1904 (*num_beacons)++;
1906out: 1905out:
1907 rcu_read_unlock(); 1906 rcu_read_unlock();
@@ -1911,7 +1910,7 @@ EXPORT_SYMBOL(ieee80211_beacon_get);
1911 1910
1912void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1911void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1913 const void *frame, size_t frame_len, 1912 const void *frame, size_t frame_len,
1914 const struct ieee80211_tx_control *frame_txctl, 1913 const struct ieee80211_tx_info *frame_txctl,
1915 struct ieee80211_rts *rts) 1914 struct ieee80211_rts *rts)
1916{ 1915{
1917 const struct ieee80211_hdr *hdr = frame; 1916 const struct ieee80211_hdr *hdr = frame;
@@ -1928,7 +1927,7 @@ EXPORT_SYMBOL(ieee80211_rts_get);
1928 1927
1929void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1928void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1930 const void *frame, size_t frame_len, 1929 const void *frame, size_t frame_len,
1931 const struct ieee80211_tx_control *frame_txctl, 1930 const struct ieee80211_tx_info *frame_txctl,
1932 struct ieee80211_cts *cts) 1931 struct ieee80211_cts *cts)
1933{ 1932{
1934 const struct ieee80211_hdr *hdr = frame; 1933 const struct ieee80211_hdr *hdr = frame;
@@ -1944,11 +1943,10 @@ EXPORT_SYMBOL(ieee80211_ctstoself_get);
1944 1943
1945struct sk_buff * 1944struct sk_buff *
1946ieee80211_get_buffered_bc(struct ieee80211_hw *hw, 1945ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1947 struct ieee80211_vif *vif, 1946 struct ieee80211_vif *vif)
1948 struct ieee80211_tx_control *control)
1949{ 1947{
1950 struct ieee80211_local *local = hw_to_local(hw); 1948 struct ieee80211_local *local = hw_to_local(hw);
1951 struct sk_buff *skb; 1949 struct sk_buff *skb = NULL;
1952 struct sta_info *sta; 1950 struct sta_info *sta;
1953 ieee80211_tx_handler *handler; 1951 ieee80211_tx_handler *handler;
1954 struct ieee80211_tx_data tx; 1952 struct ieee80211_tx_data tx;
@@ -1957,10 +1955,11 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1957 struct ieee80211_sub_if_data *sdata; 1955 struct ieee80211_sub_if_data *sdata;
1958 struct ieee80211_if_ap *bss = NULL; 1956 struct ieee80211_if_ap *bss = NULL;
1959 struct beacon_data *beacon; 1957 struct beacon_data *beacon;
1958 struct ieee80211_tx_info *info;
1960 1959
1961 sdata = vif_to_sdata(vif); 1960 sdata = vif_to_sdata(vif);
1962 bdev = sdata->dev; 1961 bdev = sdata->dev;
1963 1962 bss = &sdata->u.ap;
1964 1963
1965 if (!bss) 1964 if (!bss)
1966 return NULL; 1965 return NULL;
@@ -1968,19 +1967,16 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1968 rcu_read_lock(); 1967 rcu_read_lock();
1969 beacon = rcu_dereference(bss->beacon); 1968 beacon = rcu_dereference(bss->beacon);
1970 1969
1971 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || 1970 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head)
1972 !beacon->head) { 1971 goto out;
1973 rcu_read_unlock();
1974 return NULL;
1975 }
1976 1972
1977 if (bss->dtim_count != 0) 1973 if (bss->dtim_count != 0)
1978 return NULL; /* send buffered bc/mc only after DTIM beacon */ 1974 goto out; /* send buffered bc/mc only after DTIM beacon */
1979 memset(control, 0, sizeof(*control)); 1975
1980 while (1) { 1976 while (1) {
1981 skb = skb_dequeue(&bss->ps_bc_buf); 1977 skb = skb_dequeue(&bss->ps_bc_buf);
1982 if (!skb) 1978 if (!skb)
1983 return NULL; 1979 goto out;
1984 local->total_ps_buffered--; 1980 local->total_ps_buffered--;
1985 1981
1986 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { 1982 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
@@ -1993,20 +1989,26 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1993 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1989 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1994 } 1990 }
1995 1991
1996 if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) 1992 if (!ieee80211_tx_prepare(&tx, skb, local->mdev))
1997 break; 1993 break;
1998 dev_kfree_skb_any(skb); 1994 dev_kfree_skb_any(skb);
1999 } 1995 }
1996
1997 info = IEEE80211_SKB_CB(skb);
1998
2000 sta = tx.sta; 1999 sta = tx.sta;
2001 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2000 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2002 tx.channel = local->hw.conf.channel; 2001 tx.channel = local->hw.conf.channel;
2002 info->band = tx.channel->band;
2003 2003
2004 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { 2004 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) {
2005 res = (*handler)(&tx); 2005 res = (*handler)(&tx);
2006 if (res == TX_DROP || res == TX_QUEUED) 2006 if (res == TX_DROP || res == TX_QUEUED)
2007 break; 2007 break;
2008 } 2008 }
2009 skb = tx.skb; /* handlers are allowed to change skb */ 2009
2010 if (WARN_ON(tx.skb != skb))
2011 res = TX_DROP;
2010 2012
2011 if (res == TX_DROP) { 2013 if (res == TX_DROP) {
2012 I802_DEBUG_INC(local->tx_handlers_drop); 2014 I802_DEBUG_INC(local->tx_handlers_drop);
@@ -2017,6 +2019,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2017 skb = NULL; 2019 skb = NULL;
2018 } 2020 }
2019 2021
2022out:
2020 rcu_read_unlock(); 2023 rcu_read_unlock();
2021 2024
2022 return skb; 2025 return skb;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 4e97b266f907..6513bc2d2707 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(ieee80211_generic_frame_duration);
258 258
259__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, 259__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
260 struct ieee80211_vif *vif, size_t frame_len, 260 struct ieee80211_vif *vif, size_t frame_len,
261 const struct ieee80211_tx_control *frame_txctl) 261 const struct ieee80211_tx_info *frame_txctl)
262{ 262{
263 struct ieee80211_local *local = hw_to_local(hw); 263 struct ieee80211_local *local = hw_to_local(hw);
264 struct ieee80211_rate *rate; 264 struct ieee80211_rate *rate;
@@ -266,10 +266,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
266 bool short_preamble; 266 bool short_preamble;
267 int erp; 267 int erp;
268 u16 dur; 268 u16 dur;
269 struct ieee80211_supported_band *sband;
270
271 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
269 272
270 short_preamble = sdata->bss_conf.use_short_preamble; 273 short_preamble = sdata->bss_conf.use_short_preamble;
271 274
272 rate = frame_txctl->rts_cts_rate; 275 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
273 276
274 erp = 0; 277 erp = 0;
275 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 278 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -292,7 +295,7 @@ EXPORT_SYMBOL(ieee80211_rts_duration);
292__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, 295__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
293 struct ieee80211_vif *vif, 296 struct ieee80211_vif *vif,
294 size_t frame_len, 297 size_t frame_len,
295 const struct ieee80211_tx_control *frame_txctl) 298 const struct ieee80211_tx_info *frame_txctl)
296{ 299{
297 struct ieee80211_local *local = hw_to_local(hw); 300 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_rate *rate; 301 struct ieee80211_rate *rate;
@@ -300,10 +303,13 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
300 bool short_preamble; 303 bool short_preamble;
301 int erp; 304 int erp;
302 u16 dur; 305 u16 dur;
306 struct ieee80211_supported_band *sband;
307
308 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
303 309
304 short_preamble = sdata->bss_conf.use_short_preamble; 310 short_preamble = sdata->bss_conf.use_short_preamble;
305 311
306 rate = frame_txctl->rts_cts_rate; 312 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
307 erp = 0; 313 erp = 0;
308 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 314 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
309 erp = rate->flags & IEEE80211_RATE_ERP_G; 315 erp = rate->flags & IEEE80211_RATE_ERP_G;
@@ -311,7 +317,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
311 /* Data frame duration */ 317 /* Data frame duration */
312 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 318 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
313 erp, short_preamble); 319 erp, short_preamble);
314 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { 320 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
315 /* ACK duration */ 321 /* ACK duration */
316 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 322 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
317 erp, short_preamble); 323 erp, short_preamble);
@@ -325,17 +331,15 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
325{ 331{
326 struct ieee80211_local *local = hw_to_local(hw); 332 struct ieee80211_local *local = hw_to_local(hw);
327 333
328 if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, 334 if (test_bit(queue, local->queues_pending)) {
329 &local->state[queue])) { 335 tasklet_schedule(&local->tx_pending_tasklet);
330 if (test_bit(IEEE80211_LINK_STATE_PENDING, 336 } else {
331 &local->state[queue])) 337 if (ieee80211_is_multiqueue(local)) {
332 tasklet_schedule(&local->tx_pending_tasklet); 338 netif_wake_subqueue(local->mdev, queue);
333 else 339 } else {
334 if (!ieee80211_qdisc_installed(local->mdev)) { 340 WARN_ON(queue != 0);
335 if (queue == 0) 341 netif_wake_queue(local->mdev);
336 netif_wake_queue(local->mdev); 342 }
337 } else
338 __netif_schedule(local->mdev);
339 } 343 }
340} 344}
341EXPORT_SYMBOL(ieee80211_wake_queue); 345EXPORT_SYMBOL(ieee80211_wake_queue);
@@ -344,29 +348,20 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
344{ 348{
345 struct ieee80211_local *local = hw_to_local(hw); 349 struct ieee80211_local *local = hw_to_local(hw);
346 350
347 if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) 351 if (ieee80211_is_multiqueue(local)) {
352 netif_stop_subqueue(local->mdev, queue);
353 } else {
354 WARN_ON(queue != 0);
348 netif_stop_queue(local->mdev); 355 netif_stop_queue(local->mdev);
349 set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); 356 }
350} 357}
351EXPORT_SYMBOL(ieee80211_stop_queue); 358EXPORT_SYMBOL(ieee80211_stop_queue);
352 359
353void ieee80211_start_queues(struct ieee80211_hw *hw)
354{
355 struct ieee80211_local *local = hw_to_local(hw);
356 int i;
357
358 for (i = 0; i < local->hw.queues; i++)
359 clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]);
360 if (!ieee80211_qdisc_installed(local->mdev))
361 netif_start_queue(local->mdev);
362}
363EXPORT_SYMBOL(ieee80211_start_queues);
364
365void ieee80211_stop_queues(struct ieee80211_hw *hw) 360void ieee80211_stop_queues(struct ieee80211_hw *hw)
366{ 361{
367 int i; 362 int i;
368 363
369 for (i = 0; i < hw->queues; i++) 364 for (i = 0; i < ieee80211_num_queues(hw); i++)
370 ieee80211_stop_queue(hw, i); 365 ieee80211_stop_queue(hw, i);
371} 366}
372EXPORT_SYMBOL(ieee80211_stop_queues); 367EXPORT_SYMBOL(ieee80211_stop_queues);
@@ -375,7 +370,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
375{ 370{
376 int i; 371 int i;
377 372
378 for (i = 0; i < hw->queues; i++) 373 for (i = 0; i < hw->queues + hw->ampdu_queues; i++)
379 ieee80211_wake_queue(hw, i); 374 ieee80211_wake_queue(hw, i);
380} 375}
381EXPORT_SYMBOL(ieee80211_wake_queues); 376EXPORT_SYMBOL(ieee80211_wake_queues);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index affcecd78c10..e7b6344c900a 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -93,13 +93,9 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
93 fc |= IEEE80211_FCTL_PROTECTED; 93 fc |= IEEE80211_FCTL_PROTECTED;
94 hdr->frame_control = cpu_to_le16(fc); 94 hdr->frame_control = cpu_to_le16(fc);
95 95
96 if ((skb_headroom(skb) < WEP_IV_LEN || 96 if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
97 skb_tailroom(skb) < WEP_ICV_LEN)) { 97 skb_headroom(skb) < WEP_IV_LEN))
98 I802_DEBUG_INC(local->tx_expand_skb_head); 98 return NULL;
99 if (unlikely(pskb_expand_head(skb, WEP_IV_LEN, WEP_ICV_LEN,
100 GFP_ATOMIC)))
101 return NULL;
102 }
103 99
104 hdrlen = ieee80211_get_hdrlen(fc); 100 hdrlen = ieee80211_get_hdrlen(fc);
105 newhdr = skb_push(skb, WEP_IV_LEN); 101 newhdr = skb_push(skb, WEP_IV_LEN);
@@ -333,11 +329,16 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
333 329
334static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) 330static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
335{ 331{
332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
333
334 info->control.iv_len = WEP_IV_LEN;
335 info->control.icv_len = WEP_ICV_LEN;
336
336 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 337 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
337 if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) 338 if (ieee80211_wep_encrypt(tx->local, skb, tx->key))
338 return -1; 339 return -1;
339 } else { 340 } else {
340 tx->control->key_idx = tx->key->conf.hw_key_idx; 341 info->control.hw_key = &tx->key->conf;
341 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 342 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
342 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) 343 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key))
343 return -1; 344 return -1;
@@ -349,8 +350,6 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
349ieee80211_tx_result 350ieee80211_tx_result
350ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) 351ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
351{ 352{
352 tx->control->iv_len = WEP_IV_LEN;
353 tx->control->icv_len = WEP_ICV_LEN;
354 ieee80211_tx_set_protected(tx); 353 ieee80211_tx_set_protected(tx);
355 354
356 if (wep_encrypt_skb(tx, tx->skb) < 0) { 355 if (wep_encrypt_skb(tx, tx->skb) < 0) {
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 363779c50658..e587172115b8 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -26,7 +26,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
26 struct ieee80211_key *key); 26 struct ieee80211_key *key);
27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, 27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
28 struct ieee80211_key *key); 28 struct ieee80211_key *key);
29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 29u8 *ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
30 30
31ieee80211_rx_result 31ieee80211_rx_result
32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); 32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index a8bb8e31b1ec..4806d96b9877 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -169,14 +169,26 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
169 range->num_encoding_sizes = 2; 169 range->num_encoding_sizes = 2;
170 range->max_encoding_tokens = NUM_DEFAULT_KEYS; 170 range->max_encoding_tokens = NUM_DEFAULT_KEYS;
171 171
172 range->max_qual.qual = local->hw.max_signal; 172 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC ||
173 range->max_qual.level = local->hw.max_rssi; 173 local->hw.flags & IEEE80211_HW_SIGNAL_DB)
174 range->max_qual.noise = local->hw.max_noise; 174 range->max_qual.level = local->hw.max_signal;
175 else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
176 range->max_qual.level = -110;
177 else
178 range->max_qual.level = 0;
179
180 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
181 range->max_qual.noise = -110;
182 else
183 range->max_qual.noise = 0;
184
185 range->max_qual.qual = 100;
175 range->max_qual.updated = local->wstats_flags; 186 range->max_qual.updated = local->wstats_flags;
176 187
177 range->avg_qual.qual = local->hw.max_signal/2; 188 range->avg_qual.qual = 50;
178 range->avg_qual.level = 0; 189 /* not always true but better than nothing */
179 range->avg_qual.noise = 0; 190 range->avg_qual.level = range->max_qual.level / 2;
191 range->avg_qual.noise = range->max_qual.noise / 2;
180 range->avg_qual.updated = local->wstats_flags; 192 range->avg_qual.updated = local->wstats_flags;
181 193
182 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 194 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
@@ -1007,8 +1019,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
1007 wstats->qual.noise = 0; 1019 wstats->qual.noise = 0;
1008 wstats->qual.updated = IW_QUAL_ALL_INVALID; 1020 wstats->qual.updated = IW_QUAL_ALL_INVALID;
1009 } else { 1021 } else {
1010 wstats->qual.level = sta->last_rssi; 1022 wstats->qual.level = sta->last_signal;
1011 wstats->qual.qual = sta->last_signal; 1023 wstats->qual.qual = sta->last_qual;
1012 wstats->qual.noise = sta->last_noise; 1024 wstats->qual.noise = sta->last_noise;
1013 wstats->qual.updated = local->wstats_flags; 1025 wstats->qual.updated = local->wstats_flags;
1014 } 1026 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index dc1598b86004..14a9ff10a1e9 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -19,16 +19,22 @@
19#include "wme.h" 19#include "wme.h"
20 20
21/* maximum number of hardware queues we support. */ 21/* maximum number of hardware queues we support. */
22#define TC_80211_MAX_QUEUES 16 22#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
23/* current number of hardware queues we support. */
24#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
23 25
26/*
27 * Default mapping in classifier to work with default
28 * queue setup.
29 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 30const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 31
26struct ieee80211_sched_data 32struct ieee80211_sched_data
27{ 33{
28 unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)]; 34 unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
29 struct tcf_proto *filter_list; 35 struct tcf_proto *filter_list;
30 struct Qdisc *queues[TC_80211_MAX_QUEUES]; 36 struct Qdisc *queues[QD_MAX_QUEUES];
31 struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; 37 struct sk_buff_head requeued[QD_MAX_QUEUES];
32}; 38};
33 39
34static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0}; 40static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
@@ -95,7 +101,7 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
95 101
96/* positive return value indicates which queue to use 102/* positive return value indicates which queue to use
97 * negative return value indicates to drop the frame */ 103 * negative return value indicates to drop the frame */
98static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) 104static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
99{ 105{
100 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 106 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -106,7 +112,7 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
106 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { 112 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
107 /* management frames go on AC_VO queue, but are sent 113 /* management frames go on AC_VO queue, but are sent
108 * without QoS control fields */ 114 * without QoS control fields */
109 return IEEE80211_TX_QUEUE_DATA0; 115 return 0;
110 } 116 }
111 117
112 if (0 /* injected */) { 118 if (0 /* injected */) {
@@ -141,29 +147,29 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
141static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) 147static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
142{ 148{
143 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 149 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
150 struct ieee80211_hw *hw = &local->hw;
144 struct ieee80211_sched_data *q = qdisc_priv(qd); 151 struct ieee80211_sched_data *q = qdisc_priv(qd);
145 struct ieee80211_tx_packet_data *pkt_data = 152 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
146 (struct ieee80211_tx_packet_data *) skb->cb;
147 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 153 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
148 unsigned short fc = le16_to_cpu(hdr->frame_control); 154 unsigned short fc = le16_to_cpu(hdr->frame_control);
149 struct Qdisc *qdisc; 155 struct Qdisc *qdisc;
150 int err, queue;
151 struct sta_info *sta; 156 struct sta_info *sta;
157 int err, queue;
152 u8 tid; 158 u8 tid;
153 159
154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { 160 if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
155 queue = pkt_data->queue; 161 queue = skb_get_queue_mapping(skb);
156 rcu_read_lock(); 162 rcu_read_lock();
157 sta = sta_info_get(local, hdr->addr1); 163 sta = sta_info_get(local, hdr->addr1);
158 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 164 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
159 if (sta) { 165 if (sta) {
160 int ampdu_queue = sta->tid_to_tx_q[tid]; 166 int ampdu_queue = sta->tid_to_tx_q[tid];
161 if ((ampdu_queue < local->hw.queues) && 167 if ((ampdu_queue < QD_NUM(hw)) &&
162 test_bit(ampdu_queue, q->qdisc_pool)) { 168 test_bit(ampdu_queue, q->qdisc_pool)) {
163 queue = ampdu_queue; 169 queue = ampdu_queue;
164 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 170 info->flags |= IEEE80211_TX_CTL_AMPDU;
165 } else { 171 } else {
166 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 172 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
167 } 173 }
168 } 174 }
169 rcu_read_unlock(); 175 rcu_read_unlock();
@@ -174,6 +180,9 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
174 180
175 queue = classify80211(skb, qd); 181 queue = classify80211(skb, qd);
176 182
183 if (unlikely(queue >= local->hw.queues))
184 queue = local->hw.queues - 1;
185
177 /* now we know the 1d priority, fill in the QoS header if there is one 186 /* now we know the 1d priority, fill in the QoS header if there is one
178 */ 187 */
179 if (WLAN_FC_IS_QOS_DATA(fc)) { 188 if (WLAN_FC_IS_QOS_DATA(fc)) {
@@ -193,35 +202,24 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
193 sta = sta_info_get(local, hdr->addr1); 202 sta = sta_info_get(local, hdr->addr1);
194 if (sta) { 203 if (sta) {
195 int ampdu_queue = sta->tid_to_tx_q[tid]; 204 int ampdu_queue = sta->tid_to_tx_q[tid];
196 if ((ampdu_queue < local->hw.queues) && 205 if ((ampdu_queue < QD_NUM(hw)) &&
197 test_bit(ampdu_queue, q->qdisc_pool)) { 206 test_bit(ampdu_queue, q->qdisc_pool)) {
198 queue = ampdu_queue; 207 queue = ampdu_queue;
199 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 208 info->flags |= IEEE80211_TX_CTL_AMPDU;
200 } else { 209 } else {
201 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 210 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
202 } 211 }
203 } 212 }
204 213
205 rcu_read_unlock(); 214 rcu_read_unlock();
206 } 215 }
207 216
208 if (unlikely(queue >= local->hw.queues)) {
209#if 0
210 if (net_ratelimit()) {
211 printk(KERN_DEBUG "%s - queue=%d (hw does not "
212 "support) -> %d\n",
213 __func__, queue, local->hw.queues - 1);
214 }
215#endif
216 queue = local->hw.queues - 1;
217 }
218
219 if (unlikely(queue < 0)) { 217 if (unlikely(queue < 0)) {
220 kfree_skb(skb); 218 kfree_skb(skb);
221 err = NET_XMIT_DROP; 219 err = NET_XMIT_DROP;
222 } else { 220 } else {
223 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 221 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
224 pkt_data->queue = (unsigned int) queue; 222 skb_set_queue_mapping(skb, queue);
225 qdisc = q->queues[queue]; 223 qdisc = q->queues[queue];
226 err = qdisc->enqueue(skb, qdisc); 224 err = qdisc->enqueue(skb, qdisc);
227 if (err == NET_XMIT_SUCCESS) { 225 if (err == NET_XMIT_SUCCESS) {
@@ -242,13 +240,11 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
242static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd) 240static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
243{ 241{
244 struct ieee80211_sched_data *q = qdisc_priv(qd); 242 struct ieee80211_sched_data *q = qdisc_priv(qd);
245 struct ieee80211_tx_packet_data *pkt_data =
246 (struct ieee80211_tx_packet_data *) skb->cb;
247 struct Qdisc *qdisc; 243 struct Qdisc *qdisc;
248 int err; 244 int err;
249 245
250 /* we recorded which queue to use earlier! */ 246 /* we recorded which queue to use earlier! */
251 qdisc = q->queues[pkt_data->queue]; 247 qdisc = q->queues[skb_get_queue_mapping(skb)];
252 248
253 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) { 249 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
254 qd->q.qlen++; 250 qd->q.qlen++;
@@ -270,13 +266,10 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
270 int queue; 266 int queue;
271 267
272 /* check all the h/w queues in numeric/priority order */ 268 /* check all the h/w queues in numeric/priority order */
273 for (queue = 0; queue < hw->queues; queue++) { 269 for (queue = 0; queue < QD_NUM(hw); queue++) {
274 /* see if there is room in this hardware queue */ 270 /* see if there is room in this hardware queue */
275 if ((test_bit(IEEE80211_LINK_STATE_XOFF, 271 if (__netif_subqueue_stopped(local->mdev, queue) ||
276 &local->state[queue])) || 272 !test_bit(queue, q->qdisc_pool))
277 (test_bit(IEEE80211_LINK_STATE_PENDING,
278 &local->state[queue])) ||
279 (!test_bit(queue, q->qdisc_pool)))
280 continue; 273 continue;
281 274
282 /* there is space - try and get a frame */ 275 /* there is space - try and get a frame */
@@ -308,7 +301,7 @@ static void wme_qdiscop_reset(struct Qdisc* qd)
308 301
309 /* QUESTION: should we have some hardware flush functionality here? */ 302 /* QUESTION: should we have some hardware flush functionality here? */
310 303
311 for (queue = 0; queue < hw->queues; queue++) { 304 for (queue = 0; queue < QD_NUM(hw); queue++) {
312 skb_queue_purge(&q->requeued[queue]); 305 skb_queue_purge(&q->requeued[queue]);
313 qdisc_reset(q->queues[queue]); 306 qdisc_reset(q->queues[queue]);
314 } 307 }
@@ -326,7 +319,7 @@ static void wme_qdiscop_destroy(struct Qdisc* qd)
326 tcf_destroy_chain(q->filter_list); 319 tcf_destroy_chain(q->filter_list);
327 q->filter_list = NULL; 320 q->filter_list = NULL;
328 321
329 for (queue=0; queue < hw->queues; queue++) { 322 for (queue = 0; queue < QD_NUM(hw); queue++) {
330 skb_queue_purge(&q->requeued[queue]); 323 skb_queue_purge(&q->requeued[queue]);
331 qdisc_destroy(q->queues[queue]); 324 qdisc_destroy(q->queues[queue]);
332 q->queues[queue] = &noop_qdisc; 325 q->queues[queue] = &noop_qdisc;
@@ -337,17 +330,6 @@ static void wme_qdiscop_destroy(struct Qdisc* qd)
337/* called whenever parameters are updated on existing qdisc */ 330/* called whenever parameters are updated on existing qdisc */
338static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt) 331static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
339{ 332{
340/* struct ieee80211_sched_data *q = qdisc_priv(qd);
341*/
342 /* check our options block is the right size */
343 /* copy any options to our local structure */
344/* Ignore options block for now - always use static mapping
345 struct tc_ieee80211_qopt *qopt = nla_data(opt);
346
347 if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
348 return -EINVAL;
349 memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
350*/
351 return 0; 333 return 0;
352} 334}
353 335
@@ -358,7 +340,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
358 struct ieee80211_sched_data *q = qdisc_priv(qd); 340 struct ieee80211_sched_data *q = qdisc_priv(qd);
359 struct net_device *dev = qd->dev; 341 struct net_device *dev = qd->dev;
360 struct ieee80211_local *local; 342 struct ieee80211_local *local;
361 int queues; 343 struct ieee80211_hw *hw;
362 int err = 0, i; 344 int err = 0, i;
363 345
364 /* check that device is a mac80211 device */ 346 /* check that device is a mac80211 device */
@@ -366,29 +348,26 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
366 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) 348 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
367 return -EINVAL; 349 return -EINVAL;
368 350
369 /* check this device is an ieee80211 master type device */ 351 local = wdev_priv(dev->ieee80211_ptr);
370 if (dev->type != ARPHRD_IEEE80211) 352 hw = &local->hw;
353
354 /* only allow on master dev */
355 if (dev != local->mdev)
371 return -EINVAL; 356 return -EINVAL;
372 357
373 /* check that there is no qdisc currently attached to device 358 /* ensure that we are root qdisc */
374 * this ensures that we will be the root qdisc. (I can't find a better 359 if (qd->parent != TC_H_ROOT)
375 * way to test this explicitly) */
376 if (dev->qdisc_sleeping != &noop_qdisc)
377 return -EINVAL; 360 return -EINVAL;
378 361
379 if (qd->flags & TCQ_F_INGRESS) 362 if (qd->flags & TCQ_F_INGRESS)
380 return -EINVAL; 363 return -EINVAL;
381 364
382 local = wdev_priv(dev->ieee80211_ptr);
383 queues = local->hw.queues;
384
385 /* if options were passed in, set them */ 365 /* if options were passed in, set them */
386 if (opt) { 366 if (opt)
387 err = wme_qdiscop_tune(qd, opt); 367 err = wme_qdiscop_tune(qd, opt);
388 }
389 368
390 /* create child queues */ 369 /* create child queues */
391 for (i = 0; i < queues; i++) { 370 for (i = 0; i < QD_NUM(hw); i++) {
392 skb_queue_head_init(&q->requeued[i]); 371 skb_queue_head_init(&q->requeued[i]);
393 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, 372 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
394 qd->handle); 373 qd->handle);
@@ -399,8 +378,8 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
399 } 378 }
400 } 379 }
401 380
402 /* reserve all legacy QoS queues */ 381 /* non-aggregation queues: reserve/mark as used */
403 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++) 382 for (i = 0; i < local->hw.queues; i++)
404 set_bit(i, q->qdisc_pool); 383 set_bit(i, q->qdisc_pool);
405 384
406 return err; 385 return err;
@@ -408,16 +387,6 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
408 387
409static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb) 388static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
410{ 389{
411/* struct ieee80211_sched_data *q = qdisc_priv(qd);
412 unsigned char *p = skb->tail;
413 struct tc_ieee80211_qopt opt;
414
415 memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
416 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
417*/ return skb->len;
418/*
419nla_put_failure:
420 skb_trim(skb, p - skb->data);*/
421 return -1; 390 return -1;
422} 391}
423 392
@@ -430,7 +399,7 @@ static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
430 struct ieee80211_hw *hw = &local->hw; 399 struct ieee80211_hw *hw = &local->hw;
431 unsigned long queue = arg - 1; 400 unsigned long queue = arg - 1;
432 401
433 if (queue >= hw->queues) 402 if (queue >= QD_NUM(hw))
434 return -EINVAL; 403 return -EINVAL;
435 404
436 if (!new) 405 if (!new)
@@ -454,7 +423,7 @@ wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
454 struct ieee80211_hw *hw = &local->hw; 423 struct ieee80211_hw *hw = &local->hw;
455 unsigned long queue = arg - 1; 424 unsigned long queue = arg - 1;
456 425
457 if (queue >= hw->queues) 426 if (queue >= QD_NUM(hw))
458 return NULL; 427 return NULL;
459 428
460 return q->queues[queue]; 429 return q->queues[queue];
@@ -467,7 +436,7 @@ static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
467 struct ieee80211_hw *hw = &local->hw; 436 struct ieee80211_hw *hw = &local->hw;
468 unsigned long queue = TC_H_MIN(classid); 437 unsigned long queue = TC_H_MIN(classid);
469 438
470 if (queue - 1 >= hw->queues) 439 if (queue - 1 >= QD_NUM(hw))
471 return 0; 440 return 0;
472 441
473 return queue; 442 return queue;
@@ -493,7 +462,7 @@ static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
493 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 462 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
494 struct ieee80211_hw *hw = &local->hw; 463 struct ieee80211_hw *hw = &local->hw;
495 464
496 if (cl - 1 > hw->queues) 465 if (cl - 1 > QD_NUM(hw))
497 return -ENOENT; 466 return -ENOENT;
498 467
499 /* TODO: put code to program hardware queue parameters here, 468 /* TODO: put code to program hardware queue parameters here,
@@ -510,7 +479,7 @@ static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
510 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 479 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
511 struct ieee80211_hw *hw = &local->hw; 480 struct ieee80211_hw *hw = &local->hw;
512 481
513 if (cl - 1 > hw->queues) 482 if (cl - 1 > QD_NUM(hw))
514 return -ENOENT; 483 return -ENOENT;
515 return 0; 484 return 0;
516} 485}
@@ -523,7 +492,7 @@ static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
523 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 492 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
524 struct ieee80211_hw *hw = &local->hw; 493 struct ieee80211_hw *hw = &local->hw;
525 494
526 if (cl - 1 > hw->queues) 495 if (cl - 1 > QD_NUM(hw))
527 return -ENOENT; 496 return -ENOENT;
528 tcm->tcm_handle = TC_H_MIN(cl); 497 tcm->tcm_handle = TC_H_MIN(cl);
529 tcm->tcm_parent = qd->handle; 498 tcm->tcm_parent = qd->handle;
@@ -541,7 +510,7 @@ static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
541 if (arg->stop) 510 if (arg->stop)
542 return; 511 return;
543 512
544 for (queue = 0; queue < hw->queues; queue++) { 513 for (queue = 0; queue < QD_NUM(hw); queue++) {
545 if (arg->count < arg->skip) { 514 if (arg->count < arg->skip) {
546 arg->count++; 515 arg->count++;
547 continue; 516 continue;
@@ -658,10 +627,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
658 DECLARE_MAC_BUF(mac); 627 DECLARE_MAC_BUF(mac);
659 628
660 /* prepare the filter and save it for the SW queue 629 /* prepare the filter and save it for the SW queue
661 * matching the recieved HW queue */ 630 * matching the received HW queue */
631
632 if (!local->hw.ampdu_queues)
633 return -EPERM;
662 634
663 /* try to get a Qdisc from the pool */ 635 /* try to get a Qdisc from the pool */
664 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++) 636 for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
665 if (!test_and_set_bit(i, q->qdisc_pool)) { 637 if (!test_and_set_bit(i, q->qdisc_pool)) {
666 ieee80211_stop_queue(local_to_hw(local), i); 638 ieee80211_stop_queue(local_to_hw(local), i);
667 sta->tid_to_tx_q[tid] = i; 639 sta->tid_to_tx_q[tid] = i;
@@ -690,13 +662,14 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
690 struct sta_info *sta, u16 tid, 662 struct sta_info *sta, u16 tid,
691 u8 requeue) 663 u8 requeue)
692{ 664{
665 struct ieee80211_hw *hw = &local->hw;
693 struct ieee80211_sched_data *q = 666 struct ieee80211_sched_data *q =
694 qdisc_priv(local->mdev->qdisc_sleeping); 667 qdisc_priv(local->mdev->qdisc_sleeping);
695 int agg_queue = sta->tid_to_tx_q[tid]; 668 int agg_queue = sta->tid_to_tx_q[tid];
696 669
697 /* return the qdisc to the pool */ 670 /* return the qdisc to the pool */
698 clear_bit(agg_queue, q->qdisc_pool); 671 clear_bit(agg_queue, q->qdisc_pool);
699 sta->tid_to_tx_q[tid] = local->hw.queues; 672 sta->tid_to_tx_q[tid] = QD_NUM(hw);
700 673
701 if (requeue) 674 if (requeue)
702 ieee80211_requeue(local, agg_queue); 675 ieee80211_requeue(local, agg_queue);
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index fcc6b05508cc..bbdb53344817 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -31,7 +31,7 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
31 return (fc & 0x8C) == 0x88; 31 return (fc & 0x8C) == 0x88;
32} 32}
33 33
34#ifdef CONFIG_NET_SCHED 34#ifdef CONFIG_MAC80211_QOS
35void ieee80211_install_qdisc(struct net_device *dev); 35void ieee80211_install_qdisc(struct net_device *dev);
36int ieee80211_qdisc_installed(struct net_device *dev); 36int ieee80211_qdisc_installed(struct net_device *dev);
37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, 37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 45709ada8fee..9f6fd20374e1 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -79,6 +79,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
79 struct sk_buff *skb = tx->skb; 79 struct sk_buff *skb = tx->skb;
80 int authenticator; 80 int authenticator;
81 int wpa_test = 0; 81 int wpa_test = 0;
82 int tail;
82 83
83 fc = tx->fc; 84 fc = tx->fc;
84 85
@@ -98,16 +99,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
98 return TX_CONTINUE; 99 return TX_CONTINUE;
99 } 100 }
100 101
101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { 102 tail = MICHAEL_MIC_LEN;
102 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 103 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
103 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, 104 tail += TKIP_ICV_LEN;
104 MICHAEL_MIC_LEN + TKIP_ICV_LEN, 105
105 GFP_ATOMIC))) { 106 if (WARN_ON(skb_tailroom(skb) < tail ||
106 printk(KERN_DEBUG "%s: failed to allocate more memory " 107 skb_headroom(skb) < TKIP_IV_LEN))
107 "for Michael MIC\n", tx->dev->name); 108 return TX_DROP;
108 return TX_DROP;
109 }
110 }
111 109
112#if 0 110#if 0
113 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ 111 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
@@ -176,59 +174,65 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
176 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 174 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
177 175
178 /* update IV in key information to be able to detect replays */ 176 /* update IV in key information to be able to detect replays */
179 rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32; 177 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
180 rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16; 178 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
181 179
182 return RX_CONTINUE; 180 return RX_CONTINUE;
183} 181}
184 182
185 183
186static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, 184static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
187 struct sk_buff *skb, int test)
188{ 185{
189 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 186 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
190 struct ieee80211_key *key = tx->key; 187 struct ieee80211_key *key = tx->key;
191 int hdrlen, len, tailneed; 188 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
189 int hdrlen, len, tail;
192 u16 fc; 190 u16 fc;
193 u8 *pos; 191 u8 *pos;
194 192
193 info->control.icv_len = TKIP_ICV_LEN;
194 info->control.iv_len = TKIP_IV_LEN;
195
196 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
197 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
198 /* hwaccel - with no need for preallocated room for IV/ICV */
199 info->control.hw_key = &tx->key->conf;
200 return 0;
201 }
202
195 fc = le16_to_cpu(hdr->frame_control); 203 fc = le16_to_cpu(hdr->frame_control);
196 hdrlen = ieee80211_get_hdrlen(fc); 204 hdrlen = ieee80211_get_hdrlen(fc);
197 len = skb->len - hdrlen; 205 len = skb->len - hdrlen;
198 206
199 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 207 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
200 tailneed = 0; 208 tail = 0;
201 else 209 else
202 tailneed = TKIP_ICV_LEN; 210 tail = TKIP_ICV_LEN;
203 211
204 if ((skb_headroom(skb) < TKIP_IV_LEN || 212 if (WARN_ON(skb_tailroom(skb) < tail ||
205 skb_tailroom(skb) < tailneed)) { 213 skb_headroom(skb) < TKIP_IV_LEN))
206 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 214 return -1;
207 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, tailneed,
208 GFP_ATOMIC)))
209 return -1;
210 }
211 215
212 pos = skb_push(skb, TKIP_IV_LEN); 216 pos = skb_push(skb, TKIP_IV_LEN);
213 memmove(pos, pos + TKIP_IV_LEN, hdrlen); 217 memmove(pos, pos + TKIP_IV_LEN, hdrlen);
214 pos += hdrlen; 218 pos += hdrlen;
215 219
216 /* Increase IV for the frame */ 220 /* Increase IV for the frame */
217 key->u.tkip.iv16++; 221 key->u.tkip.tx.iv16++;
218 if (key->u.tkip.iv16 == 0) 222 if (key->u.tkip.tx.iv16 == 0)
219 key->u.tkip.iv32++; 223 key->u.tkip.tx.iv32++;
220 224
221 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 225 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
222 hdr = (struct ieee80211_hdr *)skb->data; 226 hdr = (struct ieee80211_hdr *)skb->data;
223 227
224 /* hwaccel - with preallocated room for IV */ 228 /* hwaccel - with preallocated room for IV */
225 ieee80211_tkip_add_iv(pos, key, 229 ieee80211_tkip_add_iv(pos, key,
226 (u8) (key->u.tkip.iv16 >> 8), 230 (u8) (key->u.tkip.tx.iv16 >> 8),
227 (u8) (((key->u.tkip.iv16 >> 8) | 0x20) & 231 (u8) (((key->u.tkip.tx.iv16 >> 8) | 0x20) &
228 0x7f), 232 0x7f),
229 (u8) key->u.tkip.iv16); 233 (u8) key->u.tkip.tx.iv16);
230 234
231 tx->control->key_idx = tx->key->conf.hw_key_idx; 235 info->control.hw_key = &tx->key->conf;
232 return 0; 236 return 0;
233 } 237 }
234 238
@@ -246,28 +250,16 @@ ieee80211_tx_result
246ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 250ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
247{ 251{
248 struct sk_buff *skb = tx->skb; 252 struct sk_buff *skb = tx->skb;
249 int wpa_test = 0, test = 0;
250 253
251 tx->control->icv_len = TKIP_ICV_LEN;
252 tx->control->iv_len = TKIP_IV_LEN;
253 ieee80211_tx_set_protected(tx); 254 ieee80211_tx_set_protected(tx);
254 255
255 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 256 if (tkip_encrypt_skb(tx, skb) < 0)
256 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
257 !wpa_test) {
258 /* hwaccel - with no need for preallocated room for IV/ICV */
259 tx->control->key_idx = tx->key->conf.hw_key_idx;
260 return TX_CONTINUE;
261 }
262
263 if (tkip_encrypt_skb(tx, skb, test) < 0)
264 return TX_DROP; 257 return TX_DROP;
265 258
266 if (tx->extra_frag) { 259 if (tx->extra_frag) {
267 int i; 260 int i;
268 for (i = 0; i < tx->num_extra_frag; i++) { 261 for (i = 0; i < tx->num_extra_frag; i++) {
269 if (tkip_encrypt_skb(tx, tx->extra_frag[i], test) 262 if (tkip_encrypt_skb(tx, tx->extra_frag[i]) < 0)
270 < 0)
271 return TX_DROP; 263 return TX_DROP;
272 } 264 }
273 } 265 }
@@ -429,16 +421,27 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr)
429} 421}
430 422
431 423
432static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, 424static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
433 struct sk_buff *skb, int test)
434{ 425{
435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
436 struct ieee80211_key *key = tx->key; 427 struct ieee80211_key *key = tx->key;
437 int hdrlen, len, tailneed; 428 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
429 int hdrlen, len, tail;
438 u16 fc; 430 u16 fc;
439 u8 *pos, *pn, *b_0, *aad, *scratch; 431 u8 *pos, *pn, *b_0, *aad, *scratch;
440 int i; 432 int i;
441 433
434 info->control.icv_len = CCMP_MIC_LEN;
435 info->control.iv_len = CCMP_HDR_LEN;
436
437 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
438 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
439 /* hwaccel - with no need for preallocated room for CCMP "
440 * header or MIC fields */
441 info->control.hw_key = &tx->key->conf;
442 return 0;
443 }
444
442 scratch = key->u.ccmp.tx_crypto_buf; 445 scratch = key->u.ccmp.tx_crypto_buf;
443 b_0 = scratch + 3 * AES_BLOCK_LEN; 446 b_0 = scratch + 3 * AES_BLOCK_LEN;
444 aad = scratch + 4 * AES_BLOCK_LEN; 447 aad = scratch + 4 * AES_BLOCK_LEN;
@@ -448,17 +451,13 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx,
448 len = skb->len - hdrlen; 451 len = skb->len - hdrlen;
449 452
450 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 453 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
451 tailneed = 0; 454 tail = 0;
452 else 455 else
453 tailneed = CCMP_MIC_LEN; 456 tail = CCMP_MIC_LEN;
454 457
455 if ((skb_headroom(skb) < CCMP_HDR_LEN || 458 if (WARN_ON(skb_tailroom(skb) < tail ||
456 skb_tailroom(skb) < tailneed)) { 459 skb_headroom(skb) < CCMP_HDR_LEN))
457 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 460 return -1;
458 if (unlikely(pskb_expand_head(skb, CCMP_HDR_LEN, tailneed,
459 GFP_ATOMIC)))
460 return -1;
461 }
462 461
463 pos = skb_push(skb, CCMP_HDR_LEN); 462 pos = skb_push(skb, CCMP_HDR_LEN);
464 memmove(pos, pos + CCMP_HDR_LEN, hdrlen); 463 memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
@@ -478,7 +477,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx,
478 477
479 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 478 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
480 /* hwaccel - with preallocated room for CCMP header */ 479 /* hwaccel - with preallocated room for CCMP header */
481 tx->control->key_idx = key->conf.hw_key_idx; 480 info->control.hw_key = &tx->key->conf;
482 return 0; 481 return 0;
483 } 482 }
484 483
@@ -495,28 +494,16 @@ ieee80211_tx_result
495ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 494ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
496{ 495{
497 struct sk_buff *skb = tx->skb; 496 struct sk_buff *skb = tx->skb;
498 int test = 0;
499 497
500 tx->control->icv_len = CCMP_MIC_LEN;
501 tx->control->iv_len = CCMP_HDR_LEN;
502 ieee80211_tx_set_protected(tx); 498 ieee80211_tx_set_protected(tx);
503 499
504 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 500 if (ccmp_encrypt_skb(tx, skb) < 0)
505 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
506 /* hwaccel - with no need for preallocated room for CCMP "
507 * header or MIC fields */
508 tx->control->key_idx = tx->key->conf.hw_key_idx;
509 return TX_CONTINUE;
510 }
511
512 if (ccmp_encrypt_skb(tx, skb, test) < 0)
513 return TX_DROP; 501 return TX_DROP;
514 502
515 if (tx->extra_frag) { 503 if (tx->extra_frag) {
516 int i; 504 int i;
517 for (i = 0; i < tx->num_extra_frag; i++) { 505 for (i = 0; i < tx->num_extra_frag; i++) {
518 if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test) 506 if (ccmp_encrypt_skb(tx, tx->extra_frag[i]) < 0)
519 < 0)
520 return TX_DROP; 507 return TX_DROP;
521 } 508 }
522 } 509 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 662c1ccfee26..f27c99246a4c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -847,6 +847,25 @@ acct:
847} 847}
848EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); 848EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
849 849
850void __nf_ct_kill_acct(struct nf_conn *ct,
851 enum ip_conntrack_info ctinfo,
852 const struct sk_buff *skb,
853 int do_acct)
854{
855#ifdef CONFIG_NF_CT_ACCT
856 if (do_acct) {
857 spin_lock_bh(&nf_conntrack_lock);
858 ct->counters[CTINFO2DIR(ctinfo)].packets++;
859 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
860 skb->len - skb_network_offset(skb);
861 spin_unlock_bh(&nf_conntrack_lock);
862 }
863#endif
864 if (del_timer(&ct->timeout))
865 ct->timeout.function((unsigned long)ct);
866}
867EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
868
850#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 869#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
851 870
852#include <linux/netfilter/nfnetlink.h> 871#include <linux/netfilter/nfnetlink.h>
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index bcc19fa4ed1e..ba1c4915e9eb 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -88,13 +88,11 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
88 newlen = newoff + t->len; 88 newlen = newoff + t->len;
89 rcu_read_unlock(); 89 rcu_read_unlock();
90 90
91 if (newlen >= ksize(ct->ext)) { 91 new = krealloc(ct->ext, newlen, gfp);
92 new = kmalloc(newlen, gfp); 92 if (!new)
93 if (!new) 93 return NULL;
94 return NULL;
95
96 memcpy(new, ct->ext, ct->ext->len);
97 94
95 if (new != ct->ext) {
98 for (i = 0; i < NF_CT_EXT_NUM; i++) { 96 for (i = 0; i < NF_CT_EXT_NUM; i++) {
99 if (!nf_ct_ext_exist(ct, i)) 97 if (!nf_ct_ext_exist(ct, i))
100 continue; 98 continue;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0edefcfc5949..63c4e1f299b8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
4 * (C) 2001 by Jay Schulist <jschlst@samba.org> 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net> 6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2007 by Pablo Neira Ayuso <pablo@netfilter.org> 7 * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
8 * 8 *
9 * Initial connection tracking via netlink development funded and 9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -475,14 +475,14 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
475 if (ctnetlink_dump_id(skb, ct) < 0) 475 if (ctnetlink_dump_id(skb, ct) < 0)
476 goto nla_put_failure; 476 goto nla_put_failure;
477 477
478 if (ctnetlink_dump_status(skb, ct) < 0)
479 goto nla_put_failure;
480
478 if (events & IPCT_DESTROY) { 481 if (events & IPCT_DESTROY) {
479 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 482 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
480 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) 483 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
481 goto nla_put_failure; 484 goto nla_put_failure;
482 } else { 485 } else {
483 if (ctnetlink_dump_status(skb, ct) < 0)
484 goto nla_put_failure;
485
486 if (ctnetlink_dump_timeout(skb, ct) < 0) 486 if (ctnetlink_dump_timeout(skb, ct) < 0)
487 goto nla_put_failure; 487 goto nla_put_failure;
488 488
@@ -812,9 +812,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
812 return -ENOENT; 812 return -ENOENT;
813 } 813 }
814 } 814 }
815 if (del_timer(&ct->timeout))
816 ct->timeout.function((unsigned long)ct);
817 815
816 nf_ct_kill(ct);
818 nf_ct_put(ct); 817 nf_ct_put(ct);
819 818
820 return 0; 819 return 0;
@@ -891,20 +890,19 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
891 890
892 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) 891 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
893 /* unchangeable */ 892 /* unchangeable */
894 return -EINVAL; 893 return -EBUSY;
895 894
896 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 895 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
897 /* SEEN_REPLY bit can only be set */ 896 /* SEEN_REPLY bit can only be set */
898 return -EINVAL; 897 return -EBUSY;
899
900 898
901 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 899 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
902 /* ASSURED bit can only be set */ 900 /* ASSURED bit can only be set */
903 return -EINVAL; 901 return -EBUSY;
904 902
905 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 903 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
906#ifndef CONFIG_NF_NAT_NEEDED 904#ifndef CONFIG_NF_NAT_NEEDED
907 return -EINVAL; 905 return -EOPNOTSUPP;
908#else 906#else
909 struct nf_nat_range range; 907 struct nf_nat_range range;
910 908
@@ -945,7 +943,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
945 943
946 /* don't change helper of sibling connections */ 944 /* don't change helper of sibling connections */
947 if (ct->master) 945 if (ct->master)
948 return -EINVAL; 946 return -EBUSY;
949 947
950 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 948 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
951 if (err < 0) 949 if (err < 0)
@@ -963,7 +961,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
963 961
964 helper = __nf_conntrack_helper_find_byname(helpname); 962 helper = __nf_conntrack_helper_find_byname(helpname);
965 if (helper == NULL) 963 if (helper == NULL)
966 return -EINVAL; 964 return -EOPNOTSUPP;
967 965
968 if (help) { 966 if (help) {
969 if (help->helper == helper) 967 if (help->helper == helper)
@@ -1258,12 +1256,12 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1258 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1256 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1259 /* we only allow nat config for new conntracks */ 1257 /* we only allow nat config for new conntracks */
1260 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1258 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1261 err = -EINVAL; 1259 err = -EOPNOTSUPP;
1262 goto out_unlock; 1260 goto out_unlock;
1263 } 1261 }
1264 /* can't link an existing conntrack to a master */ 1262 /* can't link an existing conntrack to a master */
1265 if (cda[CTA_TUPLE_MASTER]) { 1263 if (cda[CTA_TUPLE_MASTER]) {
1266 err = -EINVAL; 1264 err = -EOPNOTSUPP;
1267 goto out_unlock; 1265 goto out_unlock;
1268 } 1266 }
1269 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), 1267 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
@@ -1608,7 +1606,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1608 h = __nf_conntrack_helper_find_byname(name); 1606 h = __nf_conntrack_helper_find_byname(name);
1609 if (!h) { 1607 if (!h) {
1610 spin_unlock_bh(&nf_conntrack_lock); 1608 spin_unlock_bh(&nf_conntrack_lock);
1611 return -EINVAL; 1609 return -EOPNOTSUPP;
1612 } 1610 }
1613 for (i = 0; i < nf_ct_expect_hsize; i++) { 1611 for (i = 0; i < nf_ct_expect_hsize; i++) {
1614 hlist_for_each_entry_safe(exp, n, next, 1612 hlist_for_each_entry_safe(exp, n, next,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index afb4a1861d2c..e7866dd3cde6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -475,8 +475,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
475 if (type == DCCP_PKT_RESET && 475 if (type == DCCP_PKT_RESET &&
476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
477 /* Tear down connection immediately if only reply is a RESET */ 477 /* Tear down connection immediately if only reply is a RESET */
478 if (del_timer(&ct->timeout)) 478 nf_ct_kill_acct(ct, ctinfo, skb);
479 ct->timeout.function((unsigned long)ct);
480 return NF_ACCEPT; 479 return NF_ACCEPT;
481 } 480 }
482 481
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index cbf2e27a22b2..41183a4d2d62 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -463,6 +463,82 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
463 return true; 463 return true;
464} 464}
465 465
466#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
467
468#include <linux/netfilter/nfnetlink.h>
469#include <linux/netfilter/nfnetlink_conntrack.h>
470
471static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
472 const struct nf_conn *ct)
473{
474 struct nlattr *nest_parms;
475
476 read_lock_bh(&sctp_lock);
477 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED);
478 if (!nest_parms)
479 goto nla_put_failure;
480
481 NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
482
483 NLA_PUT_BE32(skb,
484 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
485 htonl(ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]));
486
487 NLA_PUT_BE32(skb,
488 CTA_PROTOINFO_SCTP_VTAG_REPLY,
489 htonl(ct->proto.sctp.vtag[IP_CT_DIR_REPLY]));
490
491 read_unlock_bh(&sctp_lock);
492
493 nla_nest_end(skb, nest_parms);
494
495 return 0;
496
497nla_put_failure:
498 read_unlock_bh(&sctp_lock);
499 return -1;
500}
501
502static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
503 [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
504 [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
505 [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
506};
507
508static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
509{
510 struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
511 struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];
512 int err;
513
514 /* updates may not contain the internal protocol info, skip parsing */
515 if (!attr)
516 return 0;
517
518 err = nla_parse_nested(tb,
519 CTA_PROTOINFO_SCTP_MAX,
520 attr,
521 sctp_nla_policy);
522 if (err < 0)
523 return err;
524
525 if (!tb[CTA_PROTOINFO_SCTP_STATE] ||
526 !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||
527 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
528 return -EINVAL;
529
530 write_lock_bh(&sctp_lock);
531 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
532 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
533 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
534 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
535 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
536 write_unlock_bh(&sctp_lock);
537
538 return 0;
539}
540#endif
541
466#ifdef CONFIG_SYSCTL 542#ifdef CONFIG_SYSCTL
467static unsigned int sctp_sysctl_table_users; 543static unsigned int sctp_sysctl_table_users;
468static struct ctl_table_header *sctp_sysctl_header; 544static struct ctl_table_header *sctp_sysctl_header;
@@ -591,6 +667,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
591 .new = sctp_new, 667 .new = sctp_new,
592 .me = THIS_MODULE, 668 .me = THIS_MODULE,
593#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 669#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
670 .to_nlattr = sctp_to_nlattr,
671 .from_nlattr = nlattr_to_sctp,
594 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 672 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
595 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 673 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
596 .nla_policy = nf_ct_port_nla_policy, 674 .nla_policy = nf_ct_port_nla_policy,
@@ -617,6 +695,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
617 .new = sctp_new, 695 .new = sctp_new,
618 .me = THIS_MODULE, 696 .me = THIS_MODULE,
619#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 697#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
698 .to_nlattr = sctp_to_nlattr,
699 .from_nlattr = nlattr_to_sctp,
620 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 700 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
621 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 701 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
622 .nla_policy = nf_ct_port_nla_policy, 702 .nla_policy = nf_ct_port_nla_policy,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index ba94004fe323..8db13fba10bc 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -843,8 +843,7 @@ static int tcp_packet(struct nf_conn *ct,
843 /* Attempt to reopen a closed/aborted connection. 843 /* Attempt to reopen a closed/aborted connection.
844 * Delete this connection and look up again. */ 844 * Delete this connection and look up again. */
845 write_unlock_bh(&tcp_lock); 845 write_unlock_bh(&tcp_lock);
846 if (del_timer(&ct->timeout)) 846 nf_ct_kill(ct);
847 ct->timeout.function((unsigned long)ct);
848 return -NF_REPEAT; 847 return -NF_REPEAT;
849 } 848 }
850 /* Fall through */ 849 /* Fall through */
@@ -877,8 +876,7 @@ static int tcp_packet(struct nf_conn *ct,
877 if (LOG_INVALID(IPPROTO_TCP)) 876 if (LOG_INVALID(IPPROTO_TCP))
878 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 877 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
879 "nf_ct_tcp: killing out of sync session "); 878 "nf_ct_tcp: killing out of sync session ");
880 if (del_timer(&ct->timeout)) 879 nf_ct_kill(ct);
881 ct->timeout.function((unsigned long)ct);
882 return -NF_DROP; 880 return -NF_DROP;
883 } 881 }
884 ct->proto.tcp.last_index = index; 882 ct->proto.tcp.last_index = index;
@@ -961,8 +959,7 @@ static int tcp_packet(struct nf_conn *ct,
961 problem case, so we can delete the conntrack 959 problem case, so we can delete the conntrack
962 immediately. --RR */ 960 immediately. --RR */
963 if (th->rst) { 961 if (th->rst) {
964 if (del_timer(&ct->timeout)) 962 nf_ct_kill_acct(ct, ctinfo, skb);
965 ct->timeout.function((unsigned long)ct);
966 return NF_ACCEPT; 963 return NF_ACCEPT;
967 } 964 }
968 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) 965 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3447025ce068..04e9c965f8ca 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -243,7 +243,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
243 switch ((enum nfqnl_config_mode)queue->copy_mode) { 243 switch ((enum nfqnl_config_mode)queue->copy_mode) {
244 case NFQNL_COPY_META: 244 case NFQNL_COPY_META:
245 case NFQNL_COPY_NONE: 245 case NFQNL_COPY_NONE:
246 data_len = 0;
247 break; 246 break;
248 247
249 case NFQNL_COPY_PACKET: 248 case NFQNL_COPY_PACKET:
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 211189eb2b67..76ca1f2421eb 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -8,7 +8,7 @@
8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> 8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
9 * by Henrik Nordstrom <hno@marasystems.com> 9 * by Henrik Nordstrom <hno@marasystems.com>
10 * 10 *
11 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 11 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
@@ -94,6 +94,12 @@ connsecmark_tg_check(const char *tablename, const void *entry,
94{ 94{
95 const struct xt_connsecmark_target_info *info = targinfo; 95 const struct xt_connsecmark_target_info *info = targinfo;
96 96
97 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
98 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
99 "or \'security\' tables, not \'%s\'.\n", tablename);
100 return false;
101 }
102
97 switch (info->mode) { 103 switch (info->mode) {
98 case CONNSECMARK_SAVE: 104 case CONNSECMARK_SAVE:
99 case CONNSECMARK_RESTORE: 105 case CONNSECMARK_RESTORE:
@@ -126,7 +132,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
126 .destroy = connsecmark_tg_destroy, 132 .destroy = connsecmark_tg_destroy,
127 .target = connsecmark_tg, 133 .target = connsecmark_tg,
128 .targetsize = sizeof(struct xt_connsecmark_target_info), 134 .targetsize = sizeof(struct xt_connsecmark_target_info),
129 .table = "mangle",
130 .me = THIS_MODULE, 135 .me = THIS_MODULE,
131 }, 136 },
132 { 137 {
@@ -136,7 +141,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
136 .destroy = connsecmark_tg_destroy, 141 .destroy = connsecmark_tg_destroy,
137 .target = connsecmark_tg, 142 .target = connsecmark_tg,
138 .targetsize = sizeof(struct xt_connsecmark_target_info), 143 .targetsize = sizeof(struct xt_connsecmark_target_info),
139 .table = "mangle",
140 .me = THIS_MODULE, 144 .me = THIS_MODULE,
141 }, 145 },
142}; 146};
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index c0284856ccd4..94f87ee7552b 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -5,7 +5,7 @@
5 * Based on the nfmark match by: 5 * Based on the nfmark match by:
6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> 6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
7 * 7 *
8 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 8 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -89,6 +89,12 @@ secmark_tg_check(const char *tablename, const void *entry,
89{ 89{
90 struct xt_secmark_target_info *info = targinfo; 90 struct xt_secmark_target_info *info = targinfo;
91 91
92 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
93 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
94 "or \'security\' tables, not \'%s\'.\n", tablename);
95 return false;
96 }
97
92 if (mode && mode != info->mode) { 98 if (mode && mode != info->mode) {
93 printk(KERN_INFO PFX "mode already set to %hu cannot mix with " 99 printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
94 "rules for mode %hu\n", mode, info->mode); 100 "rules for mode %hu\n", mode, info->mode);
@@ -127,7 +133,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
127 .destroy = secmark_tg_destroy, 133 .destroy = secmark_tg_destroy,
128 .target = secmark_tg, 134 .target = secmark_tg,
129 .targetsize = sizeof(struct xt_secmark_target_info), 135 .targetsize = sizeof(struct xt_secmark_target_info),
130 .table = "mangle",
131 .me = THIS_MODULE, 136 .me = THIS_MODULE,
132 }, 137 },
133 { 138 {
@@ -137,7 +142,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
137 .destroy = secmark_tg_destroy, 142 .destroy = secmark_tg_destroy,
138 .target = secmark_tg, 143 .target = secmark_tg,
139 .targetsize = sizeof(struct xt_secmark_target_info), 144 .targetsize = sizeof(struct xt_secmark_target_info),
140 .table = "mangle",
141 .me = THIS_MODULE, 145 .me = THIS_MODULE,
142 }, 146 },
143}; 147};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9b97f8006c9c..6507c02dbe0d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -759,7 +759,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
759 * 0: continue 759 * 0: continue
760 * 1: repeat lookup - reference dropped while waiting for socket memory. 760 * 1: repeat lookup - reference dropped while waiting for socket memory.
761 */ 761 */
762int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, 762int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
763 long *timeo, struct sock *ssk) 763 long *timeo, struct sock *ssk)
764{ 764{
765 struct netlink_sock *nlk; 765 struct netlink_sock *nlk;
@@ -892,7 +892,7 @@ retry:
892 return err; 892 return err;
893 } 893 }
894 894
895 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); 895 err = netlink_attachskb(sk, skb, &timeo, ssk);
896 if (err == 1) 896 if (err == 1)
897 goto retry; 897 goto retry;
898 if (err) 898 if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2cee87da4441..beca6402f1cf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PACKET - implements raw packet sockets. 6 * PACKET - implements raw packet sockets.
7 * 7 *
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5bc1ed490180..213071859030 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -24,8 +24,6 @@
24 * Jiri Fojtasek 24 * Jiri Fojtasek
25 * fixed requeue routine 25 * fixed requeue routine
26 * and many others. thanks. 26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */ 27 */
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/types.h> 29#include <linux/types.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 532634861db1..d5cc731b6798 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -136,6 +136,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
136 136
137 /* Set association default SACK delay */ 137 /* Set association default SACK delay */
138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); 138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
139 asoc->sackfreq = sp->sackfreq;
139 140
140 /* Set the association default flags controlling 141 /* Set the association default flags controlling
141 * Heartbeat, SACK delay, and Path MTU Discovery. 142 * Heartbeat, SACK delay, and Path MTU Discovery.
@@ -261,6 +262,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
261 * already received one packet.] 262 * already received one packet.]
262 */ 263 */
263 asoc->peer.sack_needed = 1; 264 asoc->peer.sack_needed = 1;
265 asoc->peer.sack_cnt = 0;
264 266
265 /* Assume that the peer will tell us if he recognizes ASCONF 267 /* Assume that the peer will tell us if he recognizes ASCONF
266 * as part of INIT exchange. 268 * as part of INIT exchange.
@@ -615,6 +617,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
615 * association configured value. 617 * association configured value.
616 */ 618 */
617 peer->sackdelay = asoc->sackdelay; 619 peer->sackdelay = asoc->sackdelay;
620 peer->sackfreq = asoc->sackfreq;
618 621
619 /* Enable/disable heartbeat, SACK delay, and path MTU discovery 622 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
620 * based on association setting. 623 * based on association setting.
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0aba759cb9b7..5dd89831eceb 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -383,3 +383,144 @@ void sctp_assocs_proc_exit(void)
383{ 383{
384 remove_proc_entry("assocs", proc_net_sctp); 384 remove_proc_entry("assocs", proc_net_sctp);
385} 385}
386
387static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
388{
389 if (*pos >= sctp_assoc_hashsize)
390 return NULL;
391
392 if (*pos < 0)
393 *pos = 0;
394
395 if (*pos == 0)
396 seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
397 "REM_ADDR_RTX START\n");
398
399 return (void *)pos;
400}
401
402static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
403{
404 if (++*pos >= sctp_assoc_hashsize)
405 return NULL;
406
407 return pos;
408}
409
410static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
411{
412 return;
413}
414
415static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
416{
417 struct sctp_hashbucket *head;
418 struct sctp_ep_common *epb;
419 struct sctp_association *assoc;
420 struct hlist_node *node;
421 struct sctp_transport *tsp;
422 int hash = *(loff_t *)v;
423
424 if (hash >= sctp_assoc_hashsize)
425 return -ENOMEM;
426
427 head = &sctp_assoc_hashtable[hash];
428 sctp_local_bh_disable();
429 read_lock(&head->lock);
430 sctp_for_each_hentry(epb, node, &head->chain) {
431 assoc = sctp_assoc(epb);
432 list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
433 transports) {
434 /*
435 * The remote address (ADDR)
436 */
437 tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
438 seq_printf(seq, " ");
439
440 /*
441 * The association ID (ASSOC_ID)
442 */
443 seq_printf(seq, "%d ", tsp->asoc->assoc_id);
444
445 /*
446 * If the Heartbeat is active (HB_ACT)
447 * Note: 1 = Active, 0 = Inactive
448 */
449 seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));
450
451 /*
452 * Retransmit time out (RTO)
453 */
454 seq_printf(seq, "%lu ", tsp->rto);
455
456 /*
457 * Maximum path retransmit count (PATH_MAX_RTX)
458 */
459 seq_printf(seq, "%d ", tsp->pathmaxrxt);
460
461 /*
462 * remote address retransmit count (REM_ADDR_RTX)
463 * Note: We don't have a way to tally this at the moment
464 * so lets just leave it as zero for the moment
465 */
466 seq_printf(seq, "0 ");
467
468 /*
469 * remote address start time (START). This is also not
470 * currently implemented, but we can record it with a
471 * jiffies marker in a subsequent patch
472 */
473 seq_printf(seq, "0");
474
475 seq_printf(seq, "\n");
476 }
477 }
478
479 read_unlock(&head->lock);
480 sctp_local_bh_enable();
481
482 return 0;
483
484}
485
486static const struct seq_operations sctp_remaddr_ops = {
487 .start = sctp_remaddr_seq_start,
488 .next = sctp_remaddr_seq_next,
489 .stop = sctp_remaddr_seq_stop,
490 .show = sctp_remaddr_seq_show,
491};
492
493/* Cleanup the proc fs entry for 'remaddr' object. */
494void sctp_remaddr_proc_exit(void)
495{
496 remove_proc_entry("remaddr", proc_net_sctp);
497}
498
499static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
500{
501 return seq_open(file, &sctp_remaddr_ops);
502}
503
504static const struct file_operations sctp_remaddr_seq_fops = {
505 .open = sctp_remaddr_seq_open,
506 .read = seq_read,
507 .llseek = seq_lseek,
508 .release = seq_release,
509};
510
511int __init sctp_remaddr_proc_init(void)
512{
513 struct proc_dir_entry *p;
514
515 p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp);
516 if (!p)
517 return -ENOMEM;
518 p->proc_fops = &sctp_remaddr_seq_fops;
519
520 return 0;
521}
522
523void sctp_assoc_proc_exit(void)
524{
525 remove_proc_entry("remaddr", proc_net_sctp);
526}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b435a193c5df..d6af466091d2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -113,6 +113,8 @@ static __init int sctp_proc_init(void)
113 goto out_nomem; 113 goto out_nomem;
114 if (sctp_assocs_proc_init()) 114 if (sctp_assocs_proc_init())
115 goto out_nomem; 115 goto out_nomem;
116 if (sctp_remaddr_proc_init())
117 goto out_nomem;
116 118
117 return 0; 119 return 0;
118 120
@@ -129,6 +131,7 @@ static void sctp_proc_exit(void)
129 sctp_snmp_proc_exit(); 131 sctp_snmp_proc_exit();
130 sctp_eps_proc_exit(); 132 sctp_eps_proc_exit();
131 sctp_assocs_proc_exit(); 133 sctp_assocs_proc_exit();
134 sctp_remaddr_proc_exit();
132 135
133 if (proc_net_sctp) { 136 if (proc_net_sctp) {
134 proc_net_sctp = NULL; 137 proc_net_sctp = NULL;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 23a9f1a95b7d..b083312c725a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -190,20 +190,28 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
190 * unacknowledged DATA chunk. ... 190 * unacknowledged DATA chunk. ...
191 */ 191 */
192 if (!asoc->peer.sack_needed) { 192 if (!asoc->peer.sack_needed) {
193 /* We will need a SACK for the next packet. */ 193 asoc->peer.sack_cnt++;
194 asoc->peer.sack_needed = 1;
195 194
196 /* Set the SACK delay timeout based on the 195 /* Set the SACK delay timeout based on the
197 * SACK delay for the last transport 196 * SACK delay for the last transport
198 * data was received from, or the default 197 * data was received from, or the default
199 * for the association. 198 * for the association.
200 */ 199 */
201 if (trans) 200 if (trans) {
201 /* We will need a SACK for the next packet. */
202 if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
203 asoc->peer.sack_needed = 1;
204
202 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
203 trans->sackdelay; 206 trans->sackdelay;
204 else 207 } else {
208 /* We will need a SACK for the next packet. */
209 if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
210 asoc->peer.sack_needed = 1;
211
205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 212 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
206 asoc->sackdelay; 213 asoc->sackdelay;
214 }
207 215
208 /* Restart the SACK timer. */ 216 /* Restart the SACK timer. */
209 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 217 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
@@ -216,6 +224,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
216 goto nomem; 224 goto nomem;
217 225
218 asoc->peer.sack_needed = 0; 226 asoc->peer.sack_needed = 0;
227 asoc->peer.sack_cnt = 0;
219 228
220 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); 229 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
221 230
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e7e3baf7009e..253e5ea7e1e8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -956,7 +956,8 @@ out:
956 */ 956 */
957static int __sctp_connect(struct sock* sk, 957static int __sctp_connect(struct sock* sk,
958 struct sockaddr *kaddrs, 958 struct sockaddr *kaddrs,
959 int addrs_size) 959 int addrs_size,
960 sctp_assoc_t *assoc_id)
960{ 961{
961 struct sctp_sock *sp; 962 struct sctp_sock *sp;
962 struct sctp_endpoint *ep; 963 struct sctp_endpoint *ep;
@@ -1111,6 +1112,8 @@ static int __sctp_connect(struct sock* sk,
1111 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1112 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1112 1113
1113 err = sctp_wait_for_connect(asoc, &timeo); 1114 err = sctp_wait_for_connect(asoc, &timeo);
1115 if (!err && assoc_id)
1116 *assoc_id = asoc->assoc_id;
1114 1117
1115 /* Don't free association on exit. */ 1118 /* Don't free association on exit. */
1116 asoc = NULL; 1119 asoc = NULL;
@@ -1128,7 +1131,8 @@ out_free:
1128/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1131/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1129 * 1132 *
1130 * API 8.9 1133 * API 8.9
1131 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt); 1134 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1135 * sctp_assoc_t *asoc);
1132 * 1136 *
1133 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1137 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1134 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1138 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
@@ -1144,8 +1148,10 @@ out_free:
1144 * representation is termed a "packed array" of addresses). The caller 1148 * representation is termed a "packed array" of addresses). The caller
1145 * specifies the number of addresses in the array with addrcnt. 1149 * specifies the number of addresses in the array with addrcnt.
1146 * 1150 *
1147 * On success, sctp_connectx() returns 0. On failure, sctp_connectx() returns 1151 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1148 * -1, and sets errno to the appropriate error code. 1152 * the association id of the new association. On failure, sctp_connectx()
1153 * returns -1, and sets errno to the appropriate error code. The assoc_id
1154 * is not touched by the kernel.
1149 * 1155 *
1150 * For SCTP, the port given in each socket address must be the same, or 1156 * For SCTP, the port given in each socket address must be the same, or
1151 * sctp_connectx() will fail, setting errno to EINVAL. 1157 * sctp_connectx() will fail, setting errno to EINVAL.
@@ -1182,11 +1188,12 @@ out_free:
1182 * addrs The pointer to the addresses in user land 1188 * addrs The pointer to the addresses in user land
1183 * addrssize Size of the addrs buffer 1189 * addrssize Size of the addrs buffer
1184 * 1190 *
1185 * Returns 0 if ok, <0 errno code on error. 1191 * Returns >=0 if ok, <0 errno code on error.
1186 */ 1192 */
1187SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, 1193SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
1188 struct sockaddr __user *addrs, 1194 struct sockaddr __user *addrs,
1189 int addrs_size) 1195 int addrs_size,
1196 sctp_assoc_t *assoc_id)
1190{ 1197{
1191 int err = 0; 1198 int err = 0;
1192 struct sockaddr *kaddrs; 1199 struct sockaddr *kaddrs;
@@ -1209,13 +1216,46 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1209 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 1216 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1210 err = -EFAULT; 1217 err = -EFAULT;
1211 } else { 1218 } else {
1212 err = __sctp_connect(sk, kaddrs, addrs_size); 1219 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1213 } 1220 }
1214 1221
1215 kfree(kaddrs); 1222 kfree(kaddrs);
1223
1216 return err; 1224 return err;
1217} 1225}
1218 1226
1227/*
1228 * This is an older interface. It's kept for backward compatibility
1229 * to the option that doesn't provide association id.
1230 */
1231SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
1232 struct sockaddr __user *addrs,
1233 int addrs_size)
1234{
1235 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1236}
1237
1238/*
1239 * New interface for the API. The since the API is done with a socket
1240 * option, to make it simple we feed back the association id is as a return
1241 * indication to the call. Error is always negative and association id is
1242 * always positive.
1243 */
1244SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1245 struct sockaddr __user *addrs,
1246 int addrs_size)
1247{
1248 sctp_assoc_t assoc_id = 0;
1249 int err = 0;
1250
1251 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1252
1253 if (err)
1254 return err;
1255 else
1256 return assoc_id;
1257}
1258
1219/* API 3.1.4 close() - UDP Style Syntax 1259/* API 3.1.4 close() - UDP Style Syntax
1220 * Applications use close() to perform graceful shutdown (as described in 1260 * Applications use close() to perform graceful shutdown (as described in
1221 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1261 * Section 10.1 of [SCTP]) on ALL the associations currently represented
@@ -2305,74 +2345,98 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2305 return 0; 2345 return 0;
2306} 2346}
2307 2347
2308/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 2348/*
2309 * 2349 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2310 * This options will get or set the delayed ack timer. The time is set 2350 *
2311 * in milliseconds. If the assoc_id is 0, then this sets or gets the 2351 * This option will effect the way delayed acks are performed. This
2312 * endpoints default delayed ack timer value. If the assoc_id field is 2352 * option allows you to get or set the delayed ack time, in
2313 * non-zero, then the set or get effects the specified association. 2353 * milliseconds. It also allows changing the delayed ack frequency.
2314 * 2354 * Changing the frequency to 1 disables the delayed sack algorithm. If
2315 * struct sctp_assoc_value { 2355 * the assoc_id is 0, then this sets or gets the endpoints default
2316 * sctp_assoc_t assoc_id; 2356 * values. If the assoc_id field is non-zero, then the set or get
2317 * uint32_t assoc_value; 2357 * effects the specified association for the one to many model (the
2318 * }; 2358 * assoc_id field is ignored by the one to one model). Note that if
2359 * sack_delay or sack_freq are 0 when setting this option, then the
2360 * current values will remain unchanged.
2361 *
2362 * struct sctp_sack_info {
2363 * sctp_assoc_t sack_assoc_id;
2364 * uint32_t sack_delay;
2365 * uint32_t sack_freq;
2366 * };
2319 * 2367 *
2320 * assoc_id - This parameter, indicates which association the 2368 * sack_assoc_id - This parameter, indicates which association the user
2321 * user is preforming an action upon. Note that if 2369 * is performing an action upon. Note that if this field's value is
2322 * this field's value is zero then the endpoints 2370 * zero then the endpoints default value is changed (effecting future
2323 * default value is changed (effecting future 2371 * associations only).
2324 * associations only).
2325 * 2372 *
2326 * assoc_value - This parameter contains the number of milliseconds 2373 * sack_delay - This parameter contains the number of milliseconds that
2327 * that the user is requesting the delayed ACK timer 2374 * the user is requesting the delayed ACK timer be set to. Note that
2328 * be set to. Note that this value is defined in 2375 * this value is defined in the standard to be between 200 and 500
2329 * the standard to be between 200 and 500 milliseconds. 2376 * milliseconds.
2330 * 2377 *
2331 * Note: a value of zero will leave the value alone, 2378 * sack_freq - This parameter contains the number of packets that must
2332 * but disable SACK delay. A non-zero value will also 2379 * be received before a sack is sent without waiting for the delay
2333 * enable SACK delay. 2380 * timer to expire. The default value for this is 2, setting this
2381 * value to 1 will disable the delayed sack algorithm.
2334 */ 2382 */
2335 2383
2336static int sctp_setsockopt_delayed_ack_time(struct sock *sk, 2384static int sctp_setsockopt_delayed_ack(struct sock *sk,
2337 char __user *optval, int optlen) 2385 char __user *optval, int optlen)
2338{ 2386{
2339 struct sctp_assoc_value params; 2387 struct sctp_sack_info params;
2340 struct sctp_transport *trans = NULL; 2388 struct sctp_transport *trans = NULL;
2341 struct sctp_association *asoc = NULL; 2389 struct sctp_association *asoc = NULL;
2342 struct sctp_sock *sp = sctp_sk(sk); 2390 struct sctp_sock *sp = sctp_sk(sk);
2343 2391
2344 if (optlen != sizeof(struct sctp_assoc_value)) 2392 if (optlen == sizeof(struct sctp_sack_info)) {
2345 return - EINVAL; 2393 if (copy_from_user(&params, optval, optlen))
2394 return -EFAULT;
2346 2395
2347 if (copy_from_user(&params, optval, optlen)) 2396 if (params.sack_delay == 0 && params.sack_freq == 0)
2348 return -EFAULT; 2397 return 0;
2398 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2399 printk(KERN_WARNING "SCTP: Use of struct sctp_sack_info "
2400 "in delayed_ack socket option deprecated\n");
2401 printk(KERN_WARNING "SCTP: struct sctp_sack_info instead\n");
2402 if (copy_from_user(&params, optval, optlen))
2403 return -EFAULT;
2404
2405 if (params.sack_delay == 0)
2406 params.sack_freq = 1;
2407 else
2408 params.sack_freq = 0;
2409 } else
2410 return - EINVAL;
2349 2411
2350 /* Validate value parameter. */ 2412 /* Validate value parameter. */
2351 if (params.assoc_value > 500) 2413 if (params.sack_delay > 500)
2352 return -EINVAL; 2414 return -EINVAL;
2353 2415
2354 /* Get association, if assoc_id != 0 and the socket is a one 2416 /* Get association, if sack_assoc_id != 0 and the socket is a one
2355 * to many style socket, and an association was not found, then 2417 * to many style socket, and an association was not found, then
2356 * the id was invalid. 2418 * the id was invalid.
2357 */ 2419 */
2358 asoc = sctp_id2assoc(sk, params.assoc_id); 2420 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2359 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 2421 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2360 return -EINVAL; 2422 return -EINVAL;
2361 2423
2362 if (params.assoc_value) { 2424 if (params.sack_delay) {
2363 if (asoc) { 2425 if (asoc) {
2364 asoc->sackdelay = 2426 asoc->sackdelay =
2365 msecs_to_jiffies(params.assoc_value); 2427 msecs_to_jiffies(params.sack_delay);
2366 asoc->param_flags = 2428 asoc->param_flags =
2367 (asoc->param_flags & ~SPP_SACKDELAY) | 2429 (asoc->param_flags & ~SPP_SACKDELAY) |
2368 SPP_SACKDELAY_ENABLE; 2430 SPP_SACKDELAY_ENABLE;
2369 } else { 2431 } else {
2370 sp->sackdelay = params.assoc_value; 2432 sp->sackdelay = params.sack_delay;
2371 sp->param_flags = 2433 sp->param_flags =
2372 (sp->param_flags & ~SPP_SACKDELAY) | 2434 (sp->param_flags & ~SPP_SACKDELAY) |
2373 SPP_SACKDELAY_ENABLE; 2435 SPP_SACKDELAY_ENABLE;
2374 } 2436 }
2375 } else { 2437 }
2438
2439 if (params.sack_freq == 1) {
2376 if (asoc) { 2440 if (asoc) {
2377 asoc->param_flags = 2441 asoc->param_flags =
2378 (asoc->param_flags & ~SPP_SACKDELAY) | 2442 (asoc->param_flags & ~SPP_SACKDELAY) |
@@ -2382,22 +2446,40 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2382 (sp->param_flags & ~SPP_SACKDELAY) | 2446 (sp->param_flags & ~SPP_SACKDELAY) |
2383 SPP_SACKDELAY_DISABLE; 2447 SPP_SACKDELAY_DISABLE;
2384 } 2448 }
2449 } else if (params.sack_freq > 1) {
2450 if (asoc) {
2451 asoc->sackfreq = params.sack_freq;
2452 asoc->param_flags =
2453 (asoc->param_flags & ~SPP_SACKDELAY) |
2454 SPP_SACKDELAY_ENABLE;
2455 } else {
2456 sp->sackfreq = params.sack_freq;
2457 sp->param_flags =
2458 (sp->param_flags & ~SPP_SACKDELAY) |
2459 SPP_SACKDELAY_ENABLE;
2460 }
2385 } 2461 }
2386 2462
2387 /* If change is for association, also apply to each transport. */ 2463 /* If change is for association, also apply to each transport. */
2388 if (asoc) { 2464 if (asoc) {
2389 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2465 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2390 transports) { 2466 transports) {
2391 if (params.assoc_value) { 2467 if (params.sack_delay) {
2392 trans->sackdelay = 2468 trans->sackdelay =
2393 msecs_to_jiffies(params.assoc_value); 2469 msecs_to_jiffies(params.sack_delay);
2394 trans->param_flags = 2470 trans->param_flags =
2395 (trans->param_flags & ~SPP_SACKDELAY) | 2471 (trans->param_flags & ~SPP_SACKDELAY) |
2396 SPP_SACKDELAY_ENABLE; 2472 SPP_SACKDELAY_ENABLE;
2397 } else { 2473 }
2474 if (params.sack_freq == 1) {
2398 trans->param_flags = 2475 trans->param_flags =
2399 (trans->param_flags & ~SPP_SACKDELAY) | 2476 (trans->param_flags & ~SPP_SACKDELAY) |
2400 SPP_SACKDELAY_DISABLE; 2477 SPP_SACKDELAY_DISABLE;
2478 } else if (params.sack_freq > 1) {
2479 trans->sackfreq = params.sack_freq;
2480 trans->param_flags =
2481 (trans->param_flags & ~SPP_SACKDELAY) |
2482 SPP_SACKDELAY_ENABLE;
2401 } 2483 }
2402 } 2484 }
2403 } 2485 }
@@ -3164,10 +3246,18 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3164 optlen, SCTP_BINDX_REM_ADDR); 3246 optlen, SCTP_BINDX_REM_ADDR);
3165 break; 3247 break;
3166 3248
3249 case SCTP_SOCKOPT_CONNECTX_OLD:
3250 /* 'optlen' is the size of the addresses buffer. */
3251 retval = sctp_setsockopt_connectx_old(sk,
3252 (struct sockaddr __user *)optval,
3253 optlen);
3254 break;
3255
3167 case SCTP_SOCKOPT_CONNECTX: 3256 case SCTP_SOCKOPT_CONNECTX:
3168 /* 'optlen' is the size of the addresses buffer. */ 3257 /* 'optlen' is the size of the addresses buffer. */
3169 retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, 3258 retval = sctp_setsockopt_connectx(sk,
3170 optlen); 3259 (struct sockaddr __user *)optval,
3260 optlen);
3171 break; 3261 break;
3172 3262
3173 case SCTP_DISABLE_FRAGMENTS: 3263 case SCTP_DISABLE_FRAGMENTS:
@@ -3186,8 +3276,8 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3186 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3276 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3187 break; 3277 break;
3188 3278
3189 case SCTP_DELAYED_ACK_TIME: 3279 case SCTP_DELAYED_ACK:
3190 retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); 3280 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3191 break; 3281 break;
3192 case SCTP_PARTIAL_DELIVERY_POINT: 3282 case SCTP_PARTIAL_DELIVERY_POINT:
3193 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3283 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
@@ -3294,7 +3384,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
3294 /* Pass correct addr len to common routine (so it knows there 3384 /* Pass correct addr len to common routine (so it knows there
3295 * is only one address being passed. 3385 * is only one address being passed.
3296 */ 3386 */
3297 err = __sctp_connect(sk, addr, af->sockaddr_len); 3387 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3298 } 3388 }
3299 3389
3300 sctp_release_sock(sk); 3390 sctp_release_sock(sk);
@@ -3446,6 +3536,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3446 sp->pathmaxrxt = sctp_max_retrans_path; 3536 sp->pathmaxrxt = sctp_max_retrans_path;
3447 sp->pathmtu = 0; // allow default discovery 3537 sp->pathmtu = 0; // allow default discovery
3448 sp->sackdelay = sctp_sack_timeout; 3538 sp->sackdelay = sctp_sack_timeout;
3539 sp->sackfreq = 2;
3449 sp->param_flags = SPP_HB_ENABLE | 3540 sp->param_flags = SPP_HB_ENABLE |
3450 SPP_PMTUD_ENABLE | 3541 SPP_PMTUD_ENABLE |
3451 SPP_SACKDELAY_ENABLE; 3542 SPP_SACKDELAY_ENABLE;
@@ -3999,70 +4090,91 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
3999 return 0; 4090 return 0;
4000} 4091}
4001 4092
4002/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 4093/*
4003 * 4094 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4004 * This options will get or set the delayed ack timer. The time is set 4095 *
4005 * in milliseconds. If the assoc_id is 0, then this sets or gets the 4096 * This option will effect the way delayed acks are performed. This
4006 * endpoints default delayed ack timer value. If the assoc_id field is 4097 * option allows you to get or set the delayed ack time, in
4007 * non-zero, then the set or get effects the specified association. 4098 * milliseconds. It also allows changing the delayed ack frequency.
4008 * 4099 * Changing the frequency to 1 disables the delayed sack algorithm. If
4009 * struct sctp_assoc_value { 4100 * the assoc_id is 0, then this sets or gets the endpoints default
4010 * sctp_assoc_t assoc_id; 4101 * values. If the assoc_id field is non-zero, then the set or get
4011 * uint32_t assoc_value; 4102 * effects the specified association for the one to many model (the
4012 * }; 4103 * assoc_id field is ignored by the one to one model). Note that if
4104 * sack_delay or sack_freq are 0 when setting this option, then the
4105 * current values will remain unchanged.
4106 *
4107 * struct sctp_sack_info {
4108 * sctp_assoc_t sack_assoc_id;
4109 * uint32_t sack_delay;
4110 * uint32_t sack_freq;
4111 * };
4013 * 4112 *
4014 * assoc_id - This parameter, indicates which association the 4113 * sack_assoc_id - This parameter, indicates which association the user
4015 * user is preforming an action upon. Note that if 4114 * is performing an action upon. Note that if this field's value is
4016 * this field's value is zero then the endpoints 4115 * zero then the endpoints default value is changed (effecting future
4017 * default value is changed (effecting future 4116 * associations only).
4018 * associations only).
4019 * 4117 *
4020 * assoc_value - This parameter contains the number of milliseconds 4118 * sack_delay - This parameter contains the number of milliseconds that
4021 * that the user is requesting the delayed ACK timer 4119 * the user is requesting the delayed ACK timer be set to. Note that
4022 * be set to. Note that this value is defined in 4120 * this value is defined in the standard to be between 200 and 500
4023 * the standard to be between 200 and 500 milliseconds. 4121 * milliseconds.
4024 * 4122 *
4025 * Note: a value of zero will leave the value alone, 4123 * sack_freq - This parameter contains the number of packets that must
4026 * but disable SACK delay. A non-zero value will also 4124 * be received before a sack is sent without waiting for the delay
4027 * enable SACK delay. 4125 * timer to expire. The default value for this is 2, setting this
4126 * value to 1 will disable the delayed sack algorithm.
4028 */ 4127 */
4029static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, 4128static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4030 char __user *optval, 4129 char __user *optval,
4031 int __user *optlen) 4130 int __user *optlen)
4032{ 4131{
4033 struct sctp_assoc_value params; 4132 struct sctp_sack_info params;
4034 struct sctp_association *asoc = NULL; 4133 struct sctp_association *asoc = NULL;
4035 struct sctp_sock *sp = sctp_sk(sk); 4134 struct sctp_sock *sp = sctp_sk(sk);
4036 4135
4037 if (len < sizeof(struct sctp_assoc_value)) 4136 if (len >= sizeof(struct sctp_sack_info)) {
4038 return - EINVAL; 4137 len = sizeof(struct sctp_sack_info);
4039
4040 len = sizeof(struct sctp_assoc_value);
4041 4138
4042 if (copy_from_user(&params, optval, len)) 4139 if (copy_from_user(&params, optval, len))
4043 return -EFAULT; 4140 return -EFAULT;
4141 } else if (len == sizeof(struct sctp_assoc_value)) {
4142 printk(KERN_WARNING "SCTP: Use of struct sctp_sack_info "
4143 "in delayed_ack socket option deprecated\n");
4144 printk(KERN_WARNING "SCTP: struct sctp_sack_info instead\n");
4145 if (copy_from_user(&params, optval, len))
4146 return -EFAULT;
4147 } else
4148 return - EINVAL;
4044 4149
4045 /* Get association, if assoc_id != 0 and the socket is a one 4150 /* Get association, if sack_assoc_id != 0 and the socket is a one
4046 * to many style socket, and an association was not found, then 4151 * to many style socket, and an association was not found, then
4047 * the id was invalid. 4152 * the id was invalid.
4048 */ 4153 */
4049 asoc = sctp_id2assoc(sk, params.assoc_id); 4154 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4050 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 4155 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4051 return -EINVAL; 4156 return -EINVAL;
4052 4157
4053 if (asoc) { 4158 if (asoc) {
4054 /* Fetch association values. */ 4159 /* Fetch association values. */
4055 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) 4160 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4056 params.assoc_value = jiffies_to_msecs( 4161 params.sack_delay = jiffies_to_msecs(
4057 asoc->sackdelay); 4162 asoc->sackdelay);
4058 else 4163 params.sack_freq = asoc->sackfreq;
4059 params.assoc_value = 0; 4164
4165 } else {
4166 params.sack_delay = 0;
4167 params.sack_freq = 1;
4168 }
4060 } else { 4169 } else {
4061 /* Fetch socket values. */ 4170 /* Fetch socket values. */
4062 if (sp->param_flags & SPP_SACKDELAY_ENABLE) 4171 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4063 params.assoc_value = sp->sackdelay; 4172 params.sack_delay = sp->sackdelay;
4064 else 4173 params.sack_freq = sp->sackfreq;
4065 params.assoc_value = 0; 4174 } else {
4175 params.sack_delay = 0;
4176 params.sack_freq = 1;
4177 }
4066 } 4178 }
4067 4179
4068 if (copy_to_user(optval, &params, len)) 4180 if (copy_to_user(optval, &params, len))
@@ -5218,8 +5330,8 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5218 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5330 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5219 optlen); 5331 optlen);
5220 break; 5332 break;
5221 case SCTP_DELAYED_ACK_TIME: 5333 case SCTP_DELAYED_ACK:
5222 retval = sctp_getsockopt_delayed_ack_time(sk, len, optval, 5334 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
5223 optlen); 5335 optlen);
5224 break; 5336 break;
5225 case SCTP_INITMSG: 5337 case SCTP_INITMSG:
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index cc12d5f5d5da..019d4b4478c9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -33,8 +33,6 @@
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * $Id$
38 */ 36 */
39 37
40 38
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index b4f0525f91af..007c1a6708ee 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -4,7 +4,6 @@
4 * Begun April 1, 1996, Mike Shaver. 4 * Begun April 1, 1996, Mike Shaver.
5 * Added /proc/sys/net directories for each protocol family. [MS] 5 * Added /proc/sys/net directories for each protocol family. [MS]
6 * 6 *
7 * $Log: sysctl_net.c,v $
8 * Revision 1.2 1996/05/08 20:24:40 shaver 7 * Revision 1.2 1996/05/08 20:24:40 shaver
9 * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and 8 * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
10 * NET_IPV4_IP_FORWARD. 9 * NET_IPV4_IP_FORWARD.
@@ -40,6 +39,27 @@ static struct ctl_table_root net_sysctl_root = {
40 .lookup = net_ctl_header_lookup, 39 .lookup = net_ctl_header_lookup,
41}; 40};
42 41
42static LIST_HEAD(net_sysctl_ro_tables);
43static struct list_head *net_ctl_ro_header_lookup(struct ctl_table_root *root,
44 struct nsproxy *namespaces)
45{
46 return &net_sysctl_ro_tables;
47}
48
49static int net_ctl_ro_header_perms(struct ctl_table_root *root,
50 struct nsproxy *namespaces, struct ctl_table *table)
51{
52 if (namespaces->net_ns == &init_net)
53 return table->mode;
54 else
55 return table->mode & ~0222;
56}
57
58static struct ctl_table_root net_sysctl_ro_root = {
59 .lookup = net_ctl_ro_header_lookup,
60 .permissions = net_ctl_ro_header_perms,
61};
62
43static int sysctl_net_init(struct net *net) 63static int sysctl_net_init(struct net *net)
44{ 64{
45 INIT_LIST_HEAD(&net->sysctl_table_headers); 65 INIT_LIST_HEAD(&net->sysctl_table_headers);
@@ -64,6 +84,7 @@ static __init int sysctl_init(void)
64 if (ret) 84 if (ret)
65 goto out; 85 goto out;
66 register_sysctl_root(&net_sysctl_root); 86 register_sysctl_root(&net_sysctl_root);
87 register_sysctl_root(&net_sysctl_ro_root);
67out: 88out:
68 return ret; 89 return ret;
69} 90}
@@ -80,6 +101,14 @@ struct ctl_table_header *register_net_sysctl_table(struct net *net,
80} 101}
81EXPORT_SYMBOL_GPL(register_net_sysctl_table); 102EXPORT_SYMBOL_GPL(register_net_sysctl_table);
82 103
104struct ctl_table_header *register_net_sysctl_rotable(const
105 struct ctl_path *path, struct ctl_table *table)
106{
107 return __register_sysctl_paths(&net_sysctl_ro_root,
108 &init_nsproxy, path, table);
109}
110EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
111
83void unregister_net_sysctl_table(struct ctl_table_header *header) 112void unregister_net_sysctl_table(struct ctl_table_header *header)
84{ 113{
85 unregister_sysctl_table(header); 114 unregister_sysctl_table(header);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e7880172ef19..a5883b1452ff 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -276,7 +276,7 @@ static void bclink_send_nack(struct node *n_ptr)
276 if (buf) { 276 if (buf) {
277 msg = buf_msg(buf); 277 msg = buf_msg(buf);
278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
279 TIPC_OK, INT_H_SIZE, n_ptr->addr); 279 INT_H_SIZE, n_ptr->addr);
280 msg_set_mc_netid(msg, tipc_net_id); 280 msg_set_mc_netid(msg, tipc_net_id);
281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); 282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -571,7 +571,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
571 assert(tipc_cltr_bcast_nodes.count != 0); 571 assert(tipc_cltr_bcast_nodes.count != 0);
572 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count); 572 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
573 msg = buf_msg(buf); 573 msg = buf_msg(buf);
574 msg_set_non_seq(msg); 574 msg_set_non_seq(msg, 1);
575 msg_set_mc_netid(msg, tipc_net_id); 575 msg_set_mc_netid(msg, tipc_net_id);
576 } 576 }
577 577
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 4bb3404f610b..bc1db474fe01 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
238 if (buf) { 238 if (buf) {
239 msg = buf_msg(buf); 239 msg = buf_msg(buf);
240 memset((char *)msg, 0, size); 240 memset((char *)msg, 0, size);
241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest); 241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
242 } 242 }
243 return buf; 243 return buf;
244} 244}
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c71337a22d33..ca3544d030c7 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
2 * net/tipc/config.c: TIPC configuration management code 2 * net/tipc/config.c: TIPC configuration management code
3 * 3 *
4 * Copyright (c) 2002-2006, Ericsson AB 4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -293,7 +293,6 @@ static struct sk_buff *cfg_set_own_addr(void)
293 if (tipc_mode == TIPC_NET_MODE) 293 if (tipc_mode == TIPC_NET_MODE)
294 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 294 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
295 " (cannot change node address once assigned)"); 295 " (cannot change node address once assigned)");
296 tipc_own_addr = addr;
297 296
298 /* 297 /*
299 * Must release all spinlocks before calling start_net() because 298 * Must release all spinlocks before calling start_net() because
@@ -306,7 +305,7 @@ static struct sk_buff *cfg_set_own_addr(void)
306 */ 305 */
307 306
308 spin_unlock_bh(&config_lock); 307 spin_unlock_bh(&config_lock);
309 tipc_core_start_net(); 308 tipc_core_start_net(addr);
310 spin_lock_bh(&config_lock); 309 spin_lock_bh(&config_lock);
311 return tipc_cfg_reply_none(); 310 return tipc_cfg_reply_none();
312} 311}
@@ -529,7 +528,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
529 break; 528 break;
530#endif 529#endif
531 case TIPC_CMD_SET_LOG_SIZE: 530 case TIPC_CMD_SET_LOG_SIZE:
532 rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space); 531 rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
533 break; 532 break;
534 case TIPC_CMD_DUMP_LOG: 533 case TIPC_CMD_DUMP_LOG:
535 rep_tlv_buf = tipc_log_dump(); 534 rep_tlv_buf = tipc_log_dump();
@@ -602,6 +601,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
602 case TIPC_CMD_GET_NETID: 601 case TIPC_CMD_GET_NETID:
603 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); 602 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
604 break; 603 break;
604 case TIPC_CMD_NOT_NET_ADMIN:
605 rep_tlv_buf =
606 tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
607 break;
605 default: 608 default:
606 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 609 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
607 " (unknown command)"); 610 " (unknown command)");
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 740aac5cdfb6..3256bd7d398f 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
49#include "config.h" 49#include "config.h"
50 50
51 51
52#define TIPC_MOD_VER "1.6.3" 52#define TIPC_MOD_VER "1.6.4"
53 53
54#ifndef CONFIG_TIPC_ZONES 54#ifndef CONFIG_TIPC_ZONES
55#define CONFIG_TIPC_ZONES 3 55#define CONFIG_TIPC_ZONES 3
@@ -117,11 +117,11 @@ void tipc_core_stop_net(void)
117 * start_net - start TIPC networking sub-systems 117 * start_net - start TIPC networking sub-systems
118 */ 118 */
119 119
120int tipc_core_start_net(void) 120int tipc_core_start_net(unsigned long addr)
121{ 121{
122 int res; 122 int res;
123 123
124 if ((res = tipc_net_start()) || 124 if ((res = tipc_net_start(addr)) ||
125 (res = tipc_eth_media_start())) { 125 (res = tipc_eth_media_start())) {
126 tipc_core_stop_net(); 126 tipc_core_stop_net();
127 } 127 }
@@ -164,8 +164,7 @@ int tipc_core_start(void)
164 tipc_mode = TIPC_NODE_MODE; 164 tipc_mode = TIPC_NODE_MODE;
165 165
166 if ((res = tipc_handler_start()) || 166 if ((res = tipc_handler_start()) ||
167 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions, 167 (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) ||
168 tipc_random)) ||
169 (res = tipc_reg_start()) || 168 (res = tipc_reg_start()) ||
170 (res = tipc_nametbl_init()) || 169 (res = tipc_nametbl_init()) ||
171 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) || 170 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
@@ -182,7 +181,7 @@ static int __init tipc_init(void)
182{ 181{
183 int res; 182 int res;
184 183
185 tipc_log_reinit(CONFIG_TIPC_LOG); 184 tipc_log_resize(CONFIG_TIPC_LOG);
186 info("Activated (version " TIPC_MOD_VER 185 info("Activated (version " TIPC_MOD_VER
187 " compiled " __DATE__ " " __TIME__ ")\n"); 186 " compiled " __DATE__ " " __TIME__ ")\n");
188 187
@@ -209,7 +208,7 @@ static void __exit tipc_exit(void)
209 tipc_core_stop_net(); 208 tipc_core_stop_net();
210 tipc_core_stop(); 209 tipc_core_stop();
211 info("Deactivated\n"); 210 info("Deactivated\n");
212 tipc_log_stop(); 211 tipc_log_resize(0);
213} 212}
214 213
215module_init(tipc_init); 214module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5a0e4878d3b7..a881f92a8537 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
2 * net/tipc/core.h: Include file for TIPC global declarations 2 * net/tipc/core.h: Include file for TIPC global declarations
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -59,84 +59,108 @@
59#include <linux/vmalloc.h> 59#include <linux/vmalloc.h>
60 60
61/* 61/*
62 * TIPC debugging code 62 * TIPC sanity test macros
63 */ 63 */
64 64
65#define assert(i) BUG_ON(!(i)) 65#define assert(i) BUG_ON(!(i))
66 66
67struct tipc_msg;
68extern struct print_buf *TIPC_NULL, *TIPC_CONS, *TIPC_LOG;
69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
71void tipc_printf(struct print_buf *, const char *fmt, ...);
72void tipc_dump(struct print_buf*,const char *fmt, ...);
73
74#ifdef CONFIG_TIPC_DEBUG
75
76/* 67/*
77 * TIPC debug support included: 68 * TIPC system monitoring code
78 * - system messages are printed to TIPC_OUTPUT print buffer
79 * - debug messages are printed to DBG_OUTPUT print buffer
80 */ 69 */
81 70
82#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg) 71/*
83#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg) 72 * TIPC's print buffer subsystem supports the following print buffers:
84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) 73 *
74 * TIPC_NULL : null buffer (i.e. print nowhere)
75 * TIPC_CONS : system console
76 * TIPC_LOG : TIPC log buffer
77 * &buf : user-defined buffer (struct print_buf *)
78 *
79 * Note: TIPC_LOG is configured to echo its output to the system console;
80 * user-defined buffers can be configured to do the same thing.
81 */
85 82
86#define dbg(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) 83extern struct print_buf *const TIPC_NULL;
87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT != TIPC_NULL) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0) 84extern struct print_buf *const TIPC_CONS;
88#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 85extern struct print_buf *const TIPC_LOG;
89 86
87void tipc_printf(struct print_buf *, const char *fmt, ...);
90 88
91/* 89/*
92 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer, 90 * TIPC_OUTPUT is the destination print buffer for system messages.
93 * while DBG_OUTPUT is the null print buffer. These defaults can be changed
94 * here, or on a per .c file basis, by redefining these symbols. The following
95 * print buffer options are available:
96 *
97 * TIPC_NULL : null buffer (i.e. print nowhere)
98 * TIPC_CONS : system console
99 * TIPC_LOG : TIPC log buffer
100 * &buf : user-defined buffer (struct print_buf *)
101 * TIPC_TEE(&buf_a,&buf_b) : list of buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG))
102 */ 91 */
103 92
104#ifndef TIPC_OUTPUT 93#ifndef TIPC_OUTPUT
105#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG) 94#define TIPC_OUTPUT TIPC_LOG
106#endif
107
108#ifndef DBG_OUTPUT
109#define DBG_OUTPUT TIPC_NULL
110#endif 95#endif
111 96
112#else
113
114/* 97/*
115 * TIPC debug support not included: 98 * TIPC can be configured to send system messages to TIPC_OUTPUT
116 * - system messages are printed to system console 99 * or to the system console only.
117 * - debug messages are not printed
118 */ 100 */
119 101
102#ifdef CONFIG_TIPC_DEBUG
103
104#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
105 KERN_ERR "TIPC: " fmt, ## arg)
106#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
107 KERN_WARNING "TIPC: " fmt, ## arg)
108#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
109 KERN_NOTICE "TIPC: " fmt, ## arg)
110
111#else
112
120#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg) 113#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
121#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg) 114#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
122#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg) 115#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
123 116
124#define dbg(fmt, arg...) do {} while (0) 117#endif
125#define msg_dbg(msg,txt) do {} while (0)
126#define dump(fmt,arg...) do {} while (0)
127 118
119/*
120 * DBG_OUTPUT is the destination print buffer for debug messages.
121 * It defaults to the the null print buffer, but can be redefined
122 * (typically in the individual .c files being debugged) to allow
123 * selected debug messages to be generated where needed.
124 */
125
126#ifndef DBG_OUTPUT
127#define DBG_OUTPUT TIPC_NULL
128#endif
128 129
129/* 130/*
130 * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is 131 * TIPC can be configured to send debug messages to the specified print buffer
131 * the null print buffer. Thes ensures that any system or debug messages 132 * (typically DBG_OUTPUT) or to suppress them entirely.
132 * that are generated without using the above macros are handled correctly.
133 */ 133 */
134 134
135#undef TIPC_OUTPUT 135#ifdef CONFIG_TIPC_DEBUG
136#define TIPC_OUTPUT TIPC_CONS
137 136
138#undef DBG_OUTPUT 137#define dbg(fmt, arg...) \
139#define DBG_OUTPUT TIPC_NULL 138 do { \
139 if (DBG_OUTPUT != TIPC_NULL) \
140 tipc_printf(DBG_OUTPUT, fmt, ## arg); \
141 } while (0)
142#define msg_dbg(msg, txt) \
143 do { \
144 if (DBG_OUTPUT != TIPC_NULL) \
145 tipc_msg_dbg(DBG_OUTPUT, msg, txt); \
146 } while (0)
147#define dump(fmt, arg...) \
148 do { \
149 if (DBG_OUTPUT != TIPC_NULL) \
150 tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \
151 } while (0)
152
153void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
154void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
155
156#else
157
158#define dbg(fmt, arg...) do {} while (0)
159#define msg_dbg(msg, txt) do {} while (0)
160#define dump(fmt, arg...) do {} while (0)
161
162#define tipc_msg_dbg(...) do {} while (0)
163#define tipc_dump_dbg(...) do {} while (0)
140 164
141#endif 165#endif
142 166
@@ -178,7 +202,7 @@ extern atomic_t tipc_user_count;
178 202
179extern int tipc_core_start(void); 203extern int tipc_core_start(void);
180extern void tipc_core_stop(void); 204extern void tipc_core_stop(void);
181extern int tipc_core_start_net(void); 205extern int tipc_core_start_net(unsigned long addr);
182extern void tipc_core_stop_net(void); 206extern void tipc_core_stop_net(void);
183extern int tipc_handler_start(void); 207extern int tipc_handler_start(void);
184extern void tipc_handler_stop(void); 208extern void tipc_handler_stop(void);
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index e809d2a2ce06..29ecae851668 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -2,7 +2,7 @@
2 * net/tipc/dbg.c: TIPC print buffer routines for debugging 2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -38,17 +38,43 @@
38#include "config.h" 38#include "config.h"
39#include "dbg.h" 39#include "dbg.h"
40 40
41static char print_string[TIPC_PB_MAX_STR]; 41/*
42static DEFINE_SPINLOCK(print_lock); 42 * TIPC pre-defines the following print buffers:
43 *
44 * TIPC_NULL : null buffer (i.e. print nowhere)
45 * TIPC_CONS : system console
46 * TIPC_LOG : TIPC log buffer
47 *
48 * Additional user-defined print buffers are also permitted.
49 */
43 50
44static struct print_buf null_buf = { NULL, 0, NULL, NULL }; 51static struct print_buf null_buf = { NULL, 0, NULL, 0 };
45struct print_buf *TIPC_NULL = &null_buf; 52struct print_buf *const TIPC_NULL = &null_buf;
46 53
47static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 54static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
48struct print_buf *TIPC_CONS = &cons_buf; 55struct print_buf *const TIPC_CONS = &cons_buf;
49 56
50static struct print_buf log_buf = { NULL, 0, NULL, NULL }; 57static struct print_buf log_buf = { NULL, 0, NULL, 1 };
51struct print_buf *TIPC_LOG = &log_buf; 58struct print_buf *const TIPC_LOG = &log_buf;
59
60/*
61 * Locking policy when using print buffers.
62 *
63 * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
64 * 'print_string' when writing to a print buffer. This also protects against
65 * concurrent writes to the print buffer being written to.
66 *
67 * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned
68 * use of 'print_lock' to protect against all types of concurrent operations
69 * on their associated print buffer (not just write operations).
70 *
71 * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
72 * on the caller to prevent simultaneous use of the print buffer(s) being
73 * manipulated.
74 */
75
76static char print_string[TIPC_PB_MAX_STR];
77static DEFINE_SPINLOCK(print_lock);
52 78
53 79
54#define FORMAT(PTR,LEN,FMT) \ 80#define FORMAT(PTR,LEN,FMT) \
@@ -60,27 +86,14 @@ struct print_buf *TIPC_LOG = &log_buf;
60 *(PTR + LEN) = '\0';\ 86 *(PTR + LEN) = '\0';\
61} 87}
62 88
63/*
64 * Locking policy when using print buffers.
65 *
66 * The following routines use 'print_lock' for protection:
67 * 1) tipc_printf() - to protect its print buffer(s) and 'print_string'
68 * 2) TIPC_TEE() - to protect its print buffer(s)
69 * 3) tipc_dump() - to protect its print buffer(s) and 'print_string'
70 * 4) tipc_log_XXX() - to protect TIPC_LOG
71 *
72 * All routines of the form tipc_printbuf_XXX() rely on the caller to prevent
73 * simultaneous use of the print buffer(s) being manipulated.
74 */
75
76/** 89/**
77 * tipc_printbuf_init - initialize print buffer to empty 90 * tipc_printbuf_init - initialize print buffer to empty
78 * @pb: pointer to print buffer structure 91 * @pb: pointer to print buffer structure
79 * @raw: pointer to character array used by print buffer 92 * @raw: pointer to character array used by print buffer
80 * @size: size of character array 93 * @size: size of character array
81 * 94 *
82 * Makes the print buffer a null device that discards anything written to it 95 * Note: If the character array is too small (or absent), the print buffer
83 * if the character array is too small (or absent). 96 * becomes a null device that discards anything written to it.
84 */ 97 */
85 98
86void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) 99void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
@@ -88,13 +101,13 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
88 pb->buf = raw; 101 pb->buf = raw;
89 pb->crs = raw; 102 pb->crs = raw;
90 pb->size = size; 103 pb->size = size;
91 pb->next = NULL; 104 pb->echo = 0;
92 105
93 if (size < TIPC_PB_MIN_SIZE) { 106 if (size < TIPC_PB_MIN_SIZE) {
94 pb->buf = NULL; 107 pb->buf = NULL;
95 } else if (raw) { 108 } else if (raw) {
96 pb->buf[0] = 0; 109 pb->buf[0] = 0;
97 pb->buf[size-1] = ~0; 110 pb->buf[size - 1] = ~0;
98 } 111 }
99} 112}
100 113
@@ -105,7 +118,11 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
105 118
106void tipc_printbuf_reset(struct print_buf *pb) 119void tipc_printbuf_reset(struct print_buf *pb)
107{ 120{
108 tipc_printbuf_init(pb, pb->buf, pb->size); 121 if (pb->buf) {
122 pb->crs = pb->buf;
123 pb->buf[0] = 0;
124 pb->buf[pb->size - 1] = ~0;
125 }
109} 126}
110 127
111/** 128/**
@@ -141,7 +158,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
141 158
142 if (pb->buf[pb->size - 1] == 0) { 159 if (pb->buf[pb->size - 1] == 0) {
143 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 160 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
144 if (cp_buf != NULL){ 161 if (cp_buf) {
145 tipc_printbuf_init(&cb, cp_buf, pb->size); 162 tipc_printbuf_init(&cb, cp_buf, pb->size);
146 tipc_printbuf_move(&cb, pb); 163 tipc_printbuf_move(&cb, pb);
147 tipc_printbuf_move(pb, &cb); 164 tipc_printbuf_move(pb, &cb);
@@ -179,15 +196,16 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
179 } 196 }
180 197
181 if (pb_to->size < pb_from->size) { 198 if (pb_to->size < pb_from->size) {
182 tipc_printbuf_reset(pb_to); 199 strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
183 tipc_printf(pb_to, "*** PRINT BUFFER MOVE ERROR ***"); 200 pb_to->buf[pb_to->size - 1] = ~0;
201 pb_to->crs = strchr(pb_to->buf, 0);
184 return; 202 return;
185 } 203 }
186 204
187 /* Copy data from char after cursor to end (if used) */ 205 /* Copy data from char after cursor to end (if used) */
188 206
189 len = pb_from->buf + pb_from->size - pb_from->crs - 2; 207 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
190 if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) { 208 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
191 strcpy(pb_to->buf, pb_from->crs + 1); 209 strcpy(pb_to->buf, pb_from->crs + 1);
192 pb_to->crs = pb_to->buf + len; 210 pb_to->crs = pb_to->buf + len;
193 } else 211 } else
@@ -203,8 +221,8 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
203} 221}
204 222
205/** 223/**
206 * tipc_printf - append formatted output to print buffer chain 224 * tipc_printf - append formatted output to print buffer
207 * @pb: pointer to chain of print buffers (may be NULL) 225 * @pb: pointer to print buffer
208 * @fmt: formatted info to be printed 226 * @fmt: formatted info to be printed
209 */ 227 */
210 228
@@ -213,68 +231,40 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
213 int chars_to_add; 231 int chars_to_add;
214 int chars_left; 232 int chars_left;
215 char save_char; 233 char save_char;
216 struct print_buf *pb_next;
217 234
218 spin_lock_bh(&print_lock); 235 spin_lock_bh(&print_lock);
236
219 FORMAT(print_string, chars_to_add, fmt); 237 FORMAT(print_string, chars_to_add, fmt);
220 if (chars_to_add >= TIPC_PB_MAX_STR) 238 if (chars_to_add >= TIPC_PB_MAX_STR)
221 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***"); 239 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
222 240
223 while (pb) { 241 if (pb->buf) {
224 if (pb == TIPC_CONS) 242 chars_left = pb->buf + pb->size - pb->crs - 1;
225 printk(print_string); 243 if (chars_to_add <= chars_left) {
226 else if (pb->buf) { 244 strcpy(pb->crs, print_string);
227 chars_left = pb->buf + pb->size - pb->crs - 1; 245 pb->crs += chars_to_add;
228 if (chars_to_add <= chars_left) { 246 } else if (chars_to_add >= (pb->size - 1)) {
229 strcpy(pb->crs, print_string); 247 strcpy(pb->buf, print_string + chars_to_add + 1
230 pb->crs += chars_to_add; 248 - pb->size);
231 } else if (chars_to_add >= (pb->size - 1)) { 249 pb->crs = pb->buf + pb->size - 1;
232 strcpy(pb->buf, print_string + chars_to_add + 1 250 } else {
233 - pb->size); 251 strcpy(pb->buf, print_string + chars_left);
234 pb->crs = pb->buf + pb->size - 1; 252 save_char = print_string[chars_left];
235 } else { 253 print_string[chars_left] = 0;
236 strcpy(pb->buf, print_string + chars_left); 254 strcpy(pb->crs, print_string);
237 save_char = print_string[chars_left]; 255 print_string[chars_left] = save_char;
238 print_string[chars_left] = 0; 256 pb->crs = pb->buf + chars_to_add - chars_left;
239 strcpy(pb->crs, print_string);
240 print_string[chars_left] = save_char;
241 pb->crs = pb->buf + chars_to_add - chars_left;
242 }
243 } 257 }
244 pb_next = pb->next;
245 pb->next = NULL;
246 pb = pb_next;
247 } 258 }
248 spin_unlock_bh(&print_lock);
249}
250 259
251/** 260 if (pb->echo)
252 * TIPC_TEE - perform next output operation on both print buffers 261 printk(print_string);
253 * @b0: pointer to chain of print buffers (may be NULL)
254 * @b1: pointer to print buffer to add to chain
255 *
256 * Returns pointer to print buffer chain.
257 */
258 262
259struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
260{
261 struct print_buf *pb = b0;
262
263 if (!b0 || (b0 == b1))
264 return b1;
265
266 spin_lock_bh(&print_lock);
267 while (pb->next) {
268 if ((pb->next == b1) || (pb->next == b0))
269 pb->next = pb->next->next;
270 else
271 pb = pb->next;
272 }
273 pb->next = b1;
274 spin_unlock_bh(&print_lock); 263 spin_unlock_bh(&print_lock);
275 return b0;
276} 264}
277 265
266#ifdef CONFIG_TIPC_DEBUG
267
278/** 268/**
279 * print_to_console - write string of bytes to console in multiple chunks 269 * print_to_console - write string of bytes to console in multiple chunks
280 */ 270 */
@@ -321,72 +311,66 @@ static void printbuf_dump(struct print_buf *pb)
321} 311}
322 312
323/** 313/**
324 * tipc_dump - dump non-console print buffer(s) to console 314 * tipc_dump_dbg - dump (non-console) print buffer to console
325 * @pb: pointer to chain of print buffers 315 * @pb: pointer to print buffer
326 */ 316 */
327 317
328void tipc_dump(struct print_buf *pb, const char *fmt, ...) 318void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...)
329{ 319{
330 struct print_buf *pb_next;
331 int len; 320 int len;
332 321
322 if (pb == TIPC_CONS)
323 return;
324
333 spin_lock_bh(&print_lock); 325 spin_lock_bh(&print_lock);
326
334 FORMAT(print_string, len, fmt); 327 FORMAT(print_string, len, fmt);
335 printk(print_string); 328 printk(print_string);
336 329
337 for (; pb; pb = pb->next) { 330 printk("\n---- Start of %s log dump ----\n\n",
338 if (pb != TIPC_CONS) { 331 (pb == TIPC_LOG) ? "global" : "local");
339 printk("\n---- Start of %s log dump ----\n\n", 332 printbuf_dump(pb);
340 (pb == TIPC_LOG) ? "global" : "local"); 333 tipc_printbuf_reset(pb);
341 printbuf_dump(pb); 334 printk("\n---- End of dump ----\n");
342 tipc_printbuf_reset(pb); 335
343 printk("\n---- End of dump ----\n");
344 }
345 pb_next = pb->next;
346 pb->next = NULL;
347 pb = pb_next;
348 }
349 spin_unlock_bh(&print_lock); 336 spin_unlock_bh(&print_lock);
350} 337}
351 338
339#endif
340
352/** 341/**
353 * tipc_log_stop - free up TIPC log print buffer 342 * tipc_log_resize - change the size of the TIPC log buffer
343 * @log_size: print buffer size to use
354 */ 344 */
355 345
356void tipc_log_stop(void) 346int tipc_log_resize(int log_size)
357{ 347{
348 int res = 0;
349
358 spin_lock_bh(&print_lock); 350 spin_lock_bh(&print_lock);
359 if (TIPC_LOG->buf) { 351 if (TIPC_LOG->buf) {
360 kfree(TIPC_LOG->buf); 352 kfree(TIPC_LOG->buf);
361 TIPC_LOG->buf = NULL; 353 TIPC_LOG->buf = NULL;
362 } 354 }
363 spin_unlock_bh(&print_lock);
364}
365
366/**
367 * tipc_log_reinit - (re)initialize TIPC log print buffer
368 * @log_size: print buffer size to use
369 */
370
371void tipc_log_reinit(int log_size)
372{
373 tipc_log_stop();
374
375 if (log_size) { 355 if (log_size) {
376 if (log_size < TIPC_PB_MIN_SIZE) 356 if (log_size < TIPC_PB_MIN_SIZE)
377 log_size = TIPC_PB_MIN_SIZE; 357 log_size = TIPC_PB_MIN_SIZE;
378 spin_lock_bh(&print_lock); 358 res = TIPC_LOG->echo;
379 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), 359 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
380 log_size); 360 log_size);
381 spin_unlock_bh(&print_lock); 361 TIPC_LOG->echo = res;
362 res = !TIPC_LOG->buf;
382 } 363 }
364 spin_unlock_bh(&print_lock);
365
366 return res;
383} 367}
384 368
385/** 369/**
386 * tipc_log_resize - reconfigure size of TIPC log buffer 370 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
387 */ 371 */
388 372
389struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space) 373struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
390{ 374{
391 u32 value; 375 u32 value;
392 376
@@ -397,7 +381,9 @@ struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
397 if (value != delimit(value, 0, 32768)) 381 if (value != delimit(value, 0, 32768))
398 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 382 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
399 " (log size must be 0-32768)"); 383 " (log size must be 0-32768)");
400 tipc_log_reinit(value); 384 if (tipc_log_resize(value))
385 return tipc_cfg_reply_error_string(
386 "unable to create specified log (log size is now 0)");
401 return tipc_cfg_reply_none(); 387 return tipc_cfg_reply_none();
402} 388}
403 389
@@ -410,27 +396,32 @@ struct sk_buff *tipc_log_dump(void)
410 struct sk_buff *reply; 396 struct sk_buff *reply;
411 397
412 spin_lock_bh(&print_lock); 398 spin_lock_bh(&print_lock);
413 if (!TIPC_LOG->buf) 399 if (!TIPC_LOG->buf) {
400 spin_unlock_bh(&print_lock);
414 reply = tipc_cfg_reply_ultra_string("log not activated\n"); 401 reply = tipc_cfg_reply_ultra_string("log not activated\n");
415 else if (tipc_printbuf_empty(TIPC_LOG)) 402 } else if (tipc_printbuf_empty(TIPC_LOG)) {
403 spin_unlock_bh(&print_lock);
416 reply = tipc_cfg_reply_ultra_string("log is empty\n"); 404 reply = tipc_cfg_reply_ultra_string("log is empty\n");
405 }
417 else { 406 else {
418 struct tlv_desc *rep_tlv; 407 struct tlv_desc *rep_tlv;
419 struct print_buf pb; 408 struct print_buf pb;
420 int str_len; 409 int str_len;
421 410
422 str_len = min(TIPC_LOG->size, 32768u); 411 str_len = min(TIPC_LOG->size, 32768u);
412 spin_unlock_bh(&print_lock);
423 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len)); 413 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
424 if (reply) { 414 if (reply) {
425 rep_tlv = (struct tlv_desc *)reply->data; 415 rep_tlv = (struct tlv_desc *)reply->data;
426 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); 416 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
417 spin_lock_bh(&print_lock);
427 tipc_printbuf_move(&pb, TIPC_LOG); 418 tipc_printbuf_move(&pb, TIPC_LOG);
419 spin_unlock_bh(&print_lock);
428 str_len = strlen(TLV_DATA(rep_tlv)) + 1; 420 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
429 skb_put(reply, TLV_SPACE(str_len)); 421 skb_put(reply, TLV_SPACE(str_len));
430 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 422 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
431 } 423 }
432 } 424 }
433 spin_unlock_bh(&print_lock);
434 return reply; 425 return reply;
435} 426}
436 427
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index c01b085000e0..5ef1bc8f64ef 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -2,7 +2,7 @@
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines 2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 * 3 *
4 * Copyright (c) 1997-2006, Ericsson AB 4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -42,14 +42,14 @@
42 * @buf: pointer to character array containing print buffer contents 42 * @buf: pointer to character array containing print buffer contents
43 * @size: size of character array 43 * @size: size of character array
44 * @crs: pointer to first unused space in character array (i.e. final NUL) 44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @next: used to link print buffers when printing to more than one at a time 45 * @echo: echo output to system console if non-zero
46 */ 46 */
47 47
48struct print_buf { 48struct print_buf {
49 char *buf; 49 char *buf;
50 u32 size; 50 u32 size;
51 char *crs; 51 char *crs;
52 struct print_buf *next; 52 int echo;
53}; 53};
54 54
55#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */ 55#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
@@ -61,10 +61,10 @@ int tipc_printbuf_empty(struct print_buf *pb);
61int tipc_printbuf_validate(struct print_buf *pb); 61int tipc_printbuf_validate(struct print_buf *pb);
62void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); 62void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
63 63
64void tipc_log_reinit(int log_size); 64int tipc_log_resize(int log_size);
65void tipc_log_stop(void);
66 65
67struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space); 66struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
67 int req_tlv_space);
68struct sk_buff *tipc_log_dump(void); 68struct sk_buff *tipc_log_dump(void);
69 69
70#endif 70#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5d643e5721eb..1657f0e795ff 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -120,9 +120,8 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
120 120
121 if (buf) { 121 if (buf) {
122 msg = buf_msg(buf); 122 msg = buf_msg(buf);
123 msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE, 123 msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
124 dest_domain); 124 msg_set_non_seq(msg, 1);
125 msg_set_non_seq(msg);
126 msg_set_req_links(msg, req_links); 125 msg_set_req_links(msg, req_links);
127 msg_set_dest_domain(msg, dest_domain); 126 msg_set_dest_domain(msg, dest_domain);
128 msg_set_bc_netid(msg, tipc_net_id); 127 msg_set_bc_netid(msg, tipc_net_id);
@@ -156,11 +155,11 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
156/** 155/**
157 * tipc_disc_recv_msg - handle incoming link setup message (request or response) 156 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
158 * @buf: buffer containing message 157 * @buf: buffer containing message
158 * @b_ptr: bearer that message arrived on
159 */ 159 */
160 160
161void tipc_disc_recv_msg(struct sk_buff *buf) 161void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
162{ 162{
163 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
164 struct link *link; 163 struct link *link;
165 struct tipc_media_addr media_addr; 164 struct tipc_media_addr media_addr;
166 struct tipc_msg *msg = buf_msg(buf); 165 struct tipc_msg *msg = buf_msg(buf);
@@ -200,9 +199,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
200 dbg(" in own cluster\n"); 199 dbg(" in own cluster\n");
201 if (n_ptr == NULL) { 200 if (n_ptr == NULL) {
202 n_ptr = tipc_node_create(orig); 201 n_ptr = tipc_node_create(orig);
203 } 202 if (!n_ptr)
204 if (n_ptr == NULL) { 203 return;
205 return;
206 } 204 }
207 spin_lock_bh(&n_ptr->lock); 205 spin_lock_bh(&n_ptr->lock);
208 link = n_ptr->links[b_ptr->identity]; 206 link = n_ptr->links[b_ptr->identity];
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 9fd7587b143a..c36eaeb7d5d0 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -48,7 +48,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
48void tipc_disc_update_link_req(struct link_req *req); 48void tipc_disc_update_link_req(struct link_req *req);
49void tipc_disc_stop_link_req(struct link_req *req); 49void tipc_disc_stop_link_req(struct link_req *req);
50 50
51void tipc_disc_recv_msg(struct sk_buff *buf); 51void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
52 52
53void tipc_disc_link_event(u32 addr, char *name, int up); 53void tipc_disc_link_event(u32 addr, char *name, int up);
54#if 0 54#if 0
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2a26a16e269f..9784a8e963b4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -51,6 +51,12 @@
51 51
52 52
53/* 53/*
54 * Out-of-range value for link session numbers
55 */
56
57#define INVALID_SESSION 0x10000
58
59/*
54 * Limit for deferred reception queue: 60 * Limit for deferred reception queue:
55 */ 61 */
56 62
@@ -147,9 +153,21 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
147 153
148#define LINK_LOG_BUF_SIZE 0 154#define LINK_LOG_BUF_SIZE 0
149 155
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0) 156#define dbg_link(fmt, arg...) \
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0) 157 do { \
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0) 158 if (LINK_LOG_BUF_SIZE) \
159 tipc_printf(&l_ptr->print_buf, fmt, ## arg); \
160 } while (0)
161#define dbg_link_msg(msg, txt) \
162 do { \
163 if (LINK_LOG_BUF_SIZE) \
164 tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \
165 } while (0)
166#define dbg_link_state(txt) \
167 do { \
168 if (LINK_LOG_BUF_SIZE) \
169 link_print(l_ptr, &l_ptr->print_buf, txt); \
170 } while (0)
153#define dbg_link_dump() do { \ 171#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \ 172 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ 173 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
@@ -450,9 +468,9 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
450 468
451 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; 469 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
452 msg = l_ptr->pmsg; 470 msg = l_ptr->pmsg;
453 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 471 msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
454 msg_set_size(msg, sizeof(l_ptr->proto_msg)); 472 msg_set_size(msg, sizeof(l_ptr->proto_msg));
455 msg_set_session(msg, tipc_random); 473 msg_set_session(msg, (tipc_random & 0xffff));
456 msg_set_bearer_id(msg, b_ptr->identity); 474 msg_set_bearer_id(msg, b_ptr->identity);
457 strcpy((char *)msg_data(msg), if_name); 475 strcpy((char *)msg_data(msg), if_name);
458 476
@@ -693,10 +711,10 @@ void tipc_link_reset(struct link *l_ptr)
693 u32 checkpoint = l_ptr->next_in_no; 711 u32 checkpoint = l_ptr->next_in_no;
694 int was_active_link = tipc_link_is_active(l_ptr); 712 int was_active_link = tipc_link_is_active(l_ptr);
695 713
696 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); 714 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
697 715
698 /* Link is down, accept any session: */ 716 /* Link is down, accept any session */
699 l_ptr->peer_session = 0; 717 l_ptr->peer_session = INVALID_SESSION;
700 718
701 /* Prepare for max packet size negotiation */ 719 /* Prepare for max packet size negotiation */
702 link_init_max_pkt(l_ptr); 720 link_init_max_pkt(l_ptr);
@@ -1110,7 +1128,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1110 1128
1111 if (bundler) { 1129 if (bundler) {
1112 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, 1130 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1113 TIPC_OK, INT_H_SIZE, l_ptr->addr); 1131 INT_H_SIZE, l_ptr->addr);
1114 skb_copy_to_linear_data(bundler, &bundler_hdr, 1132 skb_copy_to_linear_data(bundler, &bundler_hdr,
1115 INT_H_SIZE); 1133 INT_H_SIZE);
1116 skb_trim(bundler, INT_H_SIZE); 1134 skb_trim(bundler, INT_H_SIZE);
@@ -1374,7 +1392,7 @@ again:
1374 1392
1375 msg_dbg(hdr, ">FRAGMENTING>"); 1393 msg_dbg(hdr, ">FRAGMENTING>");
1376 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1394 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1377 TIPC_OK, INT_H_SIZE, msg_destnode(hdr)); 1395 INT_H_SIZE, msg_destnode(hdr));
1378 msg_set_link_selector(&fragm_hdr, sender->publ.ref); 1396 msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1379 msg_set_size(&fragm_hdr, max_pkt); 1397 msg_set_size(&fragm_hdr, max_pkt);
1380 msg_set_fragm_no(&fragm_hdr, 1); 1398 msg_set_fragm_no(&fragm_hdr, 1);
@@ -1651,7 +1669,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1651 struct tipc_msg *msg = buf_msg(buf); 1669 struct tipc_msg *msg = buf_msg(buf);
1652 1670
1653 warn("Retransmission failure on link <%s>\n", l_ptr->name); 1671 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1654 tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>"); 1672 tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>");
1655 1673
1656 if (l_ptr->addr) { 1674 if (l_ptr->addr) {
1657 1675
@@ -1748,21 +1766,6 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1748 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1766 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1749} 1767}
1750 1768
1751/*
1752 * link_recv_non_seq: Receive packets which are outside
1753 * the link sequence flow
1754 */
1755
1756static void link_recv_non_seq(struct sk_buff *buf)
1757{
1758 struct tipc_msg *msg = buf_msg(buf);
1759
1760 if (msg_user(msg) == LINK_CONFIG)
1761 tipc_disc_recv_msg(buf);
1762 else
1763 tipc_bclink_recv_pkt(buf);
1764}
1765
1766/** 1769/**
1767 * link_insert_deferred_queue - insert deferred messages back into receive chain 1770 * link_insert_deferred_queue - insert deferred messages back into receive chain
1768 */ 1771 */
@@ -1839,7 +1842,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1839{ 1842{
1840 read_lock_bh(&tipc_net_lock); 1843 read_lock_bh(&tipc_net_lock);
1841 while (head) { 1844 while (head) {
1842 struct bearer *b_ptr; 1845 struct bearer *b_ptr = (struct bearer *)tb_ptr;
1843 struct node *n_ptr; 1846 struct node *n_ptr;
1844 struct link *l_ptr; 1847 struct link *l_ptr;
1845 struct sk_buff *crs; 1848 struct sk_buff *crs;
@@ -1850,9 +1853,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1850 u32 released = 0; 1853 u32 released = 0;
1851 int type; 1854 int type;
1852 1855
1853 b_ptr = (struct bearer *)tb_ptr;
1854 TIPC_SKB_CB(buf)->handle = b_ptr;
1855
1856 head = head->next; 1856 head = head->next;
1857 1857
1858 /* Ensure message is well-formed */ 1858 /* Ensure message is well-formed */
@@ -1871,7 +1871,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1871 msg = buf_msg(buf); 1871 msg = buf_msg(buf);
1872 1872
1873 if (unlikely(msg_non_seq(msg))) { 1873 if (unlikely(msg_non_seq(msg))) {
1874 link_recv_non_seq(buf); 1874 if (msg_user(msg) == LINK_CONFIG)
1875 tipc_disc_recv_msg(buf, b_ptr);
1876 else
1877 tipc_bclink_recv_pkt(buf);
1875 continue; 1878 continue;
1876 } 1879 }
1877 1880
@@ -1978,8 +1981,6 @@ deliver:
1978 if (link_recv_changeover_msg(&l_ptr, &buf)) { 1981 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1979 msg = buf_msg(buf); 1982 msg = buf_msg(buf);
1980 seq_no = msg_seqno(msg); 1983 seq_no = msg_seqno(msg);
1981 TIPC_SKB_CB(buf)->handle
1982 = b_ptr;
1983 if (type == ORIGINAL_MSG) 1984 if (type == ORIGINAL_MSG)
1984 goto deliver; 1985 goto deliver;
1985 goto protocol_check; 1986 goto protocol_check;
@@ -2263,7 +2264,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2263 switch (msg_type(msg)) { 2264 switch (msg_type(msg)) {
2264 2265
2265 case RESET_MSG: 2266 case RESET_MSG:
2266 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) { 2267 if (!link_working_unknown(l_ptr) &&
2268 (l_ptr->peer_session != INVALID_SESSION)) {
2267 if (msg_session(msg) == l_ptr->peer_session) { 2269 if (msg_session(msg) == l_ptr->peer_session) {
2268 dbg("Duplicate RESET: %u<->%u\n", 2270 dbg("Duplicate RESET: %u<->%u\n",
2269 msg_session(msg), l_ptr->peer_session); 2271 msg_session(msg), l_ptr->peer_session);
@@ -2424,7 +2426,7 @@ void tipc_link_changeover(struct link *l_ptr)
2424 } 2426 }
2425 2427
2426 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2428 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2427 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2429 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2428 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2430 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2429 msg_set_msgcnt(&tunnel_hdr, msgcount); 2431 msg_set_msgcnt(&tunnel_hdr, msgcount);
2430 dbg("Link changeover requires %u tunnel messages\n", msgcount); 2432 dbg("Link changeover requires %u tunnel messages\n", msgcount);
@@ -2479,7 +2481,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2479 struct tipc_msg tunnel_hdr; 2481 struct tipc_msg tunnel_hdr;
2480 2482
2481 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2483 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2482 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2484 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2483 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2485 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2484 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2486 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2485 iter = l_ptr->first_out; 2487 iter = l_ptr->first_out;
@@ -2672,10 +2674,12 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2672 u32 pack_sz = link_max_pkt(l_ptr); 2674 u32 pack_sz = link_max_pkt(l_ptr);
2673 u32 fragm_sz = pack_sz - INT_H_SIZE; 2675 u32 fragm_sz = pack_sz - INT_H_SIZE;
2674 u32 fragm_no = 1; 2676 u32 fragm_no = 1;
2675 u32 destaddr = msg_destnode(inmsg); 2677 u32 destaddr;
2676 2678
2677 if (msg_short(inmsg)) 2679 if (msg_short(inmsg))
2678 destaddr = l_ptr->addr; 2680 destaddr = l_ptr->addr;
2681 else
2682 destaddr = msg_destnode(inmsg);
2679 2683
2680 if (msg_routed(inmsg)) 2684 if (msg_routed(inmsg))
2681 msg_set_prevnode(inmsg, tipc_own_addr); 2685 msg_set_prevnode(inmsg, tipc_own_addr);
@@ -2683,7 +2687,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2683 /* Prepare reusable fragment header: */ 2687 /* Prepare reusable fragment header: */
2684 2688
2685 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2689 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2686 TIPC_OK, INT_H_SIZE, destaddr); 2690 INT_H_SIZE, destaddr);
2687 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); 2691 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2688 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); 2692 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2689 msg_set_fragm_no(&fragm_hdr, fragm_no); 2693 msg_set_fragm_no(&fragm_hdr, fragm_no);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 696a8633df75..73dcd00d674e 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,7 +41,9 @@
41#include "bearer.h" 41#include "bearer.h"
42 42
43 43
44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str) 44#ifdef CONFIG_TIPC_DEBUG
45
46void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{ 47{
46 u32 usr = msg_user(msg); 48 u32 usr = msg_user(msg);
47 tipc_printf(buf, str); 49 tipc_printf(buf, str);
@@ -228,13 +230,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
228 230
229 switch (usr) { 231 switch (usr) {
230 case CONN_MANAGER: 232 case CONN_MANAGER:
231 case NAME_DISTRIBUTOR:
232 case TIPC_LOW_IMPORTANCE: 233 case TIPC_LOW_IMPORTANCE:
233 case TIPC_MEDIUM_IMPORTANCE: 234 case TIPC_MEDIUM_IMPORTANCE:
234 case TIPC_HIGH_IMPORTANCE: 235 case TIPC_HIGH_IMPORTANCE:
235 case TIPC_CRITICAL_IMPORTANCE: 236 case TIPC_CRITICAL_IMPORTANCE:
236 if (msg_short(msg))
237 break; /* No error */
238 switch (msg_errcode(msg)) { 237 switch (msg_errcode(msg)) {
239 case TIPC_OK: 238 case TIPC_OK:
240 break; 239 break;
@@ -315,9 +314,11 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
315 } 314 }
316 tipc_printf(buf, "\n"); 315 tipc_printf(buf, "\n");
317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { 316 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
318 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 317 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
319 } 318 }
320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { 319 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
321 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 320 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
322 } 321 }
323} 322}
323
324#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index ad487e8abcc2..7ee6ae238147 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -2,7 +2,7 @@
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,14 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
75 m->hdr[w] |= htonl(val); 75 m->hdr[w] |= htonl(val);
76} 76}
77 77
78static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
79{
80 u32 temp = msg->hdr[a];
81
82 msg->hdr[a] = msg->hdr[b];
83 msg->hdr[b] = temp;
84}
85
78/* 86/*
79 * Word 0 87 * Word 0
80 */ 88 */
@@ -119,9 +127,9 @@ static inline int msg_non_seq(struct tipc_msg *m)
119 return msg_bits(m, 0, 20, 1); 127 return msg_bits(m, 0, 20, 1);
120} 128}
121 129
122static inline void msg_set_non_seq(struct tipc_msg *m) 130static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
123{ 131{
124 msg_set_bits(m, 0, 20, 1, 1); 132 msg_set_bits(m, 0, 20, 1, n);
125} 133}
126 134
127static inline int msg_dest_droppable(struct tipc_msg *m) 135static inline int msg_dest_droppable(struct tipc_msg *m)
@@ -224,6 +232,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
224 msg_set_bits(m, 2, 0, 0xffff, n); 232 msg_set_bits(m, 2, 0, 0xffff, n);
225} 233}
226 234
235/*
236 * TIPC may utilize the "link ack #" and "link seq #" fields of a short
237 * message header to hold the destination node for the message, since the
238 * normal "dest node" field isn't present. This cache is only referenced
239 * when required, so populating the cache of a longer message header is
240 * harmless (as long as the header has the two link sequence fields present).
241 *
242 * Note: Host byte order is OK here, since the info never goes off-card.
243 */
244
245static inline u32 msg_destnode_cache(struct tipc_msg *m)
246{
247 return m->hdr[2];
248}
249
250static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
251{
252 m->hdr[2] = dnode;
253}
227 254
228/* 255/*
229 * Words 3-10 256 * Words 3-10
@@ -325,7 +352,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
325 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
326 w0:|vers |msg usr|hdr sz |n|resrv| packet size | 353 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
327 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 354 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
328 w1:|m typ|rsv=0| sequence gap | broadcast ack no | 355 w1:|m typ| sequence gap | broadcast ack no |
329 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
330 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to | 357 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
331 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -388,12 +415,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
388 415
389static inline u32 msg_seq_gap(struct tipc_msg *m) 416static inline u32 msg_seq_gap(struct tipc_msg *m)
390{ 417{
391 return msg_bits(m, 1, 16, 0xff); 418 return msg_bits(m, 1, 16, 0x1fff);
392} 419}
393 420
394static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) 421static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
395{ 422{
396 msg_set_bits(m, 1, 16, 0xff, n); 423 msg_set_bits(m, 1, 16, 0x1fff, n);
397} 424}
398 425
399static inline u32 msg_req_links(struct tipc_msg *m) 426static inline u32 msg_req_links(struct tipc_msg *m)
@@ -696,7 +723,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m)
696 723
697 724
698static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 725static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
699 u32 err, u32 hsize, u32 destnode) 726 u32 hsize, u32 destnode)
700{ 727{
701 memset(m, 0, hsize); 728 memset(m, 0, hsize);
702 msg_set_version(m); 729 msg_set_version(m);
@@ -705,7 +732,6 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
705 msg_set_size(m, hsize); 732 msg_set_size(m, hsize);
706 msg_set_prevnode(m, tipc_own_addr); 733 msg_set_prevnode(m, tipc_own_addr);
707 msg_set_type(m, type); 734 msg_set_type(m, type);
708 msg_set_errcode(m, err);
709 if (!msg_short(m)) { 735 if (!msg_short(m)) {
710 msg_set_orignode(m, tipc_own_addr); 736 msg_set_orignode(m, tipc_own_addr);
711 msg_set_destnode(m, destnode); 737 msg_set_destnode(m, destnode);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 39fd1619febf..10a69894e2fd 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -41,9 +41,6 @@
41#include "msg.h" 41#include "msg.h"
42#include "name_distr.h" 42#include "name_distr.h"
43 43
44#undef DBG_OUTPUT
45#define DBG_OUTPUT NULL
46
47#define ITEM_SIZE sizeof(struct distr_item) 44#define ITEM_SIZE sizeof(struct distr_item)
48 45
49/** 46/**
@@ -106,8 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
106 103
107 if (buf != NULL) { 104 if (buf != NULL) {
108 msg = buf_msg(buf); 105 msg = buf_msg(buf);
109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 106 msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
110 LONG_H_SIZE, dest);
111 msg_set_size(msg, LONG_H_SIZE + size); 107 msg_set_size(msg, LONG_H_SIZE + size);
112 } 108 }
113 return buf; 109 return buf;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index ac7dfdda7973..096f7bd240a0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -74,7 +74,7 @@ struct sub_seq {
74 * @first_free: array index of first unused sub-sequence entry 74 * @first_free: array index of first unused sub-sequence entry
75 * @ns_list: links to adjacent name sequences in hash chain 75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type' 76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure 77 * @lock: spinlock controlling access to publication lists of all sub-sequences
78 */ 78 */
79 79
80struct name_seq { 80struct name_seq {
@@ -905,6 +905,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
905 struct sub_seq *sseq; 905 struct sub_seq *sseq;
906 char typearea[11]; 906 char typearea[11];
907 907
908 if (seq->first_free == 0)
909 return;
910
908 sprintf(typearea, "%-10u", seq->type); 911 sprintf(typearea, "%-10u", seq->type);
909 912
910 if (depth == 1) { 913 if (depth == 1) {
@@ -915,7 +918,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
915 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { 918 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
916 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { 919 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
917 tipc_printf(buf, "%s ", typearea); 920 tipc_printf(buf, "%s ", typearea);
921 spin_lock_bh(&seq->lock);
918 subseq_list(sseq, buf, depth, index); 922 subseq_list(sseq, buf, depth, index);
923 spin_unlock_bh(&seq->lock);
919 sprintf(typearea, "%10s", " "); 924 sprintf(typearea, "%10s", " ");
920 } 925 }
921 } 926 }
@@ -1050,15 +1055,12 @@ void tipc_nametbl_dump(void)
1050 1055
1051int tipc_nametbl_init(void) 1056int tipc_nametbl_init(void)
1052{ 1057{
1053 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1058 table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
1054 1059 GFP_ATOMIC);
1055 table.types = kzalloc(array_size, GFP_ATOMIC);
1056 if (!table.types) 1060 if (!table.types)
1057 return -ENOMEM; 1061 return -ENOMEM;
1058 1062
1059 write_lock_bh(&tipc_nametbl_lock);
1060 table.local_publ_count = 0; 1063 table.local_publ_count = 0;
1061 write_unlock_bh(&tipc_nametbl_lock);
1062 return 0; 1064 return 0;
1063} 1065}
1064 1066
diff --git a/net/tipc/net.c b/net/tipc/net.c
index c39c76201e8e..cc51fa483672 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -266,7 +266,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
266 tipc_link_send(buf, dnode, msg_link_selector(msg)); 266 tipc_link_send(buf, dnode, msg_link_selector(msg));
267} 267}
268 268
269int tipc_net_start(void) 269int tipc_net_start(u32 addr)
270{ 270{
271 char addr_string[16]; 271 char addr_string[16];
272 int res; 272 int res;
@@ -274,6 +274,10 @@ int tipc_net_start(void)
274 if (tipc_mode != TIPC_NODE_MODE) 274 if (tipc_mode != TIPC_NODE_MODE)
275 return -ENOPROTOOPT; 275 return -ENOPROTOOPT;
276 276
277 tipc_subscr_stop();
278 tipc_cfg_stop();
279
280 tipc_own_addr = addr;
277 tipc_mode = TIPC_NET_MODE; 281 tipc_mode = TIPC_NET_MODE;
278 tipc_named_reinit(); 282 tipc_named_reinit();
279 tipc_port_reinit(); 283 tipc_port_reinit();
@@ -284,10 +288,10 @@ int tipc_net_start(void)
284 (res = tipc_bclink_init())) { 288 (res = tipc_bclink_init())) {
285 return res; 289 return res;
286 } 290 }
287 tipc_subscr_stop(); 291
288 tipc_cfg_stop();
289 tipc_k_signal((Handler)tipc_subscr_start, 0); 292 tipc_k_signal((Handler)tipc_subscr_start, 0);
290 tipc_k_signal((Handler)tipc_cfg_init, 0); 293 tipc_k_signal((Handler)tipc_cfg_init, 0);
294
291 info("Started in network mode\n"); 295 info("Started in network mode\n");
292 info("Own node address %s, network identity %u\n", 296 info("Own node address %s, network identity %u\n",
293 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 297 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
diff --git a/net/tipc/net.h b/net/tipc/net.h
index a6a0e9976ac9..d154ac2bda9a 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -58,7 +58,7 @@ void tipc_net_route_msg(struct sk_buff *buf);
58struct node *tipc_net_select_remote_node(u32 addr, u32 ref); 58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59u32 tipc_net_select_router(u32 addr, u32 ref); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60 60
61int tipc_net_start(void); 61int tipc_net_start(u32 addr);
62void tipc_net_stop(void); 62void tipc_net_stop(void);
63 63
64#endif 64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6a7f7b4c2595..c387217bb230 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -2,7 +2,7 @@
2 * net/tipc/netlink.c: TIPC configuration handling 2 * net/tipc/netlink.c: TIPC configuration handling
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -45,15 +45,17 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
45 struct nlmsghdr *req_nlh = info->nlhdr; 45 struct nlmsghdr *req_nlh = info->nlhdr;
46 struct tipc_genlmsghdr *req_userhdr = info->userhdr; 46 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 u16 cmd;
48 49
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 50 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); 51 cmd = TIPC_CMD_NOT_NET_ADMIN;
51 else 52 else
52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, 53 cmd = req_userhdr->cmd;
53 req_userhdr->cmd, 54
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 55 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 56 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
56 hdr_space); 57 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
58 hdr_space);
57 59
58 if (rep_buf) { 60 if (rep_buf) {
59 skb_push(rep_buf, hdr_space); 61 skb_push(rep_buf, hdr_space);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 598f4d3a0098..34e9a2bb7c19 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -52,16 +52,40 @@ static void node_established_contact(struct node *n_ptr);
52 52
53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ 53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
54 54
55static DEFINE_SPINLOCK(node_create_lock);
56
55u32 tipc_own_tag = 0; 57u32 tipc_own_tag = 0;
56 58
59/**
60 * tipc_node_create - create neighboring node
61 *
62 * Currently, this routine is called by neighbor discovery code, which holds
63 * net_lock for reading only. We must take node_create_lock to ensure a node
64 * isn't created twice if two different bearers discover the node at the same
65 * time. (It would be preferable to switch to holding net_lock in write mode,
66 * but this is a non-trivial change.)
67 */
68
57struct node *tipc_node_create(u32 addr) 69struct node *tipc_node_create(u32 addr)
58{ 70{
59 struct cluster *c_ptr; 71 struct cluster *c_ptr;
60 struct node *n_ptr; 72 struct node *n_ptr;
61 struct node **curr_node; 73 struct node **curr_node;
62 74
75 spin_lock_bh(&node_create_lock);
76
77 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
78 if (addr < n_ptr->addr)
79 break;
80 if (addr == n_ptr->addr) {
81 spin_unlock_bh(&node_create_lock);
82 return n_ptr;
83 }
84 }
85
63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); 86 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (!n_ptr) { 87 if (!n_ptr) {
88 spin_unlock_bh(&node_create_lock);
65 warn("Node creation failed, no memory\n"); 89 warn("Node creation failed, no memory\n");
66 return NULL; 90 return NULL;
67 } 91 }
@@ -71,6 +95,7 @@ struct node *tipc_node_create(u32 addr)
71 c_ptr = tipc_cltr_create(addr); 95 c_ptr = tipc_cltr_create(addr);
72 } 96 }
73 if (!c_ptr) { 97 if (!c_ptr) {
98 spin_unlock_bh(&node_create_lock);
74 kfree(n_ptr); 99 kfree(n_ptr);
75 return NULL; 100 return NULL;
76 } 101 }
@@ -91,6 +116,7 @@ struct node *tipc_node_create(u32 addr)
91 } 116 }
92 } 117 }
93 (*curr_node) = n_ptr; 118 (*curr_node) = n_ptr;
119 spin_unlock_bh(&node_create_lock);
94 return n_ptr; 120 return n_ptr;
95} 121}
96 122
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2f5806410c64..2e0cff408ff9 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -211,15 +211,18 @@ exit:
211} 211}
212 212
213/** 213/**
214 * tipc_createport_raw - create a native TIPC port 214 * tipc_createport_raw - create a generic TIPC port
215 * 215 *
216 * Returns local port reference 216 * Returns port reference, or 0 if unable to create it
217 *
218 * Note: The newly created port is returned in the locked state.
217 */ 219 */
218 220
219u32 tipc_createport_raw(void *usr_handle, 221u32 tipc_createport_raw(void *usr_handle,
220 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 222 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
221 void (*wakeup)(struct tipc_port *), 223 void (*wakeup)(struct tipc_port *),
222 const u32 importance) 224 const u32 importance,
225 struct tipc_port **tp_ptr)
223{ 226{
224 struct port *p_ptr; 227 struct port *p_ptr;
225 struct tipc_msg *msg; 228 struct tipc_msg *msg;
@@ -237,17 +240,12 @@ u32 tipc_createport_raw(void *usr_handle,
237 return 0; 240 return 0;
238 } 241 }
239 242
240 tipc_port_lock(ref);
241 p_ptr->publ.usr_handle = usr_handle; 243 p_ptr->publ.usr_handle = usr_handle;
242 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; 244 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
243 p_ptr->publ.ref = ref; 245 p_ptr->publ.ref = ref;
244 msg = &p_ptr->publ.phdr; 246 msg = &p_ptr->publ.phdr;
245 msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 247 msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
246 0);
247 msg_set_orignode(msg, tipc_own_addr);
248 msg_set_prevnode(msg, tipc_own_addr);
249 msg_set_origport(msg, ref); 248 msg_set_origport(msg, ref);
250 msg_set_importance(msg,importance);
251 p_ptr->last_in_seqno = 41; 249 p_ptr->last_in_seqno = 41;
252 p_ptr->sent = 1; 250 p_ptr->sent = 1;
253 INIT_LIST_HEAD(&p_ptr->wait_list); 251 INIT_LIST_HEAD(&p_ptr->wait_list);
@@ -262,7 +260,7 @@ u32 tipc_createport_raw(void *usr_handle,
262 INIT_LIST_HEAD(&p_ptr->port_list); 260 INIT_LIST_HEAD(&p_ptr->port_list);
263 list_add_tail(&p_ptr->port_list, &ports); 261 list_add_tail(&p_ptr->port_list, &ports);
264 spin_unlock_bh(&tipc_port_list_lock); 262 spin_unlock_bh(&tipc_port_list_lock);
265 tipc_port_unlock(p_ptr); 263 *tp_ptr = &p_ptr->publ;
266 return ref; 264 return ref;
267} 265}
268 266
@@ -402,10 +400,10 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
402 buf = buf_acquire(LONG_H_SIZE); 400 buf = buf_acquire(LONG_H_SIZE);
403 if (buf) { 401 if (buf) {
404 msg = buf_msg(buf); 402 msg = buf_msg(buf);
405 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode); 403 msg_init(msg, usr, type, LONG_H_SIZE, destnode);
404 msg_set_errcode(msg, err);
406 msg_set_destport(msg, destport); 405 msg_set_destport(msg, destport);
407 msg_set_origport(msg, origport); 406 msg_set_origport(msg, origport);
408 msg_set_destnode(msg, destnode);
409 msg_set_orignode(msg, orignode); 407 msg_set_orignode(msg, orignode);
410 msg_set_transp_seqno(msg, seqno); 408 msg_set_transp_seqno(msg, seqno);
411 msg_set_msgcnt(msg, ack); 409 msg_set_msgcnt(msg, ack);
@@ -446,17 +444,19 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
446 return data_sz; 444 return data_sz;
447 } 445 }
448 rmsg = buf_msg(rbuf); 446 rmsg = buf_msg(rbuf);
449 msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg)); 447 msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
448 msg_set_errcode(rmsg, err);
450 msg_set_destport(rmsg, msg_origport(msg)); 449 msg_set_destport(rmsg, msg_origport(msg));
451 msg_set_prevnode(rmsg, tipc_own_addr);
452 msg_set_origport(rmsg, msg_destport(msg)); 450 msg_set_origport(rmsg, msg_destport(msg));
453 if (msg_short(msg)) 451 if (msg_short(msg)) {
454 msg_set_orignode(rmsg, tipc_own_addr); 452 msg_set_orignode(rmsg, tipc_own_addr);
455 else 453 /* leave name type & instance as zeroes */
454 } else {
456 msg_set_orignode(rmsg, msg_destnode(msg)); 455 msg_set_orignode(rmsg, msg_destnode(msg));
456 msg_set_nametype(rmsg, msg_nametype(msg));
457 msg_set_nameinst(rmsg, msg_nameinst(msg));
458 }
457 msg_set_size(rmsg, data_sz + hdr_sz); 459 msg_set_size(rmsg, data_sz + hdr_sz);
458 msg_set_nametype(rmsg, msg_nametype(msg));
459 msg_set_nameinst(rmsg, msg_nameinst(msg));
460 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); 460 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
461 461
462 /* send self-abort message when rejecting on a connected port */ 462 /* send self-abort message when rejecting on a connected port */
@@ -778,6 +778,7 @@ void tipc_port_reinit(void)
778 msg = &p_ptr->publ.phdr; 778 msg = &p_ptr->publ.phdr;
779 if (msg_orignode(msg) == tipc_own_addr) 779 if (msg_orignode(msg) == tipc_own_addr)
780 break; 780 break;
781 msg_set_prevnode(msg, tipc_own_addr);
781 msg_set_orignode(msg, tipc_own_addr); 782 msg_set_orignode(msg, tipc_own_addr);
782 } 783 }
783 spin_unlock_bh(&tipc_port_list_lock); 784 spin_unlock_bh(&tipc_port_list_lock);
@@ -838,16 +839,13 @@ static void port_dispatcher_sigh(void *dummy)
838 u32 peer_node = port_peernode(p_ptr); 839 u32 peer_node = port_peernode(p_ptr);
839 840
840 tipc_port_unlock(p_ptr); 841 tipc_port_unlock(p_ptr);
842 if (unlikely(!cb))
843 goto reject;
841 if (unlikely(!connected)) { 844 if (unlikely(!connected)) {
842 if (unlikely(published)) 845 if (tipc_connect2port(dref, &orig))
843 goto reject; 846 goto reject;
844 tipc_connect2port(dref,&orig); 847 } else if ((msg_origport(msg) != peer_port) ||
845 } 848 (msg_orignode(msg) != peer_node))
846 if (unlikely(msg_origport(msg) != peer_port))
847 goto reject;
848 if (unlikely(msg_orignode(msg) != peer_node))
849 goto reject;
850 if (unlikely(!cb))
851 goto reject; 849 goto reject;
852 if (unlikely(++p_ptr->publ.conn_unacked >= 850 if (unlikely(++p_ptr->publ.conn_unacked >=
853 TIPC_FLOW_CONTROL_WIN)) 851 TIPC_FLOW_CONTROL_WIN))
@@ -862,9 +860,7 @@ static void port_dispatcher_sigh(void *dummy)
862 tipc_msg_event cb = up_ptr->msg_cb; 860 tipc_msg_event cb = up_ptr->msg_cb;
863 861
864 tipc_port_unlock(p_ptr); 862 tipc_port_unlock(p_ptr);
865 if (unlikely(connected)) 863 if (unlikely(!cb || connected))
866 goto reject;
867 if (unlikely(!cb))
868 goto reject; 864 goto reject;
869 skb_pull(buf, msg_hdr_sz(msg)); 865 skb_pull(buf, msg_hdr_sz(msg));
870 cb(usr_handle, dref, &buf, msg_data(msg), 866 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -877,11 +873,7 @@ static void port_dispatcher_sigh(void *dummy)
877 tipc_named_msg_event cb = up_ptr->named_msg_cb; 873 tipc_named_msg_event cb = up_ptr->named_msg_cb;
878 874
879 tipc_port_unlock(p_ptr); 875 tipc_port_unlock(p_ptr);
880 if (unlikely(connected)) 876 if (unlikely(!cb || connected || !published))
881 goto reject;
882 if (unlikely(!cb))
883 goto reject;
884 if (unlikely(!published))
885 goto reject; 877 goto reject;
886 dseq.type = msg_nametype(msg); 878 dseq.type = msg_nametype(msg);
887 dseq.lower = msg_nameinst(msg); 879 dseq.lower = msg_nameinst(msg);
@@ -908,11 +900,10 @@ err:
908 u32 peer_node = port_peernode(p_ptr); 900 u32 peer_node = port_peernode(p_ptr);
909 901
910 tipc_port_unlock(p_ptr); 902 tipc_port_unlock(p_ptr);
911 if (!connected || !cb) 903 if (!cb || !connected)
912 break;
913 if (msg_origport(msg) != peer_port)
914 break; 904 break;
915 if (msg_orignode(msg) != peer_node) 905 if ((msg_origport(msg) != peer_port) ||
906 (msg_orignode(msg) != peer_node))
916 break; 907 break;
917 tipc_disconnect(dref); 908 tipc_disconnect(dref);
918 skb_pull(buf, msg_hdr_sz(msg)); 909 skb_pull(buf, msg_hdr_sz(msg));
@@ -924,7 +915,7 @@ err:
924 tipc_msg_err_event cb = up_ptr->err_cb; 915 tipc_msg_err_event cb = up_ptr->err_cb;
925 916
926 tipc_port_unlock(p_ptr); 917 tipc_port_unlock(p_ptr);
927 if (connected || !cb) 918 if (!cb || connected)
928 break; 919 break;
929 skb_pull(buf, msg_hdr_sz(msg)); 920 skb_pull(buf, msg_hdr_sz(msg));
930 cb(usr_handle, dref, &buf, msg_data(msg), 921 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -937,7 +928,7 @@ err:
937 up_ptr->named_err_cb; 928 up_ptr->named_err_cb;
938 929
939 tipc_port_unlock(p_ptr); 930 tipc_port_unlock(p_ptr);
940 if (connected || !cb) 931 if (!cb || connected)
941 break; 932 break;
942 dseq.type = msg_nametype(msg); 933 dseq.type = msg_nametype(msg);
943 dseq.lower = msg_nameinst(msg); 934 dseq.lower = msg_nameinst(msg);
@@ -1053,6 +1044,7 @@ int tipc_createport(u32 user_ref,
1053{ 1044{
1054 struct user_port *up_ptr; 1045 struct user_port *up_ptr;
1055 struct port *p_ptr; 1046 struct port *p_ptr;
1047 struct tipc_port *tp_ptr;
1056 u32 ref; 1048 u32 ref;
1057 1049
1058 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1050 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
@@ -1060,12 +1052,13 @@ int tipc_createport(u32 user_ref,
1060 warn("Port creation failed, no memory\n"); 1052 warn("Port creation failed, no memory\n");
1061 return -ENOMEM; 1053 return -ENOMEM;
1062 } 1054 }
1063 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1055 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
1064 p_ptr = tipc_port_lock(ref); 1056 importance, &tp_ptr);
1065 if (!p_ptr) { 1057 if (ref == 0) {
1066 kfree(up_ptr); 1058 kfree(up_ptr);
1067 return -ENOMEM; 1059 return -ENOMEM;
1068 } 1060 }
1061 p_ptr = (struct port *)tp_ptr;
1069 1062
1070 p_ptr->user_port = up_ptr; 1063 p_ptr->user_port = up_ptr;
1071 up_ptr->user_ref = user_ref; 1064 up_ptr->user_ref = user_ref;
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 89cbab24d08f..a101de86824d 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -142,9 +142,13 @@ void tipc_ref_table_stop(void)
142/** 142/**
143 * tipc_ref_acquire - create reference to an object 143 * tipc_ref_acquire - create reference to an object
144 * 144 *
145 * Return a unique reference value which can be translated back to the pointer 145 * Register an object pointer in reference table and lock the object.
146 * 'object' at a later time. Also, pass back a pointer to the lock protecting 146 * Returns a unique reference value that is used from then on to retrieve the
147 * the object, but without locking it. 147 * object pointer, or to determine that the object has been deregistered.
148 *
149 * Note: The object is returned in the locked state so that the caller can
150 * register a partially initialized object, without running the risk that
151 * the object will be accessed before initialization is complete.
148 */ 152 */
149 153
150u32 tipc_ref_acquire(void *object, spinlock_t **lock) 154u32 tipc_ref_acquire(void *object, spinlock_t **lock)
@@ -178,13 +182,13 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
178 ref = (next_plus_upper & ~index_mask) + index; 182 ref = (next_plus_upper & ~index_mask) + index;
179 entry->ref = ref; 183 entry->ref = ref;
180 entry->object = object; 184 entry->object = object;
181 spin_unlock_bh(&entry->lock);
182 *lock = &entry->lock; 185 *lock = &entry->lock;
183 } 186 }
184 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 187 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
185 index = tipc_ref_table.init_point++; 188 index = tipc_ref_table.init_point++;
186 entry = &(tipc_ref_table.entries[index]); 189 entry = &(tipc_ref_table.entries[index]);
187 spin_lock_init(&entry->lock); 190 spin_lock_init(&entry->lock);
191 spin_lock_bh(&entry->lock);
188 ref = tipc_ref_table.start_mask + index; 192 ref = tipc_ref_table.start_mask + index;
189 entry->ref = ref; 193 entry->ref = ref;
190 entry->object = object; 194 entry->object = object;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 230f9ca2ad6b..38f48795b40e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -188,6 +188,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
188 const struct proto_ops *ops; 188 const struct proto_ops *ops;
189 socket_state state; 189 socket_state state;
190 struct sock *sk; 190 struct sock *sk;
191 struct tipc_port *tp_ptr;
191 u32 portref; 192 u32 portref;
192 193
193 /* Validate arguments */ 194 /* Validate arguments */
@@ -225,7 +226,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
225 /* Allocate TIPC port for socket to use */ 226 /* Allocate TIPC port for socket to use */
226 227
227 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, 228 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
228 TIPC_LOW_IMPORTANCE); 229 TIPC_LOW_IMPORTANCE, &tp_ptr);
229 if (unlikely(portref == 0)) { 230 if (unlikely(portref == 0)) {
230 sk_free(sk); 231 sk_free(sk);
231 return -ENOMEM; 232 return -ENOMEM;
@@ -241,6 +242,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
241 sk->sk_backlog_rcv = backlog_rcv; 242 sk->sk_backlog_rcv = backlog_rcv;
242 tipc_sk(sk)->p = tipc_get_port(portref); 243 tipc_sk(sk)->p = tipc_get_port(portref);
243 244
245 spin_unlock_bh(tp_ptr->lock);
246
244 if (sock->state == SS_READY) { 247 if (sock->state == SS_READY) {
245 tipc_set_portunreturnable(portref, 1); 248 tipc_set_portunreturnable(portref, 1);
246 if (sock->type == SOCK_DGRAM) 249 if (sock->type == SOCK_DGRAM)
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8c01ccd3626c..0326d3060bc7 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.c: TIPC subscription service 2 * net/tipc/subscr.c: TIPC network topology service
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -36,27 +36,24 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h" 38#include "dbg.h"
39#include "subscr.h"
40#include "name_table.h" 39#include "name_table.h"
40#include "port.h"
41#include "ref.h" 41#include "ref.h"
42#include "subscr.h"
42 43
43/** 44/**
44 * struct subscriber - TIPC network topology subscriber 45 * struct subscriber - TIPC network topology subscriber
45 * @ref: object reference to subscriber object itself 46 * @port_ref: object reference to server port connecting to subscriber
46 * @lock: pointer to spinlock controlling access to subscriber object 47 * @lock: pointer to spinlock controlling access to subscriber's server port
47 * @subscriber_list: adjacent subscribers in top. server's list of subscribers 48 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
48 * @subscription_list: list of subscription objects for this subscriber 49 * @subscription_list: list of subscription objects for this subscriber
49 * @port_ref: object reference to port used to communicate with subscriber
50 * @swap: indicates if subscriber uses opposite endianness in its messages
51 */ 50 */
52 51
53struct subscriber { 52struct subscriber {
54 u32 ref; 53 u32 port_ref;
55 spinlock_t *lock; 54 spinlock_t *lock;
56 struct list_head subscriber_list; 55 struct list_head subscriber_list;
57 struct list_head subscription_list; 56 struct list_head subscription_list;
58 u32 port_ref;
59 int swap;
60}; 57};
61 58
62/** 59/**
@@ -88,13 +85,14 @@ static struct top_srv topsrv = { 0 };
88 85
89static u32 htohl(u32 in, int swap) 86static u32 htohl(u32 in, int swap)
90{ 87{
91 char *c = (char *)&in; 88 return swap ? (u32)___constant_swab32(in) : in;
92
93 return swap ? ((c[3] << 3) + (c[2] << 2) + (c[1] << 1) + c[0]) : in;
94} 89}
95 90
96/** 91/**
97 * subscr_send_event - send a message containing a tipc_event to the subscriber 92 * subscr_send_event - send a message containing a tipc_event to the subscriber
93 *
94 * Note: Must not hold subscriber's server port lock, since tipc_send() will
95 * try to take the lock if the message is rejected and returned!
98 */ 96 */
99 97
100static void subscr_send_event(struct subscription *sub, 98static void subscr_send_event(struct subscription *sub,
@@ -109,12 +107,12 @@ static void subscr_send_event(struct subscription *sub,
109 msg_sect.iov_base = (void *)&sub->evt; 107 msg_sect.iov_base = (void *)&sub->evt;
110 msg_sect.iov_len = sizeof(struct tipc_event); 108 msg_sect.iov_len = sizeof(struct tipc_event);
111 109
112 sub->evt.event = htohl(event, sub->owner->swap); 110 sub->evt.event = htohl(event, sub->swap);
113 sub->evt.found_lower = htohl(found_lower, sub->owner->swap); 111 sub->evt.found_lower = htohl(found_lower, sub->swap);
114 sub->evt.found_upper = htohl(found_upper, sub->owner->swap); 112 sub->evt.found_upper = htohl(found_upper, sub->swap);
115 sub->evt.port.ref = htohl(port_ref, sub->owner->swap); 113 sub->evt.port.ref = htohl(port_ref, sub->swap);
116 sub->evt.port.node = htohl(node, sub->owner->swap); 114 sub->evt.port.node = htohl(node, sub->swap);
117 tipc_send(sub->owner->port_ref, 1, &msg_sect); 115 tipc_send(sub->server_ref, 1, &msg_sect);
118} 116}
119 117
120/** 118/**
@@ -151,13 +149,12 @@ void tipc_subscr_report_overlap(struct subscription *sub,
151 u32 node, 149 u32 node,
152 int must) 150 int must)
153{ 151{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper);
156 if (!tipc_subscr_overlap(sub, found_lower, found_upper)) 152 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 153 return;
158 if (!must && !(sub->filter & TIPC_SUB_PORTS)) 154 if (!must && !(sub->filter & TIPC_SUB_PORTS))
159 return; 155 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); 156
157 sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
161} 158}
162 159
163/** 160/**
@@ -166,20 +163,18 @@ void tipc_subscr_report_overlap(struct subscription *sub,
166 163
167static void subscr_timeout(struct subscription *sub) 164static void subscr_timeout(struct subscription *sub)
168{ 165{
169 struct subscriber *subscriber; 166 struct port *server_port;
170 u32 subscriber_ref;
171 167
172 /* Validate subscriber reference (in case subscriber is terminating) */ 168 /* Validate server port reference (in case subscriber is terminating) */
173 169
174 subscriber_ref = sub->owner->ref; 170 server_port = tipc_port_lock(sub->server_ref);
175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref); 171 if (server_port == NULL)
176 if (subscriber == NULL)
177 return; 172 return;
178 173
179 /* Validate timeout (in case subscription is being cancelled) */ 174 /* Validate timeout (in case subscription is being cancelled) */
180 175
181 if (sub->timeout == TIPC_WAIT_FOREVER) { 176 if (sub->timeout == TIPC_WAIT_FOREVER) {
182 tipc_ref_unlock(subscriber_ref); 177 tipc_port_unlock(server_port);
183 return; 178 return;
184 } 179 }
185 180
@@ -187,19 +182,21 @@ static void subscr_timeout(struct subscription *sub)
187 182
188 tipc_nametbl_unsubscribe(sub); 183 tipc_nametbl_unsubscribe(sub);
189 184
190 /* Notify subscriber of timeout, then unlink subscription */ 185 /* Unlink subscription from subscriber */
191 186
192 subscr_send_event(sub,
193 sub->evt.s.seq.lower,
194 sub->evt.s.seq.upper,
195 TIPC_SUBSCR_TIMEOUT,
196 0,
197 0);
198 list_del(&sub->subscription_list); 187 list_del(&sub->subscription_list);
199 188
189 /* Release subscriber's server port */
190
191 tipc_port_unlock(server_port);
192
193 /* Notify subscriber of timeout */
194
195 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
196 TIPC_SUBSCR_TIMEOUT, 0, 0);
197
200 /* Now destroy subscription */ 198 /* Now destroy subscription */
201 199
202 tipc_ref_unlock(subscriber_ref);
203 k_term_timer(&sub->timer); 200 k_term_timer(&sub->timer);
204 kfree(sub); 201 kfree(sub);
205 atomic_dec(&topsrv.subscription_count); 202 atomic_dec(&topsrv.subscription_count);
@@ -208,7 +205,7 @@ static void subscr_timeout(struct subscription *sub)
208/** 205/**
209 * subscr_del - delete a subscription within a subscription list 206 * subscr_del - delete a subscription within a subscription list
210 * 207 *
211 * Called with subscriber locked. 208 * Called with subscriber port locked.
212 */ 209 */
213 210
214static void subscr_del(struct subscription *sub) 211static void subscr_del(struct subscription *sub)
@@ -222,7 +219,7 @@ static void subscr_del(struct subscription *sub)
222/** 219/**
223 * subscr_terminate - terminate communication with a subscriber 220 * subscr_terminate - terminate communication with a subscriber
224 * 221 *
225 * Called with subscriber locked. Routine must temporarily release this lock 222 * Called with subscriber port locked. Routine must temporarily release lock
226 * to enable subscription timeout routine(s) to finish without deadlocking; 223 * to enable subscription timeout routine(s) to finish without deadlocking;
227 * the lock is then reclaimed to allow caller to release it upon return. 224 * the lock is then reclaimed to allow caller to release it upon return.
228 * (This should work even in the unlikely event some other thread creates 225 * (This should work even in the unlikely event some other thread creates
@@ -232,14 +229,21 @@ static void subscr_del(struct subscription *sub)
232 229
233static void subscr_terminate(struct subscriber *subscriber) 230static void subscr_terminate(struct subscriber *subscriber)
234{ 231{
232 u32 port_ref;
235 struct subscription *sub; 233 struct subscription *sub;
236 struct subscription *sub_temp; 234 struct subscription *sub_temp;
237 235
238 /* Invalidate subscriber reference */ 236 /* Invalidate subscriber reference */
239 237
240 tipc_ref_discard(subscriber->ref); 238 port_ref = subscriber->port_ref;
239 subscriber->port_ref = 0;
241 spin_unlock_bh(subscriber->lock); 240 spin_unlock_bh(subscriber->lock);
242 241
242 /* Sever connection to subscriber */
243
244 tipc_shutdown(port_ref);
245 tipc_deleteport(port_ref);
246
243 /* Destroy any existing subscriptions for subscriber */ 247 /* Destroy any existing subscriptions for subscriber */
244 248
245 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 249 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
@@ -253,27 +257,25 @@ static void subscr_terminate(struct subscriber *subscriber)
253 subscr_del(sub); 257 subscr_del(sub);
254 } 258 }
255 259
256 /* Sever connection to subscriber */
257
258 tipc_shutdown(subscriber->port_ref);
259 tipc_deleteport(subscriber->port_ref);
260
261 /* Remove subscriber from topology server's subscriber list */ 260 /* Remove subscriber from topology server's subscriber list */
262 261
263 spin_lock_bh(&topsrv.lock); 262 spin_lock_bh(&topsrv.lock);
264 list_del(&subscriber->subscriber_list); 263 list_del(&subscriber->subscriber_list);
265 spin_unlock_bh(&topsrv.lock); 264 spin_unlock_bh(&topsrv.lock);
266 265
267 /* Now destroy subscriber */ 266 /* Reclaim subscriber lock */
268 267
269 spin_lock_bh(subscriber->lock); 268 spin_lock_bh(subscriber->lock);
269
270 /* Now destroy subscriber */
271
270 kfree(subscriber); 272 kfree(subscriber);
271} 273}
272 274
273/** 275/**
274 * subscr_cancel - handle subscription cancellation request 276 * subscr_cancel - handle subscription cancellation request
275 * 277 *
276 * Called with subscriber locked. Routine must temporarily release this lock 278 * Called with subscriber port locked. Routine must temporarily release lock
277 * to enable the subscription timeout routine to finish without deadlocking; 279 * to enable the subscription timeout routine to finish without deadlocking;
278 * the lock is then reclaimed to allow caller to release it upon return. 280 * the lock is then reclaimed to allow caller to release it upon return.
279 * 281 *
@@ -316,27 +318,25 @@ static void subscr_cancel(struct tipc_subscr *s,
316/** 318/**
317 * subscr_subscribe - create subscription for subscriber 319 * subscr_subscribe - create subscription for subscriber
318 * 320 *
319 * Called with subscriber locked 321 * Called with subscriber port locked.
320 */ 322 */
321 323
322static void subscr_subscribe(struct tipc_subscr *s, 324static struct subscription *subscr_subscribe(struct tipc_subscr *s,
323 struct subscriber *subscriber) 325 struct subscriber *subscriber)
324{ 326{
325 struct subscription *sub; 327 struct subscription *sub;
328 int swap;
326 329
327 /* Determine/update subscriber's endianness */ 330 /* Determine subscriber's endianness */
328 331
329 if (s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)) 332 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
330 subscriber->swap = 0;
331 else
332 subscriber->swap = 1;
333 333
334 /* Detect & process a subscription cancellation request */ 334 /* Detect & process a subscription cancellation request */
335 335
336 if (s->filter & htohl(TIPC_SUB_CANCEL, subscriber->swap)) { 336 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, subscriber->swap); 337 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
338 subscr_cancel(s, subscriber); 338 subscr_cancel(s, subscriber);
339 return; 339 return NULL;
340 } 340 }
341 341
342 /* Refuse subscription if global limit exceeded */ 342 /* Refuse subscription if global limit exceeded */
@@ -345,63 +345,66 @@ static void subscr_subscribe(struct tipc_subscr *s,
345 warn("Subscription rejected, subscription limit reached (%u)\n", 345 warn("Subscription rejected, subscription limit reached (%u)\n",
346 tipc_max_subscriptions); 346 tipc_max_subscriptions);
347 subscr_terminate(subscriber); 347 subscr_terminate(subscriber);
348 return; 348 return NULL;
349 } 349 }
350 350
351 /* Allocate subscription object */ 351 /* Allocate subscription object */
352 352
353 sub = kzalloc(sizeof(*sub), GFP_ATOMIC); 353 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
354 if (!sub) { 354 if (!sub) {
355 warn("Subscription rejected, no memory\n"); 355 warn("Subscription rejected, no memory\n");
356 subscr_terminate(subscriber); 356 subscr_terminate(subscriber);
357 return; 357 return NULL;
358 } 358 }
359 359
360 /* Initialize subscription object */ 360 /* Initialize subscription object */
361 361
362 sub->seq.type = htohl(s->seq.type, subscriber->swap); 362 sub->seq.type = htohl(s->seq.type, swap);
363 sub->seq.lower = htohl(s->seq.lower, subscriber->swap); 363 sub->seq.lower = htohl(s->seq.lower, swap);
364 sub->seq.upper = htohl(s->seq.upper, subscriber->swap); 364 sub->seq.upper = htohl(s->seq.upper, swap);
365 sub->timeout = htohl(s->timeout, subscriber->swap); 365 sub->timeout = htohl(s->timeout, swap);
366 sub->filter = htohl(s->filter, subscriber->swap); 366 sub->filter = htohl(s->filter, swap);
367 if ((!(sub->filter & TIPC_SUB_PORTS) 367 if ((!(sub->filter & TIPC_SUB_PORTS)
368 == !(sub->filter & TIPC_SUB_SERVICE)) 368 == !(sub->filter & TIPC_SUB_SERVICE))
369 || (sub->seq.lower > sub->seq.upper)) { 369 || (sub->seq.lower > sub->seq.upper)) {
370 warn("Subscription rejected, illegal request\n"); 370 warn("Subscription rejected, illegal request\n");
371 kfree(sub); 371 kfree(sub);
372 subscr_terminate(subscriber); 372 subscr_terminate(subscriber);
373 return; 373 return NULL;
374 } 374 }
375 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 375 sub->event_cb = subscr_send_event;
376 INIT_LIST_HEAD(&sub->subscription_list);
377 INIT_LIST_HEAD(&sub->nameseq_list); 376 INIT_LIST_HEAD(&sub->nameseq_list);
378 list_add(&sub->subscription_list, &subscriber->subscription_list); 377 list_add(&sub->subscription_list, &subscriber->subscription_list);
378 sub->server_ref = subscriber->port_ref;
379 sub->swap = swap;
380 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
379 atomic_inc(&topsrv.subscription_count); 381 atomic_inc(&topsrv.subscription_count);
380 if (sub->timeout != TIPC_WAIT_FOREVER) { 382 if (sub->timeout != TIPC_WAIT_FOREVER) {
381 k_init_timer(&sub->timer, 383 k_init_timer(&sub->timer,
382 (Handler)subscr_timeout, (unsigned long)sub); 384 (Handler)subscr_timeout, (unsigned long)sub);
383 k_start_timer(&sub->timer, sub->timeout); 385 k_start_timer(&sub->timer, sub->timeout);
384 } 386 }
385 sub->owner = subscriber; 387
386 tipc_nametbl_subscribe(sub); 388 return sub;
387} 389}
388 390
389/** 391/**
390 * subscr_conn_shutdown_event - handle termination request from subscriber 392 * subscr_conn_shutdown_event - handle termination request from subscriber
393 *
394 * Called with subscriber's server port unlocked.
391 */ 395 */
392 396
393static void subscr_conn_shutdown_event(void *usr_handle, 397static void subscr_conn_shutdown_event(void *usr_handle,
394 u32 portref, 398 u32 port_ref,
395 struct sk_buff **buf, 399 struct sk_buff **buf,
396 unsigned char const *data, 400 unsigned char const *data,
397 unsigned int size, 401 unsigned int size,
398 int reason) 402 int reason)
399{ 403{
400 struct subscriber *subscriber; 404 struct subscriber *subscriber = usr_handle;
401 spinlock_t *subscriber_lock; 405 spinlock_t *subscriber_lock;
402 406
403 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 407 if (tipc_port_lock(port_ref) == NULL)
404 if (subscriber == NULL)
405 return; 408 return;
406 409
407 subscriber_lock = subscriber->lock; 410 subscriber_lock = subscriber->lock;
@@ -411,6 +414,8 @@ static void subscr_conn_shutdown_event(void *usr_handle,
411 414
412/** 415/**
413 * subscr_conn_msg_event - handle new subscription request from subscriber 416 * subscr_conn_msg_event - handle new subscription request from subscriber
417 *
418 * Called with subscriber's server port unlocked.
414 */ 419 */
415 420
416static void subscr_conn_msg_event(void *usr_handle, 421static void subscr_conn_msg_event(void *usr_handle,
@@ -419,20 +424,46 @@ static void subscr_conn_msg_event(void *usr_handle,
419 const unchar *data, 424 const unchar *data,
420 u32 size) 425 u32 size)
421{ 426{
422 struct subscriber *subscriber; 427 struct subscriber *subscriber = usr_handle;
423 spinlock_t *subscriber_lock; 428 spinlock_t *subscriber_lock;
429 struct subscription *sub;
430
431 /*
432 * Lock subscriber's server port (& make a local copy of lock pointer,
433 * in case subscriber is deleted while processing subscription request)
434 */
424 435
425 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 436 if (tipc_port_lock(port_ref) == NULL)
426 if (subscriber == NULL)
427 return; 437 return;
428 438
429 subscriber_lock = subscriber->lock; 439 subscriber_lock = subscriber->lock;
430 if (size != sizeof(struct tipc_subscr))
431 subscr_terminate(subscriber);
432 else
433 subscr_subscribe((struct tipc_subscr *)data, subscriber);
434 440
435 spin_unlock_bh(subscriber_lock); 441 if (size != sizeof(struct tipc_subscr)) {
442 subscr_terminate(subscriber);
443 spin_unlock_bh(subscriber_lock);
444 } else {
445 sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
446 spin_unlock_bh(subscriber_lock);
447 if (sub != NULL) {
448
449 /*
450 * We must release the server port lock before adding a
451 * subscription to the name table since TIPC needs to be
452 * able to (re)acquire the port lock if an event message
453 * issued by the subscription process is rejected and
454 * returned. The subscription cannot be deleted while
455 * it is being added to the name table because:
456 * a) the single-threading of the native API port code
457 * ensures the subscription cannot be cancelled and
458 * the subscriber connection cannot be broken, and
459 * b) the name table lock ensures the subscription
460 * timeout code cannot delete the subscription,
461 * so the subscription object is still protected.
462 */
463
464 tipc_nametbl_subscribe(sub);
465 }
466 }
436} 467}
437 468
438/** 469/**
@@ -448,16 +479,10 @@ static void subscr_named_msg_event(void *usr_handle,
448 struct tipc_portid const *orig, 479 struct tipc_portid const *orig,
449 struct tipc_name_seq const *dest) 480 struct tipc_name_seq const *dest)
450{ 481{
451 struct subscriber *subscriber; 482 static struct iovec msg_sect = {NULL, 0};
452 struct iovec msg_sect = {NULL, 0};
453 spinlock_t *subscriber_lock;
454 483
455 dbg("subscr_named_msg_event: orig = %x own = %x,\n", 484 struct subscriber *subscriber;
456 orig->node, tipc_own_addr); 485 u32 server_port_ref;
457 if (size && (size != sizeof(struct tipc_subscr))) {
458 warn("Subscriber rejected, invalid subscription size\n");
459 return;
460 }
461 486
462 /* Create subscriber object */ 487 /* Create subscriber object */
463 488
@@ -468,17 +493,11 @@ static void subscr_named_msg_event(void *usr_handle,
468 } 493 }
469 INIT_LIST_HEAD(&subscriber->subscription_list); 494 INIT_LIST_HEAD(&subscriber->subscription_list);
470 INIT_LIST_HEAD(&subscriber->subscriber_list); 495 INIT_LIST_HEAD(&subscriber->subscriber_list);
471 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
472 if (subscriber->ref == 0) {
473 warn("Subscriber rejected, reference table exhausted\n");
474 kfree(subscriber);
475 return;
476 }
477 496
478 /* Establish a connection to subscriber */ 497 /* Create server port & establish connection to subscriber */
479 498
480 tipc_createport(topsrv.user_ref, 499 tipc_createport(topsrv.user_ref,
481 (void *)(unsigned long)subscriber->ref, 500 subscriber,
482 importance, 501 importance,
483 NULL, 502 NULL,
484 NULL, 503 NULL,
@@ -490,32 +509,36 @@ static void subscr_named_msg_event(void *usr_handle,
490 &subscriber->port_ref); 509 &subscriber->port_ref);
491 if (subscriber->port_ref == 0) { 510 if (subscriber->port_ref == 0) {
492 warn("Subscriber rejected, unable to create port\n"); 511 warn("Subscriber rejected, unable to create port\n");
493 tipc_ref_discard(subscriber->ref);
494 kfree(subscriber); 512 kfree(subscriber);
495 return; 513 return;
496 } 514 }
497 tipc_connect2port(subscriber->port_ref, orig); 515 tipc_connect2port(subscriber->port_ref, orig);
498 516
517 /* Lock server port (& save lock address for future use) */
518
519 subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
499 520
500 /* Add subscriber to topology server's subscriber list */ 521 /* Add subscriber to topology server's subscriber list */
501 522
502 tipc_ref_lock(subscriber->ref);
503 spin_lock_bh(&topsrv.lock); 523 spin_lock_bh(&topsrv.lock);
504 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 524 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
505 spin_unlock_bh(&topsrv.lock); 525 spin_unlock_bh(&topsrv.lock);
506 526
507 /* 527 /* Unlock server port */
508 * Subscribe now if message contains a subscription,
509 * otherwise send an empty response to complete connection handshaking
510 */
511 528
512 subscriber_lock = subscriber->lock; 529 server_port_ref = subscriber->port_ref;
513 if (size) 530 spin_unlock_bh(subscriber->lock);
514 subscr_subscribe((struct tipc_subscr *)data, subscriber);
515 else
516 tipc_send(subscriber->port_ref, 1, &msg_sect);
517 531
518 spin_unlock_bh(subscriber_lock); 532 /* Send an ACK- to complete connection handshaking */
533
534 tipc_send(server_port_ref, 1, &msg_sect);
535
536 /* Handle optional subscription request */
537
538 if (size != 0) {
539 subscr_conn_msg_event(subscriber, server_port_ref,
540 buf, data, size);
541 }
519} 542}
520 543
521int tipc_subscr_start(void) 544int tipc_subscr_start(void)
@@ -574,8 +597,8 @@ void tipc_subscr_stop(void)
574 list_for_each_entry_safe(subscriber, subscriber_temp, 597 list_for_each_entry_safe(subscriber, subscriber_temp,
575 &topsrv.subscriber_list, 598 &topsrv.subscriber_list,
576 subscriber_list) { 599 subscriber_list) {
577 tipc_ref_lock(subscriber->ref);
578 subscriber_lock = subscriber->lock; 600 subscriber_lock = subscriber->lock;
601 spin_lock_bh(subscriber_lock);
579 subscr_terminate(subscriber); 602 subscr_terminate(subscriber);
580 spin_unlock_bh(subscriber_lock); 603 spin_unlock_bh(subscriber_lock);
581 } 604 }
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 93a8e674fac1..45d89bf4d202 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service 2 * net/tipc/subscr.h: Include file for TIPC network topology service
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,34 +37,44 @@
37#ifndef _TIPC_SUBSCR_H 37#ifndef _TIPC_SUBSCR_H
38#define _TIPC_SUBSCR_H 38#define _TIPC_SUBSCR_H
39 39
40struct subscription;
41
42typedef void (*tipc_subscr_event) (struct subscription *sub,
43 u32 found_lower, u32 found_upper,
44 u32 event, u32 port_ref, u32 node);
45
40/** 46/**
41 * struct subscription - TIPC network topology subscription object 47 * struct subscription - TIPC network topology subscription object
42 * @seq: name sequence associated with subscription 48 * @seq: name sequence associated with subscription
43 * @timeout: duration of subscription (in ms) 49 * @timeout: duration of subscription (in ms)
44 * @filter: event filtering to be done for subscription 50 * @filter: event filtering to be done for subscription
45 * @evt: template for events generated by subscription 51 * @event_cb: routine invoked when a subscription event is detected
46 * @subscription_list: adjacent subscriptions in subscriber's subscription list 52 * @timer: timer governing subscription duration (optional)
47 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
48 * @timer_ref: reference to timer governing subscription duration (may be NULL) 54 * @subscription_list: adjacent subscriptions in subscriber's subscription list
49 * @owner: pointer to subscriber object associated with this subscription 55 * @server_ref: object reference of server port associated with subscription
56 * @swap: indicates if subscriber uses opposite endianness in its messages
57 * @evt: template for events generated by subscription
50 */ 58 */
51 59
52struct subscription { 60struct subscription {
53 struct tipc_name_seq seq; 61 struct tipc_name_seq seq;
54 u32 timeout; 62 u32 timeout;
55 u32 filter; 63 u32 filter;
56 struct tipc_event evt; 64 tipc_subscr_event event_cb;
57 struct list_head subscription_list;
58 struct list_head nameseq_list;
59 struct timer_list timer; 65 struct timer_list timer;
60 struct subscriber *owner; 66 struct list_head nameseq_list;
67 struct list_head subscription_list;
68 u32 server_ref;
69 int swap;
70 struct tipc_event evt;
61}; 71};
62 72
63int tipc_subscr_overlap(struct subscription * sub, 73int tipc_subscr_overlap(struct subscription *sub,
64 u32 found_lower, 74 u32 found_lower,
65 u32 found_upper); 75 u32 found_upper);
66 76
67void tipc_subscr_report_overlap(struct subscription * sub, 77void tipc_subscr_report_overlap(struct subscription *sub,
68 u32 found_lower, 78 u32 found_lower,
69 u32 found_upper, 79 u32 found_upper,
70 u32 event, 80 u32 event,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e18cd3628db4..392e80e3268d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,8 +8,6 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12 *
13 * Fixes: 11 * Fixes:
14 * Linus Torvalds : Assorted bug cures. 12 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support. 13 * Niibe Yutaka : async I/O support.
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 9ab31a3ce3ad..b210a88d0960 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -350,9 +350,9 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
350 * o execute requested action or pass command to the device driver 350 * o execute requested action or pass command to the device driver
351 */ 351 */
352 352
353int wanrouter_ioctl(struct inode *inode, struct file *file, 353long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
354 unsigned int cmd, unsigned long arg)
355{ 354{
355 struct inode *inode = file->f_path.dentry->d_inode;
356 int err = 0; 356 int err = 0;
357 struct proc_dir_entry *dent; 357 struct proc_dir_entry *dent;
358 struct wan_device *wandev; 358 struct wan_device *wandev;
@@ -372,6 +372,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
372 if (wandev->magic != ROUTER_MAGIC) 372 if (wandev->magic != ROUTER_MAGIC)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 lock_kernel();
375 switch (cmd) { 376 switch (cmd) {
376 case ROUTER_SETUP: 377 case ROUTER_SETUP:
377 err = wanrouter_device_setup(wandev, data); 378 err = wanrouter_device_setup(wandev, data);
@@ -403,6 +404,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
403 err = wandev->ioctl(wandev, cmd, arg); 404 err = wandev->ioctl(wandev, cmd, arg);
404 else err = -EINVAL; 405 else err = -EINVAL;
405 } 406 }
407 unlock_kernel();
406 return err; 408 return err;
407} 409}
408 410
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 5bebe40bf4e6..267f7ff49827 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -278,7 +278,7 @@ static const struct file_operations wandev_fops = {
278 .read = seq_read, 278 .read = seq_read,
279 .llseek = seq_lseek, 279 .llseek = seq_lseek,
280 .release = single_release, 280 .release = single_release,
281 .ioctl = wanrouter_ioctl, 281 .unlocked_ioctl = wanrouter_ioctl,
282}; 282};
283 283
284/* 284/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 80afacdae46c..f1da0b93bc56 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -143,8 +143,11 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv)
143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, 143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
144 char *newname) 144 char *newname)
145{ 145{
146 struct cfg80211_registered_device *drv;
146 int idx, taken = -1, result, digits; 147 int idx, taken = -1, result, digits;
147 148
149 mutex_lock(&cfg80211_drv_mutex);
150
148 /* prohibit calling the thing phy%d when %d is not its number */ 151 /* prohibit calling the thing phy%d when %d is not its number */
149 sscanf(newname, PHY_NAME "%d%n", &idx, &taken); 152 sscanf(newname, PHY_NAME "%d%n", &idx, &taken);
150 if (taken == strlen(newname) && idx != rdev->idx) { 153 if (taken == strlen(newname) && idx != rdev->idx) {
@@ -156,14 +159,30 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
156 * deny the name if it is phy<idx> where <idx> is printed 159 * deny the name if it is phy<idx> where <idx> is printed
157 * without leading zeroes. taken == strlen(newname) here 160 * without leading zeroes. taken == strlen(newname) here
158 */ 161 */
162 result = -EINVAL;
159 if (taken == strlen(PHY_NAME) + digits) 163 if (taken == strlen(PHY_NAME) + digits)
160 return -EINVAL; 164 goto out_unlock;
165 }
166
167
168 /* Ignore nop renames */
169 result = 0;
170 if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
171 goto out_unlock;
172
173 /* Ensure another device does not already have this name. */
174 list_for_each_entry(drv, &cfg80211_drv_list, list) {
175 result = -EINVAL;
176 if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
177 goto out_unlock;
161 } 178 }
162 179
163 /* this will check for collisions */ 180 /* this will only check for collisions in sysfs
181 * which is not even always compiled in.
182 */
164 result = device_rename(&rdev->wiphy.dev, newname); 183 result = device_rename(&rdev->wiphy.dev, newname);
165 if (result) 184 if (result)
166 return result; 185 goto out_unlock;
167 186
168 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 187 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
169 rdev->wiphy.debugfsdir, 188 rdev->wiphy.debugfsdir,
@@ -172,9 +191,13 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
172 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", 191 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
173 newname); 192 newname);
174 193
175 nl80211_notify_dev_rename(rdev); 194 result = 0;
195out_unlock:
196 mutex_unlock(&cfg80211_drv_mutex);
197 if (result == 0)
198 nl80211_notify_dev_rename(rdev);
176 199
177 return 0; 200 return result;
178} 201}
179 202
180/* exported functions */ 203/* exported functions */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 28fbd0b0b568..f591871a7b4f 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -59,23 +59,21 @@ int ieee80211_radiotap_iterator_init(
59 return -EINVAL; 59 return -EINVAL;
60 60
61 /* sanity check for allowed length and radiotap length field */ 61 /* sanity check for allowed length and radiotap length field */
62 if (max_length < le16_to_cpu(get_unaligned(&radiotap_header->it_len))) 62 if (max_length < get_unaligned_le16(&radiotap_header->it_len))
63 return -EINVAL; 63 return -EINVAL;
64 64
65 iterator->rtheader = radiotap_header; 65 iterator->rtheader = radiotap_header;
66 iterator->max_length = le16_to_cpu(get_unaligned( 66 iterator->max_length = get_unaligned_le16(&radiotap_header->it_len);
67 &radiotap_header->it_len));
68 iterator->arg_index = 0; 67 iterator->arg_index = 0;
69 iterator->bitmap_shifter = le32_to_cpu(get_unaligned( 68 iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
70 &radiotap_header->it_present));
71 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); 69 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
72 iterator->this_arg = NULL; 70 iterator->this_arg = NULL;
73 71
74 /* find payload start allowing for extended bitmap(s) */ 72 /* find payload start allowing for extended bitmap(s) */
75 73
76 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { 74 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
77 while (le32_to_cpu(get_unaligned((__le32 *)iterator->arg)) & 75 while (get_unaligned_le32(iterator->arg) &
78 (1<<IEEE80211_RADIOTAP_EXT)) { 76 (1 << IEEE80211_RADIOTAP_EXT)) {
79 iterator->arg += sizeof(u32); 77 iterator->arg += sizeof(u32);
80 78
81 /* 79 /*
@@ -241,8 +239,8 @@ int ieee80211_radiotap_iterator_next(
241 if (iterator->bitmap_shifter & 1) { 239 if (iterator->bitmap_shifter & 1) {
242 /* b31 was set, there is more */ 240 /* b31 was set, there is more */
243 /* move to next u32 bitmap */ 241 /* move to next u32 bitmap */
244 iterator->bitmap_shifter = le32_to_cpu( 242 iterator->bitmap_shifter =
245 get_unaligned(iterator->next_bitmap)); 243 get_unaligned_le32(iterator->next_bitmap);
246 iterator->next_bitmap++; 244 iterator->next_bitmap++;
247 } else 245 } else
248 /* no more bitmaps: end */ 246 /* no more bitmaps: end */