aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-01 17:08:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-01 17:08:52 -0400
commit73287a43cc79ca06629a88d1a199cd283f42456a (patch)
treeacf4456e260115bea77ee31a29f10ce17f0db45c /drivers
parent251df49db3327c64bf917bfdba94491fde2b4ee0 (diff)
parent20074f357da4a637430aec2879c9d864c5d2c23c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: "Highlights (1721 non-merge commits, this has to be a record of some sort): 1) Add 'random' mode to team driver, from Jiri Pirko and Eric Dumazet. 2) Make it so that any driver that supports configuration of multiple MAC addresses can provide the forwarding database add and del calls by providing a default implementation and hooking that up if the driver doesn't have an explicit set of handlers. From Vlad Yasevich. 3) Support GSO segmentation over tunnels and other encapsulating devices such as VXLAN, from Pravin B Shelar. 4) Support L2 GRE tunnels in the flow dissector, from Michael Dalton. 5) Implement Tail Loss Probe (TLP) detection in TCP, from Nandita Dukkipati. 6) In the PHY layer, allow supporting wake-on-lan in situations where the PHY registers have to be written for it to be configured. Use it to support wake-on-lan in mv643xx_eth. From Michael Stapelberg. 7) Significantly improve firewire IPV6 support, from YOSHIFUJI Hideaki. 8) Allow multiple packets to be sent in a single transmission using network coding in batman-adv, from Martin Hundebøll. 9) Add support for T5 cxgb4 chips, from Santosh Rastapur. 10) Generalize the VXLAN forwarding tables so that there is more flexibility in configuring various aspects of the endpoints. From David Stevens. 11) Support RSS and TSO in hardware over GRE tunnels in bnx2x driver, from Dmitry Kravkov. 12) Zero copy support in nfnetlink_queue, from Eric Dumazet and Pablo Neira Ayuso. 13) Start adding networking selftests. 14) In situations of overload on the same AF_PACKET fanout socket, or per-cpu packet receive queue, minimize drop by distributing the load to other cpus/fanouts. From Willem de Bruijn and Eric Dumazet. 15) Add support for new payload offset BPF instruction, from Daniel Borkmann. 16) Convert several drivers over to module_platform_driver(), from Sachin Kamat. 17) Provide a minimal BPF JIT image disassembler userspace tool, from Daniel Borkmann. 
18) Rewrite F-RTO implementation in TCP to match the final specification of it in RFC4138 and RFC5682. From Yuchung Cheng. 19) Provide netlink socket diag of netlink sockets ("Yo dawg, I hear you like netlink, so I implemented netlink dumping of netlink sockets.") From Andrey Vagin. 20) Remove ugly passing of rtnetlink attributes into rtnl_doit functions, from Thomas Graf. 21) Allow userspace to be able to see if a configuration change occurs in the middle of an address or device list dump, from Nicolas Dichtel. 22) Support RFC3168 ECN protection for ipv6 fragments, from Hannes Frederic Sowa. 23) Increase accuracy of packet length used by packet scheduler, from Jason Wang. 24) Beginning set of changes to make ipv4/ipv6 fragment handling more scalable and less susceptible to overload and locking contention, from Jesper Dangaard Brouer. 25) Get rid of using non-type-safe NLMSG_* macros and use nlmsg_*() instead. From Hong Zhiguo. 26) Optimize route usage in IPVS by avoiding reference counting where possible, from Julian Anastasov. 27) Convert IPVS schedulers to RCU, also from Julian Anastasov. 28) Support cpu fanouts in xt_NFQUEUE netfilter target, from Holger Eitzenberger. 29) Network namespace support for nf_log, ebt_log, xt_LOG, ipt_ULOG, nfnetlink_log, and nfnetlink_queue. From Gao feng. 30) Implement RFC3168 ECN protection, from Hannes Frederic Sowa. 31) Support several new r8169 chips, from Hayes Wang. 32) Support tokenized interface identifiers in ipv6, from Daniel Borkmann. 33) Use usbnet_link_change() helper in USB net driver, from Ming Lei. 34) Add 802.1ad vlan offload support, from Patrick McHardy. 35) Support mmap() based netlink communication, also from Patrick McHardy. 36) Support HW timestamping in mlx4 driver, from Amir Vadai. 37) Rationalize AF_PACKET packet timestamping when transmitting, from Willem de Bruijn and Daniel Borkmann. 
38) Bring parity to what's provided by /proc/net/packet socket dumping and the info provided by netlink socket dumping of AF_PACKET sockets. From Nicolas Dichtel. 39) Fix peeking beyond zero sized SKBs in AF_UNIX, from Benjamin Poirier" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits) filter: fix va_list build error af_unix: fix a fatal race with bit fields bnx2x: Prevent memory leak when cnic is absent bnx2x: correct reading of speed capabilities net: sctp: attribute printl with __printf for gcc fmt checks netlink: kconfig: move mmap i/o into netlink kconfig netpoll: convert mutex into a semaphore netlink: Fix skb ref counting. net_sched: act_ipt forward compat with xtables mlx4_en: fix a build error on 32bit arches Revert "bnx2x: allow nvram test to run when device is down" bridge: avoid OOPS if root port not found drivers: net: cpsw: fix kernel warn on cpsw irq enable sh_eth: use random MAC address if no valid one supplied 3c509.c: call SET_NETDEV_DEV for all device types (ISA/ISAPnP/EISA) tg3: fix to append hardware time stamping flags unix/stream: fix peeking with an offset larger than data in queue unix/dgram: fix peeking with an offset larger than data in queue unix/dgram: peek beyond 0-sized skbs openvswitch: Remove unneeded ovs_netdev_get_ifindex() ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/bcma/core.c8
-rw-r--r--drivers/bcma/driver_chipcommon.c5
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c34
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/bcma/scan.c16
-rw-r--r--drivers/bcma/sprom.c1
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c266
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c49
-rw-r--r--drivers/bluetooth/btusb.c418
-rw-r--r--drivers/bluetooth/hci_h4.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c15
-rw-r--r--drivers/connector/cn_proc.c25
-rw-r--r--drivers/connector/connector.c12
-rw-r--r--drivers/dma/ioat/dca.c11
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/net.c462
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c34
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c155
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c113
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h11
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/divert/isdn_divert.c8
-rw-r--r--drivers/isdn/hisax/fsm.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/isdn/mISDN/socket.c1
-rw-r--r--drivers/isdn/sc/init.c4
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/Kconfig18
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c72
-rw-r--r--drivers/net/caif/Kconfig7
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_hsi.c7
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c128
-rw-r--r--drivers/net/caif/caif_shmcore.c747
-rw-r--r--drivers/net/caif/caif_spi.c6
-rw-r--r--drivers/net/caif/caif_spi_slave.c3
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c76
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/mcp251x.c65
-rw-r--r--drivers/net/can/sja1000/ems_pci.c6
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c6
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c4
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/sja1000/plx_pci.c12
-rw-r--r--drivers/net/can/sja1000/sja1000.c126
-rw-r--r--drivers/net/can/sja1000/sja1000.h68
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c10
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c24
-rw-r--r--drivers/net/ethernet/alteon/acenic.c4
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c6
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c7
-rw-r--r--drivers/net/ethernet/amd/sunlance.c9
-rw-r--r--drivers/net/ethernet/apple/macmace.c16
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c24
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c22
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c21
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c21
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c73
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c84
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c368
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h47
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c377
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h91
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c240
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c349
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c126
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h9
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h3
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c912
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h30
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c17
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c80
-rw-r--r--drivers/net/ethernet/cadence/macb.c84
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c819
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c93
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c256
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h98
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c75
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c14
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c74
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c13
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c3
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c9
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c257
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h103
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c177
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c307
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c22
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec.h10
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c (renamed from drivers/net/ethernet/freescale/fec.c)207
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c7
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c176
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c881
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c24
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c28
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c131
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h27
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c240
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c408
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c343
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c134
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c261
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h50
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h60
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c156
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c124
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h52
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c27
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c261
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h53
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h133
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c354
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c29
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1419
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c61
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c18
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c37
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c110
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c179
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c121
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c7
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/marvell/Kconfig6
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c378
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c142
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c14
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c27
-rw-r--r--drivers/net/ethernet/marvell/sky2.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c204
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c235
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c129
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c53
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c32
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c12
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c4
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c12
-rw-r--r--drivers/net/ethernet/neterion/s2io.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c14
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c20
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c64
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c23
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c222
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h129
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c505
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h204
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c107
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c75
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c125
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c104
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c371
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h263
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1954
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c1780
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c255
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c34
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c8
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c295
-rw-r--r--drivers/net/ethernet/renesas/Kconfig3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c410
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h220
-rw-r--r--drivers/net/ethernet/s6gmac.c16
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c267
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/enum.h12
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c17
-rw-r--r--drivers/net/ethernet/sfc/filter.c249
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h97
-rw-r--r--drivers/net/ethernet/sfc/nic.c94
-rw-r--r--drivers/net/ethernet/sfc/ptp.c116
-rw-r--r--drivers/net/ethernet/sfc/rx.c793
-rw-r--r--drivers/net/ethernet/sfc/siena.c25
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis900.c41
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h199
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h81
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c168
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c151
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1340
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c211
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h74
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c13
-rw-r--r--drivers/net/ethernet/sun/sunqe.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c17
-rw-r--r--drivers/net/ethernet/ti/cpsw.c330
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c31
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c54
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c1
-rw-r--r--drivers/net/ethernet/ti/tlan.c5
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c240
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c7
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c21
-rw-r--r--drivers/net/ethernet/via/via-rhine.c17
-rw-r--r--drivers/net/ethernet/via/via-velocity.c15
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c33
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/fddi/defxx.c9
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c151
-rw-r--r--drivers/net/ieee802154/fakehard.c21
-rw-r--r--drivers/net/ieee802154/mrf24j40.c41
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c18
-rw-r--r--drivers/net/irda/bfin_sir.c3
-rw-r--r--drivers/net/irda/nsc-ircc.c6
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c17
-rw-r--r--drivers/net/irda/via-ircc.c6
-rw-r--r--drivers/net/irda/w83977af_ir.c7
-rw-r--r--drivers/net/macvlan.c29
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c12
-rw-r--r--drivers/net/phy/mdio-octeon.c107
-rw-r--r--drivers/net/phy/micrel.c41
-rw-r--r--drivers/net/phy/phy.c66
-rw-r--r--drivers/net/phy/spi_ks8995.c8
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c53
-rw-r--r--drivers/net/team/Kconfig12
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c45
-rw-r--r--drivers/net/team/team_mode_broadcast.c14
-rw-r--r--drivers/net/team/team_mode_random.c71
-rw-r--r--drivers/net/team/team_mode_roundrobin.c36
-rw-r--r--drivers/net/tun.c20
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/ax88179_178a.c12
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c18
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/mcs7830.c6
-rw-r--r--drivers/net/usb/pegasus.c447
-rw-r--r--drivers/net/usb/pegasus.h11
-rw-r--r--drivers/net/usb/sierra_net.c3
-rw-r--r--drivers/net/usb/usbnet.c45
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c5
-rw-r--r--drivers/net/vxlan.c546
-rw-r--r--drivers/net/wireless/adm8211.c3
-rw-r--r--drivers/net/wireless/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c72
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c116
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c41
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h332
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c38
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c92
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h49
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c198
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c177
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c137
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c11
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c85
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/hw.c6
-rw-r--r--drivers/net/wireless/ath/key.c9
-rw-r--r--drivers/net/wireless/ath/reg.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c58
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h49
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c154
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h363
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/b43.h10
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/main.c38
-rw-r--r--drivers/net/wireless/b43/phy_ht.c708
-rw-r--r--drivers/net/wireless/b43/phy_ht.h83
-rw-r--r--drivers/net/wireless/b43/phy_lcn.c5
-rw-r--r--drivers/net/wireless/b43/phy_lp.c16
-rw-r--r--drivers/net/wireless/b43/phy_n.c716
-rw-r--r--drivers/net/wireless/b43/phy_n.h146
-rw-r--r--drivers/net/wireless/b43/radio_2056.c6
-rw-r--r--drivers/net/wireless/b43/radio_2059.c39
-rw-r--r--drivers/net/wireless/b43/radio_2059.h14
-rw-r--r--drivers/net/wireless/b43/sdio.h4
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c101
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h29
-rw-r--r--drivers/net/wireless/b43/tables_phy_lcn.c6
-rw-r--r--drivers/net/wireless/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/b43legacy/main.c9
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c269
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c176
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/btcoex.c497
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/btcoex.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h41
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c80
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h55
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c148
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c794
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2067
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c288
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c369
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h101
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h101
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c594
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c97
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c377
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c40
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c35
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h17
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/Makefile9
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/d11.c162
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c37
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_d11.h145
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h27
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h28
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h14
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c32
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c65
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/iwlegacy/common.c26
-rw-r--r--drivers/net/wireless/iwlegacy/common.h11
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c32
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c58
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c (renamed from drivers/net/wireless/iwlwifi/pcie/1000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c (renamed from drivers/net/wireless/iwlwifi/pcie/2000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c (renamed from drivers/net/wireless/iwlwifi/pcie/5000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c (renamed from drivers/net/wireless/iwlwifi/pcie/6000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c (renamed from drivers/net/wireless/iwlwifi/pcie/7000.c)63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c589
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c267
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c257
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h66
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h83
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h59
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c145
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c48
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c117
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c50
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c99
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c50
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c28
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h115
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c13
-rw-r--r--drivers/net/wireless/libertas_tf/main.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c125
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c43
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h17
-rw-r--r--drivers/net/wireless/mwifiex/11n.c22
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c255
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c39
-rw-r--r--drivers/net/wireless/mwifiex/decl.h11
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c70
-rw-r--r--drivers/net/wireless/mwifiex/fw.h83
-rw-r--r--drivers/net/wireless/mwifiex/init.c33
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h31
-rw-r--r--drivers/net/wireless/mwifiex/join.c23
-rw-r--r--drivers/net/wireless/mwifiex/main.c15
-rw-r--r--drivers/net/wireless/mwifiex/main.h23
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c216
-rw-r--r--drivers/net/wireless/mwifiex/scan.c37
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c83
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c30
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c55
-rw-r--r--drivers/net/wireless/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c208
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h3
-rw-r--r--drivers/net/wireless/mwl8k.c167
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/p54/fwio.c4
-rw-r--r--drivers/net/wireless/p54/main.c6
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c323
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c353
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c1489
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c377
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c111
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h95
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c25
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.c52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c57
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c44
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c550
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c32
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c3
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c383
-rw-r--r--drivers/net/wireless/rtlwifi/base.h14
-rw-r--r--drivers/net/wireless/rtlwifi/core.c221
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c5
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h13
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c53
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c150
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h2
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c330
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/Makefile16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/def.h324
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c1794
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.h326
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.c830
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.h301
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c2530
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.h68
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/led.c157
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/led.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c2202
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.h236
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c109
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h327
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c140
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h97
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/reg.h2258
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/rf.c467
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/rf.h46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c400
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.h36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/table.c643
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/table.h47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c817
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.h795
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c105
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c99
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c118
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c328
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c40
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c49
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c150
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c300
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c88
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c97
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c70
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/led.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c14
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c226
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h5
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h224
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h2
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c25
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h29
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c29
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h16
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c32
-rw-r--r--drivers/net/wireless/ti/wlcore/debug.h33
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c205
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h29
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/xen-netback/netback.c316
-rw-r--r--drivers/net/xen-netfront.c47
-rw-r--r--drivers/nfc/Kconfig10
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/mei_phy.c164
-rw-r--r--drivers/nfc/mei_phy.h30
-rw-r--r--drivers/nfc/microread/Kconfig2
-rw-r--r--drivers/nfc/microread/mei.c139
-rw-r--r--drivers/nfc/pn533.c653
-rw-r--r--drivers/nfc/pn544/Kconfig13
-rw-r--r--drivers/nfc/pn544/Makefile2
-rw-r--r--drivers/nfc/pn544/mei.c121
-rw-r--r--drivers/of/of_mdio.c64
-rw-r--r--drivers/pci/iov.c41
-rw-r--r--drivers/ptp/ptp_clock.c38
-rw-r--r--drivers/ptp/ptp_pch.c29
-rw-r--r--drivers/s390/kvm/virtio_ccw.c6
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/s390/net/qeth_l2_main.c13
-rw-r--r--drivers/s390/net/qeth_l3_main.c33
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h23
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c35
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c22
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c9
-rw-r--r--drivers/scsi/csiostor/Makefile3
-rw-r--r--drivers/scsi/csiostor/csio_hw.c559
-rw-r--r--drivers/scsi/csiostor/csio_hw.h47
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h175
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c403
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c397
-rw-r--r--drivers/scsi/csiostor/csio_init.c48
-rw-r--r--drivers/scsi/csiostor/csio_init.h29
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c10
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c60
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/scsi_netlink.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c21
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c70
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c60
-rw-r--r--drivers/ssb/driver_mipscore.c25
-rw-r--r--drivers/ssb/driver_pcicore.c15
-rw-r--r--drivers/ssb/embedded.c5
-rw-r--r--drivers/ssb/main.c51
-rw-r--r--drivers/ssb/pci.c120
-rw-r--r--drivers/ssb/pcmcia.c46
-rw-r--r--drivers/ssb/scan.c31
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h19
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c12
-rw-r--r--drivers/vhost/net.c74
-rw-r--r--drivers/vhost/vhost.c3
899 files changed, 58235 insertions, 19130 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index d6891267f5bb..507362a76a73 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1055,7 +1055,7 @@ static int he_start(struct atm_dev *dev)
1055 he_writel(he_dev, 0x0, RESET_CNTL); 1055 he_writel(he_dev, 0x0, RESET_CNTL);
1056 he_writel(he_dev, 0xff, RESET_CNTL); 1056 he_writel(he_dev, 0xff, RESET_CNTL);
1057 1057
1058 udelay(16*1000); /* 16 ms */ 1058 msleep(16); /* 16 ms */
1059 status = he_readl(he_dev, RESET_CNTL); 1059 status = he_readl(he_dev, RESET_CNTL);
1060 if ((status & BOARD_RST_STATUS) == 0) { 1060 if ((status & BOARD_RST_STATUS) == 0) {
1061 hprintk("reset failed\n"); 1061 hprintk("reset failed\n");
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 03bbe104338f..17b26ce7e051 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -104,7 +104,13 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
104 if (i) 104 if (i)
105 bcma_err(core->bus, "PLL enable timeout\n"); 105 bcma_err(core->bus, "PLL enable timeout\n");
106 } else { 106 } else {
107 bcma_warn(core->bus, "Disabling PLL not supported yet!\n"); 107 /*
108 * Mask the PLL but don't wait for it to be disabled. PLL may be
109 * shared between cores and will be still up if there is another
110 * core using it.
111 */
112 bcma_mask32(core, BCMA_CLKCTLST, ~req);
113 bcma_read32(core, BCMA_CLKCTLST);
108 } 114 }
109} 115}
110EXPORT_SYMBOL_GPL(bcma_core_pll_ctl); 116EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 28fa50ad87be..036c6744b39b 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -25,13 +25,14 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
25 return value; 25 return value;
26} 26}
27 27
28static u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc) 28u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
29{ 29{
30 if (cc->capabilities & BCMA_CC_CAP_PMU) 30 if (cc->capabilities & BCMA_CC_CAP_PMU)
31 return bcma_pmu_get_alp_clock(cc); 31 return bcma_pmu_get_alp_clock(cc);
32 32
33 return 20000000; 33 return 20000000;
34} 34}
35EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);
35 36
36static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc) 37static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
37{ 38{
@@ -213,6 +214,7 @@ u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
213 214
214 return res; 215 return res;
215} 216}
217EXPORT_SYMBOL_GPL(bcma_chipco_gpio_out);
216 218
217u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value) 219u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
218{ 220{
@@ -225,6 +227,7 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
225 227
226 return res; 228 return res;
227} 229}
230EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);
228 231
229/* 232/*
230 * If the bit is set to 0, chipcommon controlls this GPIO, 233 * If the bit is set to 0, chipcommon controlls this GPIO,
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 932b101dee36..edca73af3cc0 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -174,19 +174,35 @@ u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc)
174 struct bcma_bus *bus = cc->core->bus; 174 struct bcma_bus *bus = cc->core->bus;
175 175
176 switch (bus->chipinfo.id) { 176 switch (bus->chipinfo.id) {
177 case BCMA_CHIP_ID_BCM4313:
178 case BCMA_CHIP_ID_BCM43224:
179 case BCMA_CHIP_ID_BCM43225:
180 case BCMA_CHIP_ID_BCM43227:
181 case BCMA_CHIP_ID_BCM43228:
182 case BCMA_CHIP_ID_BCM4331:
183 case BCMA_CHIP_ID_BCM43421:
184 case BCMA_CHIP_ID_BCM43428:
185 case BCMA_CHIP_ID_BCM43431:
177 case BCMA_CHIP_ID_BCM4716: 186 case BCMA_CHIP_ID_BCM4716:
178 case BCMA_CHIP_ID_BCM4748:
179 case BCMA_CHIP_ID_BCM47162: 187 case BCMA_CHIP_ID_BCM47162:
180 case BCMA_CHIP_ID_BCM4313: 188 case BCMA_CHIP_ID_BCM4748:
181 case BCMA_CHIP_ID_BCM5357:
182 case BCMA_CHIP_ID_BCM4749: 189 case BCMA_CHIP_ID_BCM4749:
190 case BCMA_CHIP_ID_BCM5357:
183 case BCMA_CHIP_ID_BCM53572: 191 case BCMA_CHIP_ID_BCM53572:
192 case BCMA_CHIP_ID_BCM6362:
184 /* always 20Mhz */ 193 /* always 20Mhz */
185 return 20000 * 1000; 194 return 20000 * 1000;
186 case BCMA_CHIP_ID_BCM5356:
187 case BCMA_CHIP_ID_BCM4706: 195 case BCMA_CHIP_ID_BCM4706:
196 case BCMA_CHIP_ID_BCM5356:
188 /* always 25Mhz */ 197 /* always 25Mhz */
189 return 25000 * 1000; 198 return 25000 * 1000;
199 case BCMA_CHIP_ID_BCM43460:
200 case BCMA_CHIP_ID_BCM4352:
201 case BCMA_CHIP_ID_BCM4360:
202 if (cc->status & BCMA_CC_CHIPST_4360_XTAL_40MZ)
203 return 40000 * 1000;
204 else
205 return 20000 * 1000;
190 default: 206 default:
191 bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n", 207 bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
192 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK); 208 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
@@ -373,7 +389,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
373 tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT; 389 tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
374 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp); 390 bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
375 391
376 tmp = 1 << 10; 392 tmp = BCMA_CC_PMU_CTL_PLL_UPD;
377 break; 393 break;
378 394
379 case BCMA_CHIP_ID_BCM4331: 395 case BCMA_CHIP_ID_BCM4331:
@@ -394,7 +410,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
394 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2, 410 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
395 0x03000a08); 411 0x03000a08);
396 } 412 }
397 tmp = 1 << 10; 413 tmp = BCMA_CC_PMU_CTL_PLL_UPD;
398 break; 414 break;
399 415
400 case BCMA_CHIP_ID_BCM43224: 416 case BCMA_CHIP_ID_BCM43224:
@@ -427,7 +443,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
427 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5, 443 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
428 0x88888815); 444 0x88888815);
429 } 445 }
430 tmp = 1 << 10; 446 tmp = BCMA_CC_PMU_CTL_PLL_UPD;
431 break; 447 break;
432 448
433 case BCMA_CHIP_ID_BCM4716: 449 case BCMA_CHIP_ID_BCM4716:
@@ -461,7 +477,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
461 0x88888815); 477 0x88888815);
462 } 478 }
463 479
464 tmp = 3 << 9; 480 tmp = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW;
465 break; 481 break;
466 482
467 case BCMA_CHIP_ID_BCM43227: 483 case BCMA_CHIP_ID_BCM43227:
@@ -497,7 +513,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
497 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5, 513 bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
498 0x88888815); 514 0x88888815);
499 } 515 }
500 tmp = 1 << 10; 516 tmp = BCMA_CC_PMU_CTL_PLL_UPD;
501 break; 517 break;
502 default: 518 default:
503 bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", 519 bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9a6188add590..f72f52b4b1dd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -120,6 +120,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
120 continue; 120 continue;
121 } 121 }
122 122
123 /* Only first GMAC core on BCM4706 is connected and working */
124 if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
125 core->core_unit > 0)
126 continue;
127
123 core->dev.release = bcma_release_core_dev; 128 core->dev.release = bcma_release_core_dev;
124 core->dev.bus = &bcma_bus_type; 129 core->dev.bus = &bcma_bus_type;
125 dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id); 130 dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 8d0b57164018..bca9c80056fe 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -137,19 +137,19 @@ static void bcma_scan_switch_core(struct bcma_bus *bus, u32 addr)
137 addr); 137 addr);
138} 138}
139 139
140static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 **eromptr) 140static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 __iomem **eromptr)
141{ 141{
142 u32 ent = readl(*eromptr); 142 u32 ent = readl(*eromptr);
143 (*eromptr)++; 143 (*eromptr)++;
144 return ent; 144 return ent;
145} 145}
146 146
147static void bcma_erom_push_ent(u32 **eromptr) 147static void bcma_erom_push_ent(u32 __iomem **eromptr)
148{ 148{
149 (*eromptr)--; 149 (*eromptr)--;
150} 150}
151 151
152static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 **eromptr) 152static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 __iomem **eromptr)
153{ 153{
154 u32 ent = bcma_erom_get_ent(bus, eromptr); 154 u32 ent = bcma_erom_get_ent(bus, eromptr);
155 if (!(ent & SCAN_ER_VALID)) 155 if (!(ent & SCAN_ER_VALID))
@@ -159,14 +159,14 @@ static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 **eromptr)
159 return ent; 159 return ent;
160} 160}
161 161
162static bool bcma_erom_is_end(struct bcma_bus *bus, u32 **eromptr) 162static bool bcma_erom_is_end(struct bcma_bus *bus, u32 __iomem **eromptr)
163{ 163{
164 u32 ent = bcma_erom_get_ent(bus, eromptr); 164 u32 ent = bcma_erom_get_ent(bus, eromptr);
165 bcma_erom_push_ent(eromptr); 165 bcma_erom_push_ent(eromptr);
166 return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID)); 166 return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID));
167} 167}
168 168
169static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 **eromptr) 169static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 __iomem **eromptr)
170{ 170{
171 u32 ent = bcma_erom_get_ent(bus, eromptr); 171 u32 ent = bcma_erom_get_ent(bus, eromptr);
172 bcma_erom_push_ent(eromptr); 172 bcma_erom_push_ent(eromptr);
@@ -175,7 +175,7 @@ static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 **eromptr)
175 ((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE)); 175 ((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE));
176} 176}
177 177
178static void bcma_erom_skip_component(struct bcma_bus *bus, u32 **eromptr) 178static void bcma_erom_skip_component(struct bcma_bus *bus, u32 __iomem **eromptr)
179{ 179{
180 u32 ent; 180 u32 ent;
181 while (1) { 181 while (1) {
@@ -189,7 +189,7 @@ static void bcma_erom_skip_component(struct bcma_bus *bus, u32 **eromptr)
189 bcma_erom_push_ent(eromptr); 189 bcma_erom_push_ent(eromptr);
190} 190}
191 191
192static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 **eromptr) 192static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
193{ 193{
194 u32 ent = bcma_erom_get_ent(bus, eromptr); 194 u32 ent = bcma_erom_get_ent(bus, eromptr);
195 if (!(ent & SCAN_ER_VALID)) 195 if (!(ent & SCAN_ER_VALID))
@@ -199,7 +199,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 **eromptr)
199 return ent; 199 return ent;
200} 200}
201 201
202static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 **eromptr, 202static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
203 u32 type, u8 port) 203 u32 type, u8 port)
204{ 204{
205 u32 addrl, addrh, sizel, sizeh = 0; 205 u32 addrl, addrh, sizel, sizeh = 0;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 4adf9ef9a113..8934298a638d 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -217,6 +217,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
217 } 217 }
218 218
219 SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0); 219 SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0);
220 SPEX(board_type, SSB_SPROM1_SPID, ~0, 0);
220 221
221 SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0, 222 SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0,
222 SSB_SPROM4_TXPID2G0_SHIFT); 223 SSB_SPROM4_TXPID2G0_SHIFT);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6aab00ef4379..11f467c00d0a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,6 +90,7 @@ static struct usb_device_id ath3k_table[] = {
90 { USB_DEVICE(0x13d3, 0x3393) }, 90 { USB_DEVICE(0x13d3, 0x3393) },
91 { USB_DEVICE(0x0489, 0xe04e) }, 91 { USB_DEVICE(0x0489, 0xe04e) },
92 { USB_DEVICE(0x0489, 0xe056) }, 92 { USB_DEVICE(0x0489, 0xe056) },
93 { USB_DEVICE(0x0489, 0xe04d) },
93 94
94 /* Atheros AR5BBU12 with sflash firmware */ 95 /* Atheros AR5BBU12 with sflash firmware */
95 { USB_DEVICE(0x0489, 0xE02C) }, 96 { USB_DEVICE(0x0489, 0xE02C) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
126 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 127 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
127 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 128 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
128 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 129 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
129 131
130 /* Atheros AR5BBU22 with sflash firmware */ 132 /* Atheros AR5BBU22 with sflash firmware */
131 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index 428dbb7574bd..db2c3c305df8 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -29,20 +29,6 @@
29struct btmrvl_debugfs_data { 29struct btmrvl_debugfs_data {
30 struct dentry *config_dir; 30 struct dentry *config_dir;
31 struct dentry *status_dir; 31 struct dentry *status_dir;
32
33 /* config */
34 struct dentry *psmode;
35 struct dentry *pscmd;
36 struct dentry *hsmode;
37 struct dentry *hscmd;
38 struct dentry *gpiogap;
39 struct dentry *hscfgcmd;
40
41 /* status */
42 struct dentry *curpsmode;
43 struct dentry *hsstate;
44 struct dentry *psstate;
45 struct dentry *txdnldready;
46}; 32};
47 33
48static ssize_t btmrvl_hscfgcmd_write(struct file *file, 34static ssize_t btmrvl_hscfgcmd_write(struct file *file,
@@ -91,47 +77,6 @@ static const struct file_operations btmrvl_hscfgcmd_fops = {
91 .llseek = default_llseek, 77 .llseek = default_llseek,
92}; 78};
93 79
94static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
95 size_t count, loff_t *ppos)
96{
97 struct btmrvl_private *priv = file->private_data;
98 char buf[16];
99 long result, ret;
100
101 memset(buf, 0, sizeof(buf));
102
103 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
104 return -EFAULT;
105
106 ret = strict_strtol(buf, 10, &result);
107 if (ret)
108 return ret;
109
110 priv->btmrvl_dev.psmode = result;
111
112 return count;
113}
114
115static ssize_t btmrvl_psmode_read(struct file *file, char __user *userbuf,
116 size_t count, loff_t *ppos)
117{
118 struct btmrvl_private *priv = file->private_data;
119 char buf[16];
120 int ret;
121
122 ret = snprintf(buf, sizeof(buf) - 1, "%d\n",
123 priv->btmrvl_dev.psmode);
124
125 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
126}
127
128static const struct file_operations btmrvl_psmode_fops = {
129 .read = btmrvl_psmode_read,
130 .write = btmrvl_psmode_write,
131 .open = simple_open,
132 .llseek = default_llseek,
133};
134
135static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, 80static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
136 size_t count, loff_t *ppos) 81 size_t count, loff_t *ppos)
137{ 82{
@@ -178,47 +123,6 @@ static const struct file_operations btmrvl_pscmd_fops = {
178 .llseek = default_llseek, 123 .llseek = default_llseek,
179}; 124};
180 125
181static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
182 size_t count, loff_t *ppos)
183{
184 struct btmrvl_private *priv = file->private_data;
185 char buf[16];
186 long result, ret;
187
188 memset(buf, 0, sizeof(buf));
189
190 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
191 return -EFAULT;
192
193 ret = strict_strtol(buf, 16, &result);
194 if (ret)
195 return ret;
196
197 priv->btmrvl_dev.gpio_gap = result;
198
199 return count;
200}
201
202static ssize_t btmrvl_gpiogap_read(struct file *file, char __user *userbuf,
203 size_t count, loff_t *ppos)
204{
205 struct btmrvl_private *priv = file->private_data;
206 char buf[16];
207 int ret;
208
209 ret = snprintf(buf, sizeof(buf) - 1, "0x%x\n",
210 priv->btmrvl_dev.gpio_gap);
211
212 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
213}
214
215static const struct file_operations btmrvl_gpiogap_fops = {
216 .read = btmrvl_gpiogap_read,
217 .write = btmrvl_gpiogap_write,
218 .open = simple_open,
219 .llseek = default_llseek,
220};
221
222static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, 126static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
223 size_t count, loff_t *ppos) 127 size_t count, loff_t *ppos)
224{ 128{
@@ -263,119 +167,6 @@ static const struct file_operations btmrvl_hscmd_fops = {
263 .llseek = default_llseek, 167 .llseek = default_llseek,
264}; 168};
265 169
266static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
267 size_t count, loff_t *ppos)
268{
269 struct btmrvl_private *priv = file->private_data;
270 char buf[16];
271 long result, ret;
272
273 memset(buf, 0, sizeof(buf));
274
275 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
276 return -EFAULT;
277
278 ret = strict_strtol(buf, 10, &result);
279 if (ret)
280 return ret;
281
282 priv->btmrvl_dev.hsmode = result;
283
284 return count;
285}
286
287static ssize_t btmrvl_hsmode_read(struct file *file, char __user * userbuf,
288 size_t count, loff_t *ppos)
289{
290 struct btmrvl_private *priv = file->private_data;
291 char buf[16];
292 int ret;
293
294 ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hsmode);
295
296 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
297}
298
299static const struct file_operations btmrvl_hsmode_fops = {
300 .read = btmrvl_hsmode_read,
301 .write = btmrvl_hsmode_write,
302 .open = simple_open,
303 .llseek = default_llseek,
304};
305
306static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf,
307 size_t count, loff_t *ppos)
308{
309 struct btmrvl_private *priv = file->private_data;
310 char buf[16];
311 int ret;
312
313 ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->psmode);
314
315 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
316}
317
318static const struct file_operations btmrvl_curpsmode_fops = {
319 .read = btmrvl_curpsmode_read,
320 .open = simple_open,
321 .llseek = default_llseek,
322};
323
324static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf,
325 size_t count, loff_t *ppos)
326{
327 struct btmrvl_private *priv = file->private_data;
328 char buf[16];
329 int ret;
330
331 ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->ps_state);
332
333 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
334}
335
336static const struct file_operations btmrvl_psstate_fops = {
337 .read = btmrvl_psstate_read,
338 .open = simple_open,
339 .llseek = default_llseek,
340};
341
342static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf,
343 size_t count, loff_t *ppos)
344{
345 struct btmrvl_private *priv = file->private_data;
346 char buf[16];
347 int ret;
348
349 ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->hs_state);
350
351 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
352}
353
354static const struct file_operations btmrvl_hsstate_fops = {
355 .read = btmrvl_hsstate_read,
356 .open = simple_open,
357 .llseek = default_llseek,
358};
359
360static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf,
361 size_t count, loff_t *ppos)
362{
363 struct btmrvl_private *priv = file->private_data;
364 char buf[16];
365 int ret;
366
367 ret = snprintf(buf, sizeof(buf) - 1, "%d\n",
368 priv->btmrvl_dev.tx_dnld_rdy);
369
370 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
371}
372
373static const struct file_operations btmrvl_txdnldready_fops = {
374 .read = btmrvl_txdnldready_read,
375 .open = simple_open,
376 .llseek = default_llseek,
377};
378
379void btmrvl_debugfs_init(struct hci_dev *hdev) 170void btmrvl_debugfs_init(struct hci_dev *hdev)
380{ 171{
381 struct btmrvl_private *priv = hci_get_drvdata(hdev); 172 struct btmrvl_private *priv = hci_get_drvdata(hdev);
@@ -394,30 +185,28 @@ void btmrvl_debugfs_init(struct hci_dev *hdev)
394 185
395 dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); 186 dbg->config_dir = debugfs_create_dir("config", hdev->debugfs);
396 187
397 dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir, 188 debugfs_create_u8("psmode", 0644, dbg->config_dir,
398 priv, &btmrvl_psmode_fops); 189 &priv->btmrvl_dev.psmode);
399 dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir, 190 debugfs_create_file("pscmd", 0644, dbg->config_dir,
400 priv, &btmrvl_pscmd_fops); 191 priv, &btmrvl_pscmd_fops);
401 dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir, 192 debugfs_create_x16("gpiogap", 0644, dbg->config_dir,
402 priv, &btmrvl_gpiogap_fops); 193 &priv->btmrvl_dev.gpio_gap);
403 dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir, 194 debugfs_create_u8("hsmode", 0644, dbg->config_dir,
404 priv, &btmrvl_hsmode_fops); 195 &priv->btmrvl_dev.hsmode);
405 dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir, 196 debugfs_create_file("hscmd", 0644, dbg->config_dir,
406 priv, &btmrvl_hscmd_fops); 197 priv, &btmrvl_hscmd_fops);
407 dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, 198 debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
408 priv, &btmrvl_hscfgcmd_fops); 199 priv, &btmrvl_hscfgcmd_fops);
409 200
410 dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); 201 dbg->status_dir = debugfs_create_dir("status", hdev->debugfs);
411 dbg->curpsmode = debugfs_create_file("curpsmode", 0444, 202 debugfs_create_u8("curpsmode", 0444, dbg->status_dir,
412 dbg->status_dir, priv, 203 &priv->adapter->psmode);
413 &btmrvl_curpsmode_fops); 204 debugfs_create_u8("psstate", 0444, dbg->status_dir,
414 dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir, 205 &priv->adapter->ps_state);
415 priv, &btmrvl_psstate_fops); 206 debugfs_create_u8("hsstate", 0444, dbg->status_dir,
416 dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir, 207 &priv->adapter->hs_state);
417 priv, &btmrvl_hsstate_fops); 208 debugfs_create_u8("txdnldready", 0444, dbg->status_dir,
418 dbg->txdnldready = debugfs_create_file("txdnldready", 0444, 209 &priv->btmrvl_dev.tx_dnld_rdy);
419 dbg->status_dir, priv,
420 &btmrvl_txdnldready_fops);
421} 210}
422 211
423void btmrvl_debugfs_remove(struct hci_dev *hdev) 212void btmrvl_debugfs_remove(struct hci_dev *hdev)
@@ -428,19 +217,8 @@ void btmrvl_debugfs_remove(struct hci_dev *hdev)
428 if (!dbg) 217 if (!dbg)
429 return; 218 return;
430 219
431 debugfs_remove(dbg->psmode); 220 debugfs_remove_recursive(dbg->config_dir);
432 debugfs_remove(dbg->pscmd); 221 debugfs_remove_recursive(dbg->status_dir);
433 debugfs_remove(dbg->gpiogap);
434 debugfs_remove(dbg->hsmode);
435 debugfs_remove(dbg->hscmd);
436 debugfs_remove(dbg->hscfgcmd);
437 debugfs_remove(dbg->config_dir);
438
439 debugfs_remove(dbg->curpsmode);
440 debugfs_remove(dbg->psstate);
441 debugfs_remove(dbg->hsstate);
442 debugfs_remove(dbg->txdnldready);
443 debugfs_remove(dbg->status_dir);
444 222
445 kfree(dbg); 223 kfree(dbg);
446} 224}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9959d4cb23dc..c63488c54f4a 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -83,8 +83,8 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
83}; 83};
84 84
85static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 85static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
86 .helper = "sd8688_helper.bin", 86 .helper = "mrvl/sd8688_helper.bin",
87 .firmware = "sd8688.bin", 87 .firmware = "mrvl/sd8688.bin",
88 .reg = &btmrvl_reg_8688, 88 .reg = &btmrvl_reg_8688,
89 .sd_blksz_fw_dl = 64, 89 .sd_blksz_fw_dl = 64,
90}; 90};
@@ -228,24 +228,24 @@ failed:
228static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, 228static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
229 int pollnum) 229 int pollnum)
230{ 230{
231 int ret = -ETIMEDOUT;
232 u16 firmwarestat; 231 u16 firmwarestat;
233 unsigned int tries; 232 int tries, ret;
234 233
235 /* Wait for firmware to become ready */ 234 /* Wait for firmware to become ready */
236 for (tries = 0; tries < pollnum; tries++) { 235 for (tries = 0; tries < pollnum; tries++) {
237 if (btmrvl_sdio_read_fw_status(card, &firmwarestat) < 0) 236 sdio_claim_host(card->func);
237 ret = btmrvl_sdio_read_fw_status(card, &firmwarestat);
238 sdio_release_host(card->func);
239 if (ret < 0)
238 continue; 240 continue;
239 241
240 if (firmwarestat == FIRMWARE_READY) { 242 if (firmwarestat == FIRMWARE_READY)
241 ret = 0; 243 return 0;
242 break; 244
243 } else { 245 msleep(10);
244 msleep(10);
245 }
246 } 246 }
247 247
248 return ret; 248 return -ETIMEDOUT;
249} 249}
250 250
251static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) 251static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
@@ -874,7 +874,7 @@ exit:
874 874
875static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) 875static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
876{ 876{
877 int ret = 0; 877 int ret;
878 u8 fws0; 878 u8 fws0;
879 int pollnum = MAX_POLL_TRIES; 879 int pollnum = MAX_POLL_TRIES;
880 880
@@ -882,13 +882,14 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
882 BT_ERR("card or function is NULL!"); 882 BT_ERR("card or function is NULL!");
883 return -EINVAL; 883 return -EINVAL;
884 } 884 }
885 sdio_claim_host(card->func);
886 885
887 if (!btmrvl_sdio_verify_fw_download(card, 1)) { 886 if (!btmrvl_sdio_verify_fw_download(card, 1)) {
888 BT_DBG("Firmware already downloaded!"); 887 BT_DBG("Firmware already downloaded!");
889 goto done; 888 return 0;
890 } 889 }
891 890
891 sdio_claim_host(card->func);
892
892 /* Check if other function driver is downloading the firmware */ 893 /* Check if other function driver is downloading the firmware */
893 fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); 894 fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret);
894 if (ret) { 895 if (ret) {
@@ -918,15 +919,21 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
918 } 919 }
919 } 920 }
920 921
922 sdio_release_host(card->func);
923
924 /*
925 * winner or not, with this test the FW synchronizes when the
926 * module can continue its initialization
927 */
921 if (btmrvl_sdio_verify_fw_download(card, pollnum)) { 928 if (btmrvl_sdio_verify_fw_download(card, pollnum)) {
922 BT_ERR("FW failed to be active in time!"); 929 BT_ERR("FW failed to be active in time!");
923 ret = -ETIMEDOUT; 930 return -ETIMEDOUT;
924 goto done;
925 } 931 }
926 932
933 return 0;
934
927done: 935done:
928 sdio_release_host(card->func); 936 sdio_release_host(card->func);
929
930 return ret; 937 return ret;
931} 938}
932 939
@@ -989,8 +996,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
989 goto unreg_dev; 996 goto unreg_dev;
990 } 997 }
991 998
992 msleep(100);
993
994 btmrvl_sdio_enable_host_int(card); 999 btmrvl_sdio_enable_host_int(card);
995 1000
996 priv = btmrvl_add_card(card); 1001 priv = btmrvl_add_card(card);
@@ -1185,7 +1190,7 @@ MODULE_AUTHOR("Marvell International Ltd.");
1185MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); 1190MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION);
1186MODULE_VERSION(VERSION); 1191MODULE_VERSION(VERSION);
1187MODULE_LICENSE("GPL v2"); 1192MODULE_LICENSE("GPL v2");
1188MODULE_FIRMWARE("sd8688_helper.bin"); 1193MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
1189MODULE_FIRMWARE("sd8688.bin"); 1194MODULE_FIRMWARE("mrvl/sd8688.bin");
1190MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1195MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1191MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); 1196MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2cc5f774a29c..7a7e5f8ecadc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/usb.h> 25#include <linux/usb.h>
26#include <linux/firmware.h>
26 27
27#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
28#include <net/bluetooth/hci_core.h> 29#include <net/bluetooth/hci_core.h>
@@ -47,6 +48,7 @@ static struct usb_driver btusb_driver;
47#define BTUSB_BROKEN_ISOC 0x20 48#define BTUSB_BROKEN_ISOC 0x20
48#define BTUSB_WRONG_SCO_MTU 0x40 49#define BTUSB_WRONG_SCO_MTU 0x40
49#define BTUSB_ATH3012 0x80 50#define BTUSB_ATH3012 0x80
51#define BTUSB_INTEL 0x100
50 52
51static struct usb_device_id btusb_table[] = { 53static struct usb_device_id btusb_table[] = {
52 /* Generic Bluetooth USB device */ 54 /* Generic Bluetooth USB device */
@@ -148,6 +150,7 @@ static struct usb_device_id blacklist_table[] = {
148 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 150 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
149 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 151 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
150 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
151 154
152 /* Atheros AR5BBU12 with sflash firmware */ 155 /* Atheros AR5BBU12 with sflash firmware */
153 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 156 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -206,6 +209,9 @@ static struct usb_device_id blacklist_table[] = {
206 /* Frontline ComProbe Bluetooth Sniffer */ 209 /* Frontline ComProbe Bluetooth Sniffer */
207 { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER }, 210 { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER },
208 211
212 /* Intel Bluetooth device */
213 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
214
209 { } /* Terminating entry */ 215 { } /* Terminating entry */
210}; 216};
211 217
@@ -926,6 +932,391 @@ static void btusb_waker(struct work_struct *work)
926 usb_autopm_put_interface(data->intf); 932 usb_autopm_put_interface(data->intf);
927} 933}
928 934
935static int btusb_setup_bcm92035(struct hci_dev *hdev)
936{
937 struct sk_buff *skb;
938 u8 val = 0x00;
939
940 BT_DBG("%s", hdev->name);
941
942 skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
943 if (IS_ERR(skb))
944 BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb));
945 else
946 kfree_skb(skb);
947
948 return 0;
949}
950
951struct intel_version {
952 u8 status;
953 u8 hw_platform;
954 u8 hw_variant;
955 u8 hw_revision;
956 u8 fw_variant;
957 u8 fw_revision;
958 u8 fw_build_num;
959 u8 fw_build_ww;
960 u8 fw_build_yy;
961 u8 fw_patch_num;
962} __packed;
963
964static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
965 struct intel_version *ver)
966{
967 const struct firmware *fw;
968 char fwname[64];
969 int ret;
970
971 snprintf(fwname, sizeof(fwname),
972 "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.bseq",
973 ver->hw_platform, ver->hw_variant, ver->hw_revision,
974 ver->fw_variant, ver->fw_revision, ver->fw_build_num,
975 ver->fw_build_ww, ver->fw_build_yy);
976
977 ret = request_firmware(&fw, fwname, &hdev->dev);
978 if (ret < 0) {
979 if (ret == -EINVAL) {
980 BT_ERR("%s Intel firmware file request failed (%d)",
981 hdev->name, ret);
982 return NULL;
983 }
984
985 BT_ERR("%s failed to open Intel firmware file: %s(%d)",
986 hdev->name, fwname, ret);
987
988 /* If the correct firmware patch file is not found, use the
989 * default firmware patch file instead
990 */
991 snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq",
992 ver->hw_platform, ver->hw_variant);
993 if (request_firmware(&fw, fwname, &hdev->dev) < 0) {
994 BT_ERR("%s failed to open default Intel fw file: %s",
995 hdev->name, fwname);
996 return NULL;
997 }
998 }
999
1000 BT_INFO("%s: Intel Bluetooth firmware file: %s", hdev->name, fwname);
1001
1002 return fw;
1003}
1004
1005static int btusb_setup_intel_patching(struct hci_dev *hdev,
1006 const struct firmware *fw,
1007 const u8 **fw_ptr, int *disable_patch)
1008{
1009 struct sk_buff *skb;
1010 struct hci_command_hdr *cmd;
1011 const u8 *cmd_param;
1012 struct hci_event_hdr *evt = NULL;
1013 const u8 *evt_param = NULL;
1014 int remain = fw->size - (*fw_ptr - fw->data);
1015
1016 /* The first byte indicates the types of the patch command or event.
1017 * 0x01 means HCI command and 0x02 is HCI event. If the first bytes
1018 * in the current firmware buffer doesn't start with 0x01 or
1019 * the size of remain buffer is smaller than HCI command header,
1020 * the firmware file is corrupted and it should stop the patching
1021 * process.
1022 */
1023 if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) {
1024 BT_ERR("%s Intel fw corrupted: invalid cmd read", hdev->name);
1025 return -EINVAL;
1026 }
1027 (*fw_ptr)++;
1028 remain--;
1029
1030 cmd = (struct hci_command_hdr *)(*fw_ptr);
1031 *fw_ptr += sizeof(*cmd);
1032 remain -= sizeof(*cmd);
1033
1034 /* Ensure that the remain firmware data is long enough than the length
1035 * of command parameter. If not, the firmware file is corrupted.
1036 */
1037 if (remain < cmd->plen) {
1038 BT_ERR("%s Intel fw corrupted: invalid cmd len", hdev->name);
1039 return -EFAULT;
1040 }
1041
1042 /* If there is a command that loads a patch in the firmware
1043 * file, then enable the patch upon success, otherwise just
1044 * disable the manufacturer mode, for example patch activation
1045 * is not required when the default firmware patch file is used
1046 * because there are no patch data to load.
1047 */
1048 if (*disable_patch && le16_to_cpu(cmd->opcode) == 0xfc8e)
1049 *disable_patch = 0;
1050
1051 cmd_param = *fw_ptr;
1052 *fw_ptr += cmd->plen;
1053 remain -= cmd->plen;
1054
1055 /* This reads the expected events when the above command is sent to the
1056 * device. Some vendor commands expects more than one events, for
1057 * example command status event followed by vendor specific event.
1058 * For this case, it only keeps the last expected event. so the command
1059 * can be sent with __hci_cmd_sync_ev() which returns the sk_buff of
1060 * last expected event.
1061 */
1062 while (remain > HCI_EVENT_HDR_SIZE && *fw_ptr[0] == 0x02) {
1063 (*fw_ptr)++;
1064 remain--;
1065
1066 evt = (struct hci_event_hdr *)(*fw_ptr);
1067 *fw_ptr += sizeof(*evt);
1068 remain -= sizeof(*evt);
1069
1070 if (remain < evt->plen) {
1071 BT_ERR("%s Intel fw corrupted: invalid evt len",
1072 hdev->name);
1073 return -EFAULT;
1074 }
1075
1076 evt_param = *fw_ptr;
1077 *fw_ptr += evt->plen;
1078 remain -= evt->plen;
1079 }
1080
1081 /* Every HCI commands in the firmware file has its correspond event.
1082 * If event is not found or remain is smaller than zero, the firmware
1083 * file is corrupted.
1084 */
1085 if (!evt || !evt_param || remain < 0) {
1086 BT_ERR("%s Intel fw corrupted: invalid evt read", hdev->name);
1087 return -EFAULT;
1088 }
1089
1090 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen,
1091 cmd_param, evt->evt, HCI_INIT_TIMEOUT);
1092 if (IS_ERR(skb)) {
1093 BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
1094 hdev->name, cmd->opcode, PTR_ERR(skb));
1095 return -PTR_ERR(skb);
1096 }
1097
1098 /* It ensures that the returned event matches the event data read from
1099 * the firmware file. At fist, it checks the length and then
1100 * the contents of the event.
1101 */
1102 if (skb->len != evt->plen) {
1103 BT_ERR("%s mismatch event length (opcode 0x%4.4x)", hdev->name,
1104 le16_to_cpu(cmd->opcode));
1105 kfree_skb(skb);
1106 return -EFAULT;
1107 }
1108
1109 if (memcmp(skb->data, evt_param, evt->plen)) {
1110 BT_ERR("%s mismatch event parameter (opcode 0x%4.4x)",
1111 hdev->name, le16_to_cpu(cmd->opcode));
1112 kfree_skb(skb);
1113 return -EFAULT;
1114 }
1115 kfree_skb(skb);
1116
1117 return 0;
1118}
1119
1120static int btusb_setup_intel(struct hci_dev *hdev)
1121{
1122 struct sk_buff *skb;
1123 const struct firmware *fw;
1124 const u8 *fw_ptr;
1125 int disable_patch;
1126 struct intel_version *ver;
1127
1128 const u8 mfg_enable[] = { 0x01, 0x00 };
1129 const u8 mfg_disable[] = { 0x00, 0x00 };
1130 const u8 mfg_reset_deactivate[] = { 0x00, 0x01 };
1131 const u8 mfg_reset_activate[] = { 0x00, 0x02 };
1132
1133 BT_DBG("%s", hdev->name);
1134
1135 /* The controller has a bug with the first HCI command sent to it
1136 * returning number of completed commands as zero. This would stall the
1137 * command processing in the Bluetooth core.
1138 *
1139 * As a workaround, send HCI Reset command first which will reset the
1140 * number of completed commands and allow normal command processing
1141 * from now on.
1142 */
1143 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
1144 if (IS_ERR(skb)) {
1145 BT_ERR("%s sending initial HCI reset command failed (%ld)",
1146 hdev->name, PTR_ERR(skb));
1147 return -PTR_ERR(skb);
1148 }
1149 kfree_skb(skb);
1150
1151 /* Read Intel specific controller version first to allow selection of
1152 * which firmware file to load.
1153 *
1154 * The returned information are hardware variant and revision plus
1155 * firmware variant, revision and build number.
1156 */
1157 skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
1158 if (IS_ERR(skb)) {
1159 BT_ERR("%s reading Intel fw version command failed (%ld)",
1160 hdev->name, PTR_ERR(skb));
1161 return -PTR_ERR(skb);
1162 }
1163
1164 if (skb->len != sizeof(*ver)) {
1165 BT_ERR("%s Intel version event length mismatch", hdev->name);
1166 kfree_skb(skb);
1167 return -EIO;
1168 }
1169
1170 ver = (struct intel_version *)skb->data;
1171 if (ver->status) {
1172 BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
1173 ver->status);
1174 kfree_skb(skb);
1175 return -bt_to_errno(ver->status);
1176 }
1177
1178 BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
1179 hdev->name, ver->hw_platform, ver->hw_variant,
1180 ver->hw_revision, ver->fw_variant, ver->fw_revision,
1181 ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy,
1182 ver->fw_patch_num);
1183
1184 /* fw_patch_num indicates the version of patch the device currently
1185 * have. If there is no patch data in the device, it is always 0x00.
1186 * So, if it is other than 0x00, no need to patch the deivce again.
1187 */
1188 if (ver->fw_patch_num) {
1189 BT_INFO("%s: Intel device is already patched. patch num: %02x",
1190 hdev->name, ver->fw_patch_num);
1191 kfree_skb(skb);
1192 return 0;
1193 }
1194
1195 /* Opens the firmware patch file based on the firmware version read
1196 * from the controller. If it fails to open the matching firmware
1197 * patch file, it tries to open the default firmware patch file.
1198 * If no patch file is found, allow the device to operate without
1199 * a patch.
1200 */
1201 fw = btusb_setup_intel_get_fw(hdev, ver);
1202 if (!fw) {
1203 kfree_skb(skb);
1204 return 0;
1205 }
1206 fw_ptr = fw->data;
1207
1208 /* This Intel specific command enables the manufacturer mode of the
1209 * controller.
1210 *
1211 * Only while this mode is enabled, the driver can download the
1212 * firmware patch data and configuration parameters.
1213 */
1214 skb = __hci_cmd_sync(hdev, 0xfc11, 2, mfg_enable, HCI_INIT_TIMEOUT);
1215 if (IS_ERR(skb)) {
1216 BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
1217 hdev->name, PTR_ERR(skb));
1218 release_firmware(fw);
1219 return -PTR_ERR(skb);
1220 }
1221
1222 if (skb->data[0]) {
1223 u8 evt_status = skb->data[0];
1224 BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
1225 hdev->name, evt_status);
1226 kfree_skb(skb);
1227 release_firmware(fw);
1228 return -bt_to_errno(evt_status);
1229 }
1230 kfree_skb(skb);
1231
1232 disable_patch = 1;
1233
1234 /* The firmware data file consists of list of Intel specific HCI
1235 * commands and its expected events. The first byte indicates the
1236 * type of the message, either HCI command or HCI event.
1237 *
1238 * It reads the command and its expected event from the firmware file,
1239 * and send to the controller. Once __hci_cmd_sync_ev() returns,
1240 * the returned event is compared with the event read from the firmware
1241 * file and it will continue until all the messages are downloaded to
1242 * the controller.
1243 *
1244 * Once the firmware patching is completed successfully,
1245 * the manufacturer mode is disabled with reset and activating the
1246 * downloaded patch.
1247 *
1248 * If the firmware patching fails, the manufacturer mode is
1249 * disabled with reset and deactivating the patch.
1250 *
1251 * If the default patch file is used, no reset is done when disabling
1252 * the manufacturer.
1253 */
1254 while (fw->size > fw_ptr - fw->data) {
1255 int ret;
1256
1257 ret = btusb_setup_intel_patching(hdev, fw, &fw_ptr,
1258 &disable_patch);
1259 if (ret < 0)
1260 goto exit_mfg_deactivate;
1261 }
1262
1263 release_firmware(fw);
1264
1265 if (disable_patch)
1266 goto exit_mfg_disable;
1267
1268 /* Patching completed successfully and disable the manufacturer mode
1269 * with reset and activate the downloaded firmware patches.
1270 */
1271 skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_activate),
1272 mfg_reset_activate, HCI_INIT_TIMEOUT);
1273 if (IS_ERR(skb)) {
1274 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1275 hdev->name, PTR_ERR(skb));
1276 return -PTR_ERR(skb);
1277 }
1278 kfree_skb(skb);
1279
1280 BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
1281 hdev->name);
1282
1283 return 0;
1284
1285exit_mfg_disable:
1286 /* Disable the manufacturer mode without reset */
1287 skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_disable), mfg_disable,
1288 HCI_INIT_TIMEOUT);
1289 if (IS_ERR(skb)) {
1290 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1291 hdev->name, PTR_ERR(skb));
1292 return -PTR_ERR(skb);
1293 }
1294 kfree_skb(skb);
1295
1296 BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
1297 return 0;
1298
1299exit_mfg_deactivate:
1300 release_firmware(fw);
1301
1302 /* Patching failed. Disable the manufacturer mode with reset and
1303 * deactivate the downloaded firmware patches.
1304 */
1305 skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_deactivate),
1306 mfg_reset_deactivate, HCI_INIT_TIMEOUT);
1307 if (IS_ERR(skb)) {
1308 BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1309 hdev->name, PTR_ERR(skb));
1310 return -PTR_ERR(skb);
1311 }
1312 kfree_skb(skb);
1313
1314 BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
1315 hdev->name);
1316
1317 return 0;
1318}
1319
929static int btusb_probe(struct usb_interface *intf, 1320static int btusb_probe(struct usb_interface *intf,
930 const struct usb_device_id *id) 1321 const struct usb_device_id *id)
931{ 1322{
@@ -1022,11 +1413,17 @@ static int btusb_probe(struct usb_interface *intf,
1022 1413
1023 SET_HCIDEV_DEV(hdev, &intf->dev); 1414 SET_HCIDEV_DEV(hdev, &intf->dev);
1024 1415
1025 hdev->open = btusb_open; 1416 hdev->open = btusb_open;
1026 hdev->close = btusb_close; 1417 hdev->close = btusb_close;
1027 hdev->flush = btusb_flush; 1418 hdev->flush = btusb_flush;
1028 hdev->send = btusb_send_frame; 1419 hdev->send = btusb_send_frame;
1029 hdev->notify = btusb_notify; 1420 hdev->notify = btusb_notify;
1421
1422 if (id->driver_info & BTUSB_BCM92035)
1423 hdev->setup = btusb_setup_bcm92035;
1424
1425 if (id->driver_info & BTUSB_INTEL)
1426 hdev->setup = btusb_setup_intel;
1030 1427
1031 /* Interface numbers are hardcoded in the specification */ 1428 /* Interface numbers are hardcoded in the specification */
1032 data->isoc = usb_ifnum_to_if(data->udev, 1); 1429 data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -1065,17 +1462,6 @@ static int btusb_probe(struct usb_interface *intf,
1065 data->isoc = NULL; 1462 data->isoc = NULL;
1066 } 1463 }
1067 1464
1068 if (id->driver_info & BTUSB_BCM92035) {
1069 unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
1070 struct sk_buff *skb;
1071
1072 skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
1073 if (skb) {
1074 memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
1075 skb_queue_tail(&hdev->driver_init, skb);
1076 }
1077 }
1078
1079 if (data->isoc) { 1465 if (data->isoc) {
1080 err = usb_driver_claim_interface(&btusb_driver, 1466 err = usb_driver_claim_interface(&btusb_driver,
1081 data->isoc, data); 1467 data->isoc, data);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index c60623f206d4..8ae9f1ea2bb5 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -153,6 +153,9 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
153{ 153{
154 int ret; 154 int ret;
155 155
156 if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
157 return -EUNATCH;
158
156 ret = hci_recv_stream_fragment(hu->hdev, data, count); 159 ret = hci_recv_stream_fragment(hu->hdev, data, count);
157 if (ret < 0) { 160 if (ret < 0) {
158 BT_ERR("Frame Reassembly Failed"); 161 BT_ERR("Frame Reassembly Failed");
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index ed0fade46aed..bc68a440d432 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -260,12 +260,12 @@ static int hci_uart_send_frame(struct sk_buff *skb)
260 260
261/* ------ LDISC part ------ */ 261/* ------ LDISC part ------ */
262/* hci_uart_tty_open 262/* hci_uart_tty_open
263 * 263 *
264 * Called when line discipline changed to HCI_UART. 264 * Called when line discipline changed to HCI_UART.
265 * 265 *
266 * Arguments: 266 * Arguments:
267 * tty pointer to tty info structure 267 * tty pointer to tty info structure
268 * Return Value: 268 * Return Value:
269 * 0 if success, otherwise error code 269 * 0 if success, otherwise error code
270 */ 270 */
271static int hci_uart_tty_open(struct tty_struct *tty) 271static int hci_uart_tty_open(struct tty_struct *tty)
@@ -365,15 +365,15 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
365} 365}
366 366
367/* hci_uart_tty_receive() 367/* hci_uart_tty_receive()
368 * 368 *
369 * Called by tty low level driver when receive data is 369 * Called by tty low level driver when receive data is
370 * available. 370 * available.
371 * 371 *
372 * Arguments: tty pointer to tty isntance data 372 * Arguments: tty pointer to tty isntance data
373 * data pointer to received data 373 * data pointer to received data
374 * flags pointer to flags for data 374 * flags pointer to flags for data
375 * count count of received data in bytes 375 * count count of received data in bytes
376 * 376 *
377 * Return Value: None 377 * Return Value: None
378 */ 378 */
379static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count) 379static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
@@ -388,7 +388,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
388 388
389 spin_lock(&hu->rx_lock); 389 spin_lock(&hu->rx_lock);
390 hu->proto->recv(hu, (void *) data, count); 390 hu->proto->recv(hu, (void *) data, count);
391 hu->hdev->stat.byte_rx += count; 391
392 if (hu->hdev)
393 hu->hdev->stat.byte_rx += count;
394
392 spin_unlock(&hu->rx_lock); 395 spin_unlock(&hu->rx_lock);
393 396
394 tty_unthrottle(tty); 397 tty_unthrottle(tty);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 1110478dd0fd..08ae128cce9b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -232,6 +232,31 @@ void proc_comm_connector(struct task_struct *task)
232 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 232 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
233} 233}
234 234
235void proc_coredump_connector(struct task_struct *task)
236{
237 struct cn_msg *msg;
238 struct proc_event *ev;
239 __u8 buffer[CN_PROC_MSG_SIZE];
240 struct timespec ts;
241
242 if (atomic_read(&proc_event_num_listeners) < 1)
243 return;
244
245 msg = (struct cn_msg *)buffer;
246 ev = (struct proc_event *)msg->data;
247 get_seq(&msg->seq, &ev->cpu);
248 ktime_get_ts(&ts); /* get high res monotonic timestamp */
249 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
250 ev->what = PROC_EVENT_COREDUMP;
251 ev->event_data.coredump.process_pid = task->pid;
252 ev->event_data.coredump.process_tgid = task->tgid;
253
254 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
255 msg->ack = 0; /* not used */
256 msg->len = sizeof(*ev);
257 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
258}
259
235void proc_exit_connector(struct task_struct *task) 260void proc_exit_connector(struct task_struct *task)
236{ 261{
237 struct cn_msg *msg; 262 struct cn_msg *msg;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f1b7e244bfc1..6ecfa758942c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -23,7 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/skbuff.h> 25#include <linux/skbuff.h>
26#include <linux/netlink.h> 26#include <net/netlink.h>
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/connector.h> 28#include <linux/connector.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
@@ -95,13 +95,13 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
95 if (!netlink_has_listeners(dev->nls, group)) 95 if (!netlink_has_listeners(dev->nls, group))
96 return -ESRCH; 96 return -ESRCH;
97 97
98 size = NLMSG_SPACE(sizeof(*msg) + msg->len); 98 size = sizeof(*msg) + msg->len;
99 99
100 skb = alloc_skb(size, gfp_mask); 100 skb = nlmsg_new(size, gfp_mask);
101 if (!skb) 101 if (!skb)
102 return -ENOMEM; 102 return -ENOMEM;
103 103
104 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0); 104 nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
105 if (!nlh) { 105 if (!nlh) {
106 kfree_skb(skb); 106 kfree_skb(skb);
107 return -EMSGSIZE; 107 return -EMSGSIZE;
@@ -124,7 +124,7 @@ static int cn_call_callback(struct sk_buff *skb)
124{ 124{
125 struct cn_callback_entry *i, *cbq = NULL; 125 struct cn_callback_entry *i, *cbq = NULL;
126 struct cn_dev *dev = &cdev; 126 struct cn_dev *dev = &cdev;
127 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb)); 127 struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
128 struct netlink_skb_parms *nsp = &NETLINK_CB(skb); 128 struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
129 int err = -ENODEV; 129 int err = -ENODEV;
130 130
@@ -162,7 +162,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
162 162
163 skb = skb_get(__skb); 163 skb = skb_get(__skb);
164 164
165 if (skb->len >= NLMSG_SPACE(0)) { 165 if (skb->len >= NLMSG_HDRLEN) {
166 nlh = nlmsg_hdr(skb); 166 nlh = nlmsg_hdr(skb);
167 167
168 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 168 if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9b041858d10d..9e84d5bc9307 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -470,8 +470,10 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
470 } 470 }
471 471
472 if (!dca2_tag_map_valid(ioatdca->tag_map)) { 472 if (!dca2_tag_map_valid(ioatdca->tag_map)) {
473 dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, " 473 WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
474 "disabling DCA\n"); 474 "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
475 dev_driver_string(&pdev->dev),
476 dev_name(&pdev->dev));
475 free_dca_provider(dca); 477 free_dca_provider(dca);
476 return NULL; 478 return NULL;
477 } 479 }
@@ -689,7 +691,10 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
689 } 691 }
690 692
691 if (dca3_tag_map_invalid(ioatdca->tag_map)) { 693 if (dca3_tag_map_invalid(ioatdca->tag_map)) {
692 dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n"); 694 WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
695 "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
696 dev_driver_string(&pdev->dev),
697 dev_name(&pdev->dev));
693 free_dca_provider(dca); 698 free_dca_provider(dca);
694 return NULL; 699 return NULL;
695 } 700 }
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 7224533e8ca6..7a701a58bbf0 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -47,9 +47,9 @@ config FIREWIRE_NET
47 tristate "IP networking over 1394" 47 tristate "IP networking over 1394"
48 depends on FIREWIRE && INET 48 depends on FIREWIRE && INET
49 help 49 help
50 This enables IPv4 over IEEE 1394, providing IP connectivity with 50 This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
51 other implementations of RFC 2734 as found on several operating 51 with other implementations of RFC 2734/3146 as found on several
52 systems. Multicast support is currently limited. 52 operating systems. Multicast support is currently limited.
53 53
54 To compile this driver as a module, say M here: The module will be 54 To compile this driver as a module, say M here: The module will be
55 called firewire-net. 55 called firewire-net.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 2b27bff2591a..4d565365e476 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * IPv4 over IEEE 1394, per RFC 2734 2 * IPv4 over IEEE 1394, per RFC 2734
3 * IPv6 over IEEE 1394, per RFC 3146
3 * 4 *
4 * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> 5 * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
5 * 6 *
@@ -28,6 +29,7 @@
28 29
29#include <asm/unaligned.h> 30#include <asm/unaligned.h>
30#include <net/arp.h> 31#include <net/arp.h>
32#include <net/firewire.h>
31 33
32/* rx limits */ 34/* rx limits */
33#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ 35#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
@@ -45,6 +47,7 @@
45 47
46#define IANA_SPECIFIER_ID 0x00005eU 48#define IANA_SPECIFIER_ID 0x00005eU
47#define RFC2734_SW_VERSION 0x000001U 49#define RFC2734_SW_VERSION 0x000001U
50#define RFC3146_SW_VERSION 0x000002U
48 51
49#define IEEE1394_GASP_HDR_SIZE 8 52#define IEEE1394_GASP_HDR_SIZE 8
50 53
@@ -57,32 +60,10 @@
57#define RFC2374_HDR_LASTFRAG 2 /* last fragment */ 60#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
58#define RFC2374_HDR_INTFRAG 3 /* interior fragment */ 61#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
59 62
60#define RFC2734_HW_ADDR_LEN 16 63static bool fwnet_hwaddr_is_multicast(u8 *ha)
61 64{
62struct rfc2734_arp { 65 return !!(*ha & 1);
63 __be16 hw_type; /* 0x0018 */ 66}
64 __be16 proto_type; /* 0x0806 */
65 u8 hw_addr_len; /* 16 */
66 u8 ip_addr_len; /* 4 */
67 __be16 opcode; /* ARP Opcode */
68 /* Above is exactly the same format as struct arphdr */
69
70 __be64 s_uniq_id; /* Sender's 64bit EUI */
71 u8 max_rec; /* Sender's max packet size */
72 u8 sspd; /* Sender's max speed */
73 __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
74 __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
75 __be32 sip; /* Sender's IP Address */
76 __be32 tip; /* IP Address of requested hw addr */
77} __packed;
78
79/* This header format is specific to this driver implementation. */
80#define FWNET_ALEN 8
81#define FWNET_HLEN 10
82struct fwnet_header {
83 u8 h_dest[FWNET_ALEN]; /* destination address */
84 __be16 h_proto; /* packet type ID field */
85} __packed;
86 67
87/* IPv4 and IPv6 encapsulation header */ 68/* IPv4 and IPv6 encapsulation header */
88struct rfc2734_header { 69struct rfc2734_header {
@@ -191,8 +172,6 @@ struct fwnet_peer {
191 struct list_head peer_link; 172 struct list_head peer_link;
192 struct fwnet_device *dev; 173 struct fwnet_device *dev;
193 u64 guid; 174 u64 guid;
194 u64 fifo;
195 __be32 ip;
196 175
197 /* guarded by dev->lock */ 176 /* guarded by dev->lock */
198 struct list_head pd_list; /* received partial datagrams */ 177 struct list_head pd_list; /* received partial datagrams */
@@ -222,6 +201,15 @@ struct fwnet_packet_task {
222}; 201};
223 202
224/* 203/*
204 * Get fifo address embedded in hwaddr
205 */
206static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
207{
208 return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
209 | get_unaligned_be32(&ha->uc.fifo_lo);
210}
211
212/*
225 * saddr == NULL means use device source address. 213 * saddr == NULL means use device source address.
226 * daddr == NULL means leave destination address (eg unresolved arp). 214 * daddr == NULL means leave destination address (eg unresolved arp).
227 */ 215 */
@@ -513,10 +501,20 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
513 bool is_broadcast, u16 ether_type) 501 bool is_broadcast, u16 ether_type)
514{ 502{
515 struct fwnet_device *dev; 503 struct fwnet_device *dev;
516 static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
517 int status; 504 int status;
518 __be64 guid; 505 __be64 guid;
519 506
507 switch (ether_type) {
508 case ETH_P_ARP:
509 case ETH_P_IP:
510#if IS_ENABLED(CONFIG_IPV6)
511 case ETH_P_IPV6:
512#endif
513 break;
514 default:
515 goto err;
516 }
517
520 dev = netdev_priv(net); 518 dev = netdev_priv(net);
521 /* Write metadata, and then pass to the receive level */ 519 /* Write metadata, and then pass to the receive level */
522 skb->dev = net; 520 skb->dev = net;
@@ -524,92 +522,11 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
524 522
525 /* 523 /*
526 * Parse the encapsulation header. This actually does the job of 524 * Parse the encapsulation header. This actually does the job of
527 * converting to an ethernet frame header, as well as arp 525 * converting to an ethernet-like pseudo frame header.
528 * conversion if needed. ARP conversion is easier in this
529 * direction, since we are using ethernet as our backend.
530 */ 526 */
531 /*
532 * If this is an ARP packet, convert it. First, we want to make
533 * use of some of the fields, since they tell us a little bit
534 * about the sending machine.
535 */
536 if (ether_type == ETH_P_ARP) {
537 struct rfc2734_arp *arp1394;
538 struct arphdr *arp;
539 unsigned char *arp_ptr;
540 u64 fifo_addr;
541 u64 peer_guid;
542 unsigned sspd;
543 u16 max_payload;
544 struct fwnet_peer *peer;
545 unsigned long flags;
546
547 arp1394 = (struct rfc2734_arp *)skb->data;
548 arp = (struct arphdr *)skb->data;
549 arp_ptr = (unsigned char *)(arp + 1);
550 peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
551 fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
552 | get_unaligned_be32(&arp1394->fifo_lo);
553
554 sspd = arp1394->sspd;
555 /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
556 if (sspd > SCODE_3200) {
557 dev_notice(&net->dev, "sspd %x out of range\n", sspd);
558 sspd = SCODE_3200;
559 }
560 max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
561
562 spin_lock_irqsave(&dev->lock, flags);
563 peer = fwnet_peer_find_by_guid(dev, peer_guid);
564 if (peer) {
565 peer->fifo = fifo_addr;
566
567 if (peer->speed > sspd)
568 peer->speed = sspd;
569 if (peer->max_payload > max_payload)
570 peer->max_payload = max_payload;
571
572 peer->ip = arp1394->sip;
573 }
574 spin_unlock_irqrestore(&dev->lock, flags);
575
576 if (!peer) {
577 dev_notice(&net->dev,
578 "no peer for ARP packet from %016llx\n",
579 (unsigned long long)peer_guid);
580 goto no_peer;
581 }
582
583 /*
584 * Now that we're done with the 1394 specific stuff, we'll
585 * need to alter some of the data. Believe it or not, all
586 * that needs to be done is sender_IP_address needs to be
587 * moved, the destination hardware address get stuffed
588 * in and the hardware address length set to 8.
589 *
590 * IMPORTANT: The code below overwrites 1394 specific data
591 * needed above so keep the munging of the data for the
592 * higher level IP stack last.
593 */
594
595 arp->ar_hln = 8;
596 /* skip over sender unique id */
597 arp_ptr += arp->ar_hln;
598 /* move sender IP addr */
599 put_unaligned(arp1394->sip, (u32 *)arp_ptr);
600 /* skip over sender IP addr */
601 arp_ptr += arp->ar_pln;
602
603 if (arp->ar_op == htons(ARPOP_REQUEST))
604 memset(arp_ptr, 0, sizeof(u64));
605 else
606 memcpy(arp_ptr, net->dev_addr, sizeof(u64));
607 }
608
609 /* Now add the ethernet header. */
610 guid = cpu_to_be64(dev->card->guid); 527 guid = cpu_to_be64(dev->card->guid);
611 if (dev_hard_header(skb, net, ether_type, 528 if (dev_hard_header(skb, net, ether_type,
612 is_broadcast ? &broadcast_hw : &guid, 529 is_broadcast ? net->broadcast : net->dev_addr,
613 NULL, skb->len) >= 0) { 530 NULL, skb->len) >= 0) {
614 struct fwnet_header *eth; 531 struct fwnet_header *eth;
615 u16 *rawp; 532 u16 *rawp;
@@ -618,7 +535,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
618 skb_reset_mac_header(skb); 535 skb_reset_mac_header(skb);
619 skb_pull(skb, sizeof(*eth)); 536 skb_pull(skb, sizeof(*eth));
620 eth = (struct fwnet_header *)skb_mac_header(skb); 537 eth = (struct fwnet_header *)skb_mac_header(skb);
621 if (*eth->h_dest & 1) { 538 if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
622 if (memcmp(eth->h_dest, net->broadcast, 539 if (memcmp(eth->h_dest, net->broadcast,
623 net->addr_len) == 0) 540 net->addr_len) == 0)
624 skb->pkt_type = PACKET_BROADCAST; 541 skb->pkt_type = PACKET_BROADCAST;
@@ -630,7 +547,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
630 if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) 547 if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
631 skb->pkt_type = PACKET_OTHERHOST; 548 skb->pkt_type = PACKET_OTHERHOST;
632 } 549 }
633 if (ntohs(eth->h_proto) >= 1536) { 550 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
634 protocol = eth->h_proto; 551 protocol = eth->h_proto;
635 } else { 552 } else {
636 rawp = (u16 *)skb->data; 553 rawp = (u16 *)skb->data;
@@ -652,7 +569,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
652 569
653 return 0; 570 return 0;
654 571
655 no_peer: 572 err:
656 net->stats.rx_errors++; 573 net->stats.rx_errors++;
657 net->stats.rx_dropped++; 574 net->stats.rx_dropped++;
658 575
@@ -856,7 +773,12 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
856 ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; 773 ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
857 source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; 774 source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
858 775
859 if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { 776 if (specifier_id == IANA_SPECIFIER_ID &&
777 (ver == RFC2734_SW_VERSION
778#if IS_ENABLED(CONFIG_IPV6)
779 || ver == RFC3146_SW_VERSION
780#endif
781 )) {
860 buf_ptr += 2; 782 buf_ptr += 2;
861 length -= IEEE1394_GASP_HDR_SIZE; 783 length -= IEEE1394_GASP_HDR_SIZE;
862 fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, 784 fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
@@ -1059,16 +981,27 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1059 u8 *p; 981 u8 *p;
1060 int generation; 982 int generation;
1061 int node_id; 983 int node_id;
984 unsigned int sw_version;
1062 985
1063 /* ptask->generation may not have been set yet */ 986 /* ptask->generation may not have been set yet */
1064 generation = dev->card->generation; 987 generation = dev->card->generation;
1065 smp_rmb(); 988 smp_rmb();
1066 node_id = dev->card->node_id; 989 node_id = dev->card->node_id;
1067 990
991 switch (ptask->skb->protocol) {
992 default:
993 sw_version = RFC2734_SW_VERSION;
994 break;
995#if IS_ENABLED(CONFIG_IPV6)
996 case htons(ETH_P_IPV6):
997 sw_version = RFC3146_SW_VERSION;
998#endif
999 }
1000
1068 p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); 1001 p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
1069 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); 1002 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
1070 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 1003 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
1071 | RFC2734_SW_VERSION, &p[4]); 1004 | sw_version, &p[4]);
1072 1005
1073 /* We should not transmit if broadcast_channel.valid == 0. */ 1006 /* We should not transmit if broadcast_channel.valid == 0. */
1074 fw_send_request(dev->card, &ptask->transaction, 1007 fw_send_request(dev->card, &ptask->transaction,
@@ -1116,6 +1049,62 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1116 return 0; 1049 return 0;
1117} 1050}
1118 1051
1052static void fwnet_fifo_stop(struct fwnet_device *dev)
1053{
1054 if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
1055 return;
1056
1057 fw_core_remove_address_handler(&dev->handler);
1058 dev->local_fifo = FWNET_NO_FIFO_ADDR;
1059}
1060
1061static int fwnet_fifo_start(struct fwnet_device *dev)
1062{
1063 int retval;
1064
1065 if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
1066 return 0;
1067
1068 dev->handler.length = 4096;
1069 dev->handler.address_callback = fwnet_receive_packet;
1070 dev->handler.callback_data = dev;
1071
1072 retval = fw_core_add_address_handler(&dev->handler,
1073 &fw_high_memory_region);
1074 if (retval < 0)
1075 return retval;
1076
1077 dev->local_fifo = dev->handler.offset;
1078
1079 return 0;
1080}
1081
1082static void __fwnet_broadcast_stop(struct fwnet_device *dev)
1083{
1084 unsigned u;
1085
1086 if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
1087 for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
1088 kunmap(dev->broadcast_rcv_buffer.pages[u]);
1089 fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
1090 }
1091 if (dev->broadcast_rcv_context) {
1092 fw_iso_context_destroy(dev->broadcast_rcv_context);
1093 dev->broadcast_rcv_context = NULL;
1094 }
1095 kfree(dev->broadcast_rcv_buffer_ptrs);
1096 dev->broadcast_rcv_buffer_ptrs = NULL;
1097 dev->broadcast_state = FWNET_BROADCAST_ERROR;
1098}
1099
1100static void fwnet_broadcast_stop(struct fwnet_device *dev)
1101{
1102 if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
1103 return;
1104 fw_iso_context_stop(dev->broadcast_rcv_context);
1105 __fwnet_broadcast_stop(dev);
1106}
1107
1119static int fwnet_broadcast_start(struct fwnet_device *dev) 1108static int fwnet_broadcast_start(struct fwnet_device *dev)
1120{ 1109{
1121 struct fw_iso_context *context; 1110 struct fw_iso_context *context;
@@ -1124,60 +1113,47 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1124 unsigned max_receive; 1113 unsigned max_receive;
1125 struct fw_iso_packet packet; 1114 struct fw_iso_packet packet;
1126 unsigned long offset; 1115 unsigned long offset;
1116 void **ptrptr;
1127 unsigned u; 1117 unsigned u;
1128 1118
1129 if (dev->local_fifo == FWNET_NO_FIFO_ADDR) { 1119 if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
1130 dev->handler.length = 4096; 1120 return 0;
1131 dev->handler.address_callback = fwnet_receive_packet;
1132 dev->handler.callback_data = dev;
1133
1134 retval = fw_core_add_address_handler(&dev->handler,
1135 &fw_high_memory_region);
1136 if (retval < 0)
1137 goto failed_initial;
1138
1139 dev->local_fifo = dev->handler.offset;
1140 }
1141 1121
1142 max_receive = 1U << (dev->card->max_receive + 1); 1122 max_receive = 1U << (dev->card->max_receive + 1);
1143 num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; 1123 num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
1144 1124
1145 if (!dev->broadcast_rcv_context) { 1125 ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
1146 void **ptrptr; 1126 if (!ptrptr) {
1147 1127 retval = -ENOMEM;
1148 context = fw_iso_context_create(dev->card, 1128 goto failed;
1149 FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, 1129 }
1150 dev->card->link_speed, 8, fwnet_receive_broadcast, dev); 1130 dev->broadcast_rcv_buffer_ptrs = ptrptr;
1151 if (IS_ERR(context)) { 1131
1152 retval = PTR_ERR(context); 1132 context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
1153 goto failed_context_create; 1133 IEEE1394_BROADCAST_CHANNEL,
1154 } 1134 dev->card->link_speed, 8,
1135 fwnet_receive_broadcast, dev);
1136 if (IS_ERR(context)) {
1137 retval = PTR_ERR(context);
1138 goto failed;
1139 }
1155 1140
1156 retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, 1141 retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
1157 dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); 1142 FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
1158 if (retval < 0) 1143 if (retval < 0)
1159 goto failed_buffer_init; 1144 goto failed;
1160 1145
1161 ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); 1146 dev->broadcast_state = FWNET_BROADCAST_STOPPED;
1162 if (!ptrptr) {
1163 retval = -ENOMEM;
1164 goto failed_ptrs_alloc;
1165 }
1166 1147
1167 dev->broadcast_rcv_buffer_ptrs = ptrptr; 1148 for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
1168 for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { 1149 void *ptr;
1169 void *ptr; 1150 unsigned v;
1170 unsigned v;
1171 1151
1172 ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); 1152 ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
1173 for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) 1153 for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
1174 *ptrptr++ = (void *) 1154 *ptrptr++ = (void *) ((char *)ptr + v * max_receive);
1175 ((char *)ptr + v * max_receive);
1176 }
1177 dev->broadcast_rcv_context = context;
1178 } else {
1179 context = dev->broadcast_rcv_context;
1180 } 1155 }
1156 dev->broadcast_rcv_context = context;
1181 1157
1182 packet.payload_length = max_receive; 1158 packet.payload_length = max_receive;
1183 packet.interrupt = 1; 1159 packet.interrupt = 1;
@@ -1191,7 +1167,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1191 retval = fw_iso_context_queue(context, &packet, 1167 retval = fw_iso_context_queue(context, &packet,
1192 &dev->broadcast_rcv_buffer, offset); 1168 &dev->broadcast_rcv_buffer, offset);
1193 if (retval < 0) 1169 if (retval < 0)
1194 goto failed_rcv_queue; 1170 goto failed;
1195 1171
1196 offset += max_receive; 1172 offset += max_receive;
1197 } 1173 }
@@ -1201,7 +1177,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1201 retval = fw_iso_context_start(context, -1, 0, 1177 retval = fw_iso_context_start(context, -1, 0,
1202 FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ 1178 FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
1203 if (retval < 0) 1179 if (retval < 0)
1204 goto failed_rcv_queue; 1180 goto failed;
1205 1181
1206 /* FIXME: adjust it according to the min. speed of all known peers? */ 1182 /* FIXME: adjust it according to the min. speed of all known peers? */
1207 dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 1183 dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
@@ -1210,19 +1186,8 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1210 1186
1211 return 0; 1187 return 0;
1212 1188
1213 failed_rcv_queue: 1189 failed:
1214 kfree(dev->broadcast_rcv_buffer_ptrs); 1190 __fwnet_broadcast_stop(dev);
1215 dev->broadcast_rcv_buffer_ptrs = NULL;
1216 failed_ptrs_alloc:
1217 fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
1218 failed_buffer_init:
1219 fw_iso_context_destroy(context);
1220 dev->broadcast_rcv_context = NULL;
1221 failed_context_create:
1222 fw_core_remove_address_handler(&dev->handler);
1223 failed_initial:
1224 dev->local_fifo = FWNET_NO_FIFO_ADDR;
1225
1226 return retval; 1191 return retval;
1227} 1192}
1228 1193
@@ -1240,11 +1205,10 @@ static int fwnet_open(struct net_device *net)
1240 struct fwnet_device *dev = netdev_priv(net); 1205 struct fwnet_device *dev = netdev_priv(net);
1241 int ret; 1206 int ret;
1242 1207
1243 if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { 1208 ret = fwnet_broadcast_start(dev);
1244 ret = fwnet_broadcast_start(dev); 1209 if (ret)
1245 if (ret) 1210 return ret;
1246 return ret; 1211
1247 }
1248 netif_start_queue(net); 1212 netif_start_queue(net);
1249 1213
1250 spin_lock_irq(&dev->lock); 1214 spin_lock_irq(&dev->lock);
@@ -1257,9 +1221,10 @@ static int fwnet_open(struct net_device *net)
1257/* ifdown */ 1221/* ifdown */
1258static int fwnet_stop(struct net_device *net) 1222static int fwnet_stop(struct net_device *net)
1259{ 1223{
1260 netif_stop_queue(net); 1224 struct fwnet_device *dev = netdev_priv(net);
1261 1225
1262 /* Deallocate iso context for use by other applications? */ 1226 netif_stop_queue(net);
1227 fwnet_broadcast_stop(dev);
1263 1228
1264 return 0; 1229 return 0;
1265} 1230}
@@ -1299,19 +1264,27 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1299 * We might need to rebuild the header on tx failure. 1264 * We might need to rebuild the header on tx failure.
1300 */ 1265 */
1301 memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); 1266 memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
1302 skb_pull(skb, sizeof(hdr_buf));
1303
1304 proto = hdr_buf.h_proto; 1267 proto = hdr_buf.h_proto;
1268
1269 switch (proto) {
1270 case htons(ETH_P_ARP):
1271 case htons(ETH_P_IP):
1272#if IS_ENABLED(CONFIG_IPV6)
1273 case htons(ETH_P_IPV6):
1274#endif
1275 break;
1276 default:
1277 goto fail;
1278 }
1279
1280 skb_pull(skb, sizeof(hdr_buf));
1305 dg_size = skb->len; 1281 dg_size = skb->len;
1306 1282
1307 /* 1283 /*
1308 * Set the transmission type for the packet. ARP packets and IP 1284 * Set the transmission type for the packet. ARP packets and IP
1309 * broadcast packets are sent via GASP. 1285 * broadcast packets are sent via GASP.
1310 */ 1286 */
1311 if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 1287 if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
1312 || proto == htons(ETH_P_ARP)
1313 || (proto == htons(ETH_P_IP)
1314 && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
1315 max_payload = dev->broadcast_xmt_max_payload; 1288 max_payload = dev->broadcast_xmt_max_payload;
1316 datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; 1289 datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
1317 1290
@@ -1320,11 +1293,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1320 ptask->dest_node = IEEE1394_ALL_NODES; 1293 ptask->dest_node = IEEE1394_ALL_NODES;
1321 ptask->speed = SCODE_100; 1294 ptask->speed = SCODE_100;
1322 } else { 1295 } else {
1323 __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); 1296 union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
1297 __be64 guid = get_unaligned(&ha->uc.uniq_id);
1324 u8 generation; 1298 u8 generation;
1325 1299
1326 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); 1300 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
1327 if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) 1301 if (!peer)
1328 goto fail; 1302 goto fail;
1329 1303
1330 generation = peer->generation; 1304 generation = peer->generation;
@@ -1332,32 +1306,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1332 max_payload = peer->max_payload; 1306 max_payload = peer->max_payload;
1333 datagram_label_ptr = &peer->datagram_label; 1307 datagram_label_ptr = &peer->datagram_label;
1334 1308
1335 ptask->fifo_addr = peer->fifo; 1309 ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
1336 ptask->generation = generation; 1310 ptask->generation = generation;
1337 ptask->dest_node = dest_node; 1311 ptask->dest_node = dest_node;
1338 ptask->speed = peer->speed; 1312 ptask->speed = peer->speed;
1339 } 1313 }
1340 1314
1341 /* If this is an ARP packet, convert it */
1342 if (proto == htons(ETH_P_ARP)) {
1343 struct arphdr *arp = (struct arphdr *)skb->data;
1344 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
1345 struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
1346 __be32 ipaddr;
1347
1348 ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
1349
1350 arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
1351 arp1394->max_rec = dev->card->max_receive;
1352 arp1394->sspd = dev->card->link_speed;
1353
1354 put_unaligned_be16(dev->local_fifo >> 32,
1355 &arp1394->fifo_hi);
1356 put_unaligned_be32(dev->local_fifo & 0xffffffff,
1357 &arp1394->fifo_lo);
1358 put_unaligned(ipaddr, &arp1394->sip);
1359 }
1360
1361 ptask->hdr.w0 = 0; 1315 ptask->hdr.w0 = 0;
1362 ptask->hdr.w1 = 0; 1316 ptask->hdr.w1 = 0;
1363 ptask->skb = skb; 1317 ptask->skb = skb;
@@ -1472,8 +1426,6 @@ static int fwnet_add_peer(struct fwnet_device *dev,
1472 1426
1473 peer->dev = dev; 1427 peer->dev = dev;
1474 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1428 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1475 peer->fifo = FWNET_NO_FIFO_ADDR;
1476 peer->ip = 0;
1477 INIT_LIST_HEAD(&peer->pd_list); 1429 INIT_LIST_HEAD(&peer->pd_list);
1478 peer->pdg_size = 0; 1430 peer->pdg_size = 0;
1479 peer->datagram_label = 0; 1431 peer->datagram_label = 0;
@@ -1503,6 +1455,7 @@ static int fwnet_probe(struct device *_dev)
1503 struct fwnet_device *dev; 1455 struct fwnet_device *dev;
1504 unsigned max_mtu; 1456 unsigned max_mtu;
1505 int ret; 1457 int ret;
1458 union fwnet_hwaddr *ha;
1506 1459
1507 mutex_lock(&fwnet_device_mutex); 1460 mutex_lock(&fwnet_device_mutex);
1508 1461
@@ -1533,6 +1486,11 @@ static int fwnet_probe(struct device *_dev)
1533 dev->card = card; 1486 dev->card = card;
1534 dev->netdev = net; 1487 dev->netdev = net;
1535 1488
1489 ret = fwnet_fifo_start(dev);
1490 if (ret < 0)
1491 goto out;
1492 dev->local_fifo = dev->handler.offset;
1493
1536 /* 1494 /*
1537 * Use the RFC 2734 default 1500 octets or the maximum payload 1495 * Use the RFC 2734 default 1500 octets or the maximum payload
1538 * as initial MTU 1496 * as initial MTU
@@ -1542,24 +1500,31 @@ static int fwnet_probe(struct device *_dev)
1542 net->mtu = min(1500U, max_mtu); 1500 net->mtu = min(1500U, max_mtu);
1543 1501
1544 /* Set our hardware address while we're at it */ 1502 /* Set our hardware address while we're at it */
1545 put_unaligned_be64(card->guid, net->dev_addr); 1503 ha = (union fwnet_hwaddr *)net->dev_addr;
1546 put_unaligned_be64(~0ULL, net->broadcast); 1504 put_unaligned_be64(card->guid, &ha->uc.uniq_id);
1505 ha->uc.max_rec = dev->card->max_receive;
1506 ha->uc.sspd = dev->card->link_speed;
1507 put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
1508 put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
1509
1510 memset(net->broadcast, -1, net->addr_len);
1511
1547 ret = register_netdev(net); 1512 ret = register_netdev(net);
1548 if (ret) 1513 if (ret)
1549 goto out; 1514 goto out;
1550 1515
1551 list_add_tail(&dev->dev_link, &fwnet_device_list); 1516 list_add_tail(&dev->dev_link, &fwnet_device_list);
1552 dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n", 1517 dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
1553 dev_name(card->device)); 1518 dev_name(card->device));
1554 have_dev: 1519 have_dev:
1555 ret = fwnet_add_peer(dev, unit, device); 1520 ret = fwnet_add_peer(dev, unit, device);
1556 if (ret && allocated_netdev) { 1521 if (ret && allocated_netdev) {
1557 unregister_netdev(net); 1522 unregister_netdev(net);
1558 list_del(&dev->dev_link); 1523 list_del(&dev->dev_link);
1559 }
1560 out: 1524 out:
1561 if (ret && allocated_netdev) 1525 fwnet_fifo_stop(dev);
1562 free_netdev(net); 1526 free_netdev(net);
1527 }
1563 1528
1564 mutex_unlock(&fwnet_device_mutex); 1529 mutex_unlock(&fwnet_device_mutex);
1565 1530
@@ -1592,22 +1557,14 @@ static int fwnet_remove(struct device *_dev)
1592 mutex_lock(&fwnet_device_mutex); 1557 mutex_lock(&fwnet_device_mutex);
1593 1558
1594 net = dev->netdev; 1559 net = dev->netdev;
1595 if (net && peer->ip)
1596 arp_invalidate(net, peer->ip);
1597 1560
1598 fwnet_remove_peer(peer, dev); 1561 fwnet_remove_peer(peer, dev);
1599 1562
1600 if (list_empty(&dev->peer_list)) { 1563 if (list_empty(&dev->peer_list)) {
1601 unregister_netdev(net); 1564 unregister_netdev(net);
1602 1565
1603 if (dev->local_fifo != FWNET_NO_FIFO_ADDR) 1566 fwnet_fifo_stop(dev);
1604 fw_core_remove_address_handler(&dev->handler); 1567
1605 if (dev->broadcast_rcv_context) {
1606 fw_iso_context_stop(dev->broadcast_rcv_context);
1607 fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
1608 dev->card);
1609 fw_iso_context_destroy(dev->broadcast_rcv_context);
1610 }
1611 for (i = 0; dev->queued_datagrams && i < 5; i++) 1568 for (i = 0; dev->queued_datagrams && i < 5; i++)
1612 ssleep(1); 1569 ssleep(1);
1613 WARN_ON(dev->queued_datagrams); 1570 WARN_ON(dev->queued_datagrams);
@@ -1646,6 +1603,14 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
1646 .specifier_id = IANA_SPECIFIER_ID, 1603 .specifier_id = IANA_SPECIFIER_ID,
1647 .version = RFC2734_SW_VERSION, 1604 .version = RFC2734_SW_VERSION,
1648 }, 1605 },
1606#if IS_ENABLED(CONFIG_IPV6)
1607 {
1608 .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
1609 IEEE1394_MATCH_VERSION,
1610 .specifier_id = IANA_SPECIFIER_ID,
1611 .version = RFC3146_SW_VERSION,
1612 },
1613#endif
1649 { } 1614 { }
1650}; 1615};
1651 1616
@@ -1683,6 +1648,30 @@ static struct fw_descriptor rfc2374_unit_directory = {
1683 .data = rfc2374_unit_directory_data 1648 .data = rfc2374_unit_directory_data
1684}; 1649};
1685 1650
1651#if IS_ENABLED(CONFIG_IPV6)
1652static const u32 rfc3146_unit_directory_data[] = {
1653 0x00040000, /* directory_length */
1654 0x1200005e, /* unit_specifier_id: IANA */
1655 0x81000003, /* textual descriptor offset */
1656 0x13000002, /* unit_sw_version: RFC 3146 */
1657 0x81000005, /* textual descriptor offset */
1658 0x00030000, /* descriptor_length */
1659 0x00000000, /* text */
1660 0x00000000, /* minimal ASCII, en */
1661 0x49414e41, /* I A N A */
1662 0x00030000, /* descriptor_length */
1663 0x00000000, /* text */
1664 0x00000000, /* minimal ASCII, en */
1665 0x49507636, /* I P v 6 */
1666};
1667
1668static struct fw_descriptor rfc3146_unit_directory = {
1669 .length = ARRAY_SIZE(rfc3146_unit_directory_data),
1670 .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
1671 .data = rfc3146_unit_directory_data
1672};
1673#endif
1674
1686static int __init fwnet_init(void) 1675static int __init fwnet_init(void)
1687{ 1676{
1688 int err; 1677 int err;
@@ -1691,11 +1680,17 @@ static int __init fwnet_init(void)
1691 if (err) 1680 if (err)
1692 return err; 1681 return err;
1693 1682
1683#if IS_ENABLED(CONFIG_IPV6)
1684 err = fw_core_add_descriptor(&rfc3146_unit_directory);
1685 if (err)
1686 goto out;
1687#endif
1688
1694 fwnet_packet_task_cache = kmem_cache_create("packet_task", 1689 fwnet_packet_task_cache = kmem_cache_create("packet_task",
1695 sizeof(struct fwnet_packet_task), 0, 0, NULL); 1690 sizeof(struct fwnet_packet_task), 0, 0, NULL);
1696 if (!fwnet_packet_task_cache) { 1691 if (!fwnet_packet_task_cache) {
1697 err = -ENOMEM; 1692 err = -ENOMEM;
1698 goto out; 1693 goto out2;
1699 } 1694 }
1700 1695
1701 err = driver_register(&fwnet_driver.driver); 1696 err = driver_register(&fwnet_driver.driver);
@@ -1703,7 +1698,11 @@ static int __init fwnet_init(void)
1703 return 0; 1698 return 0;
1704 1699
1705 kmem_cache_destroy(fwnet_packet_task_cache); 1700 kmem_cache_destroy(fwnet_packet_task_cache);
1701out2:
1702#if IS_ENABLED(CONFIG_IPV6)
1703 fw_core_remove_descriptor(&rfc3146_unit_directory);
1706out: 1704out:
1705#endif
1707 fw_core_remove_descriptor(&rfc2374_unit_directory); 1706 fw_core_remove_descriptor(&rfc2374_unit_directory);
1708 1707
1709 return err; 1708 return err;
@@ -1714,11 +1713,14 @@ static void __exit fwnet_cleanup(void)
1714{ 1713{
1715 driver_unregister(&fwnet_driver.driver); 1714 driver_unregister(&fwnet_driver.driver);
1716 kmem_cache_destroy(fwnet_packet_task_cache); 1715 kmem_cache_destroy(fwnet_packet_task_cache);
1716#if IS_ENABLED(CONFIG_IPV6)
1717 fw_core_remove_descriptor(&rfc3146_unit_directory);
1718#endif
1717 fw_core_remove_descriptor(&rfc2374_unit_directory); 1719 fw_core_remove_descriptor(&rfc2374_unit_directory);
1718} 1720}
1719module_exit(fwnet_cleanup); 1721module_exit(fwnet_cleanup);
1720 1722
1721MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); 1723MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
1722MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); 1724MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
1723MODULE_LICENSE("GPL"); 1725MODULE_LICENSE("GPL");
1724MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); 1726MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3fde52840ca..65c30ea8c1a1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
511static int send_connect(struct c4iw_ep *ep) 511static int send_connect(struct c4iw_ep *ep)
512{ 512{
513 struct cpl_act_open_req *req; 513 struct cpl_act_open_req *req;
514 struct cpl_t5_act_open_req *t5_req;
514 struct sk_buff *skb; 515 struct sk_buff *skb;
515 u64 opt0; 516 u64 opt0;
516 u32 opt2; 517 u32 opt2;
517 unsigned int mtu_idx; 518 unsigned int mtu_idx;
518 int wscale; 519 int wscale;
519 int wrlen = roundup(sizeof *req, 16); 520 int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
521 sizeof(struct cpl_act_open_req) :
522 sizeof(struct cpl_t5_act_open_req);
523 int wrlen = roundup(size, 16);
520 524
521 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 525 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
522 526
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
552 opt2 |= WND_SCALE_EN(1); 556 opt2 |= WND_SCALE_EN(1);
553 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 557 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
554 558
555 req = (struct cpl_act_open_req *) skb_put(skb, wrlen); 559 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
556 INIT_TP_WR(req, 0); 560 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
557 OPCODE_TID(req) = cpu_to_be32( 561 INIT_TP_WR(req, 0);
558 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); 562 OPCODE_TID(req) = cpu_to_be32(
559 req->local_port = ep->com.local_addr.sin_port; 563 MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
560 req->peer_port = ep->com.remote_addr.sin_port; 564 ((ep->rss_qid << 14) | ep->atid)));
561 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 565 req->local_port = ep->com.local_addr.sin_port;
562 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 566 req->peer_port = ep->com.remote_addr.sin_port;
563 req->opt0 = cpu_to_be64(opt0); 567 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
564 req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 568 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
565 req->opt2 = cpu_to_be32(opt2); 569 req->opt0 = cpu_to_be64(opt0);
570 req->params = cpu_to_be32(select_ntuple(ep->com.dev,
571 ep->dst, ep->l2t));
572 req->opt2 = cpu_to_be32(opt2);
573 } else {
574 t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
575 INIT_TP_WR(t5_req, 0);
576 OPCODE_TID(t5_req) = cpu_to_be32(
577 MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
578 ((ep->rss_qid << 14) | ep->atid)));
579 t5_req->local_port = ep->com.local_addr.sin_port;
580 t5_req->peer_port = ep->com.remote_addr.sin_port;
581 t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
582 t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
583 t5_req->opt0 = cpu_to_be64(opt0);
584 t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
585 select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
586 t5_req->opt2 = cpu_to_be32(opt2);
587 }
588
566 set_bit(ACT_OPEN_REQ, &ep->com.history); 589 set_bit(ACT_OPEN_REQ, &ep->com.history);
567 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 590 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
568} 591}
@@ -1676,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1676 case CPL_ERR_CONN_TIMEDOUT: 1699 case CPL_ERR_CONN_TIMEDOUT:
1677 break; 1700 break;
1678 case CPL_ERR_TCAM_FULL: 1701 case CPL_ERR_TCAM_FULL:
1702 dev->rdev.stats.tcam_full++;
1679 if (dev->rdev.lldi.enable_fw_ofld_conn) { 1703 if (dev->rdev.lldi.enable_fw_ofld_conn) {
1680 mutex_lock(&dev->rdev.stats.lock); 1704 mutex_lock(&dev->rdev.stats.lock);
1681 dev->rdev.stats.tcam_full++;
1682 mutex_unlock(&dev->rdev.stats.lock); 1705 mutex_unlock(&dev->rdev.stats.lock);
1683 send_fw_act_open_req(ep, 1706 send_fw_act_open_req(ep,
1684 GET_TID_TID(GET_AOPEN_ATID( 1707 GET_TID_TID(GET_AOPEN_ATID(
@@ -2875,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2875static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 2898static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2876{ 2899{
2877 u32 l2info; 2900 u32 l2info;
2878 u16 vlantag, len, hdr_len; 2901 u16 vlantag, len, hdr_len, eth_hdr_len;
2879 u8 intf; 2902 u8 intf;
2880 struct cpl_rx_pkt *cpl = cplhdr(skb); 2903 struct cpl_rx_pkt *cpl = cplhdr(skb);
2881 struct cpl_pass_accept_req *req; 2904 struct cpl_pass_accept_req *req;
2882 struct tcp_options_received tmp_opt; 2905 struct tcp_options_received tmp_opt;
2906 struct c4iw_dev *dev;
2883 2907
2908 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2884 /* Store values from cpl_rx_pkt in temporary location. */ 2909 /* Store values from cpl_rx_pkt in temporary location. */
2885 vlantag = (__force u16) cpl->vlan; 2910 vlantag = (__force u16) cpl->vlan;
2886 len = (__force u16) cpl->len; 2911 len = (__force u16) cpl->len;
@@ -2896,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2896 */ 2921 */
2897 memset(&tmp_opt, 0, sizeof(tmp_opt)); 2922 memset(&tmp_opt, 0, sizeof(tmp_opt));
2898 tcp_clear_options(&tmp_opt); 2923 tcp_clear_options(&tmp_opt);
2899 tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL); 2924 tcp_parse_options(skb, &tmp_opt, 0, NULL);
2900 2925
2901 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 2926 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
2902 memset(req, 0, sizeof(*req)); 2927 memset(req, 0, sizeof(*req));
@@ -2904,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2904 V_SYN_MAC_IDX(G_RX_MACIDX( 2929 V_SYN_MAC_IDX(G_RX_MACIDX(
2905 (__force int) htonl(l2info))) | 2930 (__force int) htonl(l2info))) |
2906 F_SYN_XACT_MATCH); 2931 F_SYN_XACT_MATCH);
2932 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
2933 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
2934 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
2907 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 2935 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
2908 (__force int) htonl(l2info))) | 2936 (__force int) htonl(l2info))) |
2909 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 2937 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
2910 (__force int) htons(hdr_len))) | 2938 (__force int) htons(hdr_len))) |
2911 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 2939 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
2912 (__force int) htons(hdr_len))) | 2940 (__force int) htons(hdr_len))) |
2913 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN( 2941 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
2914 (__force int) htonl(l2info))));
2915 req->vlan = (__force __be16) vlantag; 2942 req->vlan = (__force __be16) vlantag;
2916 req->len = (__force __be16) len; 2943 req->len = (__force __be16) len;
2917 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 2944 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2999,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
2999 u16 window; 3026 u16 window;
3000 struct port_info *pi; 3027 struct port_info *pi;
3001 struct net_device *pdev; 3028 struct net_device *pdev;
3002 u16 rss_qid; 3029 u16 rss_qid, eth_hdr_len;
3003 int step; 3030 int step;
3004 u32 tx_chan; 3031 u32 tx_chan;
3005 struct neighbour *neigh; 3032 struct neighbour *neigh;
@@ -3028,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3028 goto reject; 3055 goto reject;
3029 } 3056 }
3030 3057
3031 if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) { 3058 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3059 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
3060 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
3061 if (eth_hdr_len == ETH_HLEN) {
3032 eh = (struct ethhdr *)(req + 1); 3062 eh = (struct ethhdr *)(req + 1);
3033 iph = (struct iphdr *)(eh + 1); 3063 iph = (struct iphdr *)(eh + 1);
3034 } else { 3064 } else {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..ae656016e1ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
41#define DRV_VERSION "0.1" 41#define DRV_VERSION "0.1"
42 42
43MODULE_AUTHOR("Steve Wise"); 43MODULE_AUTHOR("Steve Wise");
44MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); 44MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
45MODULE_LICENSE("Dual BSD/GPL"); 45MODULE_LICENSE("Dual BSD/GPL");
46MODULE_VERSION(DRV_VERSION); 46MODULE_VERSION(DRV_VERSION);
47 47
48static int allow_db_fc_on_t5;
49module_param(allow_db_fc_on_t5, int, 0644);
50MODULE_PARM_DESC(allow_db_fc_on_t5,
51 "Allow DB Flow Control on T5 (default = 0)");
52
53static int allow_db_coalescing_on_t5;
54module_param(allow_db_coalescing_on_t5, int, 0644);
55MODULE_PARM_DESC(allow_db_coalescing_on_t5,
56 "Allow DB Coalescing on T5 (default = 0)");
57
48struct uld_ctx { 58struct uld_ctx {
49 struct list_head entry; 59 struct list_head entry;
50 struct cxgb4_lld_info lldi; 60 struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
614{ 624{
615 return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && 625 return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
616 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && 626 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
617 infop->vr->cq.size > 0 && infop->vr->ocq.size > 0; 627 infop->vr->cq.size > 0;
618} 628}
619 629
620static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) 630static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
627 pci_name(infop->pdev)); 637 pci_name(infop->pdev));
628 return ERR_PTR(-ENOSYS); 638 return ERR_PTR(-ENOSYS);
629 } 639 }
640 if (!ocqp_supported(infop))
641 pr_info("%s: On-Chip Queues not supported on this device.\n",
642 pci_name(infop->pdev));
643
644 if (!is_t4(infop->adapter_type)) {
645 if (!allow_db_fc_on_t5) {
646 db_fc_threshold = 100000;
647 pr_info("DB Flow Control Disabled.\n");
648 }
649
650 if (!allow_db_coalescing_on_t5) {
651 db_coalescing_threshold = -1;
652 pr_info("DB Coalescing Disabled.\n");
653 }
654 }
655
630 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); 656 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
631 if (!devp) { 657 if (!devp) {
632 printk(KERN_ERR MOD "Cannot allocate ib device\n"); 658 printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
678 int i; 704 int i;
679 705
680 if (!vers_printed++) 706 if (!vers_printed++)
681 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", 707 pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
682 DRV_VERSION); 708 DRV_VERSION);
683 709
684 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 710 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
685 if (!ctx) { 711 if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..485183ad34cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
162 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); 162 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
163} 163}
164 164
165#define C4IW_WR_TO (10*HZ) 165#define C4IW_WR_TO (30*HZ)
166 166
167struct c4iw_wr_wait { 167struct c4iw_wr_wait {
168 struct completion completion; 168 struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
369 DEFINE_DMA_UNMAP_ADDR(mapping); 369 DEFINE_DMA_UNMAP_ADDR(mapping);
370 dma_addr_t dma_addr; 370 dma_addr_t dma_addr;
371 struct c4iw_dev *dev; 371 struct c4iw_dev *dev;
372 int size;
373}; 372};
374 373
375static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( 374static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
817 return wscale; 816 return wscale;
818} 817}
819 818
819static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
820{
821#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
822 return infop->vr->ocq.size > 0;
823#else
824 return 0;
825#endif
826}
827
820u32 c4iw_id_alloc(struct c4iw_id_table *alloc); 828u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
821void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj); 829void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
822int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, 830int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
930extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; 938extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
931extern int c4iw_max_read_depth; 939extern int c4iw_max_read_depth;
932extern int db_fc_threshold; 940extern int db_fc_threshold;
941extern int db_coalescing_threshold;
942extern int use_dsgl;
933 943
934 944
935#endif 945#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91d..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/module.h>
34#include <linux/moduleparam.h>
33#include <rdma/ib_umem.h> 35#include <rdma/ib_umem.h>
34#include <linux/atomic.h> 36#include <linux/atomic.h>
35 37
36#include "iw_cxgb4.h" 38#include "iw_cxgb4.h"
37 39
40int use_dsgl = 1;
41module_param(use_dsgl, int, 0644);
42MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
43
38#define T4_ULPTX_MIN_IO 32 44#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96 45#define C4IW_MAX_INLINE_SIZE 96
46#define T4_ULPTX_MAX_DMA 1024
47#define C4IW_INLINE_THRESHOLD 128
40 48
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, 49static int inline_threshold = C4IW_INLINE_THRESHOLD;
42 void *data) 50module_param(inline_threshold, int, 0644);
51MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
52
53static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
54 u32 len, dma_addr_t data, int wait)
55{
56 struct sk_buff *skb;
57 struct ulp_mem_io *req;
58 struct ulptx_sgl *sgl;
59 u8 wr_len;
60 int ret = 0;
61 struct c4iw_wr_wait wr_wait;
62
63 addr &= 0x7FFFFFF;
64
65 if (wait)
66 c4iw_init_wr_wait(&wr_wait);
67 wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
68
69 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
70 if (!skb)
71 return -ENOMEM;
72 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
73
74 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
75 memset(req, 0, wr_len);
76 INIT_ULPTX_WR(req, wr_len, 0, 0);
77 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
78 (wait ? FW_WR_COMPL(1) : 0));
79 req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
80 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
81 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
82 req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
83 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
84 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
86
87 sgl = (struct ulptx_sgl *)(req + 1);
88 sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
89 ULPTX_NSGE(1));
90 sgl->len0 = cpu_to_be32(len);
91 sgl->addr0 = cpu_to_be64(data);
92
93 ret = c4iw_ofld_send(rdev, skb);
94 if (ret)
95 return ret;
96 if (wait)
97 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
98 return ret;
99}
100
101static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
102 void *data)
43{ 103{
44 struct sk_buff *skb; 104 struct sk_buff *skb;
45 struct ulp_mem_io *req; 105 struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
47 u8 wr_len, *to_dp, *from_dp; 107 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0; 108 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait; 109 struct c4iw_wr_wait wr_wait;
110 __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
111
112 if (is_t4(rdev->lldi.adapter_type))
113 cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
114 else
115 cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
50 116
51 addr &= 0x7FFFFFF; 117 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len); 118 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
77 req->wr.wr_mid = cpu_to_be32( 143 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); 144 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79 145
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23)); 146 req->cmd = cmd;
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( 147 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); 148 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 149 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
107 return ret; 173 return ret;
108} 174}
109 175
176int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
177{
178 u32 remain = len;
179 u32 dmalen;
180 int ret = 0;
181 dma_addr_t daddr;
182 dma_addr_t save;
183
184 daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
185 if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
186 return -1;
187 save = daddr;
188
189 while (remain > inline_threshold) {
190 if (remain < T4_ULPTX_MAX_DMA) {
191 if (remain & ~T4_ULPTX_MIN_IO)
192 dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
193 else
194 dmalen = remain;
195 } else
196 dmalen = T4_ULPTX_MAX_DMA;
197 remain -= dmalen;
198 ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
199 !remain);
200 if (ret)
201 goto out;
202 addr += dmalen >> 5;
203 data += dmalen;
204 daddr += dmalen;
205 }
206 if (remain)
207 ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
208out:
209 dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
210 return ret;
211}
212
213/*
214 * write len bytes of data into addr (32B aligned address)
215 * If data is NULL, clear len byte of memory to zero.
216 */
217static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
218 void *data)
219{
220 if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
221 if (len > inline_threshold) {
222 if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
223 printk_ratelimited(KERN_WARNING
224 "%s: dma map"
225 " failure (non fatal)\n",
226 pci_name(rdev->lldi.pdev));
227 return _c4iw_write_mem_inline(rdev, addr, len,
228 data);
229 } else
230 return 0;
231 } else
232 return _c4iw_write_mem_inline(rdev, addr, len, data);
233 } else
234 return _c4iw_write_mem_inline(rdev, addr, len, data);
235}
236
110/* 237/*
111 * Build and write a TPT entry. 238 * Build and write a TPT entry.
112 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, 239 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
760 struct c4iw_fr_page_list *c4pl; 887 struct c4iw_fr_page_list *c4pl;
761 struct c4iw_dev *dev = to_c4iw_dev(device); 888 struct c4iw_dev *dev = to_c4iw_dev(device);
762 dma_addr_t dma_addr; 889 dma_addr_t dma_addr;
763 int size = sizeof *c4pl + page_list_len * sizeof(u64); 890 int pll_len = roundup(page_list_len * sizeof(u64), 32);
764 891
765 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size, 892 c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
766 &dma_addr, GFP_KERNEL);
767 if (!c4pl) 893 if (!c4pl)
768 return ERR_PTR(-ENOMEM); 894 return ERR_PTR(-ENOMEM);
769 895
896 c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
897 pll_len, &dma_addr,
898 GFP_KERNEL);
899 if (!c4pl->ibpl.page_list) {
900 kfree(c4pl);
901 return ERR_PTR(-ENOMEM);
902 }
770 dma_unmap_addr_set(c4pl, mapping, dma_addr); 903 dma_unmap_addr_set(c4pl, mapping, dma_addr);
771 c4pl->dma_addr = dma_addr; 904 c4pl->dma_addr = dma_addr;
772 c4pl->dev = dev; 905 c4pl->dev = dev;
773 c4pl->size = size; 906 c4pl->ibpl.max_page_list_len = pll_len;
774 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
775 c4pl->ibpl.max_page_list_len = page_list_len;
776 907
777 return &c4pl->ibpl; 908 return &c4pl->ibpl;
778} 909}
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
781{ 912{
782 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); 913 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
783 914
784 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size, 915 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
785 c4pl, dma_unmap_addr(c4pl, mapping)); 916 c4pl->ibpl.max_page_list_len,
917 c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
918 kfree(c4pl);
786} 919}
787 920
788int c4iw_dereg_mr(struct ib_mr *ib_mr) 921int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
162 */ 162 */
163 if (addr >= rdev->oc_mw_pa) 163 if (addr >= rdev->oc_mw_pa)
164 vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); 164 vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
165 else 165 else {
166 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 166 if (is_t5(rdev->lldi.adapter_type))
167 vma->vm_page_prot =
168 t4_pgprot_wc(vma->vm_page_prot);
169 else
170 vma->vm_page_prot =
171 pgprot_noncached(vma->vm_page_prot);
172 }
167 ret = io_remap_pfn_range(vma, vma->vm_start, 173 ret = io_remap_pfn_range(vma, vma->vm_start,
168 addr >> PAGE_SHIFT, 174 addr >> PAGE_SHIFT,
169 len, vma->vm_page_prot); 175 len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
263 dev = to_c4iw_dev(ibdev); 269 dev = to_c4iw_dev(ibdev);
264 memset(props, 0, sizeof *props); 270 memset(props, 0, sizeof *props);
265 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); 271 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
266 props->hw_ver = dev->rdev.lldi.adapter_type; 272 props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
267 props->fw_ver = dev->rdev.lldi.fw_vers; 273 props->fw_ver = dev->rdev.lldi.fw_vers;
268 props->device_cap_flags = dev->device_cap_flags; 274 props->device_cap_flags = dev->device_cap_flags;
269 props->page_size_cap = T4_PAGESIZE_MASK; 275 props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
346 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, 352 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
347 ibdev.dev); 353 ibdev.dev);
348 PDBG("%s dev 0x%p\n", __func__, dev); 354 PDBG("%s dev 0x%p\n", __func__, dev);
349 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type); 355 return sprintf(buf, "%d\n",
356 CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
350} 357}
351 358
352static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, 359static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f4..5b059e2d80cc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
42module_param(ocqp_support, int, 0644); 42module_param(ocqp_support, int, 0644);
43MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); 43MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
44 44
45int db_fc_threshold = 2000; 45int db_fc_threshold = 1000;
46module_param(db_fc_threshold, int, 0644); 46module_param(db_fc_threshold, int, 0644);
47MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic " 47MODULE_PARM_DESC(db_fc_threshold,
48 "db flow control mode (default = 2000)"); 48 "QP count/threshold that triggers"
49 " automatic db flow control mode (default = 1000)");
50
51int db_coalescing_threshold;
52module_param(db_coalescing_threshold, int, 0644);
53MODULE_PARM_DESC(db_coalescing_threshold,
54 "QP count/threshold that triggers"
55 " disabling db coalescing (default = 0)");
56
57static int max_fr_immd = T4_MAX_FR_IMMD;
58module_param(max_fr_immd, int, 0644);
59MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
49 60
50static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) 61static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
51{ 62{
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
76 87
77static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) 88static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
78{ 89{
79 if (!ocqp_support || !t4_ocqp_supported()) 90 if (!ocqp_support || !ocqp_supported(&rdev->lldi))
80 return -ENOSYS; 91 return -ENOSYS;
81 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); 92 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
82 if (!sq->dma_addr) 93 if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
129 int wr_len; 140 int wr_len;
130 struct c4iw_wr_wait wr_wait; 141 struct c4iw_wr_wait wr_wait;
131 struct sk_buff *skb; 142 struct sk_buff *skb;
132 int ret; 143 int ret = 0;
133 int eqsize; 144 int eqsize;
134 145
135 wq->sq.qid = c4iw_get_qpid(rdev, uctx); 146 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,17 +180,14 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
169 } 180 }
170 181
171 if (user) { 182 if (user) {
172 ret = alloc_oc_sq(rdev, &wq->sq); 183 if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
173 if (ret)
174 goto free_hwaddr; 184 goto free_hwaddr;
175 185 } else {
176 ret = alloc_host_sq(rdev, &wq->sq);
177 if (ret)
178 goto free_sq;
179 } else
180 ret = alloc_host_sq(rdev, &wq->sq); 186 ret = alloc_host_sq(rdev, &wq->sq);
181 if (ret) 187 if (ret)
182 goto free_hwaddr; 188 goto free_hwaddr;
189 }
190
183 memset(wq->sq.queue, 0, wq->sq.memsize); 191 memset(wq->sq.queue, 0, wq->sq.memsize);
184 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); 192 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
185 193
@@ -534,7 +542,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
534} 542}
535 543
536static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, 544static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
537 struct ib_send_wr *wr, u8 *len16) 545 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
538{ 546{
539 547
540 struct fw_ri_immd *imdp; 548 struct fw_ri_immd *imdp;
@@ -556,28 +564,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
556 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); 564 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
557 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & 565 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
558 0xffffffff); 566 0xffffffff);
559 WARN_ON(pbllen > T4_MAX_FR_IMMD); 567
560 imdp = (struct fw_ri_immd *)(&wqe->fr + 1); 568 if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
561 imdp->op = FW_RI_DATA_IMMD; 569 struct c4iw_fr_page_list *c4pl =
562 imdp->r1 = 0; 570 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
563 imdp->r2 = 0; 571 struct fw_ri_dsgl *sglp;
564 imdp->immdlen = cpu_to_be32(pbllen); 572
565 p = (__be64 *)(imdp + 1); 573 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
566 rem = pbllen; 574 wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
567 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { 575 cpu_to_be64((u64)
568 *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); 576 wr->wr.fast_reg.page_list->page_list[i]);
569 rem -= sizeof *p; 577 }
570 if (++p == (__be64 *)&sq->queue[sq->size]) 578
571 p = (__be64 *)sq->queue; 579 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
572 } 580 sglp->op = FW_RI_DATA_DSGL;
573 BUG_ON(rem < 0); 581 sglp->r1 = 0;
574 while (rem) { 582 sglp->nsge = cpu_to_be16(1);
575 *p = 0; 583 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
576 rem -= sizeof *p; 584 sglp->len0 = cpu_to_be32(pbllen);
577 if (++p == (__be64 *)&sq->queue[sq->size]) 585
578 p = (__be64 *)sq->queue; 586 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
587 } else {
588 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
589 imdp->op = FW_RI_DATA_IMMD;
590 imdp->r1 = 0;
591 imdp->r2 = 0;
592 imdp->immdlen = cpu_to_be32(pbllen);
593 p = (__be64 *)(imdp + 1);
594 rem = pbllen;
595 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
596 *p = cpu_to_be64(
597 (u64)wr->wr.fast_reg.page_list->page_list[i]);
598 rem -= sizeof(*p);
599 if (++p == (__be64 *)&sq->queue[sq->size])
600 p = (__be64 *)sq->queue;
601 }
602 BUG_ON(rem < 0);
603 while (rem) {
604 *p = 0;
605 rem -= sizeof(*p);
606 if (++p == (__be64 *)&sq->queue[sq->size])
607 p = (__be64 *)sq->queue;
608 }
609 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
610 + pbllen, 16);
579 } 611 }
580 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
581 return 0; 612 return 0;
582} 613}
583 614
@@ -678,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
678 case IB_WR_FAST_REG_MR: 709 case IB_WR_FAST_REG_MR:
679 fw_opcode = FW_RI_FR_NSMR_WR; 710 fw_opcode = FW_RI_FR_NSMR_WR;
680 swsqe->opcode = FW_RI_FAST_REGISTER; 711 swsqe->opcode = FW_RI_FAST_REGISTER;
681 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16); 712 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
713 is_t5(
714 qhp->rhp->rdev.lldi.adapter_type) ?
715 1 : 0);
682 break; 716 break;
683 case IB_WR_LOCAL_INV: 717 case IB_WR_LOCAL_INV:
684 if (wr->send_flags & IB_SEND_FENCE) 718 if (wr->send_flags & IB_SEND_FENCE)
@@ -1450,6 +1484,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1450 rhp->db_state = NORMAL; 1484 rhp->db_state = NORMAL;
1451 idr_for_each(&rhp->qpidr, enable_qp_db, NULL); 1485 idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
1452 } 1486 }
1487 if (db_coalescing_threshold >= 0)
1488 if (rhp->qpcnt <= db_coalescing_threshold)
1489 cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
1453 spin_unlock_irq(&rhp->lock); 1490 spin_unlock_irq(&rhp->lock);
1454 atomic_dec(&qhp->refcnt); 1491 atomic_dec(&qhp->refcnt);
1455 wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); 1492 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1561,11 +1598,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1561 spin_lock_irq(&rhp->lock); 1598 spin_lock_irq(&rhp->lock);
1562 if (rhp->db_state != NORMAL) 1599 if (rhp->db_state != NORMAL)
1563 t4_disable_wq_db(&qhp->wq); 1600 t4_disable_wq_db(&qhp->wq);
1564 if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { 1601 rhp->qpcnt++;
1602 if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
1565 rhp->rdev.stats.db_state_transitions++; 1603 rhp->rdev.stats.db_state_transitions++;
1566 rhp->db_state = FLOW_CONTROL; 1604 rhp->db_state = FLOW_CONTROL;
1567 idr_for_each(&rhp->qpidr, disable_qp_db, NULL); 1605 idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
1568 } 1606 }
1607 if (db_coalescing_threshold >= 0)
1608 if (rhp->qpcnt > db_coalescing_threshold)
1609 cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
1569 ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1610 ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1570 spin_unlock_irq(&rhp->lock); 1611 spin_unlock_irq(&rhp->lock);
1571 if (ret) 1612 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..ebcb03bd1b72 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) 84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ 85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
86 sizeof(struct fw_ri_immd)) & ~31UL) 86 sizeof(struct fw_ri_immd)) & ~31UL)
87#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) 87#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
88 88
89#define T4_RQ_NUM_SLOTS 2 89#define T4_RQ_NUM_SLOTS 2
90#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) 90#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
280#endif 280#endif
281} 281}
282 282
283static inline int t4_ocqp_supported(void)
284{
285#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
286 return 1;
287#else
288 return 0;
289#endif
290}
291
292enum { 283enum {
293 T4_SQ_ONCHIP = (1<<0), 284 T4_SQ_ONCHIP = (1<<0),
294}; 285};
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ae67df35dd4d..73b3a7132587 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -228,7 +228,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
228 vector = dev->eq_table[vector % ibdev->num_comp_vectors]; 228 vector = dev->eq_table[vector % ibdev->num_comp_vectors];
229 229
230 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, 230 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
231 cq->db.dma, &cq->mcq, vector, 0); 231 cq->db.dma, &cq->mcq, vector, 0, 0);
232 if (err) 232 if (err)
233 goto err_dbmap; 233 goto err_dbmap;
234 234
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 67647e264611..418004c93feb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2948 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n", 2948 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
2949 nesvnic->netdev->name, vlan_tag); 2949 nesvnic->netdev->name, vlan_tag);
2950 2950
2951 __vlan_hwaccel_put_tag(rx_skb, vlan_tag); 2951 __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
2952 } 2952 }
2953 if (nes_use_lro) 2953 if (nes_use_lro)
2954 lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); 2954 lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 85cf4d1ac442..49eb5111d2cd 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
1599 1599
1600 /* Enable/Disable VLAN Stripping */ 1600 /* Enable/Disable VLAN Stripping */
1601 u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); 1601 u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
1602 if (features & NETIF_F_HW_VLAN_RX) 1602 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1603 u32temp &= 0xfdffffff; 1603 u32temp &= 0xfdffffff;
1604 else 1604 else
1605 u32temp |= 0x02000000; 1605 u32temp |= 0x02000000;
@@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat
1614 * Since there is no support for separate rx/tx vlan accel 1614 * Since there is no support for separate rx/tx vlan accel
1615 * enable/disable make sure tx flag is always in same state as rx. 1615 * enable/disable make sure tx flag is always in same state as rx.
1616 */ 1616 */
1617 if (features & NETIF_F_HW_VLAN_RX) 1617 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1618 features |= NETIF_F_HW_VLAN_TX; 1618 features |= NETIF_F_HW_VLAN_CTAG_TX;
1619 else 1619 else
1620 features &= ~NETIF_F_HW_VLAN_TX; 1620 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1621 1621
1622 return features; 1622 return features;
1623} 1623}
@@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature
1628 struct nes_device *nesdev = nesvnic->nesdev; 1628 struct nes_device *nesdev = nesvnic->nesdev;
1629 u32 changed = netdev->features ^ features; 1629 u32 changed = netdev->features ^ features;
1630 1630
1631 if (changed & NETIF_F_HW_VLAN_RX) 1631 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1632 nes_vlan_mode(netdev, nesdev, features); 1632 nes_vlan_mode(netdev, nesdev, features);
1633 1633
1634 return 0; 1634 return 0;
@@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1706 netdev->dev_addr[4] = (u8)(u64temp>>8); 1706 netdev->dev_addr[4] = (u8)(u64temp>>8);
1707 netdev->dev_addr[5] = (u8)u64temp; 1707 netdev->dev_addr[5] = (u8)u64temp;
1708 1708
1709 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; 1709 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
1710 if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) 1710 if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
1711 netdev->hw_features |= NETIF_F_TSO; 1711 netdev->hw_features |= NETIF_F_TSO;
1712 1712
1713 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX; 1713 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
1714 netdev->hw_features |= NETIF_F_LRO; 1714 netdev->hw_features |= NETIF_F_LRO;
1715 1715
1716 nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," 1716 nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8534afd04e7c..554b9063da54 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -730,7 +730,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
730 if ((header->proto != htons(ETH_P_IP)) && 730 if ((header->proto != htons(ETH_P_IP)) &&
731 (header->proto != htons(ETH_P_IPV6)) && 731 (header->proto != htons(ETH_P_IPV6)) &&
732 (header->proto != htons(ETH_P_ARP)) && 732 (header->proto != htons(ETH_P_ARP)) &&
733 (header->proto != htons(ETH_P_RARP))) { 733 (header->proto != htons(ETH_P_RARP)) &&
734 (header->proto != htons(ETH_P_TIPC))) {
734 /* ethertype not supported by IPoIB */ 735 /* ethertype not supported by IPoIB */
735 ++dev->stats.tx_dropped; 736 ++dev->stats.tx_dropped;
736 dev_kfree_skb_any(skb); 737 dev_kfree_skb_any(skb);
@@ -751,6 +752,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
751 switch (header->proto) { 752 switch (header->proto) {
752 case htons(ETH_P_IP): 753 case htons(ETH_P_IP):
753 case htons(ETH_P_IPV6): 754 case htons(ETH_P_IPV6):
755 case htons(ETH_P_TIPC):
754 neigh = ipoib_neigh_get(dev, cb->hwaddr); 756 neigh = ipoib_neigh_get(dev, cb->hwaddr);
755 if (unlikely(!neigh)) { 757 if (unlikely(!neigh)) {
756 neigh_add_path(skb, cb->hwaddr, dev); 758 neigh_add_path(skb, cb->hwaddr, dev);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 832bc807ed20..cc9f1927a322 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -469,8 +469,7 @@ static int capidrv_add_ack(struct capidrv_ncci *nccip,
469{ 469{
470 struct ncci_datahandle_queue *n, **pp; 470 struct ncci_datahandle_queue *n, **pp;
471 471
472 n = (struct ncci_datahandle_queue *) 472 n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
473 kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
474 if (!n) { 473 if (!n) {
475 printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n"); 474 printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
476 return -1; 475 return -1;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index db432e635496..50749a70c5ca 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -441,8 +441,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
441 441
442 switch (dv->rule.action) { 442 switch (dv->rule.action) {
443 case DEFLECT_IGNORE: 443 case DEFLECT_IGNORE:
444 return (0); 444 return 0;
445 break;
446 445
447 case DEFLECT_ALERT: 446 case DEFLECT_ALERT:
448 case DEFLECT_PROCEED: 447 case DEFLECT_PROCEED:
@@ -510,10 +509,9 @@ static int isdn_divert_icall(isdn_ctrl *ic)
510 break; 509 break;
511 510
512 default: 511 default:
513 return (0); /* ignore call */ 512 return 0; /* ignore call */
514 break;
515 } /* switch action */ 513 } /* switch action */
516 break; 514 break; /* will break the 'for' looping */
517 } /* scan_table */ 515 } /* scan_table */
518 516
519 if (cs) { 517 if (cs) {
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index 1bb291021fdb..c7a94713e9ec 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -26,7 +26,7 @@ FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
26{ 26{
27 int i; 27 int i;
28 28
29 fsm->jumpmatrix = (FSMFNPTR *) 29 fsm->jumpmatrix =
30 kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL); 30 kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
31 if (!fsm->jumpmatrix) 31 if (!fsm->jumpmatrix)
32 return -ENOMEM; 32 return -ENOMEM;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 90f34ae2b80f..dc4574f735ef 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1479,7 +1479,7 @@ int setup_hfcsx(struct IsdnCard *card)
1479 release_region(cs->hw.hfcsx.base, 2); 1479 release_region(cs->hw.hfcsx.base, 2);
1480 return (0); 1480 return (0);
1481 } 1481 }
1482 if (!(cs->hw.hfcsx.extra = (void *) 1482 if (!(cs->hw.hfcsx.extra =
1483 kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) { 1483 kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
1484 release_region(cs->hw.hfcsx.base, 2); 1484 release_region(cs->hw.hfcsx.base, 2);
1485 printk(KERN_WARNING "HFC-SX: unable to allocate memory\n"); 1485 printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index babc621a07fb..88d657dff474 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1385,7 +1385,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
1385 if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN)) 1385 if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
1386 skb->pkt_type = PACKET_OTHERHOST; 1386 skb->pkt_type = PACKET_OTHERHOST;
1387 } 1387 }
1388 if (ntohs(eth->h_proto) >= 1536) 1388 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
1389 return eth->h_proto; 1389 return eth->h_proto;
1390 1390
1391 rawp = skb->data; 1391 rawp = skb->data;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 8b07f83d48ad..e47dcb9d1e91 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -578,6 +578,7 @@ data_sock_getname(struct socket *sock, struct sockaddr *addr,
578 lock_sock(sk); 578 lock_sock(sk);
579 579
580 *addr_len = sizeof(*maddr); 580 *addr_len = sizeof(*maddr);
581 maddr->family = AF_ISDN;
581 maddr->dev = _pms(sk)->dev->id; 582 maddr->dev = _pms(sk)->dev->id;
582 maddr->channel = _pms(sk)->ch.nr; 583 maddr->channel = _pms(sk)->ch.nr;
583 maddr->sapi = _pms(sk)->ch.addr & 0xff; 584 maddr->sapi = _pms(sk)->ch.addr & 0xff;
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 6b580b2c717f..ca997bd4e818 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -33,8 +33,8 @@ static unsigned long ram[] = {0, 0, 0, 0};
33static bool do_reset = 0; 33static bool do_reset = 0;
34 34
35module_param_array(io, int, NULL, 0); 35module_param_array(io, int, NULL, 0);
36module_param_array(irq, int, NULL, 0); 36module_param_array(irq, byte, NULL, 0);
37module_param_array(ram, int, NULL, 0); 37module_param_array(ram, long, NULL, 0);
38module_param(do_reset, bool, 0); 38module_param(do_reset, bool, 0);
39 39
40static int identify_board(unsigned long, unsigned int); 40static int identify_board(unsigned long, unsigned int);
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index e17cb85d3ecf..c3cc3b52662b 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -185,7 +185,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
185 skb->pkt_type=PACKET_MULTICAST; 185 skb->pkt_type=PACKET_MULTICAST;
186 } 186 }
187 187
188 if (ntohs(eth->h_proto) >= 1536) 188 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
189 return eth->h_proto; 189 return eth->h_proto;
190 190
191 rawp = skb->data; 191 rawp = skb->data;
@@ -228,9 +228,9 @@ static int ule_test_sndu( struct dvb_net_priv *p )
228static int ule_bridged_sndu( struct dvb_net_priv *p ) 228static int ule_bridged_sndu( struct dvb_net_priv *p )
229{ 229{
230 struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr; 230 struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
231 if(ntohs(hdr->h_proto) < 1536) { 231 if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
232 int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data); 232 int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
233 /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */ 233 /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
234 if(framelen != ntohs(hdr->h_proto)) { 234 if(framelen != ntohs(hdr->h_proto)) {
235 return -1; 235 return -1;
236 } 236 }
@@ -320,7 +320,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
320 (int) p->ule_sndu_type, l, total_ext_len); 320 (int) p->ule_sndu_type, l, total_ext_len);
321#endif 321#endif
322 322
323 } while (p->ule_sndu_type < 1536); 323 } while (p->ule_sndu_type < ETH_P_802_3_MIN);
324 324
325 return total_ext_len; 325 return total_ext_len;
326} 326}
@@ -712,7 +712,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
712 } 712 }
713 713
714 /* Handle ULE Extension Headers. */ 714 /* Handle ULE Extension Headers. */
715 if (priv->ule_sndu_type < 1536) { 715 if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
716 /* There is an extension header. Handle it accordingly. */ 716 /* There is an extension header. Handle it accordingly. */
717 int l = handle_ule_extensions(priv); 717 int l = handle_ule_extensions(priv);
718 if (l < 0) { 718 if (l < 0) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 87f1d39ca551..3835321b8cf3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -151,6 +151,7 @@ config MACVTAP
151config VXLAN 151config VXLAN
152 tristate "Virtual eXtensible Local Area Network (VXLAN)" 152 tristate "Virtual eXtensible Local Area Network (VXLAN)"
153 depends on INET 153 depends on INET
154 select NET_IP_TUNNEL
154 ---help--- 155 ---help---
155 This allows one to create vxlan virtual interfaces that provide 156 This allows one to create vxlan virtual interfaces that provide
156 Layer 2 Networks over Layer 3 Networks. VXLAN is often used 157 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index f5a89164e779..4ce6ca5f3d36 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -106,20 +106,4 @@ config IPDDP_ENCAP
106 IP packets inside AppleTalk frames; this is useful if your Linux box 106 IP packets inside AppleTalk frames; this is useful if your Linux box
107 is stuck on an AppleTalk network (which hopefully contains a 107 is stuck on an AppleTalk network (which hopefully contains a
108 decapsulator somewhere). Please see 108 decapsulator somewhere). Please see
109 <file:Documentation/networking/ipddp.txt> for more information. If 109 <file:Documentation/networking/ipddp.txt> for more information.
110 you said Y to "AppleTalk-IP driver support" above and you say Y
111 here, then you cannot say Y to "AppleTalk-IP to IP Decapsulation
112 support", below.
113
114config IPDDP_DECAP
115 bool "Appletalk-IP to IP Decapsulation support"
116 depends on IPDDP
117 help
118 If you say Y here, the AppleTalk-IP code will be able to decapsulate
119 AppleTalk-IP frames to IP packets; this is useful if you want your
120 Linux box to act as an Internet gateway for an AppleTalk network.
121 Please see <file:Documentation/networking/ipddp.txt> for more
122 information. If you said Y to "AppleTalk-IP driver support" above
123 and you say Y here, then you cannot say Y to "IP to AppleTalk-IP
124 Encapsulation support", above.
125
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f5e052723029..e02cc265723a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -514,7 +514,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
514 skb->dev = client_info->slave->dev; 514 skb->dev = client_info->slave->dev;
515 515
516 if (client_info->tag) { 516 if (client_info->tag) {
517 skb = vlan_put_tag(skb, client_info->vlan_id); 517 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
518 if (!skb) { 518 if (!skb) {
519 pr_err("%s: Error: failed to insert VLAN tag\n", 519 pr_err("%s: Error: failed to insert VLAN tag\n",
520 client_info->slave->bond->dev->name); 520 client_info->slave->bond->dev->name);
@@ -1014,7 +1014,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
1014 continue; 1014 continue;
1015 } 1015 }
1016 1016
1017 skb = vlan_put_tag(skb, vlan->vlan_id); 1017 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
1018 if (!skb) { 1018 if (!skb) {
1019 pr_err("%s: Error: failed to insert VLAN tag\n", 1019 pr_err("%s: Error: failed to insert VLAN tag\n",
1020 bond->dev->name); 1020 bond->dev->name);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index dbbea0eec134..d0aade04e49a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,14 +428,15 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
428 * @bond_dev: bonding net device that got called 428 * @bond_dev: bonding net device that got called
429 * @vid: vlan id being added 429 * @vid: vlan id being added
430 */ 430 */
431static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) 431static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
432 __be16 proto, u16 vid)
432{ 433{
433 struct bonding *bond = netdev_priv(bond_dev); 434 struct bonding *bond = netdev_priv(bond_dev);
434 struct slave *slave, *stop_at; 435 struct slave *slave, *stop_at;
435 int i, res; 436 int i, res;
436 437
437 bond_for_each_slave(bond, slave, i) { 438 bond_for_each_slave(bond, slave, i) {
438 res = vlan_vid_add(slave->dev, vid); 439 res = vlan_vid_add(slave->dev, proto, vid);
439 if (res) 440 if (res)
440 goto unwind; 441 goto unwind;
441 } 442 }
@@ -453,7 +454,7 @@ unwind:
453 /* unwind from head to the slave that failed */ 454 /* unwind from head to the slave that failed */
454 stop_at = slave; 455 stop_at = slave;
455 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) 456 bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
456 vlan_vid_del(slave->dev, vid); 457 vlan_vid_del(slave->dev, proto, vid);
457 458
458 return res; 459 return res;
459} 460}
@@ -463,14 +464,15 @@ unwind:
463 * @bond_dev: bonding net device that got called 464 * @bond_dev: bonding net device that got called
464 * @vid: vlan id being removed 465 * @vid: vlan id being removed
465 */ 466 */
466static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) 467static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
468 __be16 proto, u16 vid)
467{ 469{
468 struct bonding *bond = netdev_priv(bond_dev); 470 struct bonding *bond = netdev_priv(bond_dev);
469 struct slave *slave; 471 struct slave *slave;
470 int i, res; 472 int i, res;
471 473
472 bond_for_each_slave(bond, slave, i) 474 bond_for_each_slave(bond, slave, i)
473 vlan_vid_del(slave->dev, vid); 475 vlan_vid_del(slave->dev, proto, vid);
474 476
475 res = bond_del_vlan(bond, vid); 477 res = bond_del_vlan(bond, vid);
476 if (res) { 478 if (res) {
@@ -488,7 +490,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
488 int res; 490 int res;
489 491
490 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 492 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
491 res = vlan_vid_add(slave_dev, vlan->vlan_id); 493 res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
494 vlan->vlan_id);
492 if (res) 495 if (res)
493 pr_warning("%s: Failed to add vlan id %d to device %s\n", 496 pr_warning("%s: Failed to add vlan id %d to device %s\n",
494 bond->dev->name, vlan->vlan_id, 497 bond->dev->name, vlan->vlan_id,
@@ -504,7 +507,7 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
504 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 507 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
505 if (!vlan->vlan_id) 508 if (!vlan->vlan_id)
506 continue; 509 continue;
507 vlan_vid_del(slave_dev, vlan->vlan_id); 510 vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
508 } 511 }
509} 512}
510 513
@@ -779,7 +782,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
779 782
780 /* rejoin all groups on vlan devices */ 783 /* rejoin all groups on vlan devices */
781 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 784 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
782 vlan_dev = __vlan_find_dev_deep(bond_dev, 785 vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
783 vlan->vlan_id); 786 vlan->vlan_id);
784 if (vlan_dev) 787 if (vlan_dev)
785 __bond_resend_igmp_join_requests(vlan_dev); 788 __bond_resend_igmp_join_requests(vlan_dev);
@@ -796,9 +799,8 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
796{ 799{
797 struct bonding *bond = container_of(work, struct bonding, 800 struct bonding *bond = container_of(work, struct bonding,
798 mcast_work.work); 801 mcast_work.work);
799 rcu_read_lock(); 802
800 bond_resend_igmp_join_requests(bond); 803 bond_resend_igmp_join_requests(bond);
801 rcu_read_unlock();
802} 804}
803 805
804/* 806/*
@@ -1915,14 +1917,16 @@ err_detach:
1915 bond_detach_slave(bond, new_slave); 1917 bond_detach_slave(bond, new_slave);
1916 if (bond->primary_slave == new_slave) 1918 if (bond->primary_slave == new_slave)
1917 bond->primary_slave = NULL; 1919 bond->primary_slave = NULL;
1918 write_unlock_bh(&bond->lock);
1919 if (bond->curr_active_slave == new_slave) { 1920 if (bond->curr_active_slave == new_slave) {
1921 bond_change_active_slave(bond, NULL);
1922 write_unlock_bh(&bond->lock);
1920 read_lock(&bond->lock); 1923 read_lock(&bond->lock);
1921 write_lock_bh(&bond->curr_slave_lock); 1924 write_lock_bh(&bond->curr_slave_lock);
1922 bond_change_active_slave(bond, NULL);
1923 bond_select_active_slave(bond); 1925 bond_select_active_slave(bond);
1924 write_unlock_bh(&bond->curr_slave_lock); 1926 write_unlock_bh(&bond->curr_slave_lock);
1925 read_unlock(&bond->lock); 1927 read_unlock(&bond->lock);
1928 } else {
1929 write_unlock_bh(&bond->lock);
1926 } 1930 }
1927 slave_disable_netpoll(new_slave); 1931 slave_disable_netpoll(new_slave);
1928 1932
@@ -2532,7 +2536,8 @@ static int bond_has_this_ip(struct bonding *bond, __be32 ip)
2532 2536
2533 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2537 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2534 rcu_read_lock(); 2538 rcu_read_lock();
2535 vlan_dev = __vlan_find_dev_deep(bond->dev, vlan->vlan_id); 2539 vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
2540 vlan->vlan_id);
2536 rcu_read_unlock(); 2541 rcu_read_unlock();
2537 if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip)) 2542 if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
2538 return 1; 2543 return 1;
@@ -2561,7 +2566,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2561 return; 2566 return;
2562 } 2567 }
2563 if (vlan_id) { 2568 if (vlan_id) {
2564 skb = vlan_put_tag(skb, vlan_id); 2569 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2565 if (!skb) { 2570 if (!skb) {
2566 pr_err("failed to insert VLAN tag\n"); 2571 pr_err("failed to insert VLAN tag\n");
2567 return; 2572 return;
@@ -2623,6 +2628,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2623 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2628 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2624 rcu_read_lock(); 2629 rcu_read_lock();
2625 vlan_dev = __vlan_find_dev_deep(bond->dev, 2630 vlan_dev = __vlan_find_dev_deep(bond->dev,
2631 htons(ETH_P_8021Q),
2626 vlan->vlan_id); 2632 vlan->vlan_id);
2627 rcu_read_unlock(); 2633 rcu_read_unlock();
2628 if (vlan_dev == rt->dst.dev) { 2634 if (vlan_dev == rt->dst.dev) {
@@ -4258,6 +4264,37 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4258 } 4264 }
4259} 4265}
4260 4266
4267static int bond_ethtool_get_settings(struct net_device *bond_dev,
4268 struct ethtool_cmd *ecmd)
4269{
4270 struct bonding *bond = netdev_priv(bond_dev);
4271 struct slave *slave;
4272 int i;
4273 unsigned long speed = 0;
4274
4275 ecmd->duplex = DUPLEX_UNKNOWN;
4276 ecmd->port = PORT_OTHER;
4277
4278 /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
4279 * do not need to check mode. Though link speed might not represent
4280 * the true receive or transmit bandwidth (not all modes are symmetric)
4281 * this is an accurate maximum.
4282 */
4283 read_lock(&bond->lock);
4284 bond_for_each_slave(bond, slave, i) {
4285 if (SLAVE_IS_OK(slave)) {
4286 if (slave->speed != SPEED_UNKNOWN)
4287 speed += slave->speed;
4288 if (ecmd->duplex == DUPLEX_UNKNOWN &&
4289 slave->duplex != DUPLEX_UNKNOWN)
4290 ecmd->duplex = slave->duplex;
4291 }
4292 }
4293 ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
4294 read_unlock(&bond->lock);
4295 return 0;
4296}
4297
4261static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, 4298static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4262 struct ethtool_drvinfo *drvinfo) 4299 struct ethtool_drvinfo *drvinfo)
4263{ 4300{
@@ -4269,6 +4306,7 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4269 4306
4270static const struct ethtool_ops bond_ethtool_ops = { 4307static const struct ethtool_ops bond_ethtool_ops = {
4271 .get_drvinfo = bond_ethtool_get_drvinfo, 4308 .get_drvinfo = bond_ethtool_get_drvinfo,
4309 .get_settings = bond_ethtool_get_settings,
4272 .get_link = ethtool_op_get_link, 4310 .get_link = ethtool_op_get_link,
4273}; 4311};
4274 4312
@@ -4359,9 +4397,9 @@ static void bond_setup(struct net_device *bond_dev)
4359 */ 4397 */
4360 4398
4361 bond_dev->hw_features = BOND_VLAN_FEATURES | 4399 bond_dev->hw_features = BOND_VLAN_FEATURES |
4362 NETIF_F_HW_VLAN_TX | 4400 NETIF_F_HW_VLAN_CTAG_TX |
4363 NETIF_F_HW_VLAN_RX | 4401 NETIF_F_HW_VLAN_CTAG_RX |
4364 NETIF_F_HW_VLAN_FILTER; 4402 NETIF_F_HW_VLAN_CTAG_FILTER;
4365 4403
4366 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); 4404 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
4367 bond_dev->features |= bond_dev->hw_features; 4405 bond_dev->features |= bond_dev->hw_features;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c9..a966128c2a7a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
32 help to synchronize to the next transfer in case of over or under-runs. 32 help to synchronize to the next transfer in case of over or under-runs.
33 This option also needs to be enabled on the modem. 33 This option also needs to be enabled on the modem.
34 34
35config CAIF_SHM
36 tristate "CAIF shared memory protocol driver"
37 depends on CAIF && U5500_MBOX
38 default n
39 ---help---
40 The CAIF shared memory protocol driver for the STE UX5500 platform.
41
42config CAIF_HSI 35config CAIF_HSI
43 tristate "CAIF HSI transport driver" 36 tristate "CAIF HSI transport driver"
44 depends on CAIF 37 depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..15a9d2fc753d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
7cfspi_slave-objs := caif_spi.o caif_spi_slave.o 7cfspi_slave-objs := caif_spi.o caif_spi_slave.o
8obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o 8obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
9 9
10# Shared memory
11caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
12obj-$(CONFIG_CAIF_SHM) += caif_shm.o
13
14# HSI interface 10# HSI interface
15obj-$(CONFIG_CAIF_HSI) += caif_hsi.o 11obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 0def8b3106f4..5e40a8b68cbe 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1,8 +1,7 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com 3 * Author: Daniel Martensson
4 * Author: Daniel Martensson / daniel.martensson@stericsson.com 4 * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no
5 * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2. 5 * License terms: GNU General Public License (GPL) version 2.
7 */ 6 */
8 7
@@ -25,7 +24,7 @@
25#include <net/caif/caif_hsi.h> 24#include <net/caif/caif_hsi.h>
26 25
27MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
28MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); 27MODULE_AUTHOR("Daniel Martensson");
29MODULE_DESCRIPTION("CAIF HSI driver"); 28MODULE_DESCRIPTION("CAIF HSI driver");
30 29
31/* Returns the number of padding bytes for alignment. */ 30/* Returns the number of padding bytes for alignment. */
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index be90debc7cd1..77be3cb0b5fe 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland / sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
@@ -21,7 +21,7 @@
21#include <linux/debugfs.h> 21#include <linux/debugfs.h>
22 22
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>"); 24MODULE_AUTHOR("Sjur Brendeland");
25MODULE_DESCRIPTION("CAIF serial device TTY line discipline"); 25MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27MODULE_ALIAS_LDISC(N_CAIF); 27MODULE_ALIAS_LDISC(N_CAIF);
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325a..000000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
9
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <mach/mbox-db5500.h>
14#include <net/caif/caif_shm.h>
15
16MODULE_LICENSE("GPL");
17MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
18
19#define MAX_SHM_INSTANCES 1
20
21enum {
22 MBX_ACC0,
23 MBX_ACC1,
24 MBX_DSP
25};
26
27static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
28
29static unsigned int shm_start;
30static unsigned int shm_size;
31
32module_param(shm_size, uint , 0440);
33MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
34
35module_param(shm_start, uint , 0440);
36MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
37
38static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
39{
40 /* Always block until msg is written successfully */
41 mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
42 return 0;
43}
44
45static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
46 void *pshm_drv)
47{
48 /*
49 * For UX5500, we have only 1 SHM instance which uses MBX0
50 * for communication with the peer modem
51 */
52 pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
53
54 if (!pshm_dev->hmbx)
55 return -ENODEV;
56 else
57 return 0;
58}
59
60static int __init caif_shmdev_init(void)
61{
62 int i, result;
63
64 /* Loop is currently overkill, there is only one instance */
65 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
66
67 shmdev_lyr[i].shm_base_addr = shm_start;
68 shmdev_lyr[i].shm_total_sz = shm_size;
69
70 if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
71 || (shmdev_lyr[i].shm_total_sz <= 0)) {
72 pr_warn("ERROR,"
73 "Shared memory Address and/or Size incorrect"
74 ", Bailing out ...\n");
75 result = -EINVAL;
76 goto clean;
77 }
78
79 pr_info("SHM AREA (instance %d) STARTS"
80 " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
81
82 shmdev_lyr[i].shm_id = i;
83 shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
84 shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
85
86 /*
87 * Finally, CAIF core module is called with details in place:
88 * 1. SHM base address
89 * 2. SHM size
90 * 3. MBX handle
91 */
92 result = caif_shmcore_probe(&shmdev_lyr[i]);
93 if (result) {
94 pr_warn("ERROR[%d],"
95 "Could not probe SHM core (instance %d)"
96 " Bailing out ...\n", result, i);
97 goto clean;
98 }
99 }
100
101 return 0;
102
103clean:
104 /*
105 * For now, we assume that even if one instance of SHM fails, we bail
106 * out of the driver support completely. For this, we need to release
107 * any memory allocated and unregister any instance of SHM net device.
108 */
109 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
110 if (shmdev_lyr[i].pshm_netdev)
111 unregister_netdev(shmdev_lyr[i].pshm_netdev);
112 }
113 return result;
114}
115
116static void __exit caif_shmdev_exit(void)
117{
118 int i;
119
120 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
121 caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
122 kfree((void *)shmdev_lyr[i].shm_base_addr);
123 }
124
125}
126
127module_init(caif_shmdev_init);
128module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index bce8bac311c9..000000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,747 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
5 * Daniel Martensson / daniel.martensson@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
10
11#include <linux/spinlock.h>
12#include <linux/sched.h>
13#include <linux/list.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16#include <linux/io.h>
17
18#include <net/caif/caif_device.h>
19#include <net/caif/caif_shm.h>
20
21#define NR_TX_BUF 6
22#define NR_RX_BUF 6
23#define TX_BUF_SZ 0x2000
24#define RX_BUF_SZ 0x2000
25
26#define CAIF_NEEDED_HEADROOM 32
27
28#define CAIF_FLOW_ON 1
29#define CAIF_FLOW_OFF 0
30
31#define LOW_WATERMARK 3
32#define HIGH_WATERMARK 4
33
34/* Maximum number of CAIF buffers per shared memory buffer. */
35#define SHM_MAX_FRMS_PER_BUF 10
36
37/*
38 * Size in bytes of the descriptor area
39 * (With end of descriptor signalling)
40 */
41#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
42 sizeof(struct shm_pck_desc))
43
44/*
45 * Offset to the first CAIF frame within a shared memory buffer.
46 * Aligned on 32 bytes.
47 */
48#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
49
50/* Number of bytes for CAIF shared memory header. */
51#define SHM_HDR_LEN 1
52
53/* Number of padding bytes for the complete CAIF frame. */
54#define SHM_FRM_PAD_LEN 4
55
56#define CAIF_MAX_MTU 4096
57
58#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
59#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
60
61#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
62#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
63
64#define SHM_FULL_MASK (0x0F << 0)
65#define SHM_EMPTY_MASK (0x0F << 4)
66
67struct shm_pck_desc {
68 /*
69 * Offset from start of shared memory area to start of
70 * shared memory CAIF frame.
71 */
72 u32 frm_ofs;
73 u32 frm_len;
74};
75
76struct buf_list {
77 unsigned char *desc_vptr;
78 u32 phy_addr;
79 u32 index;
80 u32 len;
81 u32 frames;
82 u32 frm_ofs;
83 struct list_head list;
84};
85
86struct shm_caif_frm {
87 /* Number of bytes of padding before the CAIF frame. */
88 u8 hdr_ofs;
89};
90
91struct shmdrv_layer {
92 /* caif_dev_common must always be first in the structure*/
93 struct caif_dev_common cfdev;
94
95 u32 shm_tx_addr;
96 u32 shm_rx_addr;
97 u32 shm_base_addr;
98 u32 tx_empty_available;
99 spinlock_t lock;
100
101 struct list_head tx_empty_list;
102 struct list_head tx_pend_list;
103 struct list_head tx_full_list;
104 struct list_head rx_empty_list;
105 struct list_head rx_pend_list;
106 struct list_head rx_full_list;
107
108 struct workqueue_struct *pshm_tx_workqueue;
109 struct workqueue_struct *pshm_rx_workqueue;
110
111 struct work_struct shm_tx_work;
112 struct work_struct shm_rx_work;
113
114 struct sk_buff_head sk_qhead;
115 struct shmdev_layer *pshm_dev;
116};
117
118static int shm_netdev_open(struct net_device *shm_netdev)
119{
120 netif_wake_queue(shm_netdev);
121 return 0;
122}
123
124static int shm_netdev_close(struct net_device *shm_netdev)
125{
126 netif_stop_queue(shm_netdev);
127 return 0;
128}
129
130int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
131{
132 struct buf_list *pbuf;
133 struct shmdrv_layer *pshm_drv;
134 struct list_head *pos;
135 u32 avail_emptybuff = 0;
136 unsigned long flags = 0;
137
138 pshm_drv = priv;
139
140 /* Check for received buffers. */
141 if (mbx_msg & SHM_FULL_MASK) {
142 int idx;
143
144 spin_lock_irqsave(&pshm_drv->lock, flags);
145
146 /* Check whether we have any outstanding buffers. */
147 if (list_empty(&pshm_drv->rx_empty_list)) {
148
149 /* Release spin lock. */
150 spin_unlock_irqrestore(&pshm_drv->lock, flags);
151
152 /* We print even in IRQ context... */
153 pr_warn("No empty Rx buffers to fill: "
154 "mbx_msg:%x\n", mbx_msg);
155
156 /* Bail out. */
157 goto err_sync;
158 }
159
160 pbuf =
161 list_entry(pshm_drv->rx_empty_list.next,
162 struct buf_list, list);
163 idx = pbuf->index;
164
165 /* Check buffer synchronization. */
166 if (idx != SHM_GET_FULL(mbx_msg)) {
167
168 /* We print even in IRQ context... */
169 pr_warn(
170 "phyif_shm_mbx_msg_cb: RX full out of sync:"
171 " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
172 idx, mbx_msg, SHM_GET_FULL(mbx_msg));
173
174 spin_unlock_irqrestore(&pshm_drv->lock, flags);
175
176 /* Bail out. */
177 goto err_sync;
178 }
179
180 list_del_init(&pbuf->list);
181 list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
182
183 spin_unlock_irqrestore(&pshm_drv->lock, flags);
184
185 /* Schedule RX work queue. */
186 if (!work_pending(&pshm_drv->shm_rx_work))
187 queue_work(pshm_drv->pshm_rx_workqueue,
188 &pshm_drv->shm_rx_work);
189 }
190
191 /* Check for emptied buffers. */
192 if (mbx_msg & SHM_EMPTY_MASK) {
193 int idx;
194
195 spin_lock_irqsave(&pshm_drv->lock, flags);
196
197 /* Check whether we have any outstanding buffers. */
198 if (list_empty(&pshm_drv->tx_full_list)) {
199
200 /* We print even in IRQ context... */
201 pr_warn("No TX to empty: msg:%x\n", mbx_msg);
202
203 spin_unlock_irqrestore(&pshm_drv->lock, flags);
204
205 /* Bail out. */
206 goto err_sync;
207 }
208
209 pbuf =
210 list_entry(pshm_drv->tx_full_list.next,
211 struct buf_list, list);
212 idx = pbuf->index;
213
214 /* Check buffer synchronization. */
215 if (idx != SHM_GET_EMPTY(mbx_msg)) {
216
217 spin_unlock_irqrestore(&pshm_drv->lock, flags);
218
219 /* We print even in IRQ context... */
220 pr_warn("TX empty "
221 "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
222
223 /* Bail out. */
224 goto err_sync;
225 }
226 list_del_init(&pbuf->list);
227
228 /* Reset buffer parameters. */
229 pbuf->frames = 0;
230 pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
231
232 list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
233
234 /* Check the available no. of buffers in the empty list */
235 list_for_each(pos, &pshm_drv->tx_empty_list)
236 avail_emptybuff++;
237
238 /* Check whether we have to wake up the transmitter. */
239 if ((avail_emptybuff > HIGH_WATERMARK) &&
240 (!pshm_drv->tx_empty_available)) {
241 pshm_drv->tx_empty_available = 1;
242 spin_unlock_irqrestore(&pshm_drv->lock, flags);
243 pshm_drv->cfdev.flowctrl
244 (pshm_drv->pshm_dev->pshm_netdev,
245 CAIF_FLOW_ON);
246
247
248 /* Schedule the work queue. if required */
249 if (!work_pending(&pshm_drv->shm_tx_work))
250 queue_work(pshm_drv->pshm_tx_workqueue,
251 &pshm_drv->shm_tx_work);
252 } else
253 spin_unlock_irqrestore(&pshm_drv->lock, flags);
254 }
255
256 return 0;
257
258err_sync:
259 return -EIO;
260}
261
262static void shm_rx_work_func(struct work_struct *rx_work)
263{
264 struct shmdrv_layer *pshm_drv;
265 struct buf_list *pbuf;
266 unsigned long flags = 0;
267 struct sk_buff *skb;
268 char *p;
269 int ret;
270
271 pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
272
273 while (1) {
274
275 struct shm_pck_desc *pck_desc;
276
277 spin_lock_irqsave(&pshm_drv->lock, flags);
278
279 /* Check for received buffers. */
280 if (list_empty(&pshm_drv->rx_full_list)) {
281 spin_unlock_irqrestore(&pshm_drv->lock, flags);
282 break;
283 }
284
285 pbuf =
286 list_entry(pshm_drv->rx_full_list.next, struct buf_list,
287 list);
288 list_del_init(&pbuf->list);
289 spin_unlock_irqrestore(&pshm_drv->lock, flags);
290
291 /* Retrieve pointer to start of the packet descriptor area. */
292 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
293
294 /*
295 * Check whether descriptor contains a CAIF shared memory
296 * frame.
297 */
298 while (pck_desc->frm_ofs) {
299 unsigned int frm_buf_ofs;
300 unsigned int frm_pck_ofs;
301 unsigned int frm_pck_len;
302 /*
303 * Check whether offset is within buffer limits
304 * (lower).
305 */
306 if (pck_desc->frm_ofs <
307 (pbuf->phy_addr - pshm_drv->shm_base_addr))
308 break;
309 /*
310 * Check whether offset is within buffer limits
311 * (higher).
312 */
313 if (pck_desc->frm_ofs >
314 ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
315 pbuf->len))
316 break;
317
318 /* Calculate offset from start of buffer. */
319 frm_buf_ofs =
320 pck_desc->frm_ofs - (pbuf->phy_addr -
321 pshm_drv->shm_base_addr);
322
323 /*
324 * Calculate offset and length of CAIF packet while
325 * taking care of the shared memory header.
326 */
327 frm_pck_ofs =
328 frm_buf_ofs + SHM_HDR_LEN +
329 (*(pbuf->desc_vptr + frm_buf_ofs));
330 frm_pck_len =
331 (pck_desc->frm_len - SHM_HDR_LEN -
332 (*(pbuf->desc_vptr + frm_buf_ofs)));
333
334 /* Check whether CAIF packet is within buffer limits */
335 if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
336 break;
337
338 /* Get a suitable CAIF packet and copy in data. */
339 skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
340 frm_pck_len + 1);
341
342 if (skb == NULL) {
343 pr_info("OOM: Try next frame in descriptor\n");
344 break;
345 }
346
347 p = skb_put(skb, frm_pck_len);
348 memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
349
350 skb->protocol = htons(ETH_P_CAIF);
351 skb_reset_mac_header(skb);
352 skb->dev = pshm_drv->pshm_dev->pshm_netdev;
353
354 /* Push received packet up the stack. */
355 ret = netif_rx_ni(skb);
356
357 if (!ret) {
358 pshm_drv->pshm_dev->pshm_netdev->stats.
359 rx_packets++;
360 pshm_drv->pshm_dev->pshm_netdev->stats.
361 rx_bytes += pck_desc->frm_len;
362 } else
363 ++pshm_drv->pshm_dev->pshm_netdev->stats.
364 rx_dropped;
365 /* Move to next packet descriptor. */
366 pck_desc++;
367 }
368
369 spin_lock_irqsave(&pshm_drv->lock, flags);
370 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
371
372 spin_unlock_irqrestore(&pshm_drv->lock, flags);
373
374 }
375
376 /* Schedule the work queue. if required */
377 if (!work_pending(&pshm_drv->shm_tx_work))
378 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
379
380}
381
382static void shm_tx_work_func(struct work_struct *tx_work)
383{
384 u32 mbox_msg;
385 unsigned int frmlen, avail_emptybuff, append = 0;
386 unsigned long flags = 0;
387 struct buf_list *pbuf = NULL;
388 struct shmdrv_layer *pshm_drv;
389 struct shm_caif_frm *frm;
390 struct sk_buff *skb;
391 struct shm_pck_desc *pck_desc;
392 struct list_head *pos;
393
394 pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
395
396 do {
397 /* Initialize mailbox message. */
398 mbox_msg = 0x00;
399 avail_emptybuff = 0;
400
401 spin_lock_irqsave(&pshm_drv->lock, flags);
402
403 /* Check for pending receive buffers. */
404 if (!list_empty(&pshm_drv->rx_pend_list)) {
405
406 pbuf = list_entry(pshm_drv->rx_pend_list.next,
407 struct buf_list, list);
408
409 list_del_init(&pbuf->list);
410 list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
411 /*
412 * Value index is never changed,
413 * so read access should be safe.
414 */
415 mbox_msg |= SHM_SET_EMPTY(pbuf->index);
416 }
417
418 skb = skb_peek(&pshm_drv->sk_qhead);
419
420 if (skb == NULL)
421 goto send_msg;
422 /* Check the available no. of buffers in the empty list */
423 list_for_each(pos, &pshm_drv->tx_empty_list)
424 avail_emptybuff++;
425
426 if ((avail_emptybuff < LOW_WATERMARK) &&
427 pshm_drv->tx_empty_available) {
428 /* Update blocking condition. */
429 pshm_drv->tx_empty_available = 0;
430 spin_unlock_irqrestore(&pshm_drv->lock, flags);
431 pshm_drv->cfdev.flowctrl
432 (pshm_drv->pshm_dev->pshm_netdev,
433 CAIF_FLOW_OFF);
434 spin_lock_irqsave(&pshm_drv->lock, flags);
435 }
436 /*
437 * We simply return back to the caller if we do not have space
438 * either in Tx pending list or Tx empty list. In this case,
439 * we hold the received skb in the skb list, waiting to
440 * be transmitted once Tx buffers become available
441 */
442 if (list_empty(&pshm_drv->tx_empty_list))
443 goto send_msg;
444
445 /* Get the first free Tx buffer. */
446 pbuf = list_entry(pshm_drv->tx_empty_list.next,
447 struct buf_list, list);
448 do {
449 if (append) {
450 skb = skb_peek(&pshm_drv->sk_qhead);
451 if (skb == NULL)
452 break;
453 }
454
455 frm = (struct shm_caif_frm *)
456 (pbuf->desc_vptr + pbuf->frm_ofs);
457
458 frm->hdr_ofs = 0;
459 frmlen = 0;
460 frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
461
462 /* Add tail padding if needed. */
463 if (frmlen % SHM_FRM_PAD_LEN)
464 frmlen += SHM_FRM_PAD_LEN -
465 (frmlen % SHM_FRM_PAD_LEN);
466
467 /*
468 * Verify that packet, header and additional padding
469 * can fit within the buffer frame area.
470 */
471 if (frmlen >= (pbuf->len - pbuf->frm_ofs))
472 break;
473
474 if (!append) {
475 list_del_init(&pbuf->list);
476 append = 1;
477 }
478
479 skb = skb_dequeue(&pshm_drv->sk_qhead);
480 if (skb == NULL)
481 break;
482 /* Copy in CAIF frame. */
483 skb_copy_bits(skb, 0, pbuf->desc_vptr +
484 pbuf->frm_ofs + SHM_HDR_LEN +
485 frm->hdr_ofs, skb->len);
486
487 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
488 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
489 frmlen;
490 dev_kfree_skb_irq(skb);
491
492 /* Fill in the shared memory packet descriptor area. */
493 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
494 /* Forward to current frame. */
495 pck_desc += pbuf->frames;
496 pck_desc->frm_ofs = (pbuf->phy_addr -
497 pshm_drv->shm_base_addr) +
498 pbuf->frm_ofs;
499 pck_desc->frm_len = frmlen;
500 /* Terminate packet descriptor area. */
501 pck_desc++;
502 pck_desc->frm_ofs = 0;
503 /* Update buffer parameters. */
504 pbuf->frames++;
505 pbuf->frm_ofs += frmlen + (frmlen % 32);
506
507 } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
508
509 /* Assign buffer as full. */
510 list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
511 append = 0;
512 mbox_msg |= SHM_SET_FULL(pbuf->index);
513send_msg:
514 spin_unlock_irqrestore(&pshm_drv->lock, flags);
515
516 if (mbox_msg)
517 pshm_drv->pshm_dev->pshmdev_mbxsend
518 (pshm_drv->pshm_dev->shm_id, mbox_msg);
519 } while (mbox_msg);
520}
521
522static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
523{
524 struct shmdrv_layer *pshm_drv;
525
526 pshm_drv = netdev_priv(shm_netdev);
527
528 skb_queue_tail(&pshm_drv->sk_qhead, skb);
529
530 /* Schedule Tx work queue. for deferred processing of skbs*/
531 if (!work_pending(&pshm_drv->shm_tx_work))
532 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
533
534 return 0;
535}
536
537static const struct net_device_ops netdev_ops = {
538 .ndo_open = shm_netdev_open,
539 .ndo_stop = shm_netdev_close,
540 .ndo_start_xmit = shm_netdev_tx,
541};
542
543static void shm_netdev_setup(struct net_device *pshm_netdev)
544{
545 struct shmdrv_layer *pshm_drv;
546 pshm_netdev->netdev_ops = &netdev_ops;
547
548 pshm_netdev->mtu = CAIF_MAX_MTU;
549 pshm_netdev->type = ARPHRD_CAIF;
550 pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
551 pshm_netdev->tx_queue_len = 0;
552 pshm_netdev->destructor = free_netdev;
553
554 pshm_drv = netdev_priv(pshm_netdev);
555
556 /* Initialize structures in a clean state. */
557 memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
558
559 pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
560}
561
562int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
563{
564 int result, j;
565 struct shmdrv_layer *pshm_drv = NULL;
566
567 pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
568 "cfshm%d", shm_netdev_setup);
569 if (!pshm_dev->pshm_netdev)
570 return -ENOMEM;
571
572 pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
573 pshm_drv->pshm_dev = pshm_dev;
574
575 /*
576 * Initialization starts with the verification of the
577 * availability of MBX driver by calling its setup function.
578 * MBX driver must be available by this time for proper
579 * functioning of SHM driver.
580 */
581 if ((pshm_dev->pshmdev_mbxsetup
582 (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
583 pr_warn("Could not config. SHM Mailbox,"
584 " Bailing out.....\n");
585 free_netdev(pshm_dev->pshm_netdev);
586 return -ENODEV;
587 }
588
589 skb_queue_head_init(&pshm_drv->sk_qhead);
590
591 pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
592 " INSTANCE AT pshm_drv =0x%p\n",
593 pshm_drv->pshm_dev->shm_id, pshm_drv);
594
595 if (pshm_dev->shm_total_sz <
596 (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
597
598 pr_warn("ERROR, Amount of available"
599 " Phys. SHM cannot accommodate current SHM "
600 "driver configuration, Bailing out ...\n");
601 free_netdev(pshm_dev->pshm_netdev);
602 return -ENOMEM;
603 }
604
605 pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
606 pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
607
608 if (pshm_dev->shm_loopback)
609 pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
610 else
611 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
612 (NR_TX_BUF * TX_BUF_SZ);
613
614 spin_lock_init(&pshm_drv->lock);
615 INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
616 INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
617 INIT_LIST_HEAD(&pshm_drv->tx_full_list);
618
619 INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
620 INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
621 INIT_LIST_HEAD(&pshm_drv->rx_full_list);
622
623 INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
624 INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
625
626 pshm_drv->pshm_tx_workqueue =
627 create_singlethread_workqueue("shm_tx_work");
628 pshm_drv->pshm_rx_workqueue =
629 create_singlethread_workqueue("shm_rx_work");
630
631 for (j = 0; j < NR_TX_BUF; j++) {
632 struct buf_list *tx_buf =
633 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
634
635 if (tx_buf == NULL) {
636 free_netdev(pshm_dev->pshm_netdev);
637 return -ENOMEM;
638 }
639 tx_buf->index = j;
640 tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
641 tx_buf->len = TX_BUF_SZ;
642 tx_buf->frames = 0;
643 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
644
645 if (pshm_dev->shm_loopback)
646 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
647 else
648 /*
649 * FIXME: the result of ioremap is not a pointer - arnd
650 */
651 tx_buf->desc_vptr =
652 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
653
654 list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
655 }
656
657 for (j = 0; j < NR_RX_BUF; j++) {
658 struct buf_list *rx_buf =
659 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
660
661 if (rx_buf == NULL) {
662 free_netdev(pshm_dev->pshm_netdev);
663 return -ENOMEM;
664 }
665 rx_buf->index = j;
666 rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
667 rx_buf->len = RX_BUF_SZ;
668
669 if (pshm_dev->shm_loopback)
670 rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
671 else
672 rx_buf->desc_vptr =
673 ioremap(rx_buf->phy_addr, RX_BUF_SZ);
674 list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
675 }
676
677 pshm_drv->tx_empty_available = 1;
678 result = register_netdev(pshm_dev->pshm_netdev);
679 if (result)
680 pr_warn("ERROR[%d], SHM could not, "
681 "register with NW FRMWK Bailing out ...\n", result);
682
683 return result;
684}
685
686void caif_shmcore_remove(struct net_device *pshm_netdev)
687{
688 struct buf_list *pbuf;
689 struct shmdrv_layer *pshm_drv = NULL;
690
691 pshm_drv = netdev_priv(pshm_netdev);
692
693 while (!(list_empty(&pshm_drv->tx_pend_list))) {
694 pbuf =
695 list_entry(pshm_drv->tx_pend_list.next,
696 struct buf_list, list);
697
698 list_del(&pbuf->list);
699 kfree(pbuf);
700 }
701
702 while (!(list_empty(&pshm_drv->tx_full_list))) {
703 pbuf =
704 list_entry(pshm_drv->tx_full_list.next,
705 struct buf_list, list);
706 list_del(&pbuf->list);
707 kfree(pbuf);
708 }
709
710 while (!(list_empty(&pshm_drv->tx_empty_list))) {
711 pbuf =
712 list_entry(pshm_drv->tx_empty_list.next,
713 struct buf_list, list);
714 list_del(&pbuf->list);
715 kfree(pbuf);
716 }
717
718 while (!(list_empty(&pshm_drv->rx_full_list))) {
719 pbuf =
720 list_entry(pshm_drv->tx_full_list.next,
721 struct buf_list, list);
722 list_del(&pbuf->list);
723 kfree(pbuf);
724 }
725
726 while (!(list_empty(&pshm_drv->rx_pend_list))) {
727 pbuf =
728 list_entry(pshm_drv->tx_pend_list.next,
729 struct buf_list, list);
730 list_del(&pbuf->list);
731 kfree(pbuf);
732 }
733
734 while (!(list_empty(&pshm_drv->rx_empty_list))) {
735 pbuf =
736 list_entry(pshm_drv->rx_empty_list.next,
737 struct buf_list, list);
738 list_del(&pbuf->list);
739 kfree(pbuf);
740 }
741
742 /* Destroy work queues. */
743 destroy_workqueue(pshm_drv->pshm_tx_workqueue);
744 destroy_workqueue(pshm_drv->pshm_rx_workqueue);
745
746 unregister_netdev(pshm_netdev);
747}
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index b71ce9bf0afb..155db68e13ba 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com 3 * Author: Daniel Martensson
4 * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2. 4 * License terms: GNU General Public License (GPL) version 2.
6 */ 5 */
7 6
@@ -29,7 +28,7 @@
29#endif /* CONFIG_CAIF_SPI_SYNC */ 28#endif /* CONFIG_CAIF_SPI_SYNC */
30 29
31MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); 31MODULE_AUTHOR("Daniel Martensson");
33MODULE_DESCRIPTION("CAIF SPI driver"); 32MODULE_DESCRIPTION("CAIF SPI driver");
34 33
35/* Returns the number of padding bytes for alignment. */ 34/* Returns the number of padding bytes for alignment. */
@@ -864,6 +863,7 @@ static int __init cfspi_init_module(void)
864 driver_remove_file(&cfspi_spi_driver.driver, 863 driver_remove_file(&cfspi_spi_driver.driver,
865 &driver_attr_up_head_align); 864 &driver_attr_up_head_align);
866 err_create_up_head_align: 865 err_create_up_head_align:
866 platform_driver_unregister(&cfspi_spi_driver);
867 err_dev_register: 867 err_dev_register:
868 return result; 868 return result;
869} 869}
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index e139e133fc79..ee92ad5a6cf8 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson AB 2010 2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com 3 * Author: Daniel Martensson
4 * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2. 4 * License terms: GNU General Public License (GPL) version 2.
6 */ 5 */
7#include <linux/init.h> 6#include <linux/init.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9862b2e07644..e456b70933c2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
65 65
66config CAN_AT91 66config CAN_AT91
67 tristate "Atmel AT91 onchip CAN controller" 67 tristate "Atmel AT91 onchip CAN controller"
68 depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5 68 depends on ARM
69 ---help--- 69 ---help---
70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 70 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
71 and AT91SAM9X5 processors. 71 and AT91SAM9X5 processors.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 44f363792b59..db52f4414def 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/of.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/rtnetlink.h> 32#include <linux/rtnetlink.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
@@ -155,19 +156,20 @@ struct at91_priv {
155 canid_t mb0_id; 156 canid_t mb0_id;
156}; 157};
157 158
158static const struct at91_devtype_data at91_devtype_data[] = { 159static const struct at91_devtype_data at91_at91sam9263_data = {
159 [AT91_DEVTYPE_SAM9263] = { 160 .rx_first = 1,
160 .rx_first = 1, 161 .rx_split = 8,
161 .rx_split = 8, 162 .rx_last = 11,
162 .rx_last = 11, 163 .tx_shift = 2,
163 .tx_shift = 2, 164 .type = AT91_DEVTYPE_SAM9263,
164 }, 165};
165 [AT91_DEVTYPE_SAM9X5] = { 166
166 .rx_first = 0, 167static const struct at91_devtype_data at91_at91sam9x5_data = {
167 .rx_split = 4, 168 .rx_first = 0,
168 .rx_last = 5, 169 .rx_split = 4,
169 .tx_shift = 1, 170 .rx_last = 5,
170 }, 171 .tx_shift = 1,
172 .type = AT91_DEVTYPE_SAM9X5,
171}; 173};
172 174
173static const struct can_bittiming_const at91_bittiming_const = { 175static const struct can_bittiming_const at91_bittiming_const = {
@@ -1249,10 +1251,42 @@ static struct attribute_group at91_sysfs_attr_group = {
1249 .attrs = at91_sysfs_attrs, 1251 .attrs = at91_sysfs_attrs,
1250}; 1252};
1251 1253
1254#if defined(CONFIG_OF)
1255static const struct of_device_id at91_can_dt_ids[] = {
1256 {
1257 .compatible = "atmel,at91sam9x5-can",
1258 .data = &at91_at91sam9x5_data,
1259 }, {
1260 .compatible = "atmel,at91sam9263-can",
1261 .data = &at91_at91sam9263_data,
1262 }, {
1263 /* sentinel */
1264 }
1265};
1266MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
1267#else
1268#define at91_can_dt_ids NULL
1269#endif
1270
1271static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
1272{
1273 if (pdev->dev.of_node) {
1274 const struct of_device_id *match;
1275
1276 match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
1277 if (!match) {
1278 dev_err(&pdev->dev, "no matching node found in dtb\n");
1279 return NULL;
1280 }
1281 return (const struct at91_devtype_data *)match->data;
1282 }
1283 return (const struct at91_devtype_data *)
1284 platform_get_device_id(pdev)->driver_data;
1285}
1286
1252static int at91_can_probe(struct platform_device *pdev) 1287static int at91_can_probe(struct platform_device *pdev)
1253{ 1288{
1254 const struct at91_devtype_data *devtype_data; 1289 const struct at91_devtype_data *devtype_data;
1255 enum at91_devtype devtype;
1256 struct net_device *dev; 1290 struct net_device *dev;
1257 struct at91_priv *priv; 1291 struct at91_priv *priv;
1258 struct resource *res; 1292 struct resource *res;
@@ -1260,8 +1294,12 @@ static int at91_can_probe(struct platform_device *pdev)
1260 void __iomem *addr; 1294 void __iomem *addr;
1261 int err, irq; 1295 int err, irq;
1262 1296
1263 devtype = pdev->id_entry->driver_data; 1297 devtype_data = at91_can_get_driver_data(pdev);
1264 devtype_data = &at91_devtype_data[devtype]; 1298 if (!devtype_data) {
1299 dev_err(&pdev->dev, "no driver data\n");
1300 err = -ENODEV;
1301 goto exit;
1302 }
1265 1303
1266 clk = clk_get(&pdev->dev, "can_clk"); 1304 clk = clk_get(&pdev->dev, "can_clk");
1267 if (IS_ERR(clk)) { 1305 if (IS_ERR(clk)) {
@@ -1310,7 +1348,6 @@ static int at91_can_probe(struct platform_device *pdev)
1310 priv->dev = dev; 1348 priv->dev = dev;
1311 priv->reg_base = addr; 1349 priv->reg_base = addr;
1312 priv->devtype_data = *devtype_data; 1350 priv->devtype_data = *devtype_data;
1313 priv->devtype_data.type = devtype;
1314 priv->clk = clk; 1351 priv->clk = clk;
1315 priv->pdata = pdev->dev.platform_data; 1352 priv->pdata = pdev->dev.platform_data;
1316 priv->mb0_id = 0x7ff; 1353 priv->mb0_id = 0x7ff;
@@ -1373,10 +1410,10 @@ static int at91_can_remove(struct platform_device *pdev)
1373static const struct platform_device_id at91_can_id_table[] = { 1410static const struct platform_device_id at91_can_id_table[] = {
1374 { 1411 {
1375 .name = "at91_can", 1412 .name = "at91_can",
1376 .driver_data = AT91_DEVTYPE_SAM9263, 1413 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1377 }, { 1414 }, {
1378 .name = "at91sam9x5_can", 1415 .name = "at91sam9x5_can",
1379 .driver_data = AT91_DEVTYPE_SAM9X5, 1416 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1380 }, { 1417 }, {
1381 /* sentinel */ 1418 /* sentinel */
1382 } 1419 }
@@ -1389,6 +1426,7 @@ static struct platform_driver at91_can_driver = {
1389 .driver = { 1426 .driver = {
1390 .name = KBUILD_MODNAME, 1427 .name = KBUILD_MODNAME,
1391 .owner = THIS_MODULE, 1428 .owner = THIS_MODULE,
1429 .of_match_table = at91_can_dt_ids,
1392 }, 1430 },
1393 .id_table = at91_can_id_table, 1431 .id_table = at91_can_id_table,
1394}; 1432};
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 6a0532176b69..d4a15e82bfc0 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -412,7 +412,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
412 return 0; 412 return 0;
413} 413}
414 414
415irqreturn_t bfin_can_interrupt(int irq, void *dev_id) 415static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
416{ 416{
417 struct net_device *dev = dev_id; 417 struct net_device *dev = dev_id;
418 struct bfin_can_priv *priv = netdev_priv(dev); 418 struct bfin_can_priv *priv = netdev_priv(dev);
@@ -504,7 +504,7 @@ static int bfin_can_close(struct net_device *dev)
504 return 0; 504 return 0;
505} 505}
506 506
507struct net_device *alloc_bfin_candev(void) 507static struct net_device *alloc_bfin_candev(void)
508{ 508{
509 struct net_device *dev; 509 struct net_device *dev;
510 struct bfin_can_priv *priv; 510 struct bfin_can_priv *priv;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 9aa0c64c33c8..8cda23bf0614 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -269,7 +269,7 @@ struct mcp251x_priv {
269#define MCP251X_IS(_model) \ 269#define MCP251X_IS(_model) \
270static inline int mcp251x_is_##_model(struct spi_device *spi) \ 270static inline int mcp251x_is_##_model(struct spi_device *spi) \
271{ \ 271{ \
272 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \ 272 struct mcp251x_priv *priv = spi_get_drvdata(spi); \
273 return priv->model == CAN_MCP251X_MCP##_model; \ 273 return priv->model == CAN_MCP251X_MCP##_model; \
274} 274}
275 275
@@ -305,7 +305,7 @@ static void mcp251x_clean(struct net_device *net)
305 */ 305 */
306static int mcp251x_spi_trans(struct spi_device *spi, int len) 306static int mcp251x_spi_trans(struct spi_device *spi, int len)
307{ 307{
308 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 308 struct mcp251x_priv *priv = spi_get_drvdata(spi);
309 struct spi_transfer t = { 309 struct spi_transfer t = {
310 .tx_buf = priv->spi_tx_buf, 310 .tx_buf = priv->spi_tx_buf,
311 .rx_buf = priv->spi_rx_buf, 311 .rx_buf = priv->spi_rx_buf,
@@ -333,7 +333,7 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
333 333
334static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) 334static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
335{ 335{
336 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 336 struct mcp251x_priv *priv = spi_get_drvdata(spi);
337 u8 val = 0; 337 u8 val = 0;
338 338
339 priv->spi_tx_buf[0] = INSTRUCTION_READ; 339 priv->spi_tx_buf[0] = INSTRUCTION_READ;
@@ -348,7 +348,7 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
348static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, 348static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
349 uint8_t *v1, uint8_t *v2) 349 uint8_t *v1, uint8_t *v2)
350{ 350{
351 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 351 struct mcp251x_priv *priv = spi_get_drvdata(spi);
352 352
353 priv->spi_tx_buf[0] = INSTRUCTION_READ; 353 priv->spi_tx_buf[0] = INSTRUCTION_READ;
354 priv->spi_tx_buf[1] = reg; 354 priv->spi_tx_buf[1] = reg;
@@ -361,7 +361,7 @@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
361 361
362static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) 362static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
363{ 363{
364 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 364 struct mcp251x_priv *priv = spi_get_drvdata(spi);
365 365
366 priv->spi_tx_buf[0] = INSTRUCTION_WRITE; 366 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
367 priv->spi_tx_buf[1] = reg; 367 priv->spi_tx_buf[1] = reg;
@@ -373,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
373static void mcp251x_write_bits(struct spi_device *spi, u8 reg, 373static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
374 u8 mask, uint8_t val) 374 u8 mask, uint8_t val)
375{ 375{
376 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 376 struct mcp251x_priv *priv = spi_get_drvdata(spi);
377 377
378 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY; 378 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
379 priv->spi_tx_buf[1] = reg; 379 priv->spi_tx_buf[1] = reg;
@@ -386,7 +386,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
386static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, 386static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
387 int len, int tx_buf_idx) 387 int len, int tx_buf_idx)
388{ 388{
389 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 389 struct mcp251x_priv *priv = spi_get_drvdata(spi);
390 390
391 if (mcp251x_is_2510(spi)) { 391 if (mcp251x_is_2510(spi)) {
392 int i; 392 int i;
@@ -403,7 +403,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
403static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, 403static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
404 int tx_buf_idx) 404 int tx_buf_idx)
405{ 405{
406 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 406 struct mcp251x_priv *priv = spi_get_drvdata(spi);
407 u32 sid, eid, exide, rtr; 407 u32 sid, eid, exide, rtr;
408 u8 buf[SPI_TRANSFER_BUF_LEN]; 408 u8 buf[SPI_TRANSFER_BUF_LEN];
409 409
@@ -434,7 +434,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
434static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, 434static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
435 int buf_idx) 435 int buf_idx)
436{ 436{
437 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 437 struct mcp251x_priv *priv = spi_get_drvdata(spi);
438 438
439 if (mcp251x_is_2510(spi)) { 439 if (mcp251x_is_2510(spi)) {
440 int i, len; 440 int i, len;
@@ -454,7 +454,7 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
454 454
455static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) 455static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
456{ 456{
457 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 457 struct mcp251x_priv *priv = spi_get_drvdata(spi);
458 struct sk_buff *skb; 458 struct sk_buff *skb;
459 struct can_frame *frame; 459 struct can_frame *frame;
460 u8 buf[SPI_TRANSFER_BUF_LEN]; 460 u8 buf[SPI_TRANSFER_BUF_LEN];
@@ -550,7 +550,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
550 550
551static int mcp251x_set_normal_mode(struct spi_device *spi) 551static int mcp251x_set_normal_mode(struct spi_device *spi)
552{ 552{
553 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 553 struct mcp251x_priv *priv = spi_get_drvdata(spi);
554 unsigned long timeout; 554 unsigned long timeout;
555 555
556 /* Enable interrupts */ 556 /* Enable interrupts */
@@ -620,7 +620,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
620 620
621static int mcp251x_hw_reset(struct spi_device *spi) 621static int mcp251x_hw_reset(struct spi_device *spi)
622{ 622{
623 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 623 struct mcp251x_priv *priv = spi_get_drvdata(spi);
624 int ret; 624 int ret;
625 unsigned long timeout; 625 unsigned long timeout;
626 626
@@ -1026,7 +1026,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
1026 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; 1026 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
1027 priv->model = spi_get_device_id(spi)->driver_data; 1027 priv->model = spi_get_device_id(spi)->driver_data;
1028 priv->net = net; 1028 priv->net = net;
1029 dev_set_drvdata(&spi->dev, priv); 1029 spi_set_drvdata(spi, priv);
1030 1030
1031 priv->spi = spi; 1031 priv->spi = spi;
1032 mutex_init(&priv->mcp_lock); 1032 mutex_init(&priv->mcp_lock);
@@ -1124,7 +1124,7 @@ error_out:
1124static int mcp251x_can_remove(struct spi_device *spi) 1124static int mcp251x_can_remove(struct spi_device *spi)
1125{ 1125{
1126 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 1126 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1127 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 1127 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1128 struct net_device *net = priv->net; 1128 struct net_device *net = priv->net;
1129 1129
1130 unregister_candev(net); 1130 unregister_candev(net);
@@ -1144,11 +1144,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
1144 return 0; 1144 return 0;
1145} 1145}
1146 1146
1147#ifdef CONFIG_PM 1147#ifdef CONFIG_PM_SLEEP
1148static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state) 1148
1149static int mcp251x_can_suspend(struct device *dev)
1149{ 1150{
1151 struct spi_device *spi = to_spi_device(dev);
1150 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 1152 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1151 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 1153 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1152 struct net_device *net = priv->net; 1154 struct net_device *net = priv->net;
1153 1155
1154 priv->force_quit = 1; 1156 priv->force_quit = 1;
@@ -1176,10 +1178,11 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
1176 return 0; 1178 return 0;
1177} 1179}
1178 1180
1179static int mcp251x_can_resume(struct spi_device *spi) 1181static int mcp251x_can_resume(struct device *dev)
1180{ 1182{
1183 struct spi_device *spi = to_spi_device(dev);
1181 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 1184 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1182 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 1185 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1183 1186
1184 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1187 if (priv->after_suspend & AFTER_SUSPEND_POWER) {
1185 pdata->power_enable(1); 1188 pdata->power_enable(1);
@@ -1197,11 +1200,11 @@ static int mcp251x_can_resume(struct spi_device *spi)
1197 enable_irq(spi->irq); 1200 enable_irq(spi->irq);
1198 return 0; 1201 return 0;
1199} 1202}
1200#else
1201#define mcp251x_can_suspend NULL
1202#define mcp251x_can_resume NULL
1203#endif 1203#endif
1204 1204
1205static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
1206 mcp251x_can_resume);
1207
1205static const struct spi_device_id mcp251x_id_table[] = { 1208static const struct spi_device_id mcp251x_id_table[] = {
1206 { "mcp2510", CAN_MCP251X_MCP2510 }, 1209 { "mcp2510", CAN_MCP251X_MCP2510 },
1207 { "mcp2515", CAN_MCP251X_MCP2515 }, 1210 { "mcp2515", CAN_MCP251X_MCP2515 },
@@ -1213,29 +1216,15 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
1213static struct spi_driver mcp251x_can_driver = { 1216static struct spi_driver mcp251x_can_driver = {
1214 .driver = { 1217 .driver = {
1215 .name = DEVICE_NAME, 1218 .name = DEVICE_NAME,
1216 .bus = &spi_bus_type,
1217 .owner = THIS_MODULE, 1219 .owner = THIS_MODULE,
1220 .pm = &mcp251x_can_pm_ops,
1218 }, 1221 },
1219 1222
1220 .id_table = mcp251x_id_table, 1223 .id_table = mcp251x_id_table,
1221 .probe = mcp251x_can_probe, 1224 .probe = mcp251x_can_probe,
1222 .remove = mcp251x_can_remove, 1225 .remove = mcp251x_can_remove,
1223 .suspend = mcp251x_can_suspend,
1224 .resume = mcp251x_can_resume,
1225}; 1226};
1226 1227module_spi_driver(mcp251x_can_driver);
1227static int __init mcp251x_can_init(void)
1228{
1229 return spi_register_driver(&mcp251x_can_driver);
1230}
1231
1232static void __exit mcp251x_can_exit(void)
1233{
1234 spi_unregister_driver(&mcp251x_can_driver);
1235}
1236
1237module_init(mcp251x_can_init);
1238module_exit(mcp251x_can_exit);
1239 1228
1240MODULE_AUTHOR("Chris Elston <celston@katalix.com>, " 1229MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
1241 "Christian Pellegrin <chripell@evolware.org>"); 1230 "Christian Pellegrin <chripell@evolware.org>");
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36d298da2af6..3752342a678a 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -168,12 +168,12 @@ static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
168 unsigned char res; 168 unsigned char res;
169 169
170 /* Make sure SJA1000 is in reset mode */ 170 /* Make sure SJA1000 is in reset mode */
171 priv->write_reg(priv, REG_MOD, 1); 171 priv->write_reg(priv, SJA1000_MOD, 1);
172 172
173 priv->write_reg(priv, REG_CDR, CDR_PELICAN); 173 priv->write_reg(priv, SJA1000_CDR, CDR_PELICAN);
174 174
175 /* read reset-values */ 175 /* read reset-values */
176 res = priv->read_reg(priv, REG_CDR); 176 res = priv->read_reg(priv, SJA1000_CDR);
177 177
178 if (res == CDR_PELICAN) 178 if (res == CDR_PELICAN)
179 return 1; 179 return 1;
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 321c27e1c7fc..9e535f2ef52b 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -126,11 +126,11 @@ static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id)
126static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv) 126static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv)
127{ 127{
128 /* Make sure SJA1000 is in reset mode */ 128 /* Make sure SJA1000 is in reset mode */
129 ems_pcmcia_write_reg(priv, REG_MOD, 1); 129 ems_pcmcia_write_reg(priv, SJA1000_MOD, 1);
130 ems_pcmcia_write_reg(priv, REG_CDR, CDR_PELICAN); 130 ems_pcmcia_write_reg(priv, SJA1000_CDR, CDR_PELICAN);
131 131
132 /* read reset-values */ 132 /* read reset-values */
133 if (ems_pcmcia_read_reg(priv, REG_CDR) == CDR_PELICAN) 133 if (ems_pcmcia_read_reg(priv, SJA1000_CDR) == CDR_PELICAN)
134 return 1; 134 return 1;
135 135
136 return 0; 136 return 0;
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 37b0381f532e..217585b97cd3 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -159,9 +159,9 @@ static int number_of_sja1000_chip(void __iomem *base_addr)
159 for (i = 0; i < MAX_NO_OF_CHANNELS; i++) { 159 for (i = 0; i < MAX_NO_OF_CHANNELS; i++) {
160 /* reset chip */ 160 /* reset chip */
161 iowrite8(MOD_RM, base_addr + 161 iowrite8(MOD_RM, base_addr +
162 (i * KVASER_PCI_PORT_BYTES) + REG_MOD); 162 (i * KVASER_PCI_PORT_BYTES) + SJA1000_MOD);
163 status = ioread8(base_addr + 163 status = ioread8(base_addr +
164 (i * KVASER_PCI_PORT_BYTES) + REG_MOD); 164 (i * KVASER_PCI_PORT_BYTES) + SJA1000_MOD);
165 /* check reset bit */ 165 /* check reset bit */
166 if (!(status & MOD_RM)) 166 if (!(status & MOD_RM))
167 break; 167 break;
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index d1e7f1006ddd..6b6f0ad75090 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -402,7 +402,7 @@ static void peak_pciec_write_reg(const struct sja1000_priv *priv,
402 int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE; 402 int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE;
403 403
404 /* sja1000 register changes control the leds state */ 404 /* sja1000 register changes control the leds state */
405 if (port == REG_MOD) 405 if (port == SJA1000_MOD)
406 switch (val) { 406 switch (val) {
407 case MOD_RM: 407 case MOD_RM:
408 /* Reset Mode: set led on */ 408 /* Reset Mode: set led on */
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 0a707f70661c..f7ad754dd2aa 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -196,7 +196,7 @@ static void pcan_write_canreg(const struct sja1000_priv *priv, int port, u8 v)
196 int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE; 196 int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE;
197 197
198 /* sja1000 register changes control the leds state */ 198 /* sja1000 register changes control the leds state */
199 if (port == REG_MOD) 199 if (port == SJA1000_MOD)
200 switch (v) { 200 switch (v) {
201 case MOD_RM: 201 case MOD_RM:
202 /* Reset Mode: set led on */ 202 /* Reset Mode: set led on */
@@ -509,11 +509,11 @@ static void pcan_free_channels(struct pcan_pccard *card)
509static inline int pcan_channel_present(struct sja1000_priv *priv) 509static inline int pcan_channel_present(struct sja1000_priv *priv)
510{ 510{
511 /* make sure SJA1000 is in reset mode */ 511 /* make sure SJA1000 is in reset mode */
512 pcan_write_canreg(priv, REG_MOD, 1); 512 pcan_write_canreg(priv, SJA1000_MOD, 1);
513 pcan_write_canreg(priv, REG_CDR, CDR_PELICAN); 513 pcan_write_canreg(priv, SJA1000_CDR, CDR_PELICAN);
514 514
515 /* read reset-values */ 515 /* read reset-values */
516 if (pcan_read_canreg(priv, REG_CDR) == CDR_PELICAN) 516 if (pcan_read_canreg(priv, SJA1000_CDR) == CDR_PELICAN)
517 return 1; 517 return 1;
518 518
519 return 0; 519 return 0;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3c18d7d000ed..c52c1e96bf90 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -348,20 +348,20 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
348 */ 348 */
349 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == 349 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
350 REG_CR_BASICCAN_INITIAL && 350 REG_CR_BASICCAN_INITIAL &&
351 (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && 351 (priv->read_reg(priv, SJA1000_SR) == REG_SR_BASICCAN_INITIAL) &&
352 (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) 352 (priv->read_reg(priv, SJA1000_IR) == REG_IR_BASICCAN_INITIAL))
353 flag = 1; 353 flag = 1;
354 354
355 /* Bring the SJA1000 into the PeliCAN mode*/ 355 /* Bring the SJA1000 into the PeliCAN mode*/
356 priv->write_reg(priv, REG_CDR, CDR_PELICAN); 356 priv->write_reg(priv, SJA1000_CDR, CDR_PELICAN);
357 357
358 /* 358 /*
359 * Check registers after reset in the PeliCAN mode. 359 * Check registers after reset in the PeliCAN mode.
360 * See states on p. 23 of the Datasheet. 360 * See states on p. 23 of the Datasheet.
361 */ 361 */
362 if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && 362 if (priv->read_reg(priv, SJA1000_MOD) == REG_MOD_PELICAN_INITIAL &&
363 priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && 363 priv->read_reg(priv, SJA1000_SR) == REG_SR_PELICAN_INITIAL &&
364 priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) 364 priv->read_reg(priv, SJA1000_IR) == REG_IR_PELICAN_INITIAL)
365 return flag; 365 return flag;
366 366
367 return 0; 367 return 0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index e4df307eaa90..7164a999f50f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -91,14 +91,14 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
91 * the write_reg() operation - especially on SMP systems. 91 * the write_reg() operation - especially on SMP systems.
92 */ 92 */
93 spin_lock_irqsave(&priv->cmdreg_lock, flags); 93 spin_lock_irqsave(&priv->cmdreg_lock, flags);
94 priv->write_reg(priv, REG_CMR, val); 94 priv->write_reg(priv, SJA1000_CMR, val);
95 priv->read_reg(priv, SJA1000_REG_SR); 95 priv->read_reg(priv, SJA1000_SR);
96 spin_unlock_irqrestore(&priv->cmdreg_lock, flags); 96 spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
97} 97}
98 98
99static int sja1000_is_absent(struct sja1000_priv *priv) 99static int sja1000_is_absent(struct sja1000_priv *priv)
100{ 100{
101 return (priv->read_reg(priv, REG_MOD) == 0xFF); 101 return (priv->read_reg(priv, SJA1000_MOD) == 0xFF);
102} 102}
103 103
104static int sja1000_probe_chip(struct net_device *dev) 104static int sja1000_probe_chip(struct net_device *dev)
@@ -116,11 +116,11 @@ static int sja1000_probe_chip(struct net_device *dev)
116static void set_reset_mode(struct net_device *dev) 116static void set_reset_mode(struct net_device *dev)
117{ 117{
118 struct sja1000_priv *priv = netdev_priv(dev); 118 struct sja1000_priv *priv = netdev_priv(dev);
119 unsigned char status = priv->read_reg(priv, REG_MOD); 119 unsigned char status = priv->read_reg(priv, SJA1000_MOD);
120 int i; 120 int i;
121 121
122 /* disable interrupts */ 122 /* disable interrupts */
123 priv->write_reg(priv, REG_IER, IRQ_OFF); 123 priv->write_reg(priv, SJA1000_IER, IRQ_OFF);
124 124
125 for (i = 0; i < 100; i++) { 125 for (i = 0; i < 100; i++) {
126 /* check reset bit */ 126 /* check reset bit */
@@ -129,9 +129,10 @@ static void set_reset_mode(struct net_device *dev)
129 return; 129 return;
130 } 130 }
131 131
132 priv->write_reg(priv, REG_MOD, MOD_RM); /* reset chip */ 132 /* reset chip */
133 priv->write_reg(priv, SJA1000_MOD, MOD_RM);
133 udelay(10); 134 udelay(10);
134 status = priv->read_reg(priv, REG_MOD); 135 status = priv->read_reg(priv, SJA1000_MOD);
135 } 136 }
136 137
137 netdev_err(dev, "setting SJA1000 into reset mode failed!\n"); 138 netdev_err(dev, "setting SJA1000 into reset mode failed!\n");
@@ -140,7 +141,7 @@ static void set_reset_mode(struct net_device *dev)
140static void set_normal_mode(struct net_device *dev) 141static void set_normal_mode(struct net_device *dev)
141{ 142{
142 struct sja1000_priv *priv = netdev_priv(dev); 143 struct sja1000_priv *priv = netdev_priv(dev);
143 unsigned char status = priv->read_reg(priv, REG_MOD); 144 unsigned char status = priv->read_reg(priv, SJA1000_MOD);
144 int i; 145 int i;
145 146
146 for (i = 0; i < 100; i++) { 147 for (i = 0; i < 100; i++) {
@@ -149,22 +150,22 @@ static void set_normal_mode(struct net_device *dev)
149 priv->can.state = CAN_STATE_ERROR_ACTIVE; 150 priv->can.state = CAN_STATE_ERROR_ACTIVE;
150 /* enable interrupts */ 151 /* enable interrupts */
151 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 152 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
152 priv->write_reg(priv, REG_IER, IRQ_ALL); 153 priv->write_reg(priv, SJA1000_IER, IRQ_ALL);
153 else 154 else
154 priv->write_reg(priv, REG_IER, 155 priv->write_reg(priv, SJA1000_IER,
155 IRQ_ALL & ~IRQ_BEI); 156 IRQ_ALL & ~IRQ_BEI);
156 return; 157 return;
157 } 158 }
158 159
159 /* set chip to normal mode */ 160 /* set chip to normal mode */
160 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 161 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
161 priv->write_reg(priv, REG_MOD, MOD_LOM); 162 priv->write_reg(priv, SJA1000_MOD, MOD_LOM);
162 else 163 else
163 priv->write_reg(priv, REG_MOD, 0x00); 164 priv->write_reg(priv, SJA1000_MOD, 0x00);
164 165
165 udelay(10); 166 udelay(10);
166 167
167 status = priv->read_reg(priv, REG_MOD); 168 status = priv->read_reg(priv, SJA1000_MOD);
168 } 169 }
169 170
170 netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); 171 netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
@@ -179,9 +180,9 @@ static void sja1000_start(struct net_device *dev)
179 set_reset_mode(dev); 180 set_reset_mode(dev);
180 181
181 /* Clear error counters and error code capture */ 182 /* Clear error counters and error code capture */
182 priv->write_reg(priv, REG_TXERR, 0x0); 183 priv->write_reg(priv, SJA1000_TXERR, 0x0);
183 priv->write_reg(priv, REG_RXERR, 0x0); 184 priv->write_reg(priv, SJA1000_RXERR, 0x0);
184 priv->read_reg(priv, REG_ECC); 185 priv->read_reg(priv, SJA1000_ECC);
185 186
186 /* leave reset mode */ 187 /* leave reset mode */
187 set_normal_mode(dev); 188 set_normal_mode(dev);
@@ -217,8 +218,8 @@ static int sja1000_set_bittiming(struct net_device *dev)
217 218
218 netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); 219 netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
219 220
220 priv->write_reg(priv, REG_BTR0, btr0); 221 priv->write_reg(priv, SJA1000_BTR0, btr0);
221 priv->write_reg(priv, REG_BTR1, btr1); 222 priv->write_reg(priv, SJA1000_BTR1, btr1);
222 223
223 return 0; 224 return 0;
224} 225}
@@ -228,8 +229,8 @@ static int sja1000_get_berr_counter(const struct net_device *dev,
228{ 229{
229 struct sja1000_priv *priv = netdev_priv(dev); 230 struct sja1000_priv *priv = netdev_priv(dev);
230 231
231 bec->txerr = priv->read_reg(priv, REG_TXERR); 232 bec->txerr = priv->read_reg(priv, SJA1000_TXERR);
232 bec->rxerr = priv->read_reg(priv, REG_RXERR); 233 bec->rxerr = priv->read_reg(priv, SJA1000_RXERR);
233 234
234 return 0; 235 return 0;
235} 236}
@@ -247,20 +248,20 @@ static void chipset_init(struct net_device *dev)
247 struct sja1000_priv *priv = netdev_priv(dev); 248 struct sja1000_priv *priv = netdev_priv(dev);
248 249
249 /* set clock divider and output control register */ 250 /* set clock divider and output control register */
250 priv->write_reg(priv, REG_CDR, priv->cdr | CDR_PELICAN); 251 priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
251 252
252 /* set acceptance filter (accept all) */ 253 /* set acceptance filter (accept all) */
253 priv->write_reg(priv, REG_ACCC0, 0x00); 254 priv->write_reg(priv, SJA1000_ACCC0, 0x00);
254 priv->write_reg(priv, REG_ACCC1, 0x00); 255 priv->write_reg(priv, SJA1000_ACCC1, 0x00);
255 priv->write_reg(priv, REG_ACCC2, 0x00); 256 priv->write_reg(priv, SJA1000_ACCC2, 0x00);
256 priv->write_reg(priv, REG_ACCC3, 0x00); 257 priv->write_reg(priv, SJA1000_ACCC3, 0x00);
257 258
258 priv->write_reg(priv, REG_ACCM0, 0xFF); 259 priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
259 priv->write_reg(priv, REG_ACCM1, 0xFF); 260 priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
260 priv->write_reg(priv, REG_ACCM2, 0xFF); 261 priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
261 priv->write_reg(priv, REG_ACCM3, 0xFF); 262 priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
262 263
263 priv->write_reg(priv, REG_OCR, priv->ocr | OCR_MODE_NORMAL); 264 priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
264} 265}
265 266
266/* 267/*
@@ -289,21 +290,21 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
289 id = cf->can_id; 290 id = cf->can_id;
290 291
291 if (id & CAN_RTR_FLAG) 292 if (id & CAN_RTR_FLAG)
292 fi |= FI_RTR; 293 fi |= SJA1000_FI_RTR;
293 294
294 if (id & CAN_EFF_FLAG) { 295 if (id & CAN_EFF_FLAG) {
295 fi |= FI_FF; 296 fi |= SJA1000_FI_FF;
296 dreg = EFF_BUF; 297 dreg = SJA1000_EFF_BUF;
297 priv->write_reg(priv, REG_FI, fi); 298 priv->write_reg(priv, SJA1000_FI, fi);
298 priv->write_reg(priv, REG_ID1, (id & 0x1fe00000) >> (5 + 16)); 299 priv->write_reg(priv, SJA1000_ID1, (id & 0x1fe00000) >> 21);
299 priv->write_reg(priv, REG_ID2, (id & 0x001fe000) >> (5 + 8)); 300 priv->write_reg(priv, SJA1000_ID2, (id & 0x001fe000) >> 13);
300 priv->write_reg(priv, REG_ID3, (id & 0x00001fe0) >> 5); 301 priv->write_reg(priv, SJA1000_ID3, (id & 0x00001fe0) >> 5);
301 priv->write_reg(priv, REG_ID4, (id & 0x0000001f) << 3); 302 priv->write_reg(priv, SJA1000_ID4, (id & 0x0000001f) << 3);
302 } else { 303 } else {
303 dreg = SFF_BUF; 304 dreg = SJA1000_SFF_BUF;
304 priv->write_reg(priv, REG_FI, fi); 305 priv->write_reg(priv, SJA1000_FI, fi);
305 priv->write_reg(priv, REG_ID1, (id & 0x000007f8) >> 3); 306 priv->write_reg(priv, SJA1000_ID1, (id & 0x000007f8) >> 3);
306 priv->write_reg(priv, REG_ID2, (id & 0x00000007) << 5); 307 priv->write_reg(priv, SJA1000_ID2, (id & 0x00000007) << 5);
307 } 308 }
308 309
309 for (i = 0; i < dlc; i++) 310 for (i = 0; i < dlc; i++)
@@ -335,25 +336,25 @@ static void sja1000_rx(struct net_device *dev)
335 if (skb == NULL) 336 if (skb == NULL)
336 return; 337 return;
337 338
338 fi = priv->read_reg(priv, REG_FI); 339 fi = priv->read_reg(priv, SJA1000_FI);
339 340
340 if (fi & FI_FF) { 341 if (fi & SJA1000_FI_FF) {
341 /* extended frame format (EFF) */ 342 /* extended frame format (EFF) */
342 dreg = EFF_BUF; 343 dreg = SJA1000_EFF_BUF;
343 id = (priv->read_reg(priv, REG_ID1) << (5 + 16)) 344 id = (priv->read_reg(priv, SJA1000_ID1) << 21)
344 | (priv->read_reg(priv, REG_ID2) << (5 + 8)) 345 | (priv->read_reg(priv, SJA1000_ID2) << 13)
345 | (priv->read_reg(priv, REG_ID3) << 5) 346 | (priv->read_reg(priv, SJA1000_ID3) << 5)
346 | (priv->read_reg(priv, REG_ID4) >> 3); 347 | (priv->read_reg(priv, SJA1000_ID4) >> 3);
347 id |= CAN_EFF_FLAG; 348 id |= CAN_EFF_FLAG;
348 } else { 349 } else {
349 /* standard frame format (SFF) */ 350 /* standard frame format (SFF) */
350 dreg = SFF_BUF; 351 dreg = SJA1000_SFF_BUF;
351 id = (priv->read_reg(priv, REG_ID1) << 3) 352 id = (priv->read_reg(priv, SJA1000_ID1) << 3)
352 | (priv->read_reg(priv, REG_ID2) >> 5); 353 | (priv->read_reg(priv, SJA1000_ID2) >> 5);
353 } 354 }
354 355
355 cf->can_dlc = get_can_dlc(fi & 0x0F); 356 cf->can_dlc = get_can_dlc(fi & 0x0F);
356 if (fi & FI_RTR) { 357 if (fi & SJA1000_FI_RTR) {
357 id |= CAN_RTR_FLAG; 358 id |= CAN_RTR_FLAG;
358 } else { 359 } else {
359 for (i = 0; i < cf->can_dlc; i++) 360 for (i = 0; i < cf->can_dlc; i++)
@@ -414,7 +415,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
414 priv->can.can_stats.bus_error++; 415 priv->can.can_stats.bus_error++;
415 stats->rx_errors++; 416 stats->rx_errors++;
416 417
417 ecc = priv->read_reg(priv, REG_ECC); 418 ecc = priv->read_reg(priv, SJA1000_ECC);
418 419
419 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 420 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
420 421
@@ -448,7 +449,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
448 if (isrc & IRQ_ALI) { 449 if (isrc & IRQ_ALI) {
449 /* arbitration lost interrupt */ 450 /* arbitration lost interrupt */
450 netdev_dbg(dev, "arbitration lost interrupt\n"); 451 netdev_dbg(dev, "arbitration lost interrupt\n");
451 alc = priv->read_reg(priv, REG_ALC); 452 alc = priv->read_reg(priv, SJA1000_ALC);
452 priv->can.can_stats.arbitration_lost++; 453 priv->can.can_stats.arbitration_lost++;
453 stats->tx_errors++; 454 stats->tx_errors++;
454 cf->can_id |= CAN_ERR_LOSTARB; 455 cf->can_id |= CAN_ERR_LOSTARB;
@@ -457,8 +458,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
457 458
458 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || 459 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
459 state == CAN_STATE_ERROR_PASSIVE)) { 460 state == CAN_STATE_ERROR_PASSIVE)) {
460 uint8_t rxerr = priv->read_reg(priv, REG_RXERR); 461 uint8_t rxerr = priv->read_reg(priv, SJA1000_RXERR);
461 uint8_t txerr = priv->read_reg(priv, REG_TXERR); 462 uint8_t txerr = priv->read_reg(priv, SJA1000_TXERR);
462 cf->can_id |= CAN_ERR_CRTL; 463 cf->can_id |= CAN_ERR_CRTL;
463 if (state == CAN_STATE_ERROR_WARNING) { 464 if (state == CAN_STATE_ERROR_WARNING) {
464 priv->can.can_stats.error_warning++; 465 priv->can.can_stats.error_warning++;
@@ -494,15 +495,16 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
494 int n = 0; 495 int n = 0;
495 496
496 /* Shared interrupts and IRQ off? */ 497 /* Shared interrupts and IRQ off? */
497 if (priv->read_reg(priv, REG_IER) == IRQ_OFF) 498 if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
498 return IRQ_NONE; 499 return IRQ_NONE;
499 500
500 if (priv->pre_irq) 501 if (priv->pre_irq)
501 priv->pre_irq(priv); 502 priv->pre_irq(priv);
502 503
503 while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { 504 while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
505 (n < SJA1000_MAX_IRQ)) {
504 n++; 506 n++;
505 status = priv->read_reg(priv, SJA1000_REG_SR); 507 status = priv->read_reg(priv, SJA1000_SR);
506 /* check for absent controller due to hw unplug */ 508 /* check for absent controller due to hw unplug */
507 if (status == 0xFF && sja1000_is_absent(priv)) 509 if (status == 0xFF && sja1000_is_absent(priv))
508 return IRQ_NONE; 510 return IRQ_NONE;
@@ -519,7 +521,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
519 } else { 521 } else {
520 /* transmission complete */ 522 /* transmission complete */
521 stats->tx_bytes += 523 stats->tx_bytes +=
522 priv->read_reg(priv, REG_FI) & 0xf; 524 priv->read_reg(priv, SJA1000_FI) & 0xf;
523 stats->tx_packets++; 525 stats->tx_packets++;
524 can_get_echo_skb(dev, 0); 526 can_get_echo_skb(dev, 0);
525 } 527 }
@@ -530,7 +532,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
530 /* receive interrupt */ 532 /* receive interrupt */
531 while (status & SR_RBS) { 533 while (status & SR_RBS) {
532 sja1000_rx(dev); 534 sja1000_rx(dev);
533 status = priv->read_reg(priv, SJA1000_REG_SR); 535 status = priv->read_reg(priv, SJA1000_SR);
534 /* check for absent controller */ 536 /* check for absent controller */
535 if (status == 0xFF && sja1000_is_absent(priv)) 537 if (status == 0xFF && sja1000_is_absent(priv))
536 return IRQ_NONE; 538 return IRQ_NONE;
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index aa48e053da27..9d46398f8154 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -54,46 +54,46 @@
54#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ 54#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
55 55
56/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 56/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
57#define REG_MOD 0x00 57#define SJA1000_MOD 0x00
58#define REG_CMR 0x01 58#define SJA1000_CMR 0x01
59#define SJA1000_REG_SR 0x02 59#define SJA1000_SR 0x02
60#define REG_IR 0x03 60#define SJA1000_IR 0x03
61#define REG_IER 0x04 61#define SJA1000_IER 0x04
62#define REG_ALC 0x0B 62#define SJA1000_ALC 0x0B
63#define REG_ECC 0x0C 63#define SJA1000_ECC 0x0C
64#define REG_EWL 0x0D 64#define SJA1000_EWL 0x0D
65#define REG_RXERR 0x0E 65#define SJA1000_RXERR 0x0E
66#define REG_TXERR 0x0F 66#define SJA1000_TXERR 0x0F
67#define REG_ACCC0 0x10 67#define SJA1000_ACCC0 0x10
68#define REG_ACCC1 0x11 68#define SJA1000_ACCC1 0x11
69#define REG_ACCC2 0x12 69#define SJA1000_ACCC2 0x12
70#define REG_ACCC3 0x13 70#define SJA1000_ACCC3 0x13
71#define REG_ACCM0 0x14 71#define SJA1000_ACCM0 0x14
72#define REG_ACCM1 0x15 72#define SJA1000_ACCM1 0x15
73#define REG_ACCM2 0x16 73#define SJA1000_ACCM2 0x16
74#define REG_ACCM3 0x17 74#define SJA1000_ACCM3 0x17
75#define REG_RMC 0x1D 75#define SJA1000_RMC 0x1D
76#define REG_RBSA 0x1E 76#define SJA1000_RBSA 0x1E
77 77
78/* Common registers - manual section 6.5 */ 78/* Common registers - manual section 6.5 */
79#define REG_BTR0 0x06 79#define SJA1000_BTR0 0x06
80#define REG_BTR1 0x07 80#define SJA1000_BTR1 0x07
81#define REG_OCR 0x08 81#define SJA1000_OCR 0x08
82#define REG_CDR 0x1F 82#define SJA1000_CDR 0x1F
83 83
84#define REG_FI 0x10 84#define SJA1000_FI 0x10
85#define SFF_BUF 0x13 85#define SJA1000_SFF_BUF 0x13
86#define EFF_BUF 0x15 86#define SJA1000_EFF_BUF 0x15
87 87
88#define FI_FF 0x80 88#define SJA1000_FI_FF 0x80
89#define FI_RTR 0x40 89#define SJA1000_FI_RTR 0x40
90 90
91#define REG_ID1 0x11 91#define SJA1000_ID1 0x11
92#define REG_ID2 0x12 92#define SJA1000_ID2 0x12
93#define REG_ID3 0x13 93#define SJA1000_ID3 0x13
94#define REG_ID4 0x14 94#define SJA1000_ID4 0x14
95 95
96#define CAN_RAM 0x20 96#define SJA1000_CAN_RAM 0x20
97 97
98/* mode register */ 98/* mode register */
99#define MOD_RM 0x01 99#define MOD_RM 0x01
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index f36ff99fd394..adb4bf5eb4b4 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -306,6 +306,7 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
306 if (!dev) 306 if (!dev)
307 return -ENOMEM; 307 return -ENOMEM;
308 308
309 SET_NETDEV_DEV(dev, pdev);
309 netdev_boot_setup_check(dev); 310 netdev_boot_setup_check(dev);
310 311
311 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { 312 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
@@ -595,6 +596,7 @@ static int __init el3_eisa_probe (struct device *device)
595 return -ENOMEM; 596 return -ENOMEM;
596 } 597 }
597 598
599 SET_NETDEV_DEV(dev, device);
598 netdev_boot_setup_check(dev); 600 netdev_boot_setup_check(dev);
599 601
600 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); 602 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 27aaaf99e73e..144942f6372b 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
1690 skb_checksum_none_assert(new_skb); 1690 skb_checksum_none_assert(new_skb);
1691 1691
1692 if (rx->rxStatus & TYPHOON_RX_VLAN) 1692 if (rx->rxStatus & TYPHOON_RX_VLAN)
1693 __vlan_hwaccel_put_tag(new_skb, 1693 __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
1694 ntohl(rx->vlanTag) & 0xffff); 1694 ntohl(rx->vlanTag) & 0xffff);
1695 netif_receive_skb(new_skb); 1695 netif_receive_skb(new_skb);
1696 1696
@@ -2445,9 +2445,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2445 * settings -- so we only allow the user to toggle the TX processing. 2445 * settings -- so we only allow the user to toggle the TX processing.
2446 */ 2446 */
2447 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 2447 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2448 NETIF_F_HW_VLAN_TX; 2448 NETIF_F_HW_VLAN_CTAG_TX;
2449 dev->features = dev->hw_features | 2449 dev->features = dev->hw_features |
2450 NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; 2450 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2451 2451
2452 if(register_netdev(dev) < 0) { 2452 if(register_netdev(dev) < 0) {
2453 err_msg = "unable to register netdev"; 2453 err_msg = "unable to register netdev";
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 549b77500579..8b04bfc20cfb 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -594,7 +594,8 @@ static const struct ethtool_ops ethtool_ops;
594 594
595 595
596#ifdef VLAN_SUPPORT 596#ifdef VLAN_SUPPORT
597static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 597static int netdev_vlan_rx_add_vid(struct net_device *dev,
598 __be16 proto, u16 vid)
598{ 599{
599 struct netdev_private *np = netdev_priv(dev); 600 struct netdev_private *np = netdev_priv(dev);
600 601
@@ -608,7 +609,8 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
608 return 0; 609 return 0;
609} 610}
610 611
611static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 612static int netdev_vlan_rx_kill_vid(struct net_device *dev,
613 __be16 proto, u16 vid)
612{ 614{
613 struct netdev_private *np = netdev_priv(dev); 615 struct netdev_private *np = netdev_priv(dev);
614 616
@@ -702,7 +704,7 @@ static int starfire_init_one(struct pci_dev *pdev,
702#endif /* ZEROCOPY */ 704#endif /* ZEROCOPY */
703 705
704#ifdef VLAN_SUPPORT 706#ifdef VLAN_SUPPORT
705 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 707 dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
706#endif /* VLAN_RX_KILL_VID */ 708#endif /* VLAN_RX_KILL_VID */
707#ifdef ADDR_64BITS 709#ifdef ADDR_64BITS
708 dev->features |= NETIF_F_HIGHDMA; 710 dev->features |= NETIF_F_HIGHDMA;
@@ -1496,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1496 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", 1498 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1497 vlid); 1499 vlid);
1498 } 1500 }
1499 __vlan_hwaccel_put_tag(skb, vlid); 1501 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1500 } 1502 }
1501#endif /* VLAN_SUPPORT */ 1503#endif /* VLAN_SUPPORT */
1502 netif_receive_skb(skb); 1504 netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae1..ee705771bd2c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
188 188
189 /* allocate a new skb for next time receive */ 189 /* allocate a new skb for next time receive */
190 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 190 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) { 191 if (!new_skb)
192 pr_notice("init: low on mem - packet dropped\n");
193 goto init_error; 192 goto init_error;
194 } 193
195 skb_reserve(new_skb, NET_IP_ALIGN); 194 skb_reserve(new_skb, NET_IP_ALIGN);
196 /* Invidate the data cache of skb->data range when it is write back 195 /* Invidate the data cache of skb->data range when it is write back
197 * cache. It will prevent overwritting the new data from DMA 196 * cache. It will prevent overwritting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
1236 1235
1237 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 1236 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
1238 if (!new_skb) { 1237 if (!new_skb) {
1239 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1240 dev->stats.rx_dropped++; 1238 dev->stats.rx_dropped++;
1241 goto out; 1239 goto out;
1242 } 1240 }
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e5034..269295403fc4 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
1464 } 1464 }
1465 1465
1466 /* Allocate TX descriptor ring in coherent memory */ 1466 /* Allocate TX descriptor ring in coherent memory */
1467 greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, 1467 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1468 1024, 1468 &greth->tx_bd_base_phys,
1469 &greth->tx_bd_base_phys, 1469 GFP_KERNEL | __GFP_ZERO);
1470 GFP_KERNEL);
1471
1472 if (!greth->tx_bd_base) { 1470 if (!greth->tx_bd_base) {
1473 if (netif_msg_probe(greth))
1474 dev_err(&dev->dev, "could not allocate descriptor memory.\n");
1475 err = -ENOMEM; 1471 err = -ENOMEM;
1476 goto error3; 1472 goto error3;
1477 } 1473 }
1478 1474
1479 memset(greth->tx_bd_base, 0, 1024);
1480
1481 /* Allocate RX descriptor ring in coherent memory */ 1475 /* Allocate RX descriptor ring in coherent memory */
1482 greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, 1476 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1483 1024, 1477 &greth->rx_bd_base_phys,
1484 &greth->rx_bd_base_phys, 1478 GFP_KERNEL | __GFP_ZERO);
1485 GFP_KERNEL);
1486
1487 if (!greth->rx_bd_base) { 1479 if (!greth->rx_bd_base) {
1488 if (netif_msg_probe(greth))
1489 dev_err(greth->dev, "could not allocate descriptor memory.\n");
1490 err = -ENOMEM; 1480 err = -ENOMEM;
1491 goto error4; 1481 goto error4;
1492 } 1482 }
1493 1483
1494 memset(greth->rx_bd_base, 0, 1024);
1495
1496 /* Get MAC address from: module param, OF property or ID prom */ 1484 /* Get MAC address from: module param, OF property or ID prom */
1497 for (i = 0; i < 6; i++) { 1485 for (i = 0; i < 6; i++) {
1498 if (macaddr[i] != 0) 1486 if (macaddr[i] != 0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index c0bc41a784ca..b7894f8af9d1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -472,7 +472,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
472 ap->name = pci_name(pdev); 472 ap->name = pci_name(pdev);
473 473
474 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 474 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
475 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 475 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
476 476
477 dev->watchdog_timeo = 5*HZ; 477 dev->watchdog_timeo = 5*HZ;
478 478
@@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2019 2019
2020 /* send it up */ 2020 /* send it up */
2021 if ((bd_flags & BD_FLG_VLAN_TAG)) 2021 if ((bd_flags & BD_FLG_VLAN_TAG))
2022 __vlan_hwaccel_put_tag(skb, retdesc->vlan); 2022 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
2023 netif_rx(skb); 2023 netif_rx(skb);
2024 2024
2025 dev->stats.rx_packets++; 2025 dev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db7..65926a956575 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
318 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); 318 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
319 319
320 if (!skb) { 320 if (!skb) {
321 printk ("%s: Memory squeeze, deferring packet.\n",
322 dev->name);
323 dev->stats.rx_dropped++; 321 dev->stats.rx_dropped++;
324 rd->mblength = 0; 322 rd->mblength = 0;
325 rd->rmd1_bits = LE_R1_OWN; 323 rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e5..0866e7627433 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
293 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); 293 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
294 294
295 if (!skb) { 295 if (!skb) {
296 netdev_warn(dev, "Memory squeeze, deferring packet\n");
297 dev->stats.rx_dropped++; 296 dev->stats.rx_dropped++;
298 rd->mblength = 0; 297 rd->mblength = 0;
299 rd->rmd1_bits = LE_R1_OWN; 298 rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe7..9793767996a2 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
528 dev->stats.rx_packets++; 528 dev->stats.rx_packets++;
529 } else { 529 } else {
530 am_writeword (dev, hdraddr + 2, RMD_OWN); 530 am_writeword (dev, hdraddr + 2, RMD_OWN);
531 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
532 dev->stats.rx_dropped++; 531 dev->stats.rx_dropped++;
533 break; 532 break;
534 } 533 }
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 42d4e6ad58a5..8e6b665a6726 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
793#if AMD8111E_VLAN_TAG_USED 793#if AMD8111E_VLAN_TAG_USED
794 if (vtag == TT_VLAN_TAGGED){ 794 if (vtag == TT_VLAN_TAGGED){
795 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); 795 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
796 __vlan_hwaccel_put_tag(skb, vlan_tag); 796 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
797 } 797 }
798#endif 798#endif
799 netif_receive_skb(skb); 799 netif_receive_skb(skb);
@@ -1869,7 +1869,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
1869 SET_NETDEV_DEV(dev, &pdev->dev); 1869 SET_NETDEV_DEV(dev, &pdev->dev);
1870 1870
1871#if AMD8111E_VLAN_TAG_USED 1871#if AMD8111E_VLAN_TAG_USED
1872 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ; 1872 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
1873#endif 1873#endif
1874 1874
1875 lp = netdev_priv(dev); 1875 lp = netdev_priv(dev);
@@ -1907,7 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
1907 netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); 1907 netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1908 1908
1909#if AMD8111E_VLAN_TAG_USED 1909#if AMD8111E_VLAN_TAG_USED
1910 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1910 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1911#endif 1911#endif
1912 /* Probe the external PHY */ 1912 /* Probe the external PHY */
1913 amd8111e_probe_ext_phy(dev); 1913 amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17b..c178eb4c8166 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
193 193
194 skb = netdev_alloc_skb(dev, pkt_len + 2); 194 skb = netdev_alloc_skb(dev, pkt_len + 2);
195 if (skb == NULL) { 195 if (skb == NULL) {
196 netdev_warn(dev, "Memory squeeze, deferring packet\n");
197 for (i = 0; i < RX_RING_SIZE; i++) 196 for (i = 0; i < RX_RING_SIZE; i++)
198 if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) 197 if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
199 break; 198 break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f51..e8d0ef508f48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
996 else { 996 else {
997 skb = netdev_alloc_skb(dev, pkt_len + 2); 997 skb = netdev_alloc_skb(dev, pkt_len + 2);
998 if (skb == NULL) { 998 if (skb == NULL) {
999 DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
1000 dev->name ));
1001 for( i = 0; i < RX_RING_SIZE; i++ ) 999 for( i = 0; i < RX_RING_SIZE; i++ )
1002 if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & 1000 if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
1003 RMD1_OWN_CHIP) 1001 RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
1149static int __init atarilance_module_init(void) 1147static int __init atarilance_module_init(void)
1150{ 1148{
1151 atarilance_dev = atarilance_probe(-1); 1149 atarilance_dev = atarilance_probe(-1);
1152 if (IS_ERR(atarilance_dev)) 1150 return PTR_RET(atarilance_dev);
1153 return PTR_ERR(atarilance_dev);
1154 return 0;
1155} 1151}
1156 1152
1157static void __exit atarilance_module_exit(void) 1153static void __exit atarilance_module_exit(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d419144..688aede742c7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
727 frmlen -= 4; /* Remove FCS */ 727 frmlen -= 4; /* Remove FCS */
728 skb = netdev_alloc_skb(dev, frmlen + 2); 728 skb = netdev_alloc_skb(dev, frmlen + 2);
729 if (skb == NULL) { 729 if (skb == NULL) {
730 netdev_err(dev, "Memory squeeze, dropping packet.\n");
731 dev->stats.rx_dropped++; 730 dev->stats.rx_dropped++;
732 continue; 731 continue;
733 } 732 }
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b393..3d86ffeb4e15 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
607 skb = netdev_alloc_skb(dev, len + 2); 607 skb = netdev_alloc_skb(dev, len + 2);
608 608
609 if (skb == 0) { 609 if (skb == 0) {
610 printk("%s: Memory squeeze, deferring packet.\n",
611 dev->name);
612 dev->stats.rx_dropped++; 610 dev->stats.rx_dropped++;
613 *rds_ptr(rd, mblength, lp->type) = 0; 611 *rds_ptr(rd, mblength, lp->type) = 0;
614 *rds_ptr(rd, rmd1, lp->type) = 612 *rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862c..a51497c9d2af 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
188int __init init_module(void) 188int __init init_module(void)
189{ 189{
190 dev_mvme147_lance = mvme147lance_probe(-1); 190 dev_mvme147_lance = mvme147lance_probe(-1);
191 if (IS_ERR(dev_mvme147_lance)) 191 return PTR_RET(dev_mvme147_lance);
192 return PTR_ERR(dev_mvme147_lance);
193 return 0;
194} 192}
195 193
196void __exit cleanup_module(void) 194void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b65108536..26fc0ce0faa3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
1238int __init init_module(void) 1238int __init init_module(void)
1239{ 1239{
1240 dev_ni65 = ni65_probe(-1); 1240 dev_ni65 = ni65_probe(-1);
1241 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; 1241 return PTR_RET(dev_ni65);
1242} 1242}
1243 1243
1244void __exit cleanup_module(void) 1244void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf13..ed2130727643 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
1166 skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN); 1166 skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
1167 1167
1168 if (skb == NULL) { 1168 if (skb == NULL) {
1169 netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
1170 dev->stats.rx_dropped++; 1169 dev->stats.rx_dropped++;
1171 return; 1170 return;
1172 } 1171 }
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b6483..4375abe61da1 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
812 else { 812 else {
813 skb = netdev_alloc_skb(dev, pkt_len + 2); 813 skb = netdev_alloc_skb(dev, pkt_len + 2);
814 if (skb == NULL) { 814 if (skb == NULL) {
815 DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
816 dev->name ));
817
818 dev->stats.rx_dropped++; 815 dev->stats.rx_dropped++;
819 head->msg_length = 0; 816 head->msg_length = 0;
820 head->flag |= RMD1_OWN_CHIP; 817 head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
943int __init init_module(void) 940int __init init_module(void)
944{ 941{
945 sun3lance_dev = sun3lance_probe(-1); 942 sun3lance_dev = sun3lance_probe(-1);
946 if (IS_ERR(sun3lance_dev)) 943 return PTR_RET(sun3lance_dev);
947 return PTR_ERR(sun3lance_dev);
948 return 0;
949} 944}
950 945
951void __exit cleanup_module(void) 946void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d3727..f47b780892e9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
536 skb = netdev_alloc_skb(dev, len + 2); 536 skb = netdev_alloc_skb(dev, len + 2);
537 537
538 if (skb == NULL) { 538 if (skb == NULL) {
539 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
540 dev->name);
541 dev->stats.rx_dropped++; 539 dev->stats.rx_dropped++;
542 rd->mblength = 0; 540 rd->mblength = 0;
543 rd->rmd1_bits = LE_R1_OWN; 541 rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
708 skb = netdev_alloc_skb(dev, len + 2); 706 skb = netdev_alloc_skb(dev, len + 2);
709 707
710 if (skb == NULL) { 708 if (skb == NULL) {
711 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
712 dev->name);
713 dev->stats.rx_dropped++; 709 dev->stats.rx_dropped++;
714 sbus_writew(0, &rd->mblength); 710 sbus_writew(0, &rd->mblength);
715 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); 711 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
1377 dma_alloc_coherent(&op->dev, 1373 dma_alloc_coherent(&op->dev,
1378 sizeof(struct lance_init_block), 1374 sizeof(struct lance_init_block),
1379 &lp->init_block_dvma, GFP_ATOMIC); 1375 &lp->init_block_dvma, GFP_ATOMIC);
1380 if (!lp->init_block_mem) { 1376 if (!lp->init_block_mem)
1381 printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
1382 goto fail; 1377 goto fail;
1383 } 1378
1384 lp->pio_buffer = 0; 1379 lp->pio_buffer = 0;
1385 lp->init_ring = lance_init_ring_dvma; 1380 lp->init_ring = lance_init_ring_dvma;
1386 lp->rx = lance_rx_dvma; 1381 lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68cf..4ce8ceb62205 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
386 /* Allocate the DMA ring buffers */ 386 /* Allocate the DMA ring buffers */
387 387
388 mp->tx_ring = dma_alloc_coherent(mp->device, 388 mp->tx_ring = dma_alloc_coherent(mp->device,
389 N_TX_RING * MACE_BUFF_SIZE, 389 N_TX_RING * MACE_BUFF_SIZE,
390 &mp->tx_ring_phys, GFP_KERNEL); 390 &mp->tx_ring_phys, GFP_KERNEL);
391 if (mp->tx_ring == NULL) { 391 if (mp->tx_ring == NULL)
392 printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
393 goto out1; 392 goto out1;
394 }
395 393
396 mp->rx_ring = dma_alloc_coherent(mp->device, 394 mp->rx_ring = dma_alloc_coherent(mp->device,
397 N_RX_RING * MACE_BUFF_SIZE, 395 N_RX_RING * MACE_BUFF_SIZE,
398 &mp->rx_ring_phys, GFP_KERNEL); 396 &mp->rx_ring_phys, GFP_KERNEL);
399 if (mp->rx_ring == NULL) { 397 if (mp->rx_ring == NULL)
400 printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
401 goto out2; 398 goto out2;
402 }
403 399
404 mace_dma_off(dev); 400 mace_dma_off(dev);
405 401
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f07fc633ab9..0ba900762b13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -417,7 +417,7 @@ static void atl1c_set_multi(struct net_device *netdev)
417 417
418static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) 418static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
419{ 419{
420 if (features & NETIF_F_HW_VLAN_RX) { 420 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
421 /* enable VLAN tag insert/strip */ 421 /* enable VLAN tag insert/strip */
422 *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; 422 *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
423 } else { 423 } else {
@@ -494,10 +494,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
494 * Since there is no support for separate rx/tx vlan accel 494 * Since there is no support for separate rx/tx vlan accel
495 * enable/disable make sure tx flag is always in same state as rx. 495 * enable/disable make sure tx flag is always in same state as rx.
496 */ 496 */
497 if (features & NETIF_F_HW_VLAN_RX) 497 if (features & NETIF_F_HW_VLAN_CTAG_RX)
498 features |= NETIF_F_HW_VLAN_TX; 498 features |= NETIF_F_HW_VLAN_CTAG_TX;
499 else 499 else
500 features &= ~NETIF_F_HW_VLAN_TX; 500 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
501 501
502 if (netdev->mtu > MAX_TSO_FRAME_SIZE) 502 if (netdev->mtu > MAX_TSO_FRAME_SIZE)
503 features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 503 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
@@ -510,7 +510,7 @@ static int atl1c_set_features(struct net_device *netdev,
510{ 510{
511 netdev_features_t changed = netdev->features ^ features; 511 netdev_features_t changed = netdev->features ^ features;
512 512
513 if (changed & NETIF_F_HW_VLAN_RX) 513 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
514 atl1c_vlan_mode(netdev, features); 514 atl1c_vlan_mode(netdev, features);
515 515
516 return 0; 516 return 0;
@@ -1809,7 +1809,7 @@ rrs_checked:
1809 1809
1810 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); 1810 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1811 vlan = le16_to_cpu(vlan); 1811 vlan = le16_to_cpu(vlan);
1812 __vlan_hwaccel_put_tag(skb, vlan); 1812 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
1813 } 1813 }
1814 netif_receive_skb(skb); 1814 netif_receive_skb(skb);
1815 1815
@@ -2475,13 +2475,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2475 atl1c_set_ethtool_ops(netdev); 2475 atl1c_set_ethtool_ops(netdev);
2476 2476
2477 /* TODO: add when ready */ 2477 /* TODO: add when ready */
2478 netdev->hw_features = NETIF_F_SG | 2478 netdev->hw_features = NETIF_F_SG |
2479 NETIF_F_HW_CSUM | 2479 NETIF_F_HW_CSUM |
2480 NETIF_F_HW_VLAN_RX | 2480 NETIF_F_HW_VLAN_CTAG_RX |
2481 NETIF_F_TSO | 2481 NETIF_F_TSO |
2482 NETIF_F_TSO6; 2482 NETIF_F_TSO6;
2483 netdev->features = netdev->hw_features | 2483 netdev->features = netdev->hw_features |
2484 NETIF_F_HW_VLAN_TX; 2484 NETIF_F_HW_VLAN_CTAG_TX;
2485 return 0; 2485 return 0;
2486} 2486}
2487 2487
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index ac25f05ff68f..0688bb82b442 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -315,7 +315,7 @@ static void atl1e_set_multi(struct net_device *netdev)
315 315
316static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) 316static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
317{ 317{
318 if (features & NETIF_F_HW_VLAN_RX) { 318 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
319 /* enable VLAN tag insert/strip */ 319 /* enable VLAN tag insert/strip */
320 *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; 320 *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
321 } else { 321 } else {
@@ -378,10 +378,10 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev,
378 * Since there is no support for separate rx/tx vlan accel 378 * Since there is no support for separate rx/tx vlan accel
379 * enable/disable make sure tx flag is always in same state as rx. 379 * enable/disable make sure tx flag is always in same state as rx.
380 */ 380 */
381 if (features & NETIF_F_HW_VLAN_RX) 381 if (features & NETIF_F_HW_VLAN_CTAG_RX)
382 features |= NETIF_F_HW_VLAN_TX; 382 features |= NETIF_F_HW_VLAN_CTAG_TX;
383 else 383 else
384 features &= ~NETIF_F_HW_VLAN_TX; 384 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
385 385
386 return features; 386 return features;
387} 387}
@@ -391,7 +391,7 @@ static int atl1e_set_features(struct net_device *netdev,
391{ 391{
392 netdev_features_t changed = netdev->features ^ features; 392 netdev_features_t changed = netdev->features ^ features;
393 393
394 if (changed & NETIF_F_HW_VLAN_RX) 394 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
395 atl1e_vlan_mode(netdev, features); 395 atl1e_vlan_mode(netdev, features);
396 396
397 return 0; 397 return 0;
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1420 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & 1420 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
1421 RRS_PKT_SIZE_MASK) - 4; /* CRC */ 1421 RRS_PKT_SIZE_MASK) - 4; /* CRC */
1422 skb = netdev_alloc_skb_ip_align(netdev, packet_size); 1422 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1423 if (skb == NULL) { 1423 if (skb == NULL)
1424 netdev_warn(netdev,
1425 "Memory squeeze, deferring packet\n");
1426 goto skip_pkt; 1424 goto skip_pkt;
1427 } 1425
1428 memcpy(skb->data, (u8 *)(prrs + 1), packet_size); 1426 memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
1429 skb_put(skb, packet_size); 1427 skb_put(skb, packet_size);
1430 skb->protocol = eth_type_trans(skb, netdev); 1428 skb->protocol = eth_type_trans(skb, netdev);
@@ -1437,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1437 netdev_dbg(netdev, 1435 netdev_dbg(netdev,
1438 "RXD VLAN TAG<RRD>=0x%04x\n", 1436 "RXD VLAN TAG<RRD>=0x%04x\n",
1439 prrs->vtag); 1437 prrs->vtag);
1440 __vlan_hwaccel_put_tag(skb, vlan_tag); 1438 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1441 } 1439 }
1442 netif_receive_skb(skb); 1440 netif_receive_skb(skb);
1443 1441
@@ -2200,9 +2198,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2200 atl1e_set_ethtool_ops(netdev); 2198 atl1e_set_ethtool_ops(netdev);
2201 2199
2202 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | 2200 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
2203 NETIF_F_HW_VLAN_RX; 2201 NETIF_F_HW_VLAN_CTAG_RX;
2204 netdev->features = netdev->hw_features | NETIF_F_LLTX | 2202 netdev->features = netdev->hw_features | NETIF_F_LLTX |
2205 NETIF_F_HW_VLAN_TX; 2203 NETIF_F_HW_VLAN_CTAG_TX;
2206 2204
2207 return 0; 2205 return 0;
2208} 2206}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5b0d9931c720..fa0915f3999b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2024,7 +2024,7 @@ rrd_ok:
2024 ((rrd->vlan_tag & 7) << 13) | 2024 ((rrd->vlan_tag & 7) << 13) |
2025 ((rrd->vlan_tag & 8) << 9); 2025 ((rrd->vlan_tag & 8) << 9);
2026 2026
2027 __vlan_hwaccel_put_tag(skb, vlan_tag); 2027 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
2028 } 2028 }
2029 netif_receive_skb(skb); 2029 netif_receive_skb(skb);
2030 2030
@@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev)
2774 return 0; 2774 return 0;
2775} 2775}
2776 2776
2777#ifdef CONFIG_PM 2777#ifdef CONFIG_PM_SLEEP
2778static int atl1_suspend(struct device *dev) 2778static int atl1_suspend(struct device *dev)
2779{ 2779{
2780 struct pci_dev *pdev = to_pci_dev(dev); 2780 struct pci_dev *pdev = to_pci_dev(dev);
@@ -2876,23 +2876,18 @@ static int atl1_resume(struct device *dev)
2876 2876
2877 return 0; 2877 return 0;
2878} 2878}
2879#endif
2879 2880
2880static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); 2881static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
2881#define ATL1_PM_OPS (&atl1_pm_ops)
2882
2883#else
2884
2885static int atl1_suspend(struct device *dev) { return 0; }
2886
2887#define ATL1_PM_OPS NULL
2888#endif
2889 2882
2890static void atl1_shutdown(struct pci_dev *pdev) 2883static void atl1_shutdown(struct pci_dev *pdev)
2891{ 2884{
2892 struct net_device *netdev = pci_get_drvdata(pdev); 2885 struct net_device *netdev = pci_get_drvdata(pdev);
2893 struct atl1_adapter *adapter = netdev_priv(netdev); 2886 struct atl1_adapter *adapter = netdev_priv(netdev);
2894 2887
2888#ifdef CONFIG_PM_SLEEP
2895 atl1_suspend(&pdev->dev); 2889 atl1_suspend(&pdev->dev);
2890#endif
2896 pci_wake_from_d3(pdev, adapter->wol); 2891 pci_wake_from_d3(pdev, adapter->wol);
2897 pci_set_power_state(pdev, PCI_D3hot); 2892 pci_set_power_state(pdev, PCI_D3hot);
2898} 2893}
@@ -3023,10 +3018,10 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3023 3018
3024 netdev->features = NETIF_F_HW_CSUM; 3019 netdev->features = NETIF_F_HW_CSUM;
3025 netdev->features |= NETIF_F_SG; 3020 netdev->features |= NETIF_F_SG;
3026 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 3021 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3027 3022
3028 netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO | 3023 netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
3029 NETIF_F_HW_VLAN_RX; 3024 NETIF_F_HW_VLAN_CTAG_RX;
3030 3025
3031 /* is this valid? see atl1_setup_mac_ctrl() */ 3026 /* is this valid? see atl1_setup_mac_ctrl() */
3032 netdev->features |= NETIF_F_RXCSUM; 3027 netdev->features |= NETIF_F_RXCSUM;
@@ -3147,7 +3142,7 @@ static struct pci_driver atl1_driver = {
3147 .probe = atl1_probe, 3142 .probe = atl1_probe,
3148 .remove = atl1_remove, 3143 .remove = atl1_remove,
3149 .shutdown = atl1_shutdown, 3144 .shutdown = atl1_shutdown,
3150 .driver.pm = ATL1_PM_OPS, 3145 .driver.pm = &atl1_pm_ops,
3151}; 3146};
3152 3147
3153/** 3148/**
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e0..265ce1b752ed 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -363,7 +363,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
363 363
364static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) 364static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
365{ 365{
366 if (features & NETIF_F_HW_VLAN_RX) { 366 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
367 /* enable VLAN tag insert/strip */ 367 /* enable VLAN tag insert/strip */
368 *ctrl |= MAC_CTRL_RMV_VLAN; 368 *ctrl |= MAC_CTRL_RMV_VLAN;
369 } else { 369 } else {
@@ -399,10 +399,10 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev,
399 * Since there is no support for separate rx/tx vlan accel 399 * Since there is no support for separate rx/tx vlan accel
400 * enable/disable make sure tx flag is always in same state as rx. 400 * enable/disable make sure tx flag is always in same state as rx.
401 */ 401 */
402 if (features & NETIF_F_HW_VLAN_RX) 402 if (features & NETIF_F_HW_VLAN_CTAG_RX)
403 features |= NETIF_F_HW_VLAN_TX; 403 features |= NETIF_F_HW_VLAN_CTAG_TX;
404 else 404 else
405 features &= ~NETIF_F_HW_VLAN_TX; 405 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
406 406
407 return features; 407 return features;
408} 408}
@@ -412,7 +412,7 @@ static int atl2_set_features(struct net_device *netdev,
412{ 412{
413 netdev_features_t changed = netdev->features ^ features; 413 netdev_features_t changed = netdev->features ^ features;
414 414
415 if (changed & NETIF_F_HW_VLAN_RX) 415 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
416 atl2_vlan_mode(netdev, features); 416 atl2_vlan_mode(netdev, features);
417 417
418 return 0; 418 return 0;
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
437 /* alloc new buffer */ 437 /* alloc new buffer */
438 skb = netdev_alloc_skb_ip_align(netdev, rx_size); 438 skb = netdev_alloc_skb_ip_align(netdev, rx_size);
439 if (NULL == skb) { 439 if (NULL == skb) {
440 printk(KERN_WARNING
441 "%s: Mem squeeze, deferring packet.\n",
442 netdev->name);
443 /* 440 /*
444 * Check that some rx space is free. If not, 441 * Check that some rx space is free. If not,
445 * free one and mark stats->rx_dropped++. 442 * free one and mark stats->rx_dropped++.
@@ -455,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
455 ((rxd->status.vtag&7) << 13) | 452 ((rxd->status.vtag&7) << 13) |
456 ((rxd->status.vtag&8) << 9); 453 ((rxd->status.vtag&8) << 9);
457 454
458 __vlan_hwaccel_put_tag(skb, vlan_tag); 455 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
459 } 456 }
460 netif_rx(skb); 457 netif_rx(skb);
461 netdev->stats.rx_bytes += rx_size; 458 netdev->stats.rx_bytes += rx_size;
@@ -890,7 +887,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
890 skb->len-copy_len); 887 skb->len-copy_len);
891 offset = ((u32)(skb->len-copy_len + 3) & ~3); 888 offset = ((u32)(skb->len-copy_len + 3) & ~3);
892 } 889 }
893#ifdef NETIF_F_HW_VLAN_TX 890#ifdef NETIF_F_HW_VLAN_CTAG_TX
894 if (vlan_tx_tag_present(skb)) { 891 if (vlan_tx_tag_present(skb)) {
895 u16 vlan_tag = vlan_tx_tag_get(skb); 892 u16 vlan_tag = vlan_tx_tag_get(skb);
896 vlan_tag = (vlan_tag << 4) | 893 vlan_tag = (vlan_tag << 4) |
@@ -1416,8 +1413,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1416 1413
1417 err = -EIO; 1414 err = -EIO;
1418 1415
1419 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX; 1416 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
1420 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 1417 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1421 1418
1422 /* Init PHY as early as possible due to power saving issue */ 1419 /* Init PHY as early as possible due to power saving issue */
1423 atl2_phy_init(&adapter->hw); 1420 atl2_phy_init(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index f82eb1699464..46a622cceee4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -220,7 +220,7 @@ static void atlx_link_chg_task(struct work_struct *work)
220 220
221static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) 221static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
222{ 222{
223 if (features & NETIF_F_HW_VLAN_RX) { 223 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
224 /* enable VLAN tag insert/strip */ 224 /* enable VLAN tag insert/strip */
225 *ctrl |= MAC_CTRL_RMV_VLAN; 225 *ctrl |= MAC_CTRL_RMV_VLAN;
226 } else { 226 } else {
@@ -257,10 +257,10 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev,
257 * Since there is no support for separate rx/tx vlan accel 257 * Since there is no support for separate rx/tx vlan accel
258 * enable/disable make sure tx flag is always in same state as rx. 258 * enable/disable make sure tx flag is always in same state as rx.
259 */ 259 */
260 if (features & NETIF_F_HW_VLAN_RX) 260 if (features & NETIF_F_HW_VLAN_CTAG_RX)
261 features |= NETIF_F_HW_VLAN_TX; 261 features |= NETIF_F_HW_VLAN_CTAG_TX;
262 else 262 else
263 features &= ~NETIF_F_HW_VLAN_TX; 263 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
264 264
265 return features; 265 return features;
266} 266}
@@ -270,7 +270,7 @@ static int atlx_set_features(struct net_device *netdev,
270{ 270{
271 netdev_features_t changed = netdev->features ^ features; 271 netdev_features_t changed = netdev->features ^ features;
272 272
273 if (changed & NETIF_F_HW_VLAN_RX) 273 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
274 atlx_vlan_mode(netdev, features); 274 atlx_vlan_mode(netdev, features);
275 275
276 return 0; 276 return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e811..0b3e23ec37f7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
862 862
863 /* allocate rx dma ring */ 863 /* allocate rx dma ring */
864 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 864 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
865 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 865 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
866 GFP_KERNEL | __GFP_ZERO);
866 if (!p) { 867 if (!p) {
867 dev_err(kdev, "cannot allocate rx ring %u\n", size);
868 ret = -ENOMEM; 868 ret = -ENOMEM;
869 goto out_freeirq_tx; 869 goto out_freeirq_tx;
870 } 870 }
871 871
872 memset(p, 0, size);
873 priv->rx_desc_alloc_size = size; 872 priv->rx_desc_alloc_size = size;
874 priv->rx_desc_cpu = p; 873 priv->rx_desc_cpu = p;
875 874
876 /* allocate tx dma ring */ 875 /* allocate tx dma ring */
877 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 876 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
878 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 877 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
878 GFP_KERNEL | __GFP_ZERO);
879 if (!p) { 879 if (!p) {
880 dev_err(kdev, "cannot allocate tx ring\n");
881 ret = -ENOMEM; 880 ret = -ENOMEM;
882 goto out_free_rx_ring; 881 goto out_free_rx_ring;
883 } 882 }
884 883
885 memset(p, 0, size);
886 priv->tx_desc_alloc_size = size; 884 priv->tx_desc_alloc_size = size;
887 priv->tx_desc_cpu = p; 885 priv->tx_desc_cpu = p;
888 886
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
1619 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; 1617 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1620 struct mii_bus *bus; 1618 struct mii_bus *bus;
1621 const char *clk_name; 1619 const char *clk_name;
1622 unsigned int iomem_size;
1623 int i, ret; 1620 int i, ret;
1624 1621
1625 /* stop if shared driver failed, assume driver->probe will be 1622 /* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
1644 if (ret) 1641 if (ret)
1645 goto out; 1642 goto out;
1646 1643
1647 iomem_size = resource_size(res_mem); 1644 priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
1648 if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1649 ret = -EBUSY;
1650 goto out;
1651 }
1652
1653 priv->base = ioremap(res_mem->start, iomem_size);
1654 if (priv->base == NULL) { 1645 if (priv->base == NULL) {
1655 ret = -ENOMEM; 1646 ret = -ENOMEM;
1656 goto out_release_mem; 1647 goto out;
1657 } 1648 }
1649
1658 dev->irq = priv->irq = res_irq->start; 1650 dev->irq = priv->irq = res_irq->start;
1659 priv->irq_rx = res_irq_rx->start; 1651 priv->irq_rx = res_irq_rx->start;
1660 priv->irq_tx = res_irq_tx->start; 1652 priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
1674 priv->mac_clk = clk_get(&pdev->dev, clk_name); 1666 priv->mac_clk = clk_get(&pdev->dev, clk_name);
1675 if (IS_ERR(priv->mac_clk)) { 1667 if (IS_ERR(priv->mac_clk)) {
1676 ret = PTR_ERR(priv->mac_clk); 1668 ret = PTR_ERR(priv->mac_clk);
1677 goto out_unmap; 1669 goto out;
1678 } 1670 }
1679 clk_enable(priv->mac_clk); 1671 clk_prepare_enable(priv->mac_clk);
1680 1672
1681 /* initialize default and fetch platform data */ 1673 /* initialize default and fetch platform data */
1682 priv->rx_ring_size = BCMENET_DEF_RX_DESC; 1674 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
1705 priv->phy_clk = NULL; 1697 priv->phy_clk = NULL;
1706 goto out_put_clk_mac; 1698 goto out_put_clk_mac;
1707 } 1699 }
1708 clk_enable(priv->phy_clk); 1700 clk_prepare_enable(priv->phy_clk);
1709 } 1701 }
1710 1702
1711 /* do minimal hardware init to be able to probe mii bus */ 1703 /* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
1733 * if a slave is not present on hw */ 1725 * if a slave is not present on hw */
1734 bus->phy_mask = ~(1 << priv->phy_id); 1726 bus->phy_mask = ~(1 << priv->phy_id);
1735 1727
1736 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); 1728 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
1729 GFP_KERNEL);
1737 if (!bus->irq) { 1730 if (!bus->irq) {
1738 ret = -ENOMEM; 1731 ret = -ENOMEM;
1739 goto out_free_mdio; 1732 goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
1794 return 0; 1787 return 0;
1795 1788
1796out_unregister_mdio: 1789out_unregister_mdio:
1797 if (priv->mii_bus) { 1790 if (priv->mii_bus)
1798 mdiobus_unregister(priv->mii_bus); 1791 mdiobus_unregister(priv->mii_bus);
1799 kfree(priv->mii_bus->irq);
1800 }
1801 1792
1802out_free_mdio: 1793out_free_mdio:
1803 if (priv->mii_bus) 1794 if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
1807 /* turn off mdc clock */ 1798 /* turn off mdc clock */
1808 enet_writel(priv, 0, ENET_MIISC_REG); 1799 enet_writel(priv, 0, ENET_MIISC_REG);
1809 if (priv->phy_clk) { 1800 if (priv->phy_clk) {
1810 clk_disable(priv->phy_clk); 1801 clk_disable_unprepare(priv->phy_clk);
1811 clk_put(priv->phy_clk); 1802 clk_put(priv->phy_clk);
1812 } 1803 }
1813 1804
1814out_put_clk_mac: 1805out_put_clk_mac:
1815 clk_disable(priv->mac_clk); 1806 clk_disable_unprepare(priv->mac_clk);
1816 clk_put(priv->mac_clk); 1807 clk_put(priv->mac_clk);
1817
1818out_unmap:
1819 iounmap(priv->base);
1820
1821out_release_mem:
1822 release_mem_region(res_mem->start, iomem_size);
1823out: 1808out:
1824 free_netdev(dev); 1809 free_netdev(dev);
1825 return ret; 1810 return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
1833{ 1818{
1834 struct bcm_enet_priv *priv; 1819 struct bcm_enet_priv *priv;
1835 struct net_device *dev; 1820 struct net_device *dev;
1836 struct resource *res;
1837 1821
1838 /* stop netdevice */ 1822 /* stop netdevice */
1839 dev = platform_get_drvdata(pdev); 1823 dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
1845 1829
1846 if (priv->has_phy) { 1830 if (priv->has_phy) {
1847 mdiobus_unregister(priv->mii_bus); 1831 mdiobus_unregister(priv->mii_bus);
1848 kfree(priv->mii_bus->irq);
1849 mdiobus_free(priv->mii_bus); 1832 mdiobus_free(priv->mii_bus);
1850 } else { 1833 } else {
1851 struct bcm63xx_enet_platform_data *pd; 1834 struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
1856 bcm_enet_mdio_write_mii); 1839 bcm_enet_mdio_write_mii);
1857 } 1840 }
1858 1841
1859 /* release device resources */
1860 iounmap(priv->base);
1861 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1862 release_mem_region(res->start, resource_size(res));
1863
1864 /* disable hw block clocks */ 1842 /* disable hw block clocks */
1865 if (priv->phy_clk) { 1843 if (priv->phy_clk) {
1866 clk_disable(priv->phy_clk); 1844 clk_disable_unprepare(priv->phy_clk);
1867 clk_put(priv->phy_clk); 1845 clk_put(priv->phy_clk);
1868 } 1846 }
1869 clk_disable(priv->mac_clk); 1847 clk_disable_unprepare(priv->mac_clk);
1870 clk_put(priv->mac_clk); 1848 clk_put(priv->mac_clk);
1871 1849
1872 platform_set_drvdata(pdev, NULL); 1850 platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
1889static int bcm_enet_shared_probe(struct platform_device *pdev) 1867static int bcm_enet_shared_probe(struct platform_device *pdev)
1890{ 1868{
1891 struct resource *res; 1869 struct resource *res;
1892 unsigned int iomem_size;
1893 1870
1894 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1871 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1895 if (!res) 1872 if (!res)
1896 return -ENODEV; 1873 return -ENODEV;
1897 1874
1898 iomem_size = resource_size(res); 1875 bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
1899 if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) 1876 if (!bcm_enet_shared_base)
1900 return -EBUSY;
1901
1902 bcm_enet_shared_base = ioremap(res->start, iomem_size);
1903 if (!bcm_enet_shared_base) {
1904 release_mem_region(res->start, iomem_size);
1905 return -ENOMEM; 1877 return -ENOMEM;
1906 } 1878
1907 return 0; 1879 return 0;
1908} 1880}
1909 1881
1910static int bcm_enet_shared_remove(struct platform_device *pdev) 1882static int bcm_enet_shared_remove(struct platform_device *pdev)
1911{ 1883{
1912 struct resource *res;
1913
1914 iounmap(bcm_enet_shared_base);
1915 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1916 release_mem_region(res->start, resource_size(res));
1917 return 0; 1884 return 0;
1918} 1885}
1919 1886
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87c..eec0af45b859 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/mii.h> 15#include <linux/mii.h>
16#include <linux/phy.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
17#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
18#include <bcm47xx_nvram.h> 19#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
244 245
245 /* Alloc skb */ 246 /* Alloc skb */
246 slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); 247 slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
247 if (!slot->skb) { 248 if (!slot->skb)
248 bgmac_err(bgmac, "Allocation of skb failed!\n");
249 return -ENOMEM; 249 return -ENOMEM;
250 }
251 250
252 /* Poison - if everything goes fine, hardware will overwrite it */ 251 /* Poison - if everything goes fine, hardware will overwrite it */
253 rx = (struct bgmac_rx_header *)slot->skb->data; 252 rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
1313}; 1312};
1314 1313
1315/************************************************** 1314/**************************************************
1315 * MII
1316 **************************************************/
1317
1318static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
1319{
1320 return bgmac_phy_read(bus->priv, mii_id, regnum);
1321}
1322
1323static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
1324 u16 value)
1325{
1326 return bgmac_phy_write(bus->priv, mii_id, regnum, value);
1327}
1328
1329static int bgmac_mii_register(struct bgmac *bgmac)
1330{
1331 struct mii_bus *mii_bus;
1332 int i, err = 0;
1333
1334 mii_bus = mdiobus_alloc();
1335 if (!mii_bus)
1336 return -ENOMEM;
1337
1338 mii_bus->name = "bgmac mii bus";
1339 sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
1340 bgmac->core->core_unit);
1341 mii_bus->priv = bgmac;
1342 mii_bus->read = bgmac_mii_read;
1343 mii_bus->write = bgmac_mii_write;
1344 mii_bus->parent = &bgmac->core->dev;
1345 mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
1346
1347 mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
1348 if (!mii_bus->irq) {
1349 err = -ENOMEM;
1350 goto err_free_bus;
1351 }
1352 for (i = 0; i < PHY_MAX_ADDR; i++)
1353 mii_bus->irq[i] = PHY_POLL;
1354
1355 err = mdiobus_register(mii_bus);
1356 if (err) {
1357 bgmac_err(bgmac, "Registration of mii bus failed\n");
1358 goto err_free_irq;
1359 }
1360
1361 bgmac->mii_bus = mii_bus;
1362
1363 return err;
1364
1365err_free_irq:
1366 kfree(mii_bus->irq);
1367err_free_bus:
1368 mdiobus_free(mii_bus);
1369 return err;
1370}
1371
1372static void bgmac_mii_unregister(struct bgmac *bgmac)
1373{
1374 struct mii_bus *mii_bus = bgmac->mii_bus;
1375
1376 mdiobus_unregister(mii_bus);
1377 kfree(mii_bus->irq);
1378 mdiobus_free(mii_bus);
1379}
1380
1381/**************************************************
1316 * BCMA bus ops 1382 * BCMA bus ops
1317 **************************************************/ 1383 **************************************************/
1318 1384
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
1404 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) 1470 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1405 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); 1471 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1406 1472
1473 err = bgmac_mii_register(bgmac);
1474 if (err) {
1475 bgmac_err(bgmac, "Cannot register MDIO\n");
1476 err = -ENOTSUPP;
1477 goto err_dma_free;
1478 }
1479
1407 err = register_netdev(bgmac->net_dev); 1480 err = register_netdev(bgmac->net_dev);
1408 if (err) { 1481 if (err) {
1409 bgmac_err(bgmac, "Cannot register net device\n"); 1482 bgmac_err(bgmac, "Cannot register net device\n");
1410 err = -ENOTSUPP; 1483 err = -ENOTSUPP;
1411 goto err_dma_free; 1484 goto err_mii_unregister;
1412 } 1485 }
1413 1486
1414 netif_carrier_off(net_dev); 1487 netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
1417 1490
1418 return 0; 1491 return 0;
1419 1492
1493err_mii_unregister:
1494 bgmac_mii_unregister(bgmac);
1420err_dma_free: 1495err_dma_free:
1421 bgmac_dma_free(bgmac); 1496 bgmac_dma_free(bgmac);
1422 1497
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
1433 1508
1434 netif_napi_del(&bgmac->napi); 1509 netif_napi_del(&bgmac->napi);
1435 unregister_netdev(bgmac->net_dev); 1510 unregister_netdev(bgmac->net_dev);
1511 bgmac_mii_unregister(bgmac);
1436 bgmac_dma_free(bgmac); 1512 bgmac_dma_free(bgmac);
1437 bcma_set_drvdata(core, NULL); 1513 bcma_set_drvdata(core, NULL);
1438 free_netdev(bgmac->net_dev); 1514 free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f8..98d4b5fcc070 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
399 struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ 399 struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
400 struct net_device *net_dev; 400 struct net_device *net_dev;
401 struct napi_struct napi; 401 struct napi_struct napi;
402 struct mii_bus *mii_bus;
402 403
403 /* DMA */ 404 /* DMA */
404 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; 405 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6c..5d204492c603 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -416,7 +416,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
416 return 0; 416 return 0;
417} 417}
418 418
419struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) 419static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420{ 420{
421 struct bnx2 *bp = netdev_priv(dev); 421 struct bnx2 *bp = netdev_priv(dev);
422 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 422 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
854 sizeof(struct statistics_block); 854 sizeof(struct statistics_block);
855 855
856 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, 856 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 &bp->status_blk_mapping, GFP_KERNEL); 857 &bp->status_blk_mapping,
858 GFP_KERNEL | __GFP_ZERO);
858 if (status_blk == NULL) 859 if (status_blk == NULL)
859 goto alloc_mem_err; 860 goto alloc_mem_err;
860 861
861 memset(status_blk, 0, bp->status_stats_size);
862
863 bnapi = &bp->bnx2_napi[0]; 862 bnapi = &bp->bnx2_napi[0];
864 bnapi->status_blk.msi = status_blk; 863 bnapi->status_blk.msi = status_blk;
865 bnapi->hw_tx_cons_ptr = 864 bnapi->hw_tx_cons_ptr =
@@ -3212,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3212 } 3211 }
3213 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && 3212 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3214 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) 3213 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3215 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag); 3214 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3216 3215
3217 skb->protocol = eth_type_trans(skb, bp->dev); 3216 skb->protocol = eth_type_trans(skb, bp->dev);
3218 3217
@@ -3554,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev)
3554 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | 3553 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3555 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 3554 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3556 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; 3555 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3557 if (!(dev->features & NETIF_F_HW_VLAN_RX) && 3556 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3558 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) 3557 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3559 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3558 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3560 if (dev->flags & IFF_PROMISC) { 3559 if (dev->flags & IFF_PROMISC) {
@@ -7696,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7696 struct bnx2 *bp = netdev_priv(dev); 7695 struct bnx2 *bp = netdev_priv(dev);
7697 7696
7698 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) 7697 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7699 features |= NETIF_F_HW_VLAN_RX; 7698 features |= NETIF_F_HW_VLAN_CTAG_RX;
7700 7699
7701 return features; 7700 return features;
7702} 7701}
@@ -7707,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
7707 struct bnx2 *bp = netdev_priv(dev); 7706 struct bnx2 *bp = netdev_priv(dev);
7708 7707
7709 /* TSO with VLAN tag won't work with current firmware */ 7708 /* TSO with VLAN tag won't work with current firmware */
7710 if (features & NETIF_F_HW_VLAN_TX) 7709 if (features & NETIF_F_HW_VLAN_CTAG_TX)
7711 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); 7710 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7712 else 7711 else
7713 dev->vlan_features &= ~NETIF_F_ALL_TSO; 7712 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7714 7713
7715 if ((!!(features & NETIF_F_HW_VLAN_RX) != 7714 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7716 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && 7715 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7717 netif_running(dev)) { 7716 netif_running(dev)) {
7718 bnx2_netif_stop(bp, false); 7717 bnx2_netif_stop(bp, false);
@@ -8552,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8552 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 8551 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8553 8552
8554 dev->vlan_features = dev->hw_features; 8553 dev->vlan_features = dev->hw_features;
8555 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8554 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8556 dev->features |= dev->hw_features; 8555 dev->features |= dev->hw_features;
8557 dev->priv_flags |= IFF_UNICAST_FLT; 8556 dev->priv_flags |= IFF_UNICAST_FLT;
8558 8557
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a965084..3dba2a70a00e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
26 * (you will need to reboot afterwards) */ 26 * (you will need to reboot afterwards) */
27/* #define BNX2X_STOP_ON_ERROR */ 27/* #define BNX2X_STOP_ON_ERROR */
28 28
29#define DRV_MODULE_VERSION "1.78.02-0" 29#define DRV_MODULE_VERSION "1.78.17-0"
30#define DRV_MODULE_RELDATE "2013/01/14" 30#define DRV_MODULE_RELDATE "2013/04/11"
31#define BNX2X_BC_VER 0x040200 31#define BNX2X_BC_VER 0x040200
32 32
33#if defined(CONFIG_DCB) 33#if defined(CONFIG_DCB)
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
492struct bnx2x_fastpath { 492struct bnx2x_fastpath {
493 struct bnx2x *bp; /* parent */ 493 struct bnx2x *bp; /* parent */
494 494
495#define BNX2X_NAPI_WEIGHT 128
496 struct napi_struct napi; 495 struct napi_struct napi;
497 union host_hc_status_block status_blk; 496 union host_hc_status_block status_blk;
498 /* chip independed shortcuts into sb structure */ 497 /* chip independed shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
613 * START_BD - describes packed 612 * START_BD - describes packed
614 * START_BD(splitted) - includes unpaged data segment for GSO 613 * START_BD(splitted) - includes unpaged data segment for GSO
615 * PARSING_BD - for TSO and CSUM data 614 * PARSING_BD - for TSO and CSUM data
615 * PARSING_BD2 - for encapsulation data
616 * Frag BDs - decribes pages for frags 616 * Frag BDs - decribes pages for frags
617 */ 617 */
618#define BDS_PER_TX_PKT 3 618#define BDS_PER_TX_PKT 4
619#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) 619#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
620/* max BDs per tx packet including next pages */ 620/* max BDs per tx packet including next pages */
621#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \ 621#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
730#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ 730#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
731 skb->csum_offset)) 731 skb->csum_offset))
732 732
733#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) 733#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
734 734
735#define XMIT_PLAIN 0 735#define XMIT_PLAIN 0
736#define XMIT_CSUM_V4 0x1 736#define XMIT_CSUM_V4 (1 << 0)
737#define XMIT_CSUM_V6 0x2 737#define XMIT_CSUM_V6 (1 << 1)
738#define XMIT_CSUM_TCP 0x4 738#define XMIT_CSUM_TCP (1 << 2)
739#define XMIT_GSO_V4 0x8 739#define XMIT_GSO_V4 (1 << 3)
740#define XMIT_GSO_V6 0x10 740#define XMIT_GSO_V6 (1 << 4)
741#define XMIT_CSUM_ENC_V4 (1 << 5)
742#define XMIT_CSUM_ENC_V6 (1 << 6)
743#define XMIT_GSO_ENC_V4 (1 << 7)
744#define XMIT_GSO_ENC_V6 (1 << 8)
741 745
742#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) 746#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
743#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) 747#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
744 748
749#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
750#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
745 751
746/* stuff added to make the code fit 80Col */ 752/* stuff added to make the code fit 80Col */
747#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) 753#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -844,6 +850,9 @@ struct bnx2x_common {
844#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF) 850#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
845#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 851#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
846 CHIP_IS_57711E(bp)) 852 CHIP_IS_57711E(bp))
853#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
854 CHIP_IS_57811_MF(bp) || \
855 CHIP_IS_57811_VF(bp))
847#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ 856#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
848 CHIP_IS_57712_MF(bp) || \ 857 CHIP_IS_57712_MF(bp) || \
849 CHIP_IS_57712_VF(bp)) 858 CHIP_IS_57712_VF(bp))
@@ -853,9 +862,7 @@ struct bnx2x_common {
853 CHIP_IS_57810(bp) || \ 862 CHIP_IS_57810(bp) || \
854 CHIP_IS_57810_MF(bp) || \ 863 CHIP_IS_57810_MF(bp) || \
855 CHIP_IS_57810_VF(bp) || \ 864 CHIP_IS_57810_VF(bp) || \
856 CHIP_IS_57811(bp) || \ 865 CHIP_IS_57811xx(bp) || \
857 CHIP_IS_57811_MF(bp) || \
858 CHIP_IS_57811_VF(bp) || \
859 CHIP_IS_57840(bp) || \ 866 CHIP_IS_57840(bp) || \
860 CHIP_IS_57840_MF(bp) || \ 867 CHIP_IS_57840_MF(bp) || \
861 CHIP_IS_57840_VF(bp)) 868 CHIP_IS_57840_VF(bp))
@@ -1215,14 +1222,16 @@ enum {
1215 BNX2X_SP_RTNL_ENABLE_SRIOV, 1222 BNX2X_SP_RTNL_ENABLE_SRIOV,
1216 BNX2X_SP_RTNL_VFPF_MCAST, 1223 BNX2X_SP_RTNL_VFPF_MCAST,
1217 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1224 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
1225 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1218}; 1226};
1219 1227
1220 1228
1221struct bnx2x_prev_path_list { 1229struct bnx2x_prev_path_list {
1230 struct list_head list;
1222 u8 bus; 1231 u8 bus;
1223 u8 slot; 1232 u8 slot;
1224 u8 path; 1233 u8 path;
1225 struct list_head list; 1234 u8 aer;
1226 u8 undi; 1235 u8 undi;
1227}; 1236};
1228 1237
@@ -1269,6 +1278,8 @@ struct bnx2x {
1269#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) 1278#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
1270 1279
1271#ifdef CONFIG_BNX2X_SRIOV 1280#ifdef CONFIG_BNX2X_SRIOV
1281 /* protects vf2pf mailbox from simultaneous access */
1282 struct mutex vf2pf_mutex;
1272 /* vf pf channel mailbox contains request and response buffers */ 1283 /* vf pf channel mailbox contains request and response buffers */
1273 struct bnx2x_vf_mbx_msg *vf2pf_mbox; 1284 struct bnx2x_vf_mbx_msg *vf2pf_mbox;
1274 dma_addr_t vf2pf_mbox_mapping; 1285 dma_addr_t vf2pf_mbox_mapping;
@@ -1281,6 +1292,8 @@ struct bnx2x {
1281 dma_addr_t pf2vf_bulletin_mapping; 1292 dma_addr_t pf2vf_bulletin_mapping;
1282 1293
1283 struct pf_vf_bulletin_content old_bulletin; 1294 struct pf_vf_bulletin_content old_bulletin;
1295
1296 u16 requested_nr_virtfn;
1284#endif /* CONFIG_BNX2X_SRIOV */ 1297#endif /* CONFIG_BNX2X_SRIOV */
1285 1298
1286 struct net_device *dev; 1299 struct net_device *dev;
@@ -1944,12 +1957,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1944void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, 1957void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
1945 bool is_pf); 1958 bool is_pf);
1946 1959
1947#define BNX2X_ILT_ZALLOC(x, y, size) \ 1960#define BNX2X_ILT_ZALLOC(x, y, size) \
1948 do { \ 1961 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
1949 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 1962 GFP_KERNEL | __GFP_ZERO)
1950 if (x) \
1951 memset(x, 0, size); \
1952 } while (0)
1953 1963
1954#define BNX2X_ILT_FREE(x, y, size) \ 1964#define BNX2X_ILT_FREE(x, y, size) \
1955 do { \ 1965 do { \
@@ -2286,7 +2296,7 @@ static const u32 dmae_reg_go_c[] = {
2286 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 2296 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
2287}; 2297};
2288 2298
2289void bnx2x_set_ethtool_ops(struct net_device *netdev); 2299void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
2290void bnx2x_notify_link_changed(struct bnx2x *bp); 2300void bnx2x_notify_link_changed(struct bnx2x *bp);
2291 2301
2292#define BNX2X_MF_SD_PROTOCOL(bp) \ 2302#define BNX2X_MF_SD_PROTOCOL(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 57619dd4a92b..b8fbe266ab68 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
451 * Compute number of aggregated segments, and gso_type. 451 * Compute number of aggregated segments, and gso_type.
452 */ 452 */
453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, 453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len) 454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
455{ 456{
456 /* TPA aggregation won't have either IP options or TCP options 457 /* TPA aggregation won't have either IP options or TCP options
457 * other than timestamp or IPv6 extension headers. 458 * other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
480 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count 481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
481 * to skb_shinfo(skb)->gso_segs 482 * to skb_shinfo(skb)->gso_segs
482 */ 483 */
483 NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len, 484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
484 skb_shinfo(skb)->gso_size);
485} 485}
486 486
487static int bnx2x_alloc_rx_sge(struct bnx2x *bp, 487static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
537 /* This is needed in order to enable forwarding support */ 537 /* This is needed in order to enable forwarding support */
538 if (frag_size) 538 if (frag_size)
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, 539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540 le16_to_cpu(cqe->pkt_len)); 540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
541 542
542#ifdef BNX2X_STOP_ON_ERROR 543#ifdef BNX2X_STOP_ON_ERROR
543 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { 544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -641,6 +642,14 @@ static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
641 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), 642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
642 &iph->saddr, &iph->daddr, 0); 643 &iph->saddr, &iph->daddr, 0);
643} 644}
645
646static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647 void (*gro_func)(struct bnx2x*, struct sk_buff*))
648{
649 skb_set_network_header(skb, 0);
650 gro_func(bp, skb);
651 tcp_gro_complete(skb);
652}
644#endif 653#endif
645 654
646static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, 655static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -648,19 +657,17 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
648{ 657{
649#ifdef CONFIG_INET 658#ifdef CONFIG_INET
650 if (skb_shinfo(skb)->gso_size) { 659 if (skb_shinfo(skb)->gso_size) {
651 skb_set_network_header(skb, 0);
652 switch (be16_to_cpu(skb->protocol)) { 660 switch (be16_to_cpu(skb->protocol)) {
653 case ETH_P_IP: 661 case ETH_P_IP:
654 bnx2x_gro_ip_csum(bp, skb); 662 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
655 break; 663 break;
656 case ETH_P_IPV6: 664 case ETH_P_IPV6:
657 bnx2x_gro_ipv6_csum(bp, skb); 665 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
658 break; 666 break;
659 default: 667 default:
660 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n", 668 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
661 be16_to_cpu(skb->protocol)); 669 be16_to_cpu(skb->protocol));
662 } 670 }
663 tcp_gro_complete(skb);
664 } 671 }
665#endif 672#endif
666 napi_gro_receive(&fp->napi, skb); 673 napi_gro_receive(&fp->napi, skb);
@@ -718,7 +725,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
718 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, 725 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
719 skb, cqe, cqe_idx)) { 726 skb, cqe, cqe_idx)) {
720 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) 727 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
721 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); 728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
722 bnx2x_gro_receive(bp, fp, skb); 729 bnx2x_gro_receive(bp, fp, skb);
723 } else { 730 } else {
724 DP(NETIF_MSG_RX_STATUS, 731 DP(NETIF_MSG_RX_STATUS,
@@ -993,7 +1000,7 @@ reuse_rx:
993 1000
994 if (le16_to_cpu(cqe_fp->pars_flags.flags) & 1001 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
995 PARSING_FLAGS_VLAN) 1002 PARSING_FLAGS_VLAN)
996 __vlan_hwaccel_put_tag(skb, 1003 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
997 le16_to_cpu(cqe_fp->vlan_tag)); 1004 le16_to_cpu(cqe_fp->vlan_tag));
998 napi_gro_receive(&fp->napi, skb); 1005 napi_gro_receive(&fp->napi, skb);
999 1006
@@ -1037,6 +1044,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1037 DP(NETIF_MSG_INTR, 1044 DP(NETIF_MSG_INTR,
1038 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", 1045 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1039 fp->index, fp->fw_sb_id, fp->igu_sb_id); 1046 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1047
1040 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 1048 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1041 1049
1042#ifdef BNX2X_STOP_ON_ERROR 1050#ifdef BNX2X_STOP_ON_ERROR
@@ -1718,7 +1726,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
1718 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); 1726 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1719} 1727}
1720 1728
1721static int bnx2x_setup_irqs(struct bnx2x *bp) 1729int bnx2x_setup_irqs(struct bnx2x *bp)
1722{ 1730{
1723 int rc = 0; 1731 int rc = 0;
1724 if (bp->flags & USING_MSIX_FLAG && 1732 if (bp->flags & USING_MSIX_FLAG &&
@@ -2009,7 +2017,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2009 * Cleans the object that have internal lists without sending 2017 * Cleans the object that have internal lists without sending
2010 * ramrods. Should be run when interrutps are disabled. 2018 * ramrods. Should be run when interrutps are disabled.
2011 */ 2019 */
2012static void bnx2x_squeeze_objects(struct bnx2x *bp) 2020void bnx2x_squeeze_objects(struct bnx2x *bp)
2013{ 2021{
2014 int rc; 2022 int rc;
2015 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 2023 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2574,6 +2582,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2574 } 2582 }
2575 } 2583 }
2576 2584
2585 bnx2x_pre_irq_nic_init(bp);
2586
2577 /* Connect to IRQs */ 2587 /* Connect to IRQs */
2578 rc = bnx2x_setup_irqs(bp); 2588 rc = bnx2x_setup_irqs(bp);
2579 if (rc) { 2589 if (rc) {
@@ -2583,11 +2593,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2583 LOAD_ERROR_EXIT(bp, load_error2); 2593 LOAD_ERROR_EXIT(bp, load_error2);
2584 } 2594 }
2585 2595
2586 /* Setup NIC internals and enable interrupts */
2587 bnx2x_nic_init(bp, load_code);
2588
2589 /* Init per-function objects */ 2596 /* Init per-function objects */
2590 if (IS_PF(bp)) { 2597 if (IS_PF(bp)) {
2598 /* Setup NIC internals and enable interrupts */
2599 bnx2x_post_irq_nic_init(bp, load_code);
2600
2591 bnx2x_init_bp_objs(bp); 2601 bnx2x_init_bp_objs(bp);
2592 bnx2x_iov_nic_init(bp); 2602 bnx2x_iov_nic_init(bp);
2593 2603
@@ -2657,7 +2667,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2657 if (IS_PF(bp)) 2667 if (IS_PF(bp))
2658 rc = bnx2x_set_eth_mac(bp, true); 2668 rc = bnx2x_set_eth_mac(bp, true);
2659 else /* vf */ 2669 else /* vf */
2660 rc = bnx2x_vfpf_set_mac(bp); 2670 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2671 true);
2661 if (rc) { 2672 if (rc) {
2662 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2673 BNX2X_ERR("Setting Ethernet MAC failed\n");
2663 LOAD_ERROR_EXIT(bp, load_error3); 2674 LOAD_ERROR_EXIT(bp, load_error3);
@@ -2777,7 +2788,7 @@ load_error0:
2777#endif /* ! BNX2X_STOP_ON_ERROR */ 2788#endif /* ! BNX2X_STOP_ON_ERROR */
2778} 2789}
2779 2790
2780static int bnx2x_drain_tx_queues(struct bnx2x *bp) 2791int bnx2x_drain_tx_queues(struct bnx2x *bp)
2781{ 2792{
2782 u8 rc = 0, cos, i; 2793 u8 rc = 0, cos, i;
2783 2794
@@ -2926,9 +2937,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2926 bnx2x_free_fp_mem_cnic(bp); 2937 bnx2x_free_fp_mem_cnic(bp);
2927 2938
2928 if (IS_PF(bp)) { 2939 if (IS_PF(bp)) {
2929 bnx2x_free_mem(bp);
2930 if (CNIC_LOADED(bp)) 2940 if (CNIC_LOADED(bp))
2931 bnx2x_free_mem_cnic(bp); 2941 bnx2x_free_mem_cnic(bp);
2942 bnx2x_free_mem(bp);
2932 } 2943 }
2933 bp->state = BNX2X_STATE_CLOSED; 2944 bp->state = BNX2X_STATE_CLOSED;
2934 bp->cnic_loaded = false; 2945 bp->cnic_loaded = false;
@@ -3089,11 +3100,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
3089 * to ease the pain of our fellow microcode engineers 3100 * to ease the pain of our fellow microcode engineers
3090 * we use one mapping for both BDs 3101 * we use one mapping for both BDs
3091 */ 3102 */
3092static noinline u16 bnx2x_tx_split(struct bnx2x *bp, 3103static u16 bnx2x_tx_split(struct bnx2x *bp,
3093 struct bnx2x_fp_txdata *txdata, 3104 struct bnx2x_fp_txdata *txdata,
3094 struct sw_tx_bd *tx_buf, 3105 struct sw_tx_bd *tx_buf,
3095 struct eth_tx_start_bd **tx_bd, u16 hlen, 3106 struct eth_tx_start_bd **tx_bd, u16 hlen,
3096 u16 bd_prod, int nbd) 3107 u16 bd_prod)
3097{ 3108{
3098 struct eth_tx_start_bd *h_tx_bd = *tx_bd; 3109 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3099 struct eth_tx_bd *d_tx_bd; 3110 struct eth_tx_bd *d_tx_bd;
@@ -3101,11 +3112,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
3101 int old_len = le16_to_cpu(h_tx_bd->nbytes); 3112 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3102 3113
3103 /* first fix first BD */ 3114 /* first fix first BD */
3104 h_tx_bd->nbd = cpu_to_le16(nbd);
3105 h_tx_bd->nbytes = cpu_to_le16(hlen); 3115 h_tx_bd->nbytes = cpu_to_le16(hlen);
3106 3116
3107 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n", 3117 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3108 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd); 3118 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3109 3119
3110 /* now get a new data BD 3120 /* now get a new data BD
3111 * (after the pbd) and fill it */ 3121 * (after the pbd) and fill it */
@@ -3134,7 +3144,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
3134 3144
3135#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) 3145#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3136#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) 3146#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3137static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) 3147static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3138{ 3148{
3139 __sum16 tsum = (__force __sum16) csum; 3149 __sum16 tsum = (__force __sum16) csum;
3140 3150
@@ -3149,30 +3159,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3149 return bswab16(tsum); 3159 return bswab16(tsum);
3150} 3160}
3151 3161
3152static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 3162static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3153{ 3163{
3154 u32 rc; 3164 u32 rc;
3165 __u8 prot = 0;
3166 __be16 protocol;
3155 3167
3156 if (skb->ip_summed != CHECKSUM_PARTIAL) 3168 if (skb->ip_summed != CHECKSUM_PARTIAL)
3157 rc = XMIT_PLAIN; 3169 return XMIT_PLAIN;
3158 3170
3159 else { 3171 protocol = vlan_get_protocol(skb);
3160 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { 3172 if (protocol == htons(ETH_P_IPV6)) {
3161 rc = XMIT_CSUM_V6; 3173 rc = XMIT_CSUM_V6;
3162 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3174 prot = ipv6_hdr(skb)->nexthdr;
3163 rc |= XMIT_CSUM_TCP; 3175 } else {
3176 rc = XMIT_CSUM_V4;
3177 prot = ip_hdr(skb)->protocol;
3178 }
3164 3179
3180 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3181 if (inner_ip_hdr(skb)->version == 6) {
3182 rc |= XMIT_CSUM_ENC_V6;
3183 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3184 rc |= XMIT_CSUM_TCP;
3165 } else { 3185 } else {
3166 rc = XMIT_CSUM_V4; 3186 rc |= XMIT_CSUM_ENC_V4;
3167 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3187 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3168 rc |= XMIT_CSUM_TCP; 3188 rc |= XMIT_CSUM_TCP;
3169 } 3189 }
3170 } 3190 }
3191 if (prot == IPPROTO_TCP)
3192 rc |= XMIT_CSUM_TCP;
3171 3193
3172 if (skb_is_gso_v6(skb)) 3194 if (skb_is_gso_v6(skb)) {
3173 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; 3195 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3174 else if (skb_is_gso(skb)) 3196 if (rc & XMIT_CSUM_ENC)
3175 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; 3197 rc |= XMIT_GSO_ENC_V6;
3198 } else if (skb_is_gso(skb)) {
3199 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3200 if (rc & XMIT_CSUM_ENC)
3201 rc |= XMIT_GSO_ENC_V4;
3202 }
3176 3203
3177 return rc; 3204 return rc;
3178} 3205}
@@ -3257,14 +3284,23 @@ exit_lbl:
3257} 3284}
3258#endif 3285#endif
3259 3286
3260static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, 3287static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3261 u32 xmit_type) 3288 u32 xmit_type)
3262{ 3289{
3290 struct ipv6hdr *ipv6;
3291
3263 *parsing_data |= (skb_shinfo(skb)->gso_size << 3292 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3264 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 3293 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3265 ETH_TX_PARSE_BD_E2_LSO_MSS; 3294 ETH_TX_PARSE_BD_E2_LSO_MSS;
3266 if ((xmit_type & XMIT_GSO_V6) && 3295
3267 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 3296 if (xmit_type & XMIT_GSO_ENC_V6)
3297 ipv6 = inner_ipv6_hdr(skb);
3298 else if (xmit_type & XMIT_GSO_V6)
3299 ipv6 = ipv6_hdr(skb);
3300 else
3301 ipv6 = NULL;
3302
3303 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3268 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 3304 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3269} 3305}
3270 3306
@@ -3275,13 +3311,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3275 * @pbd: parse BD 3311 * @pbd: parse BD
3276 * @xmit_type: xmit flags 3312 * @xmit_type: xmit flags
3277 */ 3313 */
3278static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, 3314static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3279 struct eth_tx_parse_bd_e1x *pbd, 3315 struct eth_tx_parse_bd_e1x *pbd,
3280 u32 xmit_type) 3316 u32 xmit_type)
3281{ 3317{
3282 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 3318 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3283 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); 3319 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3284 pbd->tcp_flags = pbd_tcp_flags(skb); 3320 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3285 3321
3286 if (xmit_type & XMIT_GSO_V4) { 3322 if (xmit_type & XMIT_GSO_V4) {
3287 pbd->ip_id = bswab16(ip_hdr(skb)->id); 3323 pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3301,6 +3337,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3301} 3337}
3302 3338
3303/** 3339/**
3340 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3341 *
3342 * @bp: driver handle
3343 * @skb: packet skb
3344 * @parsing_data: data to be updated
3345 * @xmit_type: xmit flags
3346 *
3347 * 57712/578xx related, when skb has encapsulation
3348 */
3349static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3350 u32 *parsing_data, u32 xmit_type)
3351{
3352 *parsing_data |=
3353 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3354 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3355 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3356
3357 if (xmit_type & XMIT_CSUM_TCP) {
3358 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3359 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3360 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3361
3362 return skb_inner_transport_header(skb) +
3363 inner_tcp_hdrlen(skb) - skb->data;
3364 }
3365
3366 /* We support checksum offload for TCP and UDP only.
3367 * No need to pass the UDP header length - it's a constant.
3368 */
3369 return skb_inner_transport_header(skb) +
3370 sizeof(struct udphdr) - skb->data;
3371}
3372
3373/**
3304 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length 3374 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3305 * 3375 *
3306 * @bp: driver handle 3376 * @bp: driver handle
@@ -3308,15 +3378,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3308 * @parsing_data: data to be updated 3378 * @parsing_data: data to be updated
3309 * @xmit_type: xmit flags 3379 * @xmit_type: xmit flags
3310 * 3380 *
3311 * 57712 related 3381 * 57712/578xx related
3312 */ 3382 */
3313static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 3383static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3314 u32 *parsing_data, u32 xmit_type) 3384 u32 *parsing_data, u32 xmit_type)
3315{ 3385{
3316 *parsing_data |= 3386 *parsing_data |=
3317 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << 3387 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3318 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & 3388 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3319 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; 3389 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3320 3390
3321 if (xmit_type & XMIT_CSUM_TCP) { 3391 if (xmit_type & XMIT_CSUM_TCP) {
3322 *parsing_data |= ((tcp_hdrlen(skb) / 4) << 3392 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3331,17 +3401,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3331 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; 3401 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3332} 3402}
3333 3403
3334static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3404/* set FW indication according to inner or outer protocols if tunneled */
3335 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) 3405static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3406 struct eth_tx_start_bd *tx_start_bd,
3407 u32 xmit_type)
3336{ 3408{
3337 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 3409 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3338 3410
3339 if (xmit_type & XMIT_CSUM_V4) 3411 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3340 tx_start_bd->bd_flags.as_bitfield |= 3412 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3341 ETH_TX_BD_FLAGS_IP_CSUM;
3342 else
3343 tx_start_bd->bd_flags.as_bitfield |=
3344 ETH_TX_BD_FLAGS_IPV6;
3345 3413
3346 if (!(xmit_type & XMIT_CSUM_TCP)) 3414 if (!(xmit_type & XMIT_CSUM_TCP))
3347 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; 3415 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3355,9 +3423,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3355 * @pbd: parse BD to be updated 3423 * @pbd: parse BD to be updated
3356 * @xmit_type: xmit flags 3424 * @xmit_type: xmit flags
3357 */ 3425 */
3358static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, 3426static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3359 struct eth_tx_parse_bd_e1x *pbd, 3427 struct eth_tx_parse_bd_e1x *pbd,
3360 u32 xmit_type) 3428 u32 xmit_type)
3361{ 3429{
3362 u8 hlen = (skb_network_header(skb) - skb->data) >> 1; 3430 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3363 3431
@@ -3403,6 +3471,75 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3403 return hlen; 3471 return hlen;
3404} 3472}
3405 3473
3474static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3475 struct eth_tx_parse_bd_e2 *pbd_e2,
3476 struct eth_tx_parse_2nd_bd *pbd2,
3477 u16 *global_data,
3478 u32 xmit_type)
3479{
3480 u16 hlen_w = 0;
3481 u8 outerip_off, outerip_len = 0;
3482 /* from outer IP to transport */
3483 hlen_w = (skb_inner_transport_header(skb) -
3484 skb_network_header(skb)) >> 1;
3485
3486 /* transport len */
3487 if (xmit_type & XMIT_CSUM_TCP)
3488 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3489 else
3490 hlen_w += sizeof(struct udphdr) >> 1;
3491
3492 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3493
3494 if (xmit_type & XMIT_CSUM_ENC_V4) {
3495 struct iphdr *iph = ip_hdr(skb);
3496 pbd2->fw_ip_csum_wo_len_flags_frag =
3497 bswab16(csum_fold((~iph->check) -
3498 iph->tot_len - iph->frag_off));
3499 } else {
3500 pbd2->fw_ip_hdr_to_payload_w =
3501 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3502 }
3503
3504 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3505
3506 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3507
3508 if (xmit_type & XMIT_GSO_V4) {
3509 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3510
3511 pbd_e2->data.tunnel_data.pseudo_csum =
3512 bswab16(~csum_tcpudp_magic(
3513 inner_ip_hdr(skb)->saddr,
3514 inner_ip_hdr(skb)->daddr,
3515 0, IPPROTO_TCP, 0));
3516
3517 outerip_len = ip_hdr(skb)->ihl << 1;
3518 } else {
3519 pbd_e2->data.tunnel_data.pseudo_csum =
3520 bswab16(~csum_ipv6_magic(
3521 &inner_ipv6_hdr(skb)->saddr,
3522 &inner_ipv6_hdr(skb)->daddr,
3523 0, IPPROTO_TCP, 0));
3524 }
3525
3526 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3527
3528 *global_data |=
3529 outerip_off |
3530 (!!(xmit_type & XMIT_CSUM_V6) <<
3531 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3532 (outerip_len <<
3533 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3534 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3535 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3536
3537 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3538 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3539 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3540 }
3541}
3542
3406/* called with netif_tx_lock 3543/* called with netif_tx_lock
3407 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 3544 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3408 * netif_wake_queue() 3545 * netif_wake_queue()
@@ -3418,6 +3555,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3418 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 3555 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3419 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 3556 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3420 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 3557 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3558 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3421 u32 pbd_e2_parsing_data = 0; 3559 u32 pbd_e2_parsing_data = 0;
3422 u16 pkt_prod, bd_prod; 3560 u16 pkt_prod, bd_prod;
3423 int nbd, txq_index; 3561 int nbd, txq_index;
@@ -3485,7 +3623,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3485 mac_type = MULTICAST_ADDRESS; 3623 mac_type = MULTICAST_ADDRESS;
3486 } 3624 }
3487 3625
3488#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 3626#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3489 /* First, check if we need to linearize the skb (due to FW 3627 /* First, check if we need to linearize the skb (due to FW
3490 restrictions). No need to check fragmentation if page size > 8K 3628 restrictions). No need to check fragmentation if page size > 8K
3491 (there will be no violation to FW restrictions) */ 3629 (there will be no violation to FW restrictions) */
@@ -3533,12 +3671,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3533 first_bd = tx_start_bd; 3671 first_bd = tx_start_bd;
3534 3672
3535 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3673 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3536 SET_FLAG(tx_start_bd->general_data,
3537 ETH_TX_START_BD_PARSE_NBDS,
3538 0);
3539 3674
3540 /* header nbd */ 3675 /* header nbd: indirectly zero other flags! */
3541 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); 3676 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3542 3677
3543 /* remember the first BD of the packet */ 3678 /* remember the first BD of the packet */
3544 tx_buf->first_bd = txdata->tx_bd_prod; 3679 tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3558,19 +3693,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3558 /* when transmitting in a vf, start bd must hold the ethertype 3693 /* when transmitting in a vf, start bd must hold the ethertype
3559 * for fw to enforce it 3694 * for fw to enforce it
3560 */ 3695 */
3561#ifndef BNX2X_STOP_ON_ERROR 3696 if (IS_VF(bp))
3562 if (IS_VF(bp)) {
3563#endif
3564 tx_start_bd->vlan_or_ethertype = 3697 tx_start_bd->vlan_or_ethertype =
3565 cpu_to_le16(ntohs(eth->h_proto)); 3698 cpu_to_le16(ntohs(eth->h_proto));
3566#ifndef BNX2X_STOP_ON_ERROR 3699 else
3567 } else {
3568 /* used by FW for packet accounting */ 3700 /* used by FW for packet accounting */
3569 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3701 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3570 }
3571#endif
3572 } 3702 }
3573 3703
3704 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3705
3574 /* turn on parsing and get a BD */ 3706 /* turn on parsing and get a BD */
3575 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 3707 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3576 3708
@@ -3580,23 +3712,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3580 if (!CHIP_IS_E1x(bp)) { 3712 if (!CHIP_IS_E1x(bp)) {
3581 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 3713 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3582 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 3714 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3583 /* Set PBD in checksum offload case */ 3715
3584 if (xmit_type & XMIT_CSUM) 3716 if (xmit_type & XMIT_CSUM_ENC) {
3717 u16 global_data = 0;
3718
3719 /* Set PBD in enc checksum offload case */
3720 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3721 &pbd_e2_parsing_data,
3722 xmit_type);
3723
3724 /* turn on 2nd parsing and get a BD */
3725 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3726
3727 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3728
3729 memset(pbd2, 0, sizeof(*pbd2));
3730
3731 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3732 (skb_inner_network_header(skb) -
3733 skb->data) >> 1;
3734
3735 if (xmit_type & XMIT_GSO_ENC)
3736 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3737 &global_data,
3738 xmit_type);
3739
3740 pbd2->global_data = cpu_to_le16(global_data);
3741
3742 /* add addition parse BD indication to start BD */
3743 SET_FLAG(tx_start_bd->general_data,
3744 ETH_TX_START_BD_PARSE_NBDS, 1);
3745 /* set encapsulation flag in start BD */
3746 SET_FLAG(tx_start_bd->general_data,
3747 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3748 nbd++;
3749 } else if (xmit_type & XMIT_CSUM) {
3750 /* Set PBD in checksum offload case w/o encapsulation */
3585 hlen = bnx2x_set_pbd_csum_e2(bp, skb, 3751 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3586 &pbd_e2_parsing_data, 3752 &pbd_e2_parsing_data,
3587 xmit_type); 3753 xmit_type);
3754 }
3588 3755
3589 if (IS_MF_SI(bp) || IS_VF(bp)) { 3756 /* Add the macs to the parsing BD this is a vf */
3590 /* fill in the MAC addresses in the PBD - for local 3757 if (IS_VF(bp)) {
3591 * switching 3758 /* override GRE parameters in BD */
3592 */ 3759 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3593 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, 3760 &pbd_e2->data.mac_addr.src_mid,
3594 &pbd_e2->src_mac_addr_mid, 3761 &pbd_e2->data.mac_addr.src_lo,
3595 &pbd_e2->src_mac_addr_lo,
3596 eth->h_source); 3762 eth->h_source);
3597 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, 3763
3598 &pbd_e2->dst_mac_addr_mid, 3764 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3599 &pbd_e2->dst_mac_addr_lo, 3765 &pbd_e2->data.mac_addr.dst_mid,
3766 &pbd_e2->data.mac_addr.dst_lo,
3600 eth->h_dest); 3767 eth->h_dest);
3601 } 3768 }
3602 3769
@@ -3618,14 +3785,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3618 /* Setup the data pointer of the first BD of the packet */ 3785 /* Setup the data pointer of the first BD of the packet */
3619 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 3786 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3620 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 3787 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3621 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3622 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 3788 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3623 pkt_size = tx_start_bd->nbytes; 3789 pkt_size = tx_start_bd->nbytes;
3624 3790
3625 DP(NETIF_MSG_TX_QUEUED, 3791 DP(NETIF_MSG_TX_QUEUED,
3626 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n", 3792 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3627 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 3793 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3628 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 3794 le16_to_cpu(tx_start_bd->nbytes),
3629 tx_start_bd->bd_flags.as_bitfield, 3795 tx_start_bd->bd_flags.as_bitfield,
3630 le16_to_cpu(tx_start_bd->vlan_or_ethertype)); 3796 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3631 3797
@@ -3638,10 +3804,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3638 3804
3639 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 3805 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3640 3806
3641 if (unlikely(skb_headlen(skb) > hlen)) 3807 if (unlikely(skb_headlen(skb) > hlen)) {
3808 nbd++;
3642 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, 3809 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3643 &tx_start_bd, hlen, 3810 &tx_start_bd, hlen,
3644 bd_prod, ++nbd); 3811 bd_prod);
3812 }
3645 if (!CHIP_IS_E1x(bp)) 3813 if (!CHIP_IS_E1x(bp))
3646 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 3814 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3647 xmit_type); 3815 xmit_type);
@@ -3731,9 +3899,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3731 if (pbd_e2) 3899 if (pbd_e2)
3732 DP(NETIF_MSG_TX_QUEUED, 3900 DP(NETIF_MSG_TX_QUEUED,
3733 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", 3901 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3734 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, 3902 pbd_e2,
3735 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, 3903 pbd_e2->data.mac_addr.dst_hi,
3736 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, 3904 pbd_e2->data.mac_addr.dst_mid,
3905 pbd_e2->data.mac_addr.dst_lo,
3906 pbd_e2->data.mac_addr.src_hi,
3907 pbd_e2->data.mac_addr.src_mid,
3908 pbd_e2->data.mac_addr.src_lo,
3737 pbd_e2->parsing_data); 3909 pbd_e2->parsing_data);
3738 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 3910 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3739 3911
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c1..151675d66b0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
50 } \ 50 } \
51 } while (0) 51 } while (0)
52 52
53#define BNX2X_PCI_ALLOC(x, y, size) \ 53#define BNX2X_PCI_ALLOC(x, y, size) \
54 do { \ 54do { \
55 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 55 x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
56 if (x == NULL) \ 56 GFP_KERNEL | __GFP_ZERO); \
57 goto alloc_mem_err; \ 57 if (x == NULL) \
58 memset((void *)x, 0, size); \ 58 goto alloc_mem_err; \
59 } while (0) 59} while (0)
60 60
61#define BNX2X_ALLOC(x, size) \ 61#define BNX2X_ALLOC(x, size) \
62 do { \ 62 do { \
@@ -295,16 +295,29 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
295void bnx2x_nic_init_cnic(struct bnx2x *bp); 295void bnx2x_nic_init_cnic(struct bnx2x *bp);
296 296
297/** 297/**
298 * bnx2x_nic_init - init driver internals. 298 * bnx2x_preirq_nic_init - init driver internals.
299 * 299 *
300 * @bp: driver handle 300 * @bp: driver handle
301 * 301 *
302 * Initializes: 302 * Initializes:
303 * - rings 303 * - fastpath object
304 * - fastpath rings
305 * etc.
306 */
307void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
308
309/**
310 * bnx2x_postirq_nic_init - init driver internals.
311 *
312 * @bp: driver handle
313 * @load_code: COMMON, PORT or FUNCTION
314 *
315 * Initializes:
304 * - status blocks 316 * - status blocks
317 * - slowpath rings
305 * - etc. 318 * - etc.
306 */ 319 */
307void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 320void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
308/** 321/**
309 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic. 322 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
310 * 323 *
@@ -496,7 +509,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
496/* setup_tc callback */ 509/* setup_tc callback */
497int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); 510int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
498 511
512int bnx2x_get_vf_config(struct net_device *dev, int vf,
513 struct ifla_vf_info *ivi);
499int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); 514int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
515int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
500 516
501/* select_queue callback */ 517/* select_queue callback */
502u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 518u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +850,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
834 /* Add NAPI objects */ 850 /* Add NAPI objects */
835 for_each_rx_queue_cnic(bp, i) 851 for_each_rx_queue_cnic(bp, i)
836 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 852 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
837 bnx2x_poll, BNX2X_NAPI_WEIGHT); 853 bnx2x_poll, NAPI_POLL_WEIGHT);
838} 854}
839 855
840static inline void bnx2x_add_all_napi(struct bnx2x *bp) 856static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +860,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
844 /* Add NAPI objects */ 860 /* Add NAPI objects */
845 for_each_eth_queue(bp, i) 861 for_each_eth_queue(bp, i)
846 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 862 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
847 bnx2x_poll, BNX2X_NAPI_WEIGHT); 863 bnx2x_poll, NAPI_POLL_WEIGHT);
848} 864}
849 865
850static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) 866static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +986,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
970 else /* CHIP_IS_E1X */ 986 else /* CHIP_IS_E1X */
971 start_params->network_cos_mode = FW_WRR; 987 start_params->network_cos_mode = FW_WRR;
972 988
989 start_params->gre_tunnel_mode = IPGRE_TUNNEL;
990 start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
991
973 return bnx2x_func_state_change(bp, &func_params); 992 return bnx2x_func_state_change(bp, &func_params);
974} 993}
975 994
@@ -1396,4 +1415,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1396 * 1415 *
1397 */ 1416 */
1398void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); 1417void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
1418
1419int bnx2x_drain_tx_queues(struct bnx2x *bp);
1420void bnx2x_squeeze_objects(struct bnx2x *bp);
1421
1399#endif /* BNX2X_CMN_H */ 1422#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f9..ce1a91618677 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1364,11 +1364,27 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
1364 return rc; 1364 return rc;
1365} 1365}
1366 1366
1367static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
1368 int buf_size)
1369{
1370 int rc;
1371
1372 rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
1373
1374 if (!rc) {
1375 __be32 *be = (__be32 *)buf;
1376
1377 while ((buf_size -= 4) >= 0)
1378 *buf++ = be32_to_cpu(*be++);
1379 }
1380
1381 return rc;
1382}
1383
1367static int bnx2x_get_eeprom(struct net_device *dev, 1384static int bnx2x_get_eeprom(struct net_device *dev,
1368 struct ethtool_eeprom *eeprom, u8 *eebuf) 1385 struct ethtool_eeprom *eeprom, u8 *eebuf)
1369{ 1386{
1370 struct bnx2x *bp = netdev_priv(dev); 1387 struct bnx2x *bp = netdev_priv(dev);
1371 int rc;
1372 1388
1373 if (!netif_running(dev)) { 1389 if (!netif_running(dev)) {
1374 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1390 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1383,9 +1399,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
1383 1399
1384 /* parameters already validated in ethtool_get_eeprom */ 1400 /* parameters already validated in ethtool_get_eeprom */
1385 1401
1386 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); 1402 return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
1387
1388 return rc;
1389} 1403}
1390 1404
1391static int bnx2x_get_module_eeprom(struct net_device *dev, 1405static int bnx2x_get_module_eeprom(struct net_device *dev,
@@ -1393,10 +1407,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
1393 u8 *data) 1407 u8 *data)
1394{ 1408{
1395 struct bnx2x *bp = netdev_priv(dev); 1409 struct bnx2x *bp = netdev_priv(dev);
1396 int rc = 0, phy_idx; 1410 int rc = -EINVAL, phy_idx;
1397 u8 *user_data = data; 1411 u8 *user_data = data;
1398 int remaining_len = ee->len, xfer_size; 1412 unsigned int start_addr = ee->offset, xfer_size = 0;
1399 unsigned int page_off = ee->offset;
1400 1413
1401 if (!netif_running(dev)) { 1414 if (!netif_running(dev)) {
1402 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1415 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1418,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
1405 } 1418 }
1406 1419
1407 phy_idx = bnx2x_get_cur_phy_idx(bp); 1420 phy_idx = bnx2x_get_cur_phy_idx(bp);
1408 bnx2x_acquire_phy_lock(bp); 1421
1409 while (!rc && remaining_len > 0) { 1422 /* Read A0 section */
1410 xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ? 1423 if (start_addr < ETH_MODULE_SFF_8079_LEN) {
1411 SFP_EEPROM_PAGE_SIZE : remaining_len; 1424 /* Limit transfer size to the A0 section boundary */
1425 if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
1426 xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
1427 else
1428 xfer_size = ee->len;
1429 bnx2x_acquire_phy_lock(bp);
1412 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], 1430 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1413 &bp->link_params, 1431 &bp->link_params,
1414 page_off, 1432 I2C_DEV_ADDR_A0,
1433 start_addr,
1415 xfer_size, 1434 xfer_size,
1416 user_data); 1435 user_data);
1417 remaining_len -= xfer_size; 1436 bnx2x_release_phy_lock(bp);
1437 if (rc) {
1438 DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
1439
1440 return -EINVAL;
1441 }
1418 user_data += xfer_size; 1442 user_data += xfer_size;
1419 page_off += xfer_size; 1443 start_addr += xfer_size;
1420 } 1444 }
1421 1445
1422 bnx2x_release_phy_lock(bp); 1446 /* Read A2 section */
1447 if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
1448 (start_addr < ETH_MODULE_SFF_8472_LEN)) {
1449 xfer_size = ee->len - xfer_size;
1450 /* Limit transfer size to the A2 section boundary */
1451 if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
1452 xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
1453 start_addr -= ETH_MODULE_SFF_8079_LEN;
1454 bnx2x_acquire_phy_lock(bp);
1455 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1456 &bp->link_params,
1457 I2C_DEV_ADDR_A2,
1458 start_addr,
1459 xfer_size,
1460 user_data);
1461 bnx2x_release_phy_lock(bp);
1462 if (rc) {
1463 DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
1464 return -EINVAL;
1465 }
1466 }
1423 return rc; 1467 return rc;
1424} 1468}
1425 1469
@@ -1427,24 +1471,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
1427 struct ethtool_modinfo *modinfo) 1471 struct ethtool_modinfo *modinfo)
1428{ 1472{
1429 struct bnx2x *bp = netdev_priv(dev); 1473 struct bnx2x *bp = netdev_priv(dev);
1430 int phy_idx; 1474 int phy_idx, rc;
1475 u8 sff8472_comp, diag_type;
1476
1431 if (!netif_running(dev)) { 1477 if (!netif_running(dev)) {
1432 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1478 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1433 "cannot access eeprom when the interface is down\n"); 1479 "cannot access eeprom when the interface is down\n");
1434 return -EAGAIN; 1480 return -EAGAIN;
1435 } 1481 }
1436
1437 phy_idx = bnx2x_get_cur_phy_idx(bp); 1482 phy_idx = bnx2x_get_cur_phy_idx(bp);
1438 switch (bp->link_params.phy[phy_idx].media_type) { 1483 bnx2x_acquire_phy_lock(bp);
1439 case ETH_PHY_SFPP_10G_FIBER: 1484 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1440 case ETH_PHY_SFP_1G_FIBER: 1485 &bp->link_params,
1441 case ETH_PHY_DA_TWINAX: 1486 I2C_DEV_ADDR_A0,
1487 SFP_EEPROM_SFF_8472_COMP_ADDR,
1488 SFP_EEPROM_SFF_8472_COMP_SIZE,
1489 &sff8472_comp);
1490 bnx2x_release_phy_lock(bp);
1491 if (rc) {
1492 DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
1493 return -EINVAL;
1494 }
1495
1496 bnx2x_acquire_phy_lock(bp);
1497 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1498 &bp->link_params,
1499 I2C_DEV_ADDR_A0,
1500 SFP_EEPROM_DIAG_TYPE_ADDR,
1501 SFP_EEPROM_DIAG_TYPE_SIZE,
1502 &diag_type);
1503 bnx2x_release_phy_lock(bp);
1504 if (rc) {
1505 DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
1506 return -EINVAL;
1507 }
1508
1509 if (!sff8472_comp ||
1510 (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
1442 modinfo->type = ETH_MODULE_SFF_8079; 1511 modinfo->type = ETH_MODULE_SFF_8079;
1443 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 1512 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1444 return 0; 1513 } else {
1445 default: 1514 modinfo->type = ETH_MODULE_SFF_8472;
1446 return -EOPNOTSUPP; 1515 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1447 } 1516 }
1517 return 0;
1448} 1518}
1449 1519
1450static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, 1520static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -1496,9 +1566,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1496 int buf_size) 1566 int buf_size)
1497{ 1567{
1498 int rc; 1568 int rc;
1499 u32 cmd_flags; 1569 u32 cmd_flags, align_offset, val;
1500 u32 align_offset; 1570 __be32 val_be;
1501 __be32 val;
1502 1571
1503 if (offset + buf_size > bp->common.flash_size) { 1572 if (offset + buf_size > bp->common.flash_size) {
1504 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1573 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1517,16 +1586,16 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1517 1586
1518 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); 1587 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1519 align_offset = (offset & ~0x03); 1588 align_offset = (offset & ~0x03);
1520 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags); 1589 rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
1521 1590
1522 if (rc == 0) { 1591 if (rc == 0) {
1523 val &= ~(0xff << BYTE_OFFSET(offset));
1524 val |= (*data_buf << BYTE_OFFSET(offset));
1525
1526 /* nvram data is returned as an array of bytes 1592 /* nvram data is returned as an array of bytes
1527 * convert it back to cpu order 1593 * convert it back to cpu order
1528 */ 1594 */
1529 val = be32_to_cpu(val); 1595 val = be32_to_cpu(val_be);
1596
1597 val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset));
1598 val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset));
1530 1599
1531 rc = bnx2x_nvram_write_dword(bp, align_offset, val, 1600 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
1532 cmd_flags); 1601 cmd_flags);
@@ -1820,12 +1889,15 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1820 bp->link_params.req_flow_ctrl[cfg_idx] = 1889 bp->link_params.req_flow_ctrl[cfg_idx] =
1821 BNX2X_FLOW_CTRL_AUTO; 1890 BNX2X_FLOW_CTRL_AUTO;
1822 } 1891 }
1823 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE; 1892 bp->link_params.req_fc_auto_adv = 0;
1824 if (epause->rx_pause) 1893 if (epause->rx_pause)
1825 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; 1894 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
1826 1895
1827 if (epause->tx_pause) 1896 if (epause->tx_pause)
1828 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; 1897 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
1898
1899 if (!bp->link_params.req_fc_auto_adv)
1900 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
1829 } 1901 }
1830 1902
1831 DP(BNX2X_MSG_ETHTOOL, 1903 DP(BNX2X_MSG_ETHTOOL,
@@ -2526,14 +2598,168 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2526 return rc; 2598 return rc;
2527} 2599}
2528 2600
2601struct code_entry {
2602 u32 sram_start_addr;
2603 u32 code_attribute;
2604#define CODE_IMAGE_TYPE_MASK 0xf0800003
2605#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
2606#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
2607#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
2608 u32 nvm_start_addr;
2609};
2610
2611#define CODE_ENTRY_MAX 16
2612#define CODE_ENTRY_EXTENDED_DIR_IDX 15
2613#define MAX_IMAGES_IN_EXTENDED_DIR 64
2614#define NVRAM_DIR_OFFSET 0x14
2615
2616#define EXTENDED_DIR_EXISTS(code) \
2617 ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
2618 (code & CODE_IMAGE_LENGTH_MASK) != 0)
2619
2529#define CRC32_RESIDUAL 0xdebb20e3 2620#define CRC32_RESIDUAL 0xdebb20e3
2621#define CRC_BUFF_SIZE 256
2622
2623static int bnx2x_nvram_crc(struct bnx2x *bp,
2624 int offset,
2625 int size,
2626 u8 *buff)
2627{
2628 u32 crc = ~0;
2629 int rc = 0, done = 0;
2630
2631 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2632 "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
2633
2634 while (done < size) {
2635 int count = min_t(int, size - done, CRC_BUFF_SIZE);
2636
2637 rc = bnx2x_nvram_read(bp, offset + done, buff, count);
2638
2639 if (rc)
2640 return rc;
2641
2642 crc = crc32_le(crc, buff, count);
2643 done += count;
2644 }
2645
2646 if (crc != CRC32_RESIDUAL)
2647 rc = -EINVAL;
2648
2649 return rc;
2650}
2651
2652static int bnx2x_test_nvram_dir(struct bnx2x *bp,
2653 struct code_entry *entry,
2654 u8 *buff)
2655{
2656 size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
2657 u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
2658 int rc;
2659
2660 /* Zero-length images and AFEX profiles do not have CRC */
2661 if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
2662 return 0;
2663
2664 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
2665 if (rc)
2666 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2667 "image %x has failed crc test (rc %d)\n", type, rc);
2668
2669 return rc;
2670}
2671
2672static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
2673{
2674 int rc;
2675 struct code_entry entry;
2676
2677 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
2678 if (rc)
2679 return rc;
2680
2681 return bnx2x_test_nvram_dir(bp, &entry, buff);
2682}
2683
2684static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
2685{
2686 u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
2687 struct code_entry entry;
2688 int i;
2689
2690 rc = bnx2x_nvram_read32(bp,
2691 dir_offset +
2692 sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
2693 (u32 *)&entry, sizeof(entry));
2694 if (rc)
2695 return rc;
2696
2697 if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
2698 return 0;
2699
2700 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
2701 &cnt, sizeof(u32));
2702 if (rc)
2703 return rc;
2704
2705 dir_offset = entry.nvm_start_addr + 8;
2706
2707 for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
2708 rc = bnx2x_test_dir_entry(bp, dir_offset +
2709 sizeof(struct code_entry) * i,
2710 buff);
2711 if (rc)
2712 return rc;
2713 }
2714
2715 return 0;
2716}
2717
2718static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
2719{
2720 u32 rc, dir_offset = NVRAM_DIR_OFFSET;
2721 int i;
2722
2723 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
2724
2725 for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
2726 rc = bnx2x_test_dir_entry(bp, dir_offset +
2727 sizeof(struct code_entry) * i,
2728 buff);
2729 if (rc)
2730 return rc;
2731 }
2732
2733 return bnx2x_test_nvram_ext_dirs(bp, buff);
2734}
2735
2736struct crc_pair {
2737 int offset;
2738 int size;
2739};
2740
2741static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2742 const struct crc_pair *nvram_tbl, u8 *buf)
2743{
2744 int i;
2745
2746 for (i = 0; nvram_tbl[i].size; i++) {
2747 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
2748 nvram_tbl[i].size, buf);
2749 if (rc) {
2750 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2751 "nvram_tbl[%d] has failed crc test (rc %d)\n",
2752 i, rc);
2753 return rc;
2754 }
2755 }
2756
2757 return 0;
2758}
2530 2759
2531static int bnx2x_test_nvram(struct bnx2x *bp) 2760static int bnx2x_test_nvram(struct bnx2x *bp)
2532{ 2761{
2533 static const struct { 2762 const struct crc_pair nvram_tbl[] = {
2534 int offset;
2535 int size;
2536 } nvram_tbl[] = {
2537 { 0, 0x14 }, /* bootstrap */ 2763 { 0, 0x14 }, /* bootstrap */
2538 { 0x14, 0xec }, /* dir */ 2764 { 0x14, 0xec }, /* dir */
2539 { 0x100, 0x350 }, /* manuf_info */ 2765 { 0x100, 0x350 }, /* manuf_info */
@@ -2542,30 +2768,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
2542 { 0x708, 0x70 }, /* manuf_key_info */ 2768 { 0x708, 0x70 }, /* manuf_key_info */
2543 { 0, 0 } 2769 { 0, 0 }
2544 }; 2770 };
2545 __be32 *buf; 2771 const struct crc_pair nvram_tbl2[] = {
2546 u8 *data; 2772 { 0x7e8, 0x350 }, /* manuf_info2 */
2547 int i, rc; 2773 { 0xb38, 0xf0 }, /* feature_info */
2548 u32 magic, crc; 2774 { 0, 0 }
2775 };
2776
2777 u8 *buf;
2778 int rc;
2779 u32 magic;
2549 2780
2550 if (BP_NOMCP(bp)) 2781 if (BP_NOMCP(bp))
2551 return 0; 2782 return 0;
2552 2783
2553 buf = kmalloc(0x350, GFP_KERNEL); 2784 buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
2554 if (!buf) { 2785 if (!buf) {
2555 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n"); 2786 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
2556 rc = -ENOMEM; 2787 rc = -ENOMEM;
2557 goto test_nvram_exit; 2788 goto test_nvram_exit;
2558 } 2789 }
2559 data = (u8 *)buf;
2560 2790
2561 rc = bnx2x_nvram_read(bp, 0, data, 4); 2791 rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
2562 if (rc) { 2792 if (rc) {
2563 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2793 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2564 "magic value read (rc %d)\n", rc); 2794 "magic value read (rc %d)\n", rc);
2565 goto test_nvram_exit; 2795 goto test_nvram_exit;
2566 } 2796 }
2567 2797
2568 magic = be32_to_cpu(buf[0]);
2569 if (magic != 0x669955aa) { 2798 if (magic != 0x669955aa) {
2570 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2799 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2571 "wrong magic value (0x%08x)\n", magic); 2800 "wrong magic value (0x%08x)\n", magic);
@@ -2573,25 +2802,26 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
2573 goto test_nvram_exit; 2802 goto test_nvram_exit;
2574 } 2803 }
2575 2804
2576 for (i = 0; nvram_tbl[i].size; i++) { 2805 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
2806 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
2807 if (rc)
2808 goto test_nvram_exit;
2577 2809
2578 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, 2810 if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
2579 nvram_tbl[i].size); 2811 u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
2580 if (rc) { 2812 SHARED_HW_CFG_HIDE_PORT1;
2581 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2582 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
2583 goto test_nvram_exit;
2584 }
2585 2813
2586 crc = ether_crc_le(nvram_tbl[i].size, data); 2814 if (!hide) {
2587 if (crc != CRC32_RESIDUAL) {
2588 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 2815 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2589 "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc); 2816 "Port 1 CRC test-set\n");
2590 rc = -ENODEV; 2817 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
2591 goto test_nvram_exit; 2818 if (rc)
2819 goto test_nvram_exit;
2592 } 2820 }
2593 } 2821 }
2594 2822
2823 rc = bnx2x_test_nvram_dirs(bp, buf);
2824
2595test_nvram_exit: 2825test_nvram_exit:
2596 kfree(buf); 2826 kfree(buf);
2597 return rc; 2827 return rc;
@@ -3232,7 +3462,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
3232 .get_ts_info = ethtool_op_get_ts_info, 3462 .get_ts_info = ethtool_op_get_ts_info,
3233}; 3463};
3234 3464
3235void bnx2x_set_ethtool_ops(struct net_device *netdev) 3465static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
3466 .get_settings = bnx2x_get_settings,
3467 .set_settings = bnx2x_set_settings,
3468 .get_drvinfo = bnx2x_get_drvinfo,
3469 .get_msglevel = bnx2x_get_msglevel,
3470 .set_msglevel = bnx2x_set_msglevel,
3471 .get_link = bnx2x_get_link,
3472 .get_coalesce = bnx2x_get_coalesce,
3473 .get_ringparam = bnx2x_get_ringparam,
3474 .set_ringparam = bnx2x_set_ringparam,
3475 .get_sset_count = bnx2x_get_sset_count,
3476 .get_strings = bnx2x_get_strings,
3477 .get_ethtool_stats = bnx2x_get_ethtool_stats,
3478 .get_rxnfc = bnx2x_get_rxnfc,
3479 .set_rxnfc = bnx2x_set_rxnfc,
3480 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3481 .get_rxfh_indir = bnx2x_get_rxfh_indir,
3482 .set_rxfh_indir = bnx2x_set_rxfh_indir,
3483 .get_channels = bnx2x_get_channels,
3484 .set_channels = bnx2x_set_channels,
3485};
3486
3487void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
3236{ 3488{
3237 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); 3489 if (IS_PF(bp))
3490 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
3491 else /* vf */
3492 SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
3238} 3493}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c91..84aecdf06f7a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
30 * IRO[138].m2) + ((sbId) * IRO[138].m3)) 30 * IRO[138].m2) + ((sbId) * IRO[138].m3))
31#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) 31#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
32#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 32#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
33 (IRO[316].base + ((pfId) * IRO[316].m1))
34#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
35 (IRO[317].base + ((pfId) * IRO[317].m1)) 33 (IRO[317].base + ((pfId) * IRO[317].m1))
34#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
35 (IRO[318].base + ((pfId) * IRO[318].m1))
36#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ 36#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
37 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) 37 (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
38#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ 38#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
39 (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) 39 (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
40#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ 40#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
41 (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) 41 (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
42#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ 42#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
43 (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) 43 (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
44#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ 44#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
45 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) 45 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
46#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ 46#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
47 (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) 47 (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
48#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ 48#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
49 (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) 49 (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
50#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ 50#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
51 (IRO[315].base + ((pfId) * IRO[315].m1)) 51 (IRO[316].base + ((pfId) * IRO[316].m1))
52#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 52#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
53 (IRO[307].base + ((pfId) * IRO[307].m1)) 53 (IRO[308].base + ((pfId) * IRO[308].m1))
54#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 54#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
55 (IRO[306].base + ((pfId) * IRO[306].m1)) 55 (IRO[307].base + ((pfId) * IRO[307].m1))
56#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 56#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
57 (IRO[305].base + ((pfId) * IRO[305].m1)) 57 (IRO[306].base + ((pfId) * IRO[306].m1))
58#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 58#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
59 (IRO[151].base + ((funcId) * IRO[151].m1)) 59 (IRO[151].base + ((funcId) * IRO[151].m1))
60#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ 60#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
114#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ 114#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
115 (IRO[268].base + ((pfId) * IRO[268].m1)) 115 (IRO[268].base + ((pfId) * IRO[268].m1))
116#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ 116#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
117 (IRO[277].base + ((pfId) * IRO[277].m1)) 117 (IRO[278].base + ((pfId) * IRO[278].m1))
118#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 118#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
119 (IRO[264].base + ((pfId) * IRO[264].m1)) 119 (IRO[264].base + ((pfId) * IRO[264].m1))
120#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ 120#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
136#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) 136#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
137#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ 137#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
138 (IRO[176].base + ((assertListEntry) * IRO[176].m1)) 138 (IRO[176].base + ((assertListEntry) * IRO[176].m1))
139#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
140 (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
141 IRO[205].m2))
142#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ 139#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
143 (IRO[183].base + ((portId) * IRO[183].m1)) 140 (IRO[183].base + ((portId) * IRO[183].m1))
144#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ 141#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
145 (IRO[318].base + ((pfId) * IRO[318].m1)) 142 (IRO[319].base + ((pfId) * IRO[319].m1))
146#define USTORM_FUNC_EN_OFFSET(funcId) \ 143#define USTORM_FUNC_EN_OFFSET(funcId) \
147 (IRO[178].base + ((funcId) * IRO[178].m1)) 144 (IRO[178].base + ((funcId) * IRO[178].m1))
148#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 145#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
149 (IRO[282].base + ((pfId) * IRO[282].m1))
150#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
151 (IRO[283].base + ((pfId) * IRO[283].m1)) 146 (IRO[283].base + ((pfId) * IRO[283].m1))
147#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
148 (IRO[284].base + ((pfId) * IRO[284].m1))
152#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ 149#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
153 (IRO[287].base + ((pfId) * IRO[287].m1)) 150 (IRO[288].base + ((pfId) * IRO[288].m1))
154#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ 151#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
155 (IRO[284].base + ((pfId) * IRO[284].m1)) 152 (IRO[285].base + ((pfId) * IRO[285].m1))
156#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 153#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
157 (IRO[280].base + ((pfId) * IRO[280].m1)) 154 (IRO[281].base + ((pfId) * IRO[281].m1))
158#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 155#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
159 (IRO[279].base + ((pfId) * IRO[279].m1)) 156 (IRO[280].base + ((pfId) * IRO[280].m1))
160#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 157#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
161 (IRO[278].base + ((pfId) * IRO[278].m1)) 158 (IRO[279].base + ((pfId) * IRO[279].m1))
162#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 159#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
163 (IRO[281].base + ((pfId) * IRO[281].m1)) 160 (IRO[282].base + ((pfId) * IRO[282].m1))
164#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ 161#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
165 (IRO[285].base + ((pfId) * IRO[285].m1))
166#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
167 (IRO[286].base + ((pfId) * IRO[286].m1)) 162 (IRO[286].base + ((pfId) * IRO[286].m1))
163#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
164 (IRO[287].base + ((pfId) * IRO[287].m1))
168#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ 165#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
169 (IRO[182].base + ((pfId) * IRO[182].m1)) 166 (IRO[182].base + ((pfId) * IRO[182].m1))
170#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 167#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
190#define XSTORM_FUNC_EN_OFFSET(funcId) \ 187#define XSTORM_FUNC_EN_OFFSET(funcId) \
191 (IRO[47].base + ((funcId) * IRO[47].m1)) 188 (IRO[47].base + ((funcId) * IRO[47].m1))
192#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ 189#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
193 (IRO[295].base + ((pfId) * IRO[295].m1)) 190 (IRO[296].base + ((pfId) * IRO[296].m1))
194#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ 191#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
195 (IRO[298].base + ((pfId) * IRO[298].m1))
196#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
197 (IRO[299].base + ((pfId) * IRO[299].m1)) 192 (IRO[299].base + ((pfId) * IRO[299].m1))
198#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ 193#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
199 (IRO[300].base + ((pfId) * IRO[300].m1)) 194 (IRO[300].base + ((pfId) * IRO[300].m1))
200#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ 195#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
201 (IRO[301].base + ((pfId) * IRO[301].m1)) 196 (IRO[301].base + ((pfId) * IRO[301].m1))
202#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ 197#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
203 (IRO[302].base + ((pfId) * IRO[302].m1)) 198 (IRO[302].base + ((pfId) * IRO[302].m1))
204#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ 199#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
205 (IRO[303].base + ((pfId) * IRO[303].m1)) 200 (IRO[303].base + ((pfId) * IRO[303].m1))
206#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ 201#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
207 (IRO[304].base + ((pfId) * IRO[304].m1)) 202 (IRO[304].base + ((pfId) * IRO[304].m1))
203#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
204 (IRO[305].base + ((pfId) * IRO[305].m1))
208#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 205#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
209 (IRO[294].base + ((pfId) * IRO[294].m1)) 206 (IRO[295].base + ((pfId) * IRO[295].m1))
210#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 207#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
211 (IRO[293].base + ((pfId) * IRO[293].m1)) 208 (IRO[294].base + ((pfId) * IRO[294].m1))
212#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 209#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
213 (IRO[292].base + ((pfId) * IRO[292].m1)) 210 (IRO[293].base + ((pfId) * IRO[293].m1))
214#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 211#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
215 (IRO[297].base + ((pfId) * IRO[297].m1)) 212 (IRO[298].base + ((pfId) * IRO[298].m1))
216#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ 213#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
217 (IRO[296].base + ((pfId) * IRO[296].m1)) 214 (IRO[297].base + ((pfId) * IRO[297].m1))
218#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ 215#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
219 (IRO[291].base + ((pfId) * IRO[291].m1)) 216 (IRO[292].base + ((pfId) * IRO[292].m1))
220#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 217#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
221 (IRO[290].base + ((pfId) * IRO[290].m1)) 218 (IRO[291].base + ((pfId) * IRO[291].m1))
222#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ 219#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
223 (IRO[289].base + ((pfId) * IRO[289].m1)) 220 (IRO[290].base + ((pfId) * IRO[290].m1))
224#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ 221#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
225 (IRO[288].base + ((pfId) * IRO[288].m1)) 222 (IRO[289].base + ((pfId) * IRO[289].m1))
226#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ 223#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
227 (IRO[44].base + ((pfId) * IRO[44].m1)) 224 (IRO[44].base + ((pfId) * IRO[44].m1))
228#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 225#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -389,4 +386,8 @@
389 386
390#define UNDEF_IRO 0x80000000 387#define UNDEF_IRO 0x80000000
391 388
389/* used for defining the amount of FCoE tasks supported for PF */
390#define MAX_FCOE_FUNCS_PER_ENGINE 2
391#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
392
392#endif /* BNX2X_FW_DEFS_H */ 393#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc343..12f00a40cdf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
114#define EPIO_CFG_EPIO30 0x0000001f 114#define EPIO_CFG_EPIO30 0x0000001f
115#define EPIO_CFG_EPIO31 0x00000020 115#define EPIO_CFG_EPIO31 0x00000020
116 116
117struct mac_addr {
118 u32 upper;
119 u32 lower;
120};
117 121
118struct shared_hw_cfg { /* NVRAM Offset */ 122struct shared_hw_cfg { /* NVRAM Offset */
119 /* Up to 16 bytes of NULL-terminated string */ 123 /* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
508 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 512 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
509 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 513 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
510 514
511 u32 reserved0[6]; /* 0x178 */ 515 /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
516 * LOM recommended and tested value is 0xBEB2. Using a different
517 * value means using a value not tested by BRCM
518 */
519 u32 sfi_tap_values; /* 0x178 */
520 #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
521 #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
522
523 /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
524 * value is 0x2. LOM recommended and tested value is 0x2. Using a
525 * different value means using a value not tested by BRCM
526 */
527 #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
528 #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
529
530 u32 reserved0[5]; /* 0x17c */
512 531
513 u32 aeu_int_mask; /* 0x190 */ 532 u32 aeu_int_mask; /* 0x190 */
514 533
@@ -2821,8 +2840,8 @@ struct afex_stats {
2821 2840
2822#define BCM_5710_FW_MAJOR_VERSION 7 2841#define BCM_5710_FW_MAJOR_VERSION 7
2823#define BCM_5710_FW_MINOR_VERSION 8 2842#define BCM_5710_FW_MINOR_VERSION 8
2824#define BCM_5710_FW_REVISION_VERSION 2 2843#define BCM_5710_FW_REVISION_VERSION 17
2825#define BCM_5710_FW_ENGINEERING_VERSION 0 2844#define BCM_5710_FW_ENGINEERING_VERSION 0
2826#define BCM_5710_FW_COMPILE_FLAGS 1 2845#define BCM_5710_FW_COMPILE_FLAGS 1
2827 2846
2828 2847
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
3513#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 3532#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
3514#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) 3533#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
3515#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 3534#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
3516#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) 3535#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
3517#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 3536#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
3518 u8 default_vlan_flg; 3537 u8 default_vlan_flg;
3519 u8 force_default_pri_flg; 3538 u8 force_default_pri_flg;
3520 __le32 reserved3; 3539 u8 tunnel_lso_inc_ip_id;
3540 u8 refuse_outband_vlan_flg;
3541 u8 tunnel_non_lso_pcsum_location;
3542 u8 reserved1;
3521}; 3543};
3522 3544
3523/* 3545/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
3551 __le16 silent_vlan_mask; 3573 __le16 silent_vlan_mask;
3552 u8 silent_vlan_removal_flg; 3574 u8 silent_vlan_removal_flg;
3553 u8 silent_vlan_change_flg; 3575 u8 silent_vlan_change_flg;
3576 u8 refuse_outband_vlan_flg;
3577 u8 refuse_outband_vlan_change_flg;
3578 u8 tx_switching_flg;
3579 u8 tx_switching_change_flg;
3580 __le32 reserved1;
3554 __le32 echo; 3581 __le32 echo;
3555}; 3582};
3556 3583
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
3620 */ 3647 */
3621struct eth_classify_mac_cmd { 3648struct eth_classify_mac_cmd {
3622 struct eth_classify_cmd_header header; 3649 struct eth_classify_cmd_header header;
3623 __le32 reserved0; 3650 __le16 reserved0;
3651 __le16 inner_mac;
3624 __le16 mac_lsb; 3652 __le16 mac_lsb;
3625 __le16 mac_mid; 3653 __le16 mac_mid;
3626 __le16 mac_msb; 3654 __le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
3633 */ 3661 */
3634struct eth_classify_pair_cmd { 3662struct eth_classify_pair_cmd {
3635 struct eth_classify_cmd_header header; 3663 struct eth_classify_cmd_header header;
3636 __le32 reserved0; 3664 __le16 reserved0;
3665 __le16 inner_mac;
3637 __le16 mac_lsb; 3666 __le16 mac_lsb;
3638 __le16 mac_mid; 3667 __le16 mac_mid;
3639 __le16 mac_msb; 3668 __le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
3855 3884
3856 3885
3857/* 3886/*
3858 * Command for setting multicast classification for a client 3887 * destination and source mac address.
3888 */
3889struct eth_mac_addresses {
3890#if defined(__BIG_ENDIAN)
3891 __le16 dst_mid;
3892 __le16 dst_lo;
3893#elif defined(__LITTLE_ENDIAN)
3894 __le16 dst_lo;
3895 __le16 dst_mid;
3896#endif
3897#if defined(__BIG_ENDIAN)
3898 __le16 src_lo;
3899 __le16 dst_hi;
3900#elif defined(__LITTLE_ENDIAN)
3901 __le16 dst_hi;
3902 __le16 src_lo;
3903#endif
3904#if defined(__BIG_ENDIAN)
3905 __le16 src_hi;
3906 __le16 src_mid;
3907#elif defined(__LITTLE_ENDIAN)
3908 __le16 src_mid;
3909 __le16 src_hi;
3910#endif
3911};
3912
3913/* tunneling related data */
3914struct eth_tunnel_data {
3915#if defined(__BIG_ENDIAN)
3916 __le16 dst_mid;
3917 __le16 dst_lo;
3918#elif defined(__LITTLE_ENDIAN)
3919 __le16 dst_lo;
3920 __le16 dst_mid;
3921#endif
3922#if defined(__BIG_ENDIAN)
3923 __le16 reserved0;
3924 __le16 dst_hi;
3925#elif defined(__LITTLE_ENDIAN)
3926 __le16 dst_hi;
3927 __le16 reserved0;
3928#endif
3929#if defined(__BIG_ENDIAN)
3930 u8 reserved1;
3931 u8 ip_hdr_start_inner_w;
3932 __le16 pseudo_csum;
3933#elif defined(__LITTLE_ENDIAN)
3934 __le16 pseudo_csum;
3935 u8 ip_hdr_start_inner_w;
3936 u8 reserved1;
3937#endif
3938};
3939
3940/* union for mac addresses and for tunneling data.
3941 * considered as tunneling data only if (tunnel_exist == 1).
3859 */ 3942 */
3943union eth_mac_addr_or_tunnel_data {
3944 struct eth_mac_addresses mac_addr;
3945 struct eth_tunnel_data tunnel_data;
3946};
3947
3948/*Command for setting multicast classification for a client */
3860struct eth_multicast_rules_cmd { 3949struct eth_multicast_rules_cmd {
3861 u8 cmd_general_data; 3950 u8 cmd_general_data;
3862#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) 3951#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
3874 struct regpair reserved3; 3963 struct regpair reserved3;
3875}; 3964};
3876 3965
3877
3878/* 3966/*
3879 * parameters for multicast classification ramrod 3967 * parameters for multicast classification ramrod
3880 */ 3968 */
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
3883 struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; 3971 struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
3884}; 3972};
3885 3973
3886
3887/* 3974/*
3888 * Place holder for ramrods protocol specific data 3975 * Place holder for ramrods protocol specific data
3889 */ 3976 */
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
3947#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 4034#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
3948#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) 4035#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
3949#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 4036#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
4037#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
4038#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
3950#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) 4039#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
3951#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 4040#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
3952 u8 rss_result_mask; 4041 u8 rss_result_mask;
3953 u8 rss_mode; 4042 u8 rss_mode;
3954 __le32 __reserved2; 4043 __le16 udp_4tuple_dst_port_mask;
4044 __le16 udp_4tuple_dst_port_value;
3955 u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; 4045 u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
3956 __le32 rss_key[T_ETH_RSS_KEY]; 4046 __le32 rss_key[T_ETH_RSS_KEY];
3957 __le32 echo; 4047 __le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
4115 MAX_ETH_TPA_UPDATE_COMMAND 4205 MAX_ETH_TPA_UPDATE_COMMAND
4116}; 4206};
4117 4207
4208/* In case of LSO over IPv4 tunnel, whether to increment
4209 * IP ID on external IP header or internal IP header
4210 */
4211enum eth_tunnel_lso_inc_ip_id {
4212 EXT_HEADER,
4213 INT_HEADER,
4214 MAX_ETH_TUNNEL_LSO_INC_IP_ID
4215};
4216
4217/* In case tunnel exist and L4 checksum offload,
4218 * the pseudo checksum location, on packet or on BD.
4219 */
4220enum eth_tunnel_non_lso_pcsum_location {
4221 PCSUM_ON_PKT,
4222 PCSUM_ON_BD,
4223 MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
4224};
4118 4225
4119/* 4226/*
4120 * Tx regular BD structure 4227 * Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
4166#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 4273#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
4167#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) 4274#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
4168#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 4275#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
4169#define ETH_TX_START_BD_RESREVED (0x1<<7) 4276#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
4170#define ETH_TX_START_BD_RESREVED_SHIFT 7 4277#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
4171}; 4278};
4172 4279
4173/* 4280/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
4216 * Tx parsing BD structure for ETH E2 4323 * Tx parsing BD structure for ETH E2
4217 */ 4324 */
4218struct eth_tx_parse_bd_e2 { 4325struct eth_tx_parse_bd_e2 {
4219 __le16 dst_mac_addr_lo; 4326 union eth_mac_addr_or_tunnel_data data;
4220 __le16 dst_mac_addr_mid;
4221 __le16 dst_mac_addr_hi;
4222 __le16 src_mac_addr_lo;
4223 __le16 src_mac_addr_mid;
4224 __le16 src_mac_addr_hi;
4225 __le32 parsing_data; 4327 __le32 parsing_data;
4226#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0) 4328#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
4227#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 4329#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
4228#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) 4330#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
4229#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 4331#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
4230#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) 4332#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
4236}; 4338};
4237 4339
4238/* 4340/*
4239 * The last BD in the BD memory will hold a pointer to the next BD memory 4341 * Tx 2nd parsing BD structure for ETH packet
4240 */ 4342 */
4343struct eth_tx_parse_2nd_bd {
4344 __le16 global_data;
4345#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
4346#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
4347#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
4348#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
4349#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
4350#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
4351#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
4352#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
4353#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
4354#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
4355#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
4356#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
4357#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
4358#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
4359 __le16 reserved1;
4360 u8 tcp_flags;
4361#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
4362#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
4363#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
4364#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
4365#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
4366#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
4367#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
4368#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
4369#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
4370#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
4371#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
4372#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
4373#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
4374#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
4375#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
4376#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
4377 u8 reserved2;
4378 u8 tunnel_udp_hdr_start_w;
4379 u8 fw_ip_hdr_to_payload_w;
4380 __le16 fw_ip_csum_wo_len_flags_frag;
4381 __le16 hw_ip_id;
4382 __le32 tcp_send_seq;
4383};
4384
4385/* The last BD in the BD memory will hold a pointer to the next BD memory */
4241struct eth_tx_next_bd { 4386struct eth_tx_next_bd {
4242 __le32 addr_lo; 4387 __le32 addr_lo;
4243 __le32 addr_hi; 4388 __le32 addr_hi;
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
4252 struct eth_tx_bd reg_bd; 4397 struct eth_tx_bd reg_bd;
4253 struct eth_tx_parse_bd_e1x parse_bd_e1x; 4398 struct eth_tx_parse_bd_e1x parse_bd_e1x;
4254 struct eth_tx_parse_bd_e2 parse_bd_e2; 4399 struct eth_tx_parse_bd_e2 parse_bd_e2;
4400 struct eth_tx_parse_2nd_bd parse_2nd_bd;
4255 struct eth_tx_next_bd next_bd; 4401 struct eth_tx_next_bd next_bd;
4256}; 4402};
4257 4403
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
4663 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 4809 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4664 RAMROD_CMD_ID_COMMON_START_TRAFFIC, 4810 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4665 RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 4811 RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
4812 RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
4666 MAX_COMMON_SPQE_CMD_ID 4813 MAX_COMMON_SPQE_CMD_ID
4667}; 4814};
4668 4815
4669
4670/* 4816/*
4671 * Per-protocol connection types 4817 * Per-protocol connection types
4672 */ 4818 */
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
4863 */ 5009 */
4864struct malicious_vf_event_data { 5010struct malicious_vf_event_data {
4865 u8 vf_id; 5011 u8 vf_id;
4866 u8 reserved0; 5012 u8 err_id;
4867 u16 reserved1; 5013 u16 reserved1;
4868 u32 reserved2; 5014 u32 reserved2;
4869 u32 reserved3; 5015 u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
4969 EVENT_RING_OPCODE_CLASSIFICATION_RULES, 5115 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4970 EVENT_RING_OPCODE_FILTERS_RULES, 5116 EVENT_RING_OPCODE_FILTERS_RULES,
4971 EVENT_RING_OPCODE_MULTICAST_RULES, 5117 EVENT_RING_OPCODE_MULTICAST_RULES,
5118 EVENT_RING_OPCODE_SET_TIMESYNC,
4972 MAX_EVENT_RING_OPCODE 5119 MAX_EVENT_RING_OPCODE
4973}; 5120};
4974 5121
4975
4976/* 5122/*
4977 * Modes for fairness algorithm 5123 * Modes for fairness algorithm
4978 */ 5124 */
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
5010 */ 5156 */
5011struct function_start_data { 5157struct function_start_data {
5012 u8 function_mode; 5158 u8 function_mode;
5013 u8 reserved; 5159 u8 allow_npar_tx_switching;
5014 __le16 sd_vlan_tag; 5160 __le16 sd_vlan_tag;
5015 __le16 vif_id; 5161 __le16 vif_id;
5016 u8 path_id; 5162 u8 path_id;
5017 u8 network_cos_mode; 5163 u8 network_cos_mode;
5164 u8 dmae_cmd_id;
5165 u8 gre_tunnel_mode;
5166 u8 gre_tunnel_rss;
5167 u8 nvgre_clss_en;
5168 __le16 reserved1[2];
5018}; 5169};
5019 5170
5020
5021struct function_update_data { 5171struct function_update_data {
5022 u8 vif_id_change_flg; 5172 u8 vif_id_change_flg;
5023 u8 afex_default_vlan_change_flg; 5173 u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
5027 __le16 afex_default_vlan; 5177 __le16 afex_default_vlan;
5028 u8 allowed_priorities; 5178 u8 allowed_priorities;
5029 u8 network_cos_mode; 5179 u8 network_cos_mode;
5180 u8 lb_mode_en_change_flg;
5030 u8 lb_mode_en; 5181 u8 lb_mode_en;
5031 u8 tx_switch_suspend_change_flg; 5182 u8 tx_switch_suspend_change_flg;
5032 u8 tx_switch_suspend; 5183 u8 tx_switch_suspend;
5033 u8 echo; 5184 u8 echo;
5034 __le16 reserved1; 5185 u8 reserved1;
5186 u8 update_gre_cfg_flg;
5187 u8 gre_tunnel_mode;
5188 u8 gre_tunnel_rss;
5189 u8 nvgre_clss_en;
5190 u32 reserved3;
5035}; 5191};
5036 5192
5037
5038/* 5193/*
5039 * FW version stored in the Xstorm RAM 5194 * FW version stored in the Xstorm RAM
5040 */ 5195 */
@@ -5061,6 +5216,22 @@ struct fw_version {
5061#define __FW_VERSION_RESERVED_SHIFT 4 5216#define __FW_VERSION_RESERVED_SHIFT 4
5062}; 5217};
5063 5218
5219/* GRE RSS Mode */
5220enum gre_rss_mode {
5221 GRE_OUTER_HEADERS_RSS,
5222 GRE_INNER_HEADERS_RSS,
5223 NVGRE_KEY_ENTROPY_RSS,
5224 MAX_GRE_RSS_MODE
5225};
5226
5227/* GRE Tunnel Mode */
5228enum gre_tunnel_type {
5229 NO_GRE_TUNNEL,
5230 NVGRE_TUNNEL,
5231 L2GRE_TUNNEL,
5232 IPGRE_TUNNEL,
5233 MAX_GRE_TUNNEL_TYPE
5234};
5064 5235
5065/* 5236/*
5066 * Dynamic Host-Coalescing - Driver(host) counters 5237 * Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
5224 MAX_IP_VER 5395 MAX_IP_VER
5225}; 5396};
5226 5397
5398/*
5399 * Malicious VF error ID
5400 */
5401enum malicious_vf_error_id {
5402 VF_PF_CHANNEL_NOT_READY,
5403 ETH_ILLEGAL_BD_LENGTHS,
5404 ETH_PACKET_TOO_SHORT,
5405 ETH_PAYLOAD_TOO_BIG,
5406 ETH_ILLEGAL_ETH_TYPE,
5407 ETH_ILLEGAL_LSO_HDR_LEN,
5408 ETH_TOO_MANY_BDS,
5409 ETH_ZERO_HDR_NBDS,
5410 ETH_START_BD_NOT_SET,
5411 ETH_ILLEGAL_PARSE_NBDS,
5412 ETH_IPV6_AND_CHECKSUM,
5413 ETH_VLAN_FLG_INCORRECT,
5414 ETH_ILLEGAL_LSO_MSS,
5415 ETH_TUNNEL_NOT_SUPPORTED,
5416 MAX_MALICIOUS_VF_ERROR_ID
5417};
5227 5418
5228/* 5419/*
5229 * Multi-function modes 5420 * Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
5368 union protocol_common_specific_data data; 5559 union protocol_common_specific_data data;
5369}; 5560};
5370 5561
5371
5372/* 5562/*
5373 * The send queue element 5563 * The send queue element
5374 */ 5564 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 0283f343b0d1..9d64b988ab34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
27#include "bnx2x.h" 27#include "bnx2x.h"
28#include "bnx2x_cmn.h" 28#include "bnx2x_cmn.h"
29 29
30typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
31 struct link_params *params,
32 u8 dev_addr, u16 addr, u8 byte_cnt,
33 u8 *o_buf, u8);
30/********************************************************/ 34/********************************************************/
31#define ETH_HLEN 14 35#define ETH_HLEN 14
32/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ 36/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
152#define SFP_EEPROM_CON_TYPE_ADDR 0x2 156#define SFP_EEPROM_CON_TYPE_ADDR 0x2
153 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 157 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
154 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 158 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
159 #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
155 160
156 161
157#define SFP_EEPROM_COMP_CODE_ADDR 0x3 162#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
3127 int rc = 0; 3132 int rc = 0;
3128 struct bnx2x *bp = params->bp; 3133 struct bnx2x *bp = params->bp;
3129 3134
3130 if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
3131 DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
3132 return -EINVAL;
3133 }
3134
3135 if (xfer_cnt > 16) { 3135 if (xfer_cnt > 16) {
3136 DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", 3136 DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
3137 xfer_cnt); 3137 xfer_cnt);
@@ -3426,13 +3426,19 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
3426 3426
3427 switch (phy->req_flow_ctrl) { 3427 switch (phy->req_flow_ctrl) {
3428 case BNX2X_FLOW_CTRL_AUTO: 3428 case BNX2X_FLOW_CTRL_AUTO:
3429 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) 3429 switch (params->req_fc_auto_adv) {
3430 case BNX2X_FLOW_CTRL_BOTH:
3430 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3431 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3431 else 3432 break;
3433 case BNX2X_FLOW_CTRL_RX:
3434 case BNX2X_FLOW_CTRL_TX:
3432 *ieee_fc |= 3435 *ieee_fc |=
3433 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3436 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3437 break;
3438 default:
3439 break;
3440 }
3434 break; 3441 break;
3435
3436 case BNX2X_FLOW_CTRL_TX: 3442 case BNX2X_FLOW_CTRL_TX:
3437 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3443 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3438 break; 3444 break;
@@ -3629,6 +3635,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3629 * init configuration, and set/clear SGMII flag. Internal 3635 * init configuration, and set/clear SGMII flag. Internal
3630 * phy init is done purely in phy_init stage. 3636 * phy init is done purely in phy_init stage.
3631 */ 3637 */
3638#define WC_TX_DRIVER(post2, idriver, ipre) \
3639 ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
3640 (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
3641 (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
3642
3643#define WC_TX_FIR(post, main, pre) \
3644 ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
3645 (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
3646 (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
3647
3632static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, 3648static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3633 struct link_params *params, 3649 struct link_params *params,
3634 struct link_vars *vars) 3650 struct link_vars *vars)
@@ -3728,7 +3744,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3728 if (((vars->line_speed == SPEED_AUTO_NEG) && 3744 if (((vars->line_speed == SPEED_AUTO_NEG) &&
3729 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 3745 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
3730 (vars->line_speed == SPEED_1000)) { 3746 (vars->line_speed == SPEED_1000)) {
3731 u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; 3747 u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
3732 an_adv |= (1<<5); 3748 an_adv |= (1<<5);
3733 3749
3734 /* Enable CL37 1G Parallel Detect */ 3750 /* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3769,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3753 /* Set Transmit PMD settings */ 3769 /* Set Transmit PMD settings */
3754 lane = bnx2x_get_warpcore_lane(phy, params); 3770 lane = bnx2x_get_warpcore_lane(phy, params);
3755 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3771 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3756 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 3772 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
3757 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | 3773 WC_TX_DRIVER(0x02, 0x06, 0x09));
3758 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3759 (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3760 /* Configure the next lane if dual mode */ 3774 /* Configure the next lane if dual mode */
3761 if (phy->flags & FLAGS_WC_DUAL_MODE) 3775 if (phy->flags & FLAGS_WC_DUAL_MODE)
3762 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3776 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3763 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), 3777 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
3764 ((0x02 << 3778 WC_TX_DRIVER(0x02, 0x06, 0x09));
3765 MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3766 (0x06 <<
3767 MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3768 (0x09 <<
3769 MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3770 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3779 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3771 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 3780 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
3772 0x03f0); 3781 0x03f0);
@@ -3909,6 +3918,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3909{ 3918{
3910 struct bnx2x *bp = params->bp; 3919 struct bnx2x *bp = params->bp;
3911 u16 misc1_val, tap_val, tx_driver_val, lane, val; 3920 u16 misc1_val, tap_val, tx_driver_val, lane, val;
3921 u32 cfg_tap_val, tx_drv_brdct, tx_equal;
3922
3912 /* Hold rxSeqStart */ 3923 /* Hold rxSeqStart */
3913 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3924 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3914 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); 3925 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3963,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3952 3963
3953 if (is_xfi) { 3964 if (is_xfi) {
3954 misc1_val |= 0x5; 3965 misc1_val |= 0x5;
3955 tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | 3966 tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
3956 (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | 3967 tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
3957 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
3958 tx_driver_val =
3959 ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3960 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3961 (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
3962
3963 } else { 3968 } else {
3969 cfg_tap_val = REG_RD(bp, params->shmem_base +
3970 offsetof(struct shmem_region, dev_info.
3971 port_hw_config[params->port].
3972 sfi_tap_values));
3973
3974 tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
3975
3976 tx_drv_brdct = (cfg_tap_val &
3977 PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
3978 PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
3979
3964 misc1_val |= 0x9; 3980 misc1_val |= 0x9;
3965 tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | 3981
3966 (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | 3982 /* TAP values are controlled by nvram, if value there isn't 0 */
3967 (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); 3983 if (tx_equal)
3968 tx_driver_val = 3984 tap_val = (u16)tx_equal;
3969 ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | 3985 else
3970 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | 3986 tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
3971 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); 3987
3988 if (tx_drv_brdct)
3989 tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
3990 0x06);
3991 else
3992 tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
3972 } 3993 }
3973 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3994 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3974 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); 3995 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4126,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
4105 /* Set Transmit PMD settings */ 4126 /* Set Transmit PMD settings */
4106 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4127 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4107 MDIO_WC_REG_TX_FIR_TAP, 4128 MDIO_WC_REG_TX_FIR_TAP,
4108 ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | 4129 (WC_TX_FIR(0x12, 0x2d, 0x00) |
4109 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | 4130 MDIO_WC_REG_TX_FIR_TAP_ENABLE));
4110 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
4111 MDIO_WC_REG_TX_FIR_TAP_ENABLE));
4112 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4131 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4113 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 4132 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
4114 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | 4133 WC_TX_DRIVER(0x02, 0x02, 0x02));
4115 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
4116 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
4117} 4134}
4118 4135
4119static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, 4136static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4767,8 @@ void bnx2x_link_status_update(struct link_params *params,
4750 port_mb[port].link_status)); 4767 port_mb[port].link_status));
4751 4768
4752 /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ 4769 /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
4753 if (bp->link_params.loopback_mode != LOOPBACK_NONE && 4770 if (params->loopback_mode != LOOPBACK_NONE &&
4754 bp->link_params.loopback_mode != LOOPBACK_EXT) 4771 params->loopback_mode != LOOPBACK_EXT)
4755 vars->link_status |= LINK_STATUS_LINK_UP; 4772 vars->link_status |= LINK_STATUS_LINK_UP;
4756 4773
4757 if (bnx2x_eee_has_cap(params)) 4774 if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7775,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
7758 7775
7759static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7776static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7760 struct link_params *params, 7777 struct link_params *params,
7761 u16 addr, u8 byte_cnt, u8 *o_buf) 7778 u8 dev_addr, u16 addr, u8 byte_cnt,
7779 u8 *o_buf, u8 is_init)
7762{ 7780{
7763 struct bnx2x *bp = params->bp; 7781 struct bnx2x *bp = params->bp;
7764 u16 val = 0; 7782 u16 val = 0;
@@ -7771,7 +7789,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7771 /* Set the read command byte count */ 7789 /* Set the read command byte count */
7772 bnx2x_cl45_write(bp, phy, 7790 bnx2x_cl45_write(bp, phy,
7773 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 7791 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
7774 (byte_cnt | 0xa000)); 7792 (byte_cnt | (dev_addr << 8)));
7775 7793
7776 /* Set the read command address */ 7794 /* Set the read command address */
7777 bnx2x_cl45_write(bp, phy, 7795 bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7863,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
7845} 7863}
7846static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7864static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7847 struct link_params *params, 7865 struct link_params *params,
7866 u8 dev_addr,
7848 u16 addr, u8 byte_cnt, 7867 u16 addr, u8 byte_cnt,
7849 u8 *o_buf, u8 is_init) 7868 u8 *o_buf, u8 is_init)
7850{ 7869{
@@ -7869,7 +7888,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7869 usleep_range(1000, 2000); 7888 usleep_range(1000, 2000);
7870 bnx2x_warpcore_power_module(params, 1); 7889 bnx2x_warpcore_power_module(params, 1);
7871 } 7890 }
7872 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, 7891 rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
7873 data_array); 7892 data_array);
7874 } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); 7893 } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
7875 7894
@@ -7885,7 +7904,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7885 7904
7886static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, 7905static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7887 struct link_params *params, 7906 struct link_params *params,
7888 u16 addr, u8 byte_cnt, u8 *o_buf) 7907 u8 dev_addr, u16 addr, u8 byte_cnt,
7908 u8 *o_buf, u8 is_init)
7889{ 7909{
7890 struct bnx2x *bp = params->bp; 7910 struct bnx2x *bp = params->bp;
7891 u16 val, i; 7911 u16 val, i;
@@ -7896,6 +7916,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7896 return -EINVAL; 7916 return -EINVAL;
7897 } 7917 }
7898 7918
7919 /* Set 2-wire transfer rate of SFP+ module EEPROM
7920 * to 100Khz since some DACs(direct attached cables) do
7921 * not work at 400Khz.
7922 */
7923 bnx2x_cl45_write(bp, phy,
7924 MDIO_PMA_DEVAD,
7925 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
7926 ((dev_addr << 8) | 1));
7927
7899 /* Need to read from 1.8000 to clear it */ 7928 /* Need to read from 1.8000 to clear it */
7900 bnx2x_cl45_read(bp, phy, 7929 bnx2x_cl45_read(bp, phy,
7901 MDIO_PMA_DEVAD, 7930 MDIO_PMA_DEVAD,
@@ -7968,26 +7997,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7968 7997
7969 return -EINVAL; 7998 return -EINVAL;
7970} 7999}
7971
7972int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 8000int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7973 struct link_params *params, u16 addr, 8001 struct link_params *params, u8 dev_addr,
7974 u8 byte_cnt, u8 *o_buf) 8002 u16 addr, u16 byte_cnt, u8 *o_buf)
7975{ 8003{
7976 int rc = -EOPNOTSUPP; 8004 int rc = 0;
8005 struct bnx2x *bp = params->bp;
8006 u8 xfer_size;
8007 u8 *user_data = o_buf;
8008 read_sfp_module_eeprom_func_p read_func;
8009
8010 if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
8011 DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
8012 return -EINVAL;
8013 }
8014
7977 switch (phy->type) { 8015 switch (phy->type) {
7978 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7979 rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 8017 read_func = bnx2x_8726_read_sfp_module_eeprom;
7980 byte_cnt, o_buf); 8018 break;
7981 break;
7982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 8019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: 8020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
7984 rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, 8021 read_func = bnx2x_8727_read_sfp_module_eeprom;
7985 byte_cnt, o_buf); 8022 break;
7986 break;
7987 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 8023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7988 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, 8024 read_func = bnx2x_warpcore_read_sfp_module_eeprom;
7989 byte_cnt, o_buf, 0); 8025 break;
7990 break; 8026 default:
8027 return -EOPNOTSUPP;
8028 }
8029
8030 while (!rc && (byte_cnt > 0)) {
8031 xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
8032 SFP_EEPROM_PAGE_SIZE : byte_cnt;
8033 rc = read_func(phy, params, dev_addr, addr, xfer_size,
8034 user_data, 0);
8035 byte_cnt -= xfer_size;
8036 user_data += xfer_size;
8037 addr += xfer_size;
7991 } 8038 }
7992 return rc; 8039 return rc;
7993} 8040}
@@ -8004,6 +8051,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8004 /* First check for copper cable */ 8051 /* First check for copper cable */
8005 if (bnx2x_read_sfp_module_eeprom(phy, 8052 if (bnx2x_read_sfp_module_eeprom(phy,
8006 params, 8053 params,
8054 I2C_DEV_ADDR_A0,
8007 SFP_EEPROM_CON_TYPE_ADDR, 8055 SFP_EEPROM_CON_TYPE_ADDR,
8008 2, 8056 2,
8009 (u8 *)val) != 0) { 8057 (u8 *)val) != 0) {
@@ -8021,6 +8069,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8021 */ 8069 */
8022 if (bnx2x_read_sfp_module_eeprom(phy, 8070 if (bnx2x_read_sfp_module_eeprom(phy,
8023 params, 8071 params,
8072 I2C_DEV_ADDR_A0,
8024 SFP_EEPROM_FC_TX_TECH_ADDR, 8073 SFP_EEPROM_FC_TX_TECH_ADDR,
8025 1, 8074 1,
8026 &copper_module_type) != 0) { 8075 &copper_module_type) != 0) {
@@ -8049,20 +8098,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8049 break; 8098 break;
8050 } 8099 }
8051 case SFP_EEPROM_CON_TYPE_VAL_LC: 8100 case SFP_EEPROM_CON_TYPE_VAL_LC:
8101 case SFP_EEPROM_CON_TYPE_VAL_RJ45:
8052 check_limiting_mode = 1; 8102 check_limiting_mode = 1;
8053 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | 8103 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
8054 SFP_EEPROM_COMP_CODE_LR_MASK | 8104 SFP_EEPROM_COMP_CODE_LR_MASK |
8055 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { 8105 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
8056 DP(NETIF_MSG_LINK, "1G Optic module detected\n"); 8106 DP(NETIF_MSG_LINK, "1G SFP module detected\n");
8057 gport = params->port; 8107 gport = params->port;
8058 phy->media_type = ETH_PHY_SFP_1G_FIBER; 8108 phy->media_type = ETH_PHY_SFP_1G_FIBER;
8059 phy->req_line_speed = SPEED_1000; 8109 if (phy->req_line_speed != SPEED_1000) {
8060 if (!CHIP_IS_E1x(bp)) 8110 phy->req_line_speed = SPEED_1000;
8061 gport = BP_PATH(bp) + (params->port << 1); 8111 if (!CHIP_IS_E1x(bp)) {
8062 netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps." 8112 gport = BP_PATH(bp) +
8063 " Current SFP module in port %d is not" 8113 (params->port << 1);
8064 " compliant with 10G Ethernet\n", 8114 }
8065 gport); 8115 netdev_err(bp->dev,
8116 "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
8117 gport);
8118 }
8066 } else { 8119 } else {
8067 int idx, cfg_idx = 0; 8120 int idx, cfg_idx = 0;
8068 DP(NETIF_MSG_LINK, "10G Optic module detected\n"); 8121 DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8154,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8101 u8 options[SFP_EEPROM_OPTIONS_SIZE]; 8154 u8 options[SFP_EEPROM_OPTIONS_SIZE];
8102 if (bnx2x_read_sfp_module_eeprom(phy, 8155 if (bnx2x_read_sfp_module_eeprom(phy,
8103 params, 8156 params,
8157 I2C_DEV_ADDR_A0,
8104 SFP_EEPROM_OPTIONS_ADDR, 8158 SFP_EEPROM_OPTIONS_ADDR,
8105 SFP_EEPROM_OPTIONS_SIZE, 8159 SFP_EEPROM_OPTIONS_SIZE,
8106 options) != 0) { 8160 options) != 0) {
@@ -8167,6 +8221,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
8167 /* Format the warning message */ 8221 /* Format the warning message */
8168 if (bnx2x_read_sfp_module_eeprom(phy, 8222 if (bnx2x_read_sfp_module_eeprom(phy,
8169 params, 8223 params,
8224 I2C_DEV_ADDR_A0,
8170 SFP_EEPROM_VENDOR_NAME_ADDR, 8225 SFP_EEPROM_VENDOR_NAME_ADDR,
8171 SFP_EEPROM_VENDOR_NAME_SIZE, 8226 SFP_EEPROM_VENDOR_NAME_SIZE,
8172 (u8 *)vendor_name)) 8227 (u8 *)vendor_name))
@@ -8175,6 +8230,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
8175 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 8230 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
8176 if (bnx2x_read_sfp_module_eeprom(phy, 8231 if (bnx2x_read_sfp_module_eeprom(phy,
8177 params, 8232 params,
8233 I2C_DEV_ADDR_A0,
8178 SFP_EEPROM_PART_NO_ADDR, 8234 SFP_EEPROM_PART_NO_ADDR,
8179 SFP_EEPROM_PART_NO_SIZE, 8235 SFP_EEPROM_PART_NO_SIZE,
8180 (u8 *)vendor_pn)) 8236 (u8 *)vendor_pn))
@@ -8205,12 +8261,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8205 8261
8206 for (timeout = 0; timeout < 60; timeout++) { 8262 for (timeout = 0; timeout < 60; timeout++) {
8207 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 8263 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8208 rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, 8264 rc = bnx2x_warpcore_read_sfp_module_eeprom(
8209 params, 1, 8265 phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
8210 1, &val, 1); 8266 1);
8211 else 8267 else
8212 rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, 8268 rc = bnx2x_read_sfp_module_eeprom(phy, params,
8213 &val); 8269 I2C_DEV_ADDR_A0,
8270 1, 1, &val);
8214 if (rc == 0) { 8271 if (rc == 0) {
8215 DP(NETIF_MSG_LINK, 8272 DP(NETIF_MSG_LINK,
8216 "SFP+ module initialization took %d ms\n", 8273 "SFP+ module initialization took %d ms\n",
@@ -8219,7 +8276,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8219 } 8276 }
8220 usleep_range(5000, 10000); 8277 usleep_range(5000, 10000);
8221 } 8278 }
8222 rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val); 8279 rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
8280 1, 1, &val);
8223 return rc; 8281 return rc;
8224} 8282}
8225 8283
@@ -8376,15 +8434,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
8376 bnx2x_cl45_write(bp, phy, 8434 bnx2x_cl45_write(bp, phy,
8377 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, 8435 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
8378 val); 8436 val);
8379
8380 /* Set 2-wire transfer rate of SFP+ module EEPROM
8381 * to 100Khz since some DACs(direct attached cables) do
8382 * not work at 400Khz.
8383 */
8384 bnx2x_cl45_write(bp, phy,
8385 MDIO_PMA_DEVAD,
8386 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
8387 0xa001);
8388 break; 8437 break;
8389 default: 8438 default:
8390 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 8439 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9577,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9528 } else { 9577 } else {
9529 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9578 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9530 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9579 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
9531 for (i = 0; i < ARRAY_SIZE(reg_set); 9580 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
9532 i++)
9533 bnx2x_cl45_write(bp, phy, reg_set[i].devad, 9581 bnx2x_cl45_write(bp, phy, reg_set[i].devad,
9534 reg_set[i].reg, reg_set[i].val); 9582 reg_set[i].reg, reg_set[i].val);
9535 9583
@@ -10281,7 +10329,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10281 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; 10329 LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
10282 10330
10283 /* Determine if EEE was negotiated */ 10331 /* Determine if EEE was negotiated */
10284 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 10332 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
10333 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
10285 bnx2x_eee_an_resolve(phy, params, vars); 10334 bnx2x_eee_an_resolve(phy, params, vars);
10286 } 10335 }
10287 10336
@@ -12242,7 +12291,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
12242 12291
12243 bnx2x_xgxs_deassert(params); 12292 bnx2x_xgxs_deassert(params);
12244 12293
12245 /* set bmac loopback */ 12294 /* Set bmac loopback */
12246 bnx2x_bmac_enable(params, vars, 1, 1); 12295 bnx2x_bmac_enable(params, vars, 1, 1);
12247 12296
12248 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12297 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12310,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
12261 vars->phy_flags = PHY_XGXS_FLAG; 12310 vars->phy_flags = PHY_XGXS_FLAG;
12262 12311
12263 bnx2x_xgxs_deassert(params); 12312 bnx2x_xgxs_deassert(params);
12264 /* set bmac loopback */ 12313 /* Set bmac loopback */
12265 bnx2x_emac_enable(params, vars, 1); 12314 bnx2x_emac_enable(params, vars, 1);
12266 bnx2x_emac_program(params, vars); 12315 bnx2x_emac_program(params, vars);
12267 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12316 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12570,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12521 params->req_line_speed[0], params->req_flow_ctrl[0]); 12570 params->req_line_speed[0], params->req_flow_ctrl[0]);
12522 DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", 12571 DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
12523 params->req_line_speed[1], params->req_flow_ctrl[1]); 12572 params->req_line_speed[1], params->req_flow_ctrl[1]);
12573 DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
12524 vars->link_status = 0; 12574 vars->link_status = 0;
12525 vars->phy_link_up = 0; 12575 vars->phy_link_up = 0;
12526 vars->link_up = 0; 12576 vars->link_up = 0;
@@ -13440,8 +13490,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13440 int sigdet; 13490 int sigdet;
13441 13491
13442 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery 13492 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
13443 * since some switches tend to reinit the AN process and clear the 13493 * Since some switches tend to reinit the AN process and clear the
13444 * advertised BP/NP after ~2 seconds causing the KR2 to be disabled 13494 * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
13445 * and recovered many times 13495 * and recovered many times
13446 */ 13496 */
13447 if (vars->check_kr2_recovery_cnt > 0) { 13497 if (vars->check_kr2_recovery_cnt > 0) {
@@ -13469,8 +13519,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13469 13519
13470 /* CL73 has not begun yet */ 13520 /* CL73 has not begun yet */
13471 if (base_page == 0) { 13521 if (base_page == 0) {
13472 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) 13522 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13473 bnx2x_kr2_recovery(params, vars, phy); 13523 bnx2x_kr2_recovery(params, vars, phy);
13524 DP(NETIF_MSG_LINK, "No BP\n");
13525 }
13474 return; 13526 return;
13475 } 13527 }
13476 13528
@@ -13486,7 +13538,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13486 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13538 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13487 if (!not_kr2_device) { 13539 if (!not_kr2_device) {
13488 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, 13540 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
13489 next_page); 13541 next_page);
13490 bnx2x_kr2_recovery(params, vars, phy); 13542 bnx2x_kr2_recovery(params, vars, phy);
13491 } 13543 }
13492 return; 13544 return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c8..4df45234fdc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
41#define SPEED_AUTO_NEG 0 41#define SPEED_AUTO_NEG 0
42#define SPEED_20000 20000 42#define SPEED_20000 20000
43 43
44#define I2C_DEV_ADDR_A0 0xa0
45#define I2C_DEV_ADDR_A2 0xa2
46
44#define SFP_EEPROM_PAGE_SIZE 16 47#define SFP_EEPROM_PAGE_SIZE 16
45#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 48#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
46#define SFP_EEPROM_VENDOR_NAME_SIZE 16 49#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
54#define SFP_EEPROM_SERIAL_SIZE 16 57#define SFP_EEPROM_SERIAL_SIZE 16
55#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ 58#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
56#define SFP_EEPROM_DATE_SIZE 6 59#define SFP_EEPROM_DATE_SIZE 6
60#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
61#define SFP_EEPROM_DIAG_TYPE_SIZE 1
62#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
63#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
64#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
65
66#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
67#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
68
57#define PWR_FLT_ERR_MSG_LEN 250 69#define PWR_FLT_ERR_MSG_LEN 250
58 70
59#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ 71#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
420 432
421/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ 433/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
422int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 434int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
423 struct link_params *params, u16 addr, 435 struct link_params *params, u8 dev_addr,
424 u8 byte_cnt, u8 *o_buf); 436 u16 addr, u16 byte_cnt, u8 *o_buf);
425 437
426void bnx2x_hw_reset_phy(struct link_params *params); 438void bnx2x_hw_reset_phy(struct link_params *params);
427 439
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c50696b396f1..b4c9dea93a53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
75#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 75#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
76#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 76#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
77 77
78#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
79
80/* Time in jiffies before concluding the transmitter is hung */ 78/* Time in jiffies before concluding the transmitter is hung */
81#define TX_TIMEOUT (5*HZ) 79#define TX_TIMEOUT (5*HZ)
82 80
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2955 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 2953 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2956 2954
2957 /* tx only connections collect statistics (on the same index as the 2955 /* tx only connections collect statistics (on the same index as the
2958 * parent connection). The statistics are zeroed when the parent 2956 * parent connection). The statistics are zeroed when the parent
2959 * connection is initialized. 2957 * connection is initialized.
2960 */ 2958 */
2961 2959
2962 __set_bit(BNX2X_Q_FLG_STATS, &flags); 2960 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2963 if (zero_stats) 2961 if (zero_stats)
2964 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 2962 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2965 2963
2964 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
2965 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
2966 2966
2967#ifdef BNX2X_STOP_ON_ERROR 2967#ifdef BNX2X_STOP_ON_ERROR
2968 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 2968 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3227{ 3227{
3228 struct eth_stats_info *ether_stat = 3228 struct eth_stats_info *ether_stat =
3229 &bp->slowpath->drv_info_to_mcp.ether_stat; 3229 &bp->slowpath->drv_info_to_mcp.ether_stat;
3230 struct bnx2x_vlan_mac_obj *mac_obj =
3231 &bp->sp_objs->mac_obj;
3232 int i;
3230 3233
3231 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3234 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3232 ETH_STAT_INFO_VERSION_LEN); 3235 ETH_STAT_INFO_VERSION_LEN);
3233 3236
3234 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3237 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3235 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3238 * mac_local field in ether_stat struct. The base address is offset by 2
3236 ether_stat->mac_local); 3239 * bytes to account for the field being 8 bytes but a mac address is
3237 3240 * only 6 bytes. Likewise, the stride for the get_n_elements function is
3241 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
3242 * allocated by the ether_stat struct, so the macs will land in their
3243 * proper positions.
3244 */
3245 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3246 memset(ether_stat->mac_local + i, 0,
3247 sizeof(ether_stat->mac_local[0]));
3248 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3249 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3250 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3251 ETH_ALEN);
3238 ether_stat->mtu_size = bp->dev->mtu; 3252 ether_stat->mtu_size = bp->dev->mtu;
3239
3240 if (bp->dev->features & NETIF_F_RXCSUM) 3253 if (bp->dev->features & NETIF_F_RXCSUM)
3241 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3254 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3242 if (bp->dev->features & NETIF_F_TSO) 3255 if (bp->dev->features & NETIF_F_TSO)
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3258 if (!CNIC_LOADED(bp)) 3271 if (!CNIC_LOADED(bp))
3259 return; 3272 return;
3260 3273
3261 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3274 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3262 bp->fip_mac, ETH_ALEN);
3263 3275
3264 fcoe_stat->qos_priority = 3276 fcoe_stat->qos_priority =
3265 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3277 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3361 if (!CNIC_LOADED(bp)) 3373 if (!CNIC_LOADED(bp))
3362 return; 3374 return;
3363 3375
3364 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, 3376 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3365 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3377 ETH_ALEN);
3366 3378
3367 iscsi_stat->qos_priority = 3379 iscsi_stat->qos_priority =
3368 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3380 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6018,10 +6030,11 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
6018 mmiowb(); 6030 mmiowb();
6019} 6031}
6020 6032
6021void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 6033void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6022{ 6034{
6023 int i; 6035 int i;
6024 6036
6037 /* Setup NIC internals and enable interrupts */
6025 for_each_eth_queue(bp, i) 6038 for_each_eth_queue(bp, i)
6026 bnx2x_init_eth_fp(bp, i); 6039 bnx2x_init_eth_fp(bp, i);
6027 6040
@@ -6030,17 +6043,26 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
6030 bnx2x_init_rx_rings(bp); 6043 bnx2x_init_rx_rings(bp);
6031 bnx2x_init_tx_rings(bp); 6044 bnx2x_init_tx_rings(bp);
6032 6045
6033 if (IS_VF(bp)) 6046 if (IS_VF(bp)) {
6047 bnx2x_memset_stats(bp);
6034 return; 6048 return;
6049 }
6035 6050
6036 /* Initialize MOD_ABS interrupts */ 6051 if (IS_PF(bp)) {
6037 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6052 /* Initialize MOD_ABS interrupts */
6038 bp->common.shmem_base, bp->common.shmem2_base, 6053 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6039 BP_PORT(bp)); 6054 bp->common.shmem_base,
6055 bp->common.shmem2_base, BP_PORT(bp));
6040 6056
6041 bnx2x_init_def_sb(bp); 6057 /* initialize the default status block and sp ring */
6042 bnx2x_update_dsb_idx(bp); 6058 bnx2x_init_def_sb(bp);
6043 bnx2x_init_sp_ring(bp); 6059 bnx2x_update_dsb_idx(bp);
6060 bnx2x_init_sp_ring(bp);
6061 }
6062}
6063
6064void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6065{
6044 bnx2x_init_eq_ring(bp); 6066 bnx2x_init_eq_ring(bp);
6045 bnx2x_init_internal(bp, load_code); 6067 bnx2x_init_internal(bp, load_code);
6046 bnx2x_pf_init(bp); 6068 bnx2x_pf_init(bp);
@@ -6058,12 +6080,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
6058 AEU_INPUTS_ATTN_BITS_SPIO5); 6080 AEU_INPUTS_ATTN_BITS_SPIO5);
6059} 6081}
6060 6082
6061/* end of nic init */ 6083/* gzip service functions */
6062
6063/*
6064 * gzip service functions
6065 */
6066
6067static int bnx2x_gunzip_init(struct bnx2x *bp) 6084static int bnx2x_gunzip_init(struct bnx2x *bp)
6068{ 6085{
6069 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, 6086 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@ -7757,6 +7774,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
7757 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 7774 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7758 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7775 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7759 7776
7777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7778
7760 bnx2x_iov_free_mem(bp); 7779 bnx2x_iov_free_mem(bp);
7761} 7780}
7762 7781
@@ -7773,7 +7792,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7773 sizeof(struct 7792 sizeof(struct
7774 host_hc_status_block_e1x)); 7793 host_hc_status_block_e1x));
7775 7794
7776 if (CONFIGURE_NIC_MODE(bp)) 7795 if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
7777 /* allocate searcher T2 table, as it wan't allocated before */ 7796 /* allocate searcher T2 table, as it wan't allocated before */
7778 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7797 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7779 7798
@@ -7796,7 +7815,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
7796{ 7815{
7797 int i, allocated, context_size; 7816 int i, allocated, context_size;
7798 7817
7799 if (!CONFIGURE_NIC_MODE(bp)) 7818 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
7800 /* allocate searcher T2 table */ 7819 /* allocate searcher T2 table */
7801 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7820 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7802 7821
@@ -7917,8 +7936,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
7917 7936
7918int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) 7937int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7919{ 7938{
7920 unsigned long ramrod_flags = 0;
7921
7922 if (is_zero_ether_addr(bp->dev->dev_addr) && 7939 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7923 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 7940 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7924 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7941 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@ -7926,12 +7943,18 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7926 return 0; 7943 return 0;
7927 } 7944 }
7928 7945
7929 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 7946 if (IS_PF(bp)) {
7947 unsigned long ramrod_flags = 0;
7930 7948
7931 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 7949 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7932 /* Eth MAC is set on RSS leading client (fp[0]) */ 7950 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7933 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj, 7951 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
7934 set, BNX2X_ETH_MAC, &ramrod_flags); 7952 &bp->sp_objs->mac_obj, set,
7953 BNX2X_ETH_MAC, &ramrod_flags);
7954 } else { /* vf */
7955 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
7956 bp->fp->index, true);
7957 }
7935} 7958}
7936 7959
7937int bnx2x_setup_leading(struct bnx2x *bp) 7960int bnx2x_setup_leading(struct bnx2x *bp)
@@ -9525,6 +9548,10 @@ sp_rtnl_not_reset:
9525 bnx2x_vfpf_storm_rx_mode(bp); 9548 bnx2x_vfpf_storm_rx_mode(bp);
9526 } 9549 }
9527 9550
9551 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9552 &bp->sp_rtnl_state))
9553 bnx2x_pf_set_vfs_vlan(bp);
9554
9528 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9555 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9529 * can be called from other contexts as well) 9556 * can be called from other contexts as well)
9530 */ 9557 */
@@ -9532,8 +9559,10 @@ sp_rtnl_not_reset:
9532 9559
9533 /* enable SR-IOV if applicable */ 9560 /* enable SR-IOV if applicable */
9534 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 9561 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
9535 &bp->sp_rtnl_state)) 9562 &bp->sp_rtnl_state)) {
9563 bnx2x_disable_sriov(bp);
9536 bnx2x_enable_sriov(bp); 9564 bnx2x_enable_sriov(bp);
9565 }
9537} 9566}
9538 9567
9539static void bnx2x_period_task(struct work_struct *work) 9568static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9730,31 @@ static struct bnx2x_prev_path_list *
9701 return NULL; 9730 return NULL;
9702} 9731}
9703 9732
9733static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9734{
9735 struct bnx2x_prev_path_list *tmp_list;
9736 int rc;
9737
9738 rc = down_interruptible(&bnx2x_prev_sem);
9739 if (rc) {
9740 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9741 return rc;
9742 }
9743
9744 tmp_list = bnx2x_prev_path_get_entry(bp);
9745 if (tmp_list) {
9746 tmp_list->aer = 1;
9747 rc = 0;
9748 } else {
9749 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
9750 BP_PATH(bp));
9751 }
9752
9753 up(&bnx2x_prev_sem);
9754
9755 return rc;
9756}
9757
9704static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 9758static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9705{ 9759{
9706 struct bnx2x_prev_path_list *tmp_list; 9760 struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9763,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9709 if (down_trylock(&bnx2x_prev_sem)) 9763 if (down_trylock(&bnx2x_prev_sem))
9710 return false; 9764 return false;
9711 9765
9712 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { 9766 tmp_list = bnx2x_prev_path_get_entry(bp);
9713 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 9767 if (tmp_list) {
9714 bp->pdev->bus->number == tmp_list->bus && 9768 if (tmp_list->aer) {
9715 BP_PATH(bp) == tmp_list->path) { 9769 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9770 BP_PATH(bp));
9771 } else {
9716 rc = true; 9772 rc = true;
9717 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 9773 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9718 BP_PATH(bp)); 9774 BP_PATH(bp));
9719 break;
9720 } 9775 }
9721 } 9776 }
9722 9777
@@ -9730,6 +9785,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9730 struct bnx2x_prev_path_list *tmp_list; 9785 struct bnx2x_prev_path_list *tmp_list;
9731 int rc; 9786 int rc;
9732 9787
9788 rc = down_interruptible(&bnx2x_prev_sem);
9789 if (rc) {
9790 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9791 return rc;
9792 }
9793
9794 /* Check whether the entry for this path already exists */
9795 tmp_list = bnx2x_prev_path_get_entry(bp);
9796 if (tmp_list) {
9797 if (!tmp_list->aer) {
9798 BNX2X_ERR("Re-Marking the path.\n");
9799 } else {
9800 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9801 BP_PATH(bp));
9802 tmp_list->aer = 0;
9803 }
9804 up(&bnx2x_prev_sem);
9805 return 0;
9806 }
9807 up(&bnx2x_prev_sem);
9808
9809 /* Create an entry for this path and add it */
9733 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 9810 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9734 if (!tmp_list) { 9811 if (!tmp_list) {
9735 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 9812 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9816,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9739 tmp_list->bus = bp->pdev->bus->number; 9816 tmp_list->bus = bp->pdev->bus->number;
9740 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 9817 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9741 tmp_list->path = BP_PATH(bp); 9818 tmp_list->path = BP_PATH(bp);
9819 tmp_list->aer = 0;
9742 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; 9820 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9743 9821
9744 rc = down_interruptible(&bnx2x_prev_sem); 9822 rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9824,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9746 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9824 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9747 kfree(tmp_list); 9825 kfree(tmp_list);
9748 } else { 9826 } else {
9749 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", 9827 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
9750 BP_PATH(bp)); 9828 BP_PATH(bp));
9751 list_add(&tmp_list->list, &bnx2x_prev_list); 9829 list_add(&tmp_list->list, &bnx2x_prev_list);
9752 up(&bnx2x_prev_sem); 9830 up(&bnx2x_prev_sem);
9753 } 9831 }
@@ -9990,6 +10068,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
9990 } 10068 }
9991 10069
9992 do { 10070 do {
10071 int aer = 0;
9993 /* Lock MCP using an unload request */ 10072 /* Lock MCP using an unload request */
9994 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10073 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9995 if (!fw) { 10074 if (!fw) {
@@ -9998,7 +10077,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
9998 break; 10077 break;
9999 } 10078 }
10000 10079
10001 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 10080 rc = down_interruptible(&bnx2x_prev_sem);
10081 if (rc) {
10082 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10083 rc);
10084 } else {
10085 /* If Path is marked by EEH, ignore unload status */
10086 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10087 bnx2x_prev_path_get_entry(bp)->aer);
10088 up(&bnx2x_prev_sem);
10089 }
10090
10091 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10002 rc = bnx2x_prev_unload_common(bp); 10092 rc = bnx2x_prev_unload_common(bp);
10003 break; 10093 break;
10004 } 10094 }
@@ -10038,8 +10128,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10038 id = ((val & 0xffff) << 16); 10128 id = ((val & 0xffff) << 16);
10039 val = REG_RD(bp, MISC_REG_CHIP_REV); 10129 val = REG_RD(bp, MISC_REG_CHIP_REV);
10040 id |= ((val & 0xf) << 12); 10130 id |= ((val & 0xf) << 12);
10041 val = REG_RD(bp, MISC_REG_CHIP_METAL); 10131
10042 id |= ((val & 0xff) << 4); 10132 /* Metal is read from PCI regs, but we can't access >=0x400 from
10133 * the configuration space (so we need to reg_rd)
10134 */
10135 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10136 id |= (((val >> 24) & 0xf) << 4);
10043 val = REG_RD(bp, MISC_REG_BOND_ID); 10137 val = REG_RD(bp, MISC_REG_BOND_ID);
10044 id |= (val & 0xf); 10138 id |= (val & 0xf);
10045 bp->common.chip_id = id; 10139 bp->common.chip_id = id;
@@ -10575,10 +10669,12 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10575 10669
10576 bp->link_params.speed_cap_mask[0] = 10670 bp->link_params.speed_cap_mask[0] =
10577 SHMEM_RD(bp, 10671 SHMEM_RD(bp,
10578 dev_info.port_hw_config[port].speed_capability_mask); 10672 dev_info.port_hw_config[port].speed_capability_mask) &
10673 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10579 bp->link_params.speed_cap_mask[1] = 10674 bp->link_params.speed_cap_mask[1] =
10580 SHMEM_RD(bp, 10675 SHMEM_RD(bp,
10581 dev_info.port_hw_config[port].speed_capability_mask2); 10676 dev_info.port_hw_config[port].speed_capability_mask2) &
10677 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10582 bp->port.link_config[0] = 10678 bp->port.link_config[0] =
10583 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10679 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10584 10680
@@ -10703,6 +10799,12 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10703 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10799 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10704 BNX2X_MAX_FCOE_INIT_CONN_SHIFT; 10800 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10705 10801
10802 /* Calculate the number of maximum allowed FCoE tasks */
10803 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
10804 if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
10805 bp->cnic_eth_dev.max_fcoe_exchanges /=
10806 MAX_FCOE_FUNCS_PER_ENGINE;
10807
10706 /* Read the WWN: */ 10808 /* Read the WWN: */
10707 if (!IS_MF(bp)) { 10809 if (!IS_MF(bp)) {
10708 /* Port info */ 10810 /* Port info */
@@ -10816,14 +10918,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10816 } 10918 }
10817 } 10919 }
10818 10920
10819 if (IS_MF_STORAGE_SD(bp)) 10921 /* If this is a storage-only interface, use SAN mac as
10820 /* Zero primary MAC configuration */ 10922 * primary MAC. Notice that for SD this is already the case,
10821 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10923 * as the SAN mac was copied from the primary MAC.
10822 10924 */
10823 if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp)) 10925 if (IS_MF_FCOE_AFEX(bp))
10824 /* use FIP MAC as primary MAC */
10825 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10926 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10826
10827 } else { 10927 } else {
10828 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10928 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10829 iscsi_mac_upper); 10929 iscsi_mac_upper);
@@ -11060,6 +11160,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11060 } else 11160 } else
11061 BNX2X_DEV_INFO("illegal OV for SD\n"); 11161 BNX2X_DEV_INFO("illegal OV for SD\n");
11062 break; 11162 break;
11163 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11164 bp->mf_config[vn] = 0;
11165 break;
11063 default: 11166 default:
11064 /* Unknown configuration: reset mf_config */ 11167 /* Unknown configuration: reset mf_config */
11065 bp->mf_config[vn] = 0; 11168 bp->mf_config[vn] = 0;
@@ -11406,26 +11509,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11406 * net_device service functions 11509 * net_device service functions
11407 */ 11510 */
11408 11511
11409static int bnx2x_open_epilog(struct bnx2x *bp)
11410{
11411 /* Enable sriov via delayed work. This must be done via delayed work
11412 * because it causes the probe of the vf devices to be run, which invoke
11413 * register_netdevice which must have rtnl lock taken. As we are holding
11414 * the lock right now, that could only work if the probe would not take
11415 * the lock. However, as the probe of the vf may be called from other
11416 * contexts as well (such as passthrough to vm failes) it can't assume
11417 * the lock is being held for it. Using delayed work here allows the
11418 * probe code to simply take the lock (i.e. wait for it to be released
11419 * if it is being held).
11420 */
11421 smp_mb__before_clear_bit();
11422 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
11423 smp_mb__after_clear_bit();
11424 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11425
11426 return 0;
11427}
11428
11429/* called with rtnl_lock */ 11512/* called with rtnl_lock */
11430static int bnx2x_open(struct net_device *dev) 11513static int bnx2x_open(struct net_device *dev)
11431{ 11514{
@@ -11795,6 +11878,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11795 .ndo_setup_tc = bnx2x_setup_tc, 11878 .ndo_setup_tc = bnx2x_setup_tc,
11796#ifdef CONFIG_BNX2X_SRIOV 11879#ifdef CONFIG_BNX2X_SRIOV
11797 .ndo_set_vf_mac = bnx2x_set_vf_mac, 11880 .ndo_set_vf_mac = bnx2x_set_vf_mac,
11881 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
11882 .ndo_get_vf_config = bnx2x_get_vf_config,
11798#endif 11883#endif
11799#ifdef NETDEV_FCOE_WWNN 11884#ifdef NETDEV_FCOE_WWNN
11800 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11885 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11957,19 +12042,26 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
11957 dev->watchdog_timeo = TX_TIMEOUT; 12042 dev->watchdog_timeo = TX_TIMEOUT;
11958 12043
11959 dev->netdev_ops = &bnx2x_netdev_ops; 12044 dev->netdev_ops = &bnx2x_netdev_ops;
11960 bnx2x_set_ethtool_ops(dev); 12045 bnx2x_set_ethtool_ops(bp, dev);
11961 12046
11962 dev->priv_flags |= IFF_UNICAST_FLT; 12047 dev->priv_flags |= IFF_UNICAST_FLT;
11963 12048
11964 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 12049 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11965 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12050 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
11966 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12051 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
11967 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; 12052 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12053 if (!CHIP_IS_E1x(bp)) {
12054 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12055 dev->hw_enc_features =
12056 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12057 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12058 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12059 }
11968 12060
11969 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 12061 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11970 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12062 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
11971 12063
11972 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; 12064 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
11973 if (bp->flags & USING_DAC_FLAG) 12065 if (bp->flags & USING_DAC_FLAG)
11974 dev->features |= NETIF_F_HIGHDMA; 12066 dev->features |= NETIF_F_HIGHDMA;
11975 12067
@@ -12451,7 +12543,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12451 * l2 connections. 12543 * l2 connections.
12452 */ 12544 */
12453 if (IS_VF(bp)) { 12545 if (IS_VF(bp)) {
12454 bnx2x_vf_map_doorbells(bp); 12546 bp->doorbells = bnx2x_vf_doorbells(bp);
12455 rc = bnx2x_vf_pci_alloc(bp); 12547 rc = bnx2x_vf_pci_alloc(bp);
12456 if (rc) 12548 if (rc)
12457 goto init_one_exit; 12549 goto init_one_exit;
@@ -12479,13 +12571,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12479 goto init_one_exit; 12571 goto init_one_exit;
12480 } 12572 }
12481 12573
12482 /* Enable SRIOV if capability found in configuration space. 12574 /* Enable SRIOV if capability found in configuration space */
12483 * Once the generic SR-IOV framework makes it in from the 12575 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
12484 * pci tree this will be revised, to allow dynamic control
12485 * over the number of VFs. Right now, change the num of vfs
12486 * param below to enable SR-IOV.
12487 */
12488 rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
12489 if (rc) 12576 if (rc)
12490 goto init_one_exit; 12577 goto init_one_exit;
12491 12578
@@ -12497,16 +12584,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12497 if (CHIP_IS_E1x(bp)) 12584 if (CHIP_IS_E1x(bp))
12498 bp->flags |= NO_FCOE_FLAG; 12585 bp->flags |= NO_FCOE_FLAG;
12499 12586
12500 /* disable FCOE for 57840 device, until FW supports it */
12501 switch (ent->driver_data) {
12502 case BCM57840_O:
12503 case BCM57840_4_10:
12504 case BCM57840_2_20:
12505 case BCM57840_MFO:
12506 case BCM57840_MF:
12507 bp->flags |= NO_FCOE_FLAG;
12508 }
12509
12510 /* Set bp->num_queues for MSI-X mode*/ 12587 /* Set bp->num_queues for MSI-X mode*/
12511 bnx2x_set_num_queues(bp); 12588 bnx2x_set_num_queues(bp);
12512 12589
@@ -12640,9 +12717,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12640 12717
12641static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 12718static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12642{ 12719{
12643 int i; 12720 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
12644
12645 bp->state = BNX2X_STATE_ERROR;
12646 12721
12647 bp->rx_mode = BNX2X_RX_MODE_NONE; 12722 bp->rx_mode = BNX2X_RX_MODE_NONE;
12648 12723
@@ -12651,29 +12726,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12651 12726
12652 /* Stop Tx */ 12727 /* Stop Tx */
12653 bnx2x_tx_disable(bp); 12728 bnx2x_tx_disable(bp);
12654
12655 bnx2x_netif_stop(bp, 0);
12656 /* Delete all NAPI objects */ 12729 /* Delete all NAPI objects */
12657 bnx2x_del_all_napi(bp); 12730 bnx2x_del_all_napi(bp);
12658 if (CNIC_LOADED(bp)) 12731 if (CNIC_LOADED(bp))
12659 bnx2x_del_all_napi_cnic(bp); 12732 bnx2x_del_all_napi_cnic(bp);
12733 netdev_reset_tc(bp->dev);
12660 12734
12661 del_timer_sync(&bp->timer); 12735 del_timer_sync(&bp->timer);
12736 cancel_delayed_work(&bp->sp_task);
12737 cancel_delayed_work(&bp->period_task);
12662 12738
12663 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 12739 spin_lock_bh(&bp->stats_lock);
12664 12740 bp->stats_state = STATS_STATE_DISABLED;
12665 /* Release IRQs */ 12741 spin_unlock_bh(&bp->stats_lock);
12666 bnx2x_free_irq(bp);
12667
12668 /* Free SKBs, SGEs, TPA pool and driver internals */
12669 bnx2x_free_skbs(bp);
12670
12671 for_each_rx_queue(bp, i)
12672 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12673
12674 bnx2x_free_mem(bp);
12675 12742
12676 bp->state = BNX2X_STATE_CLOSED; 12743 bnx2x_save_statistics(bp);
12677 12744
12678 netif_carrier_off(bp->dev); 12745 netif_carrier_off(bp->dev);
12679 12746
@@ -12709,6 +12776,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12709 12776
12710 rtnl_lock(); 12777 rtnl_lock();
12711 12778
12779 BNX2X_ERR("IO error detected\n");
12780
12712 netif_device_detach(dev); 12781 netif_device_detach(dev);
12713 12782
12714 if (state == pci_channel_io_perm_failure) { 12783 if (state == pci_channel_io_perm_failure) {
@@ -12719,6 +12788,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12719 if (netif_running(dev)) 12788 if (netif_running(dev))
12720 bnx2x_eeh_nic_unload(bp); 12789 bnx2x_eeh_nic_unload(bp);
12721 12790
12791 bnx2x_prev_path_mark_eeh(bp);
12792
12722 pci_disable_device(pdev); 12793 pci_disable_device(pdev);
12723 12794
12724 rtnl_unlock(); 12795 rtnl_unlock();
@@ -12737,9 +12808,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12737{ 12808{
12738 struct net_device *dev = pci_get_drvdata(pdev); 12809 struct net_device *dev = pci_get_drvdata(pdev);
12739 struct bnx2x *bp = netdev_priv(dev); 12810 struct bnx2x *bp = netdev_priv(dev);
12811 int i;
12740 12812
12741 rtnl_lock(); 12813 rtnl_lock();
12742 12814 BNX2X_ERR("IO slot reset initializing...\n");
12743 if (pci_enable_device(pdev)) { 12815 if (pci_enable_device(pdev)) {
12744 dev_err(&pdev->dev, 12816 dev_err(&pdev->dev,
12745 "Cannot re-enable PCI device after reset\n"); 12817 "Cannot re-enable PCI device after reset\n");
@@ -12749,10 +12821,47 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12749 12821
12750 pci_set_master(pdev); 12822 pci_set_master(pdev);
12751 pci_restore_state(pdev); 12823 pci_restore_state(pdev);
12824 pci_save_state(pdev);
12752 12825
12753 if (netif_running(dev)) 12826 if (netif_running(dev))
12754 bnx2x_set_power_state(bp, PCI_D0); 12827 bnx2x_set_power_state(bp, PCI_D0);
12755 12828
12829 if (netif_running(dev)) {
12830 BNX2X_ERR("IO slot reset --> driver unload\n");
12831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
12832 u32 v;
12833
12834 v = SHMEM2_RD(bp,
12835 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
12836 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
12837 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
12838 }
12839 bnx2x_drain_tx_queues(bp);
12840 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
12841 bnx2x_netif_stop(bp, 1);
12842 bnx2x_free_irq(bp);
12843
12844 /* Report UNLOAD_DONE to MCP */
12845 bnx2x_send_unload_done(bp, true);
12846
12847 bp->sp_state = 0;
12848 bp->port.pmf = 0;
12849
12850 bnx2x_prev_unload(bp);
12851
12852 /* We should have resetted the engine, so It's fair to
12853 * assume the FW will no longer write to the bnx2x driver.
12854 */
12855 bnx2x_squeeze_objects(bp);
12856 bnx2x_free_skbs(bp);
12857 for_each_rx_queue(bp, i)
12858 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12859 bnx2x_free_fp_mem(bp);
12860 bnx2x_free_mem(bp);
12861
12862 bp->state = BNX2X_STATE_CLOSED;
12863 }
12864
12756 rtnl_unlock(); 12865 rtnl_unlock();
12757 12866
12758 return PCI_ERS_RESULT_RECOVERED; 12867 return PCI_ERS_RESULT_RECOVERED;
@@ -12779,6 +12888,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12779 12888
12780 bnx2x_eeh_recover(bp); 12889 bnx2x_eeh_recover(bp);
12781 12890
12891 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12892 DRV_MSG_SEQ_NUMBER_MASK;
12893
12782 if (netif_running(dev)) 12894 if (netif_running(dev))
12783 bnx2x_nic_load(bp, LOAD_NORMAL); 12895 bnx2x_nic_load(bp, LOAD_NORMAL);
12784 12896
@@ -12801,6 +12913,9 @@ static struct pci_driver bnx2x_pci_driver = {
12801 .suspend = bnx2x_suspend, 12913 .suspend = bnx2x_suspend,
12802 .resume = bnx2x_resume, 12914 .resume = bnx2x_resume,
12803 .err_handler = &bnx2x_err_handler, 12915 .err_handler = &bnx2x_err_handler,
12916#ifdef CONFIG_BNX2X_SRIOV
12917 .sriov_configure = bnx2x_sriov_configure,
12918#endif
12804}; 12919};
12805 12920
12806static int __init bnx2x_init(void) 12921static int __init bnx2x_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d53011..d22bc40091ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
1491/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 1491/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
1492 Port. */ 1492 Port. */
1493#define MISC_REG_BOND_ID 0xa400 1493#define MISC_REG_BOND_ID 0xa400
1494/* [R 8] These bits indicate the metal revision of the chip. This value
1495 starts at 0x00 for each all-layer tape-out and increments by one for each
1496 tape-out. */
1497#define MISC_REG_CHIP_METAL 0xa404
1498/* [R 16] These bits indicate the part number for the chip. */ 1494/* [R 16] These bits indicate the part number for the chip. */
1499#define MISC_REG_CHIP_NUM 0xa408 1495#define MISC_REG_CHIP_NUM 0xa408
1500/* [R 4] These bits indicate the base revision of the chip. This value 1496/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
6331#define PCI_PM_DATA_B 0x414 6327#define PCI_PM_DATA_B 0x414
6332#define PCI_ID_VAL1 0x434 6328#define PCI_ID_VAL1 0x434
6333#define PCI_ID_VAL2 0x438 6329#define PCI_ID_VAL2 0x438
6330#define PCI_ID_VAL3 0x43c
6331
6334#define GRC_CONFIG_REG_PF_INIT_VF 0x624 6332#define GRC_CONFIG_REG_PF_INIT_VF 0x624
6335#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf 6333#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
6336/* First VF_NUM for PF is encoded in this register. 6334/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90d..32a9609cc98b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
30 30
31#define BNX2X_MAX_EMUL_MULTI 16 31#define BNX2X_MAX_EMUL_MULTI 16
32 32
33#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34
35/**** Exe Queue interfaces ****/ 33/**** Exe Queue interfaces ****/
36 34
37/** 35/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
444} 442}
445 443
446static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 444static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447 int n, u8 *buf) 445 int n, u8 *base, u8 stride, u8 size)
448{ 446{
449 struct bnx2x_vlan_mac_registry_elem *pos; 447 struct bnx2x_vlan_mac_registry_elem *pos;
450 u8 *next = buf; 448 u8 *next = base;
451 int counter = 0; 449 int counter = 0;
452 450
453 /* traverse list */ 451 /* traverse list */
454 list_for_each_entry(pos, &o->head, link) { 452 list_for_each_entry(pos, &o->head, link) {
455 if (counter < n) { 453 if (counter < n) {
456 /* place leading zeroes in buffer */ 454 memcpy(next, &pos->u, size);
457 memset(next, 0, MAC_LEADING_ZERO_CNT);
458
459 /* place mac after leading zeroes*/
460 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461 ETH_ALEN);
462
463 /* calculate address of next element and
464 * advance counter
465 */
466 counter++; 455 counter++;
467 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); 456 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
457 counter, next);
458 next += stride + size;
468 459
469 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 counter, next, pos->u.mac.mac);
471 } 460 }
472 } 461 }
473 return counter * ETH_ALEN; 462 return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
487 476
488 /* Check if a requested MAC already exists */ 477 /* Check if a requested MAC already exists */
489 list_for_each_entry(pos, &o->head, link) 478 list_for_each_entry(pos, &o->head, link)
490 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 479 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
480 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
491 return -EEXIST; 481 return -EEXIST;
492 482
493 return 0; 483 return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
520 list_for_each_entry(pos, &o->head, link) 510 list_for_each_entry(pos, &o->head, link)
521 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 511 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 512 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523 ETH_ALEN))) 513 ETH_ALEN)) &&
514 (data->vlan_mac.is_inner_mac ==
515 pos->u.vlan_mac.is_inner_mac))
524 return -EEXIST; 516 return -EEXIST;
525 517
526 return 0; 518 return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
538 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); 530 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539 531
540 list_for_each_entry(pos, &o->head, link) 532 list_for_each_entry(pos, &o->head, link)
541 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 533 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
534 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
542 return pos; 535 return pos;
543 536
544 return NULL; 537 return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
573 list_for_each_entry(pos, &o->head, link) 566 list_for_each_entry(pos, &o->head, link)
574 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 567 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 568 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576 ETH_ALEN))) 569 ETH_ALEN)) &&
570 (data->vlan_mac.is_inner_mac ==
571 pos->u.vlan_mac.is_inner_mac))
577 return pos; 572 return pos;
578 573
579 return NULL; 574 return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
770 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 765 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
771 &rule_entry->mac.mac_mid, 766 &rule_entry->mac.mac_mid,
772 &rule_entry->mac.mac_lsb, mac); 767 &rule_entry->mac.mac_lsb, mac);
768 rule_entry->mac.inner_mac =
769 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
773 770
774 /* MOVE: Add a rule that will add this MAC to the target Queue */ 771 /* MOVE: Add a rule that will add this MAC to the target Queue */
775 if (cmd == BNX2X_VLAN_MAC_MOVE) { 772 if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
786 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 783 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
787 &rule_entry->mac.mac_mid, 784 &rule_entry->mac.mac_mid,
788 &rule_entry->mac.mac_lsb, mac); 785 &rule_entry->mac.mac_lsb, mac);
786 rule_entry->mac.inner_mac =
787 cpu_to_le16(elem->cmd_data.vlan_mac.
788 u.mac.is_inner_mac);
789 } 789 }
790 790
791 /* Set the ramrod data header */ 791 /* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 &rule_entry->pair.mac_mid, 975 &rule_entry->pair.mac_mid,
976 &rule_entry->pair.mac_lsb, mac); 976 &rule_entry->pair.mac_lsb, mac);
977 977 rule_entry->pair.inner_mac =
978 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
978 /* MOVE: Add a rule that will add this MAC to the target Queue */ 979 /* MOVE: Add a rule that will add this MAC to the target Queue */
979 if (cmd == BNX2X_VLAN_MAC_MOVE) { 980 if (cmd == BNX2X_VLAN_MAC_MOVE) {
980 rule_entry++; 981 rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
991 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 992 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
992 &rule_entry->pair.mac_mid, 993 &rule_entry->pair.mac_mid,
993 &rule_entry->pair.mac_lsb, mac); 994 &rule_entry->pair.mac_lsb, mac);
995 rule_entry->pair.inner_mac =
996 cpu_to_le16(elem->cmd_data.vlan_mac.u.
997 vlan_mac.is_inner_mac);
994 } 998 }
995 999
996 /* Set the ramrod data header */ 1000 /* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1854 return rc; 1858 return rc;
1855 } 1859 }
1856 list_del(&exeq_pos->link); 1860 list_del(&exeq_pos->link);
1861 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1857 } 1862 }
1858 } 1863 }
1859 1864
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
2012 vlan_obj->check_move = bnx2x_check_move; 2017 vlan_obj->check_move = bnx2x_check_move;
2013 vlan_obj->ramrod_cmd = 2018 vlan_obj->ramrod_cmd =
2014 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2019 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2020 vlan_obj->get_n_elements = bnx2x_get_n_elements;
2015 2021
2016 /* Exe Queue */ 2022 /* Exe Queue */
2017 bnx2x_exe_queue_init(bp, 2023 bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4426 tx_data->force_default_pri_flg = 4432 tx_data->force_default_pri_flg =
4427 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); 4433 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4428 4434
4435 tx_data->tunnel_lso_inc_ip_id =
4436 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4437 tx_data->tunnel_non_lso_pcsum_location =
4438 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4439 PCSUM_ON_BD;
4440
4429 tx_data->tx_status_block_id = params->fw_sb_id; 4441 tx_data->tx_status_block_id = params->fw_sb_id;
4430 tx_data->tx_sb_index_number = params->sb_cq_index; 4442 tx_data->tx_sb_index_number = params->sb_cq_index;
4431 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4443 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5669 memset(rdata, 0, sizeof(*rdata)); 5681 memset(rdata, 0, sizeof(*rdata));
5670 5682
5671 /* Fill the ramrod data with provided parameters */ 5683 /* Fill the ramrod data with provided parameters */
5672 rdata->function_mode = (u8)start_params->mf_mode; 5684 rdata->function_mode = (u8)start_params->mf_mode;
5673 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5685 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5674 rdata->path_id = BP_PATH(bp); 5686 rdata->path_id = BP_PATH(bp);
5675 rdata->network_cos_mode = start_params->network_cos_mode; 5687 rdata->network_cos_mode = start_params->network_cos_mode;
5676 5688 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5677 /* 5689 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5678 * No need for an explicit memory barrier here as long we would 5690
5679 * need to ensure the ordering of writing to the SPQ element 5691 /* No need for an explicit memory barrier here as long we would
5680 * and updating of the SPQ producer which involves a memory 5692 * need to ensure the ordering of writing to the SPQ element
5681 * read and we will have to put a full memory barrier there 5693 * and updating of the SPQ producer which involves a memory
5682 * (inside bnx2x_sp_post()). 5694 * read and we will have to put a full memory barrier there
5695 * (inside bnx2x_sp_post()).
5683 */ 5696 */
5684 5697
5685 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 5698 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9fc..43c00bc84a08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
100/************************* VLAN-MAC commands related parameters ***************/ 100/************************* VLAN-MAC commands related parameters ***************/
101struct bnx2x_mac_ramrod_data { 101struct bnx2x_mac_ramrod_data {
102 u8 mac[ETH_ALEN]; 102 u8 mac[ETH_ALEN];
103 u8 is_inner_mac;
103}; 104};
104 105
105struct bnx2x_vlan_ramrod_data { 106struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
108 109
109struct bnx2x_vlan_mac_ramrod_data { 110struct bnx2x_vlan_mac_ramrod_data {
110 u8 mac[ETH_ALEN]; 111 u8 mac[ETH_ALEN];
112 u8 is_inner_mac;
111 u16 vlan; 113 u16 vlan;
112}; 114};
113 115
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
313 * 315 *
314 * @return number of copied bytes 316 * @return number of copied bytes
315 */ 317 */
316 int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 318 int (*get_n_elements)(struct bnx2x *bp,
317 int n, u8 *buf); 319 struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
320 u8 stride, u8 size);
318 321
319 /** 322 /**
320 * Checks if ADD-ramrod with the given params may be performed. 323 * Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
824 BNX2X_Q_FLG_TX_SEC, 827 BNX2X_Q_FLG_TX_SEC,
825 BNX2X_Q_FLG_ANTI_SPOOF, 828 BNX2X_Q_FLG_ANTI_SPOOF,
826 BNX2X_Q_FLG_SILENT_VLAN_REM, 829 BNX2X_Q_FLG_SILENT_VLAN_REM,
827 BNX2X_Q_FLG_FORCE_DEFAULT_PRI 830 BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
831 BNX2X_Q_FLG_PCSUM_ON_PKT,
832 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
828}; 833};
829 834
830/* Queue type options: queue type may be a compination of below. */ 835/* Queue type options: queue type may be a compination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
842#define BNX2X_MULTI_TX_COS_E3B0 3 847#define BNX2X_MULTI_TX_COS_E3B0 3
843#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ 848#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
844 849
850#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
845 851
846struct bnx2x_queue_init_params { 852struct bnx2x_queue_init_params {
847 struct { 853 struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
1118 1124
1119 /* Function cos mode */ 1125 /* Function cos mode */
1120 u8 network_cos_mode; 1126 u8 network_cos_mode;
1127
1128 /* NVGRE classification enablement */
1129 u8 nvgre_clss_en;
1130
1131 /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
1132 u8 gre_tunnel_mode;
1133
1134 /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
1135 u8 gre_tunnel_rss;
1121}; 1136};
1122 1137
1123struct bnx2x_func_switch_update_params { 1138struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa2093581..2ce7c7471367 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
20#include "bnx2x.h" 20#include "bnx2x.h"
21#include "bnx2x_init.h" 21#include "bnx2x_init.h"
22#include "bnx2x_cmn.h" 22#include "bnx2x_cmn.h"
23#include "bnx2x_sp.h"
23#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/if_vlan.h>
24 26
25/* General service functions */ 27/* General service functions */
26static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 28static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
555 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 557 rc = bnx2x_config_vlan_mac(bp, vlan_mac);
556 if (rc >= 0) { 558 if (rc >= 0) {
557 cnt += pos->add ? 1 : -1; 559 cnt += pos->add ? 1 : -1;
558 list_del(&pos->link); 560 list_move(&pos->link, &rollback_list);
559 list_add(&pos->link, &rollback_list);
560 rc = 0; 561 rc = 0;
561 } else if (rc == -EEXIST) { 562 } else if (rc == -EEXIST) {
562 rc = 0; 563 rc = 0;
@@ -958,6 +959,12 @@ op_err:
958 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 959 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
959op_done: 960op_done:
960 case BNX2X_VFOP_QSETUP_DONE: 961 case BNX2X_VFOP_QSETUP_DONE:
962 vf->cfg_flags |= VF_CFG_VLAN;
963 smp_mb__before_clear_bit();
964 set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
965 &bp->sp_rtnl_state);
966 smp_mb__after_clear_bit();
967 schedule_delayed_work(&bp->sp_rtnl_task, 0);
961 bnx2x_vfop_end(bp, vf, vfop); 968 bnx2x_vfop_end(bp, vf, vfop);
962 return; 969 return;
963 default: 970 default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1459 return bnx2x_is_pcie_pending(dev); 1466 return bnx2x_is_pcie_pending(dev);
1460 1467
1461unknown_dev: 1468unknown_dev:
1462 BNX2X_ERR("Unknown device\n");
1463 return false; 1469 return false;
1464} 1470}
1465 1471
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1926 1932
1927 /* SRIOV can be enabled only with MSIX */ 1933 /* SRIOV can be enabled only with MSIX */
1928 if (int_mode_param == BNX2X_INT_MODE_MSI || 1934 if (int_mode_param == BNX2X_INT_MODE_MSI ||
1929 int_mode_param == BNX2X_INT_MODE_INTX) 1935 int_mode_param == BNX2X_INT_MODE_INTX) {
1930 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1936 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1937 return 0;
1938 }
1931 1939
1932 err = -EIO; 1940 err = -EIO;
1933 /* verify ari is enabled */ 1941 /* verify ari is enabled */
1934 if (!bnx2x_ari_enabled(bp->pdev)) { 1942 if (!bnx2x_ari_enabled(bp->pdev)) {
1935 BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); 1943 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1936 return err; 1944 return 0;
1937 } 1945 }
1938 1946
1939 /* verify igu is in normal mode */ 1947 /* verify igu is in normal mode */
1940 if (CHIP_INT_MODE_IS_BC(bp)) { 1948 if (CHIP_INT_MODE_IS_BC(bp)) {
1941 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1949 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
1942 return err; 1950 return 0;
1943 } 1951 }
1944 1952
1945 /* allocate the vfs database */ 1953 /* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1964 if (iov->total == 0) 1972 if (iov->total == 0)
1965 goto failed; 1973 goto failed;
1966 1974
1967 /* calculate the actual number of VFs */ 1975 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1968 iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param); 1976
1977 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1978 num_vfs_param, iov->nr_virtfn);
1969 1979
1970 /* allocate the vf array */ 1980 /* allocate the vf array */
1971 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1981 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2378 goto get_vf; 2388 goto get_vf;
2379 case EVENT_RING_OPCODE_MALICIOUS_VF: 2389 case EVENT_RING_OPCODE_MALICIOUS_VF:
2380 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2390 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2381 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", 2391 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2382 abs_vfid); 2392 abs_vfid, elem->message.data.malicious_vf_event.err_id);
2383 goto get_vf; 2393 goto get_vf;
2384 default: 2394 default:
2385 return 1; 2395 return 1;
@@ -2436,8 +2446,8 @@ get_vf:
2436 /* Do nothing for now */ 2446 /* Do nothing for now */
2437 break; 2447 break;
2438 case EVENT_RING_OPCODE_MALICIOUS_VF: 2448 case EVENT_RING_OPCODE_MALICIOUS_VF:
2439 DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", 2449 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
2440 vf->abs_vfid); 2450 abs_vfid, elem->message.data.malicious_vf_event.err_id);
2441 /* Do nothing for now */ 2451 /* Do nothing for now */
2442 break; 2452 break;
2443 } 2453 }
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3012 vf->op_current = CHANNEL_TLV_NONE; 3022 vf->op_current = CHANNEL_TLV_NONE;
3013} 3023}
3014 3024
3015void bnx2x_enable_sriov(struct bnx2x *bp) 3025int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3016{ 3026{
3017 int rc = 0;
3018 3027
3019 /* disbale sriov in case it is still enabled */ 3028 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3029
3030 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3031 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3032
3033 /* HW channel is only operational when PF is up */
3034 if (bp->state != BNX2X_STATE_OPEN) {
3035 BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
3036 return -EINVAL;
3037 }
3038
3039 /* we are always bound by the total_vfs in the configuration space */
3040 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3041 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3042 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3043 num_vfs_param = BNX2X_NR_VIRTFN(bp);
3044 }
3045
3046 bp->requested_nr_virtfn = num_vfs_param;
3047 if (num_vfs_param == 0) {
3048 pci_disable_sriov(dev);
3049 return 0;
3050 } else {
3051 return bnx2x_enable_sriov(bp);
3052 }
3053}
3054
3055int bnx2x_enable_sriov(struct bnx2x *bp)
3056{
3057 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3058
3059 rc = pci_enable_sriov(bp->pdev, req_vfs);
3060 if (rc) {
3061 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3062 return rc;
3063 }
3064 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3065 return req_vfs;
3066}
3067
3068void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3069{
3070 int vfidx;
3071 struct pf_vf_bulletin_content *bulletin;
3072
3073 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3074 for_each_vf(bp, vfidx) {
3075 bulletin = BP_VF_BULLETIN(bp, vfidx);
3076 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3077 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3078 }
3079}
3080
3081void bnx2x_disable_sriov(struct bnx2x *bp)
3082{
3020 pci_disable_sriov(bp->pdev); 3083 pci_disable_sriov(bp->pdev);
3021 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 3084}
3085
3086static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3087 struct bnx2x_virtf *vf)
3088{
3089 if (!IS_SRIOV(bp)) {
3090 BNX2X_ERR("vf ndo called though sriov is disabled\n");
3091 return -EINVAL;
3092 }
3093
3094 if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3095 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3096 vfidx, BNX2X_NR_VIRTFN(bp));
3097 return -EINVAL;
3098 }
3099
3100 if (!vf) {
3101 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3102 vfidx);
3103 return -EINVAL;
3104 }
3022 3105
3023 /* enable sriov */ 3106 return 0;
3024 DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn)); 3107}
3025 rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn)); 3108
3109int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3110 struct ifla_vf_info *ivi)
3111{
3112 struct bnx2x *bp = netdev_priv(dev);
3113 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3114 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3115 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3116 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3117 int rc;
3118
3119 /* sanity */
3120 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3026 if (rc) 3121 if (rc)
3027 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3122 return rc;
3028 else 3123 if (!mac_obj || !vlan_obj || !bulletin) {
3029 DP(BNX2X_MSG_IOV, "sriov enabled\n"); 3124 BNX2X_ERR("VF partially initialized\n");
3125 return -EINVAL;
3126 }
3127
3128 ivi->vf = vfidx;
3129 ivi->qos = 0;
3130 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3131 ivi->spoofchk = 1; /*always enabled */
3132 if (vf->state == VF_ENABLED) {
3133 /* mac and vlan are in vlan_mac objects */
3134 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3135 0, ETH_ALEN);
3136 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
3137 0, VLAN_HLEN);
3138 } else {
3139 /* mac */
3140 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3141 /* mac configured by ndo so its in bulletin board */
3142 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3143 else
3144 /* funtion has not been loaded yet. Show mac as 0s */
3145 memset(&ivi->mac, 0, ETH_ALEN);
3146
3147 /* vlan */
3148 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3149 /* vlan configured by ndo so its in bulletin board */
3150 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3151 else
3152 /* funtion has not been loaded yet. Show vlans as 0s */
3153 memset(&ivi->vlan, 0, VLAN_HLEN);
3154 }
3155
3156 return 0;
3030} 3157}
3031 3158
3032/* New mac for VF. Consider these cases: 3159/* New mac for VF. Consider these cases:
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
3044 * VF to configure any mac for itself except for this mac. In case of a race 3171 * VF to configure any mac for itself except for this mac. In case of a race
3045 * where the VF fails to see the new post on its bulletin board before sending a 3172 * where the VF fails to see the new post on its bulletin board before sending a
3046 * mac configuration request, the PF will simply fail the request and VF can try 3173 * mac configuration request, the PF will simply fail the request and VF can try
3047 * again after consulting its bulletin board 3174 * again after consulting its bulletin board.
3048 */ 3175 */
3049int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 3176int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3050{ 3177{
3051 struct bnx2x *bp = netdev_priv(dev); 3178 struct bnx2x *bp = netdev_priv(dev);
3052 int rc, q_logical_state, vfidx = queue; 3179 int rc, q_logical_state;
3053 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3180 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3054 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3181 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3055 3182
3056 /* if SRIOV is disabled there is nothing to do (and somewhere, someone 3183 /* sanity */
3057 * has erred). 3184 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3058 */ 3185 if (rc)
3059 if (!IS_SRIOV(bp)) { 3186 return rc;
3060 BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
3061 return -EINVAL;
3062 }
3063
3064 if (!is_valid_ether_addr(mac)) { 3187 if (!is_valid_ether_addr(mac)) {
3065 BNX2X_ERR("mac address invalid\n"); 3188 BNX2X_ERR("mac address invalid\n");
3066 return -EINVAL; 3189 return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3085 if (vf->state == VF_ENABLED && 3208 if (vf->state == VF_ENABLED &&
3086 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3209 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3087 /* configure the mac in device on this vf's queue */ 3210 /* configure the mac in device on this vf's queue */
3088 unsigned long flags = 0; 3211 unsigned long ramrod_flags = 0;
3089 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3212 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3090 3213
3091 /* must lock vfpf channel to protect against vf flows */ 3214 /* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3106 } 3229 }
3107 3230
3108 /* configure the new mac to device */ 3231 /* configure the new mac to device */
3109 __set_bit(RAMROD_COMP_WAIT, &flags); 3232 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3110 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3233 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3111 BNX2X_ETH_MAC, &flags); 3234 BNX2X_ETH_MAC, &ramrod_flags);
3112 3235
3113 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3236 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3114 } 3237 }
3115 3238
3116 return rc; 3239 return 0;
3240}
3241
3242int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3243{
3244 struct bnx2x *bp = netdev_priv(dev);
3245 int rc, q_logical_state;
3246 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3247 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3248
3249 /* sanity */
3250 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3251 if (rc)
3252 return rc;
3253
3254 if (vlan > 4095) {
3255 BNX2X_ERR("illegal vlan value %d\n", vlan);
3256 return -EINVAL;
3257 }
3258
3259 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3260 vfidx, vlan, 0);
3261
3262 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3263 * to the VF since it doesn't have anything to do with it. But it useful
3264 * to store it here in case the VF is not up yet and we can only
3265 * configure the vlan later when it does.
3266 */
3267 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3268 bulletin->vlan = vlan;
3269
3270 /* is vf initialized and queue set up? */
3271 q_logical_state =
3272 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3273 if (vf->state == VF_ENABLED &&
3274 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3275 /* configure the vlan in device on this vf's queue */
3276 unsigned long ramrod_flags = 0;
3277 unsigned long vlan_mac_flags = 0;
3278 struct bnx2x_vlan_mac_obj *vlan_obj =
3279 &bnx2x_vfq(vf, 0, vlan_obj);
3280 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3281 struct bnx2x_queue_state_params q_params = {NULL};
3282 struct bnx2x_queue_update_params *update_params;
3283
3284 memset(&ramrod_param, 0, sizeof(ramrod_param));
3285
3286 /* must lock vfpf channel to protect against vf flows */
3287 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3288
3289 /* remove existing vlans */
3290 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3291 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3292 &ramrod_flags);
3293 if (rc) {
3294 BNX2X_ERR("failed to delete vlans\n");
3295 return -EINVAL;
3296 }
3297
3298 /* send queue update ramrod to configure default vlan and silent
3299 * vlan removal
3300 */
3301 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3302 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3303 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
3304 update_params = &q_params.params.update;
3305 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3306 &update_params->update_flags);
3307 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3308 &update_params->update_flags);
3309
3310 if (vlan == 0) {
3311 /* if vlan is 0 then we want to leave the VF traffic
3312 * untagged, and leave the incoming traffic untouched
3313 * (i.e. do not remove any vlan tags).
3314 */
3315 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3316 &update_params->update_flags);
3317 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3318 &update_params->update_flags);
3319 } else {
3320 /* configure the new vlan to device */
3321 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3322 ramrod_param.vlan_mac_obj = vlan_obj;
3323 ramrod_param.ramrod_flags = ramrod_flags;
3324 ramrod_param.user_req.u.vlan.vlan = vlan;
3325 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3326 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3327 if (rc) {
3328 BNX2X_ERR("failed to configure vlan\n");
3329 return -EINVAL;
3330 }
3331
3332 /* configure default vlan to vf queue and set silent
3333 * vlan removal (the vf remains unaware of this vlan).
3334 */
3335 update_params = &q_params.params.update;
3336 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3337 &update_params->update_flags);
3338 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3339 &update_params->update_flags);
3340 update_params->def_vlan = vlan;
3341 }
3342
3343 /* Update the Queue state */
3344 rc = bnx2x_queue_state_change(bp, &q_params);
3345 if (rc) {
3346 BNX2X_ERR("Failed to configure default VLAN\n");
3347 return rc;
3348 }
3349
3350 /* clear the flag indicating that this VF needs its vlan
3351 * (will only be set if the HV configured th Vlan before vf was
3352 * and we were called because the VF came up later
3353 */
3354 vf->cfg_flags &= ~VF_CFG_VLAN;
3355
3356 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3357 }
3358 return 0;
3117} 3359}
3118 3360
3119/* crc is the first field in the bulletin board. compute the crc over the 3361/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3165 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3407 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3166 } 3408 }
3167 3409
3410 /* the vlan in bulletin board is valid and is new */
3411 if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3412 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3413
3168 /* copy new bulletin board to bp */ 3414 /* copy new bulletin board to bp */
3169 bp->old_bulletin = bulletin; 3415 bp->old_bulletin = bulletin;
3170 3416
3171 return PFVF_BULLETIN_UPDATED; 3417 return PFVF_BULLETIN_UPDATED;
3172} 3418}
3173 3419
3174void bnx2x_vf_map_doorbells(struct bnx2x *bp) 3420void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3175{ 3421{
3176 /* vf doorbells are embedded within the regview */ 3422 /* vf doorbells are embedded within the regview */
3177 bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START; 3423 return bp->regview + PXP_VF_ADDR_DB_START;
3178} 3424}
3179 3425
3180int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3426int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3181{ 3427{
3428 mutex_init(&bp->vf2pf_mutex);
3429
3182 /* allocate vf2pf mailbox for vf to pf channel */ 3430 /* allocate vf2pf mailbox for vf to pf channel */
3183 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3431 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3184 sizeof(struct bnx2x_vf_mbx_msg)); 3432 sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
3196 sizeof(union pf_vf_bulletin)); 3444 sizeof(union pf_vf_bulletin));
3197 return -ENOMEM; 3445 return -ENOMEM;
3198} 3446}
3447
3448int bnx2x_open_epilog(struct bnx2x *bp)
3449{
3450 /* Enable sriov via delayed work. This must be done via delayed work
3451 * because it causes the probe of the vf devices to be run, which invoke
3452 * register_netdevice which must have rtnl lock taken. As we are holding
3453 * the lock right now, that could only work if the probe would not take
3454 * the lock. However, as the probe of the vf may be called from other
3455 * contexts as well (such as passthrough to vm failes) it can't assume
3456 * the lock is being held for it. Using delayed work here allows the
3457 * probe code to simply take the lock (i.e. wait for it to be released
3458 * if it is being held). We only want to do this if the number of VFs
3459 * was set before PF driver was loaded.
3460 */
3461 if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
3462 smp_mb__before_clear_bit();
3463 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
3464 smp_mb__after_clear_bit();
3465 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3466 }
3467
3468 return 0;
3469}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add9..d67ddc554c0f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
193#define VF_CFG_TPA 0x0004 193#define VF_CFG_TPA 0x0004
194#define VF_CFG_INT_SIMD 0x0008 194#define VF_CFG_INT_SIMD 0x0008
195#define VF_CACHE_LINE 0x0010 195#define VF_CACHE_LINE 0x0010
196#define VF_CFG_VLAN 0x0020
196 197
197 u8 state; 198 u8 state;
198#define VF_FREE 0 /* VF ready to be acquired holds no resc */ 199#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
712 u16 length); 713 u16 length);
713void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, 714void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
714 u16 type, u16 length); 715 u16 type, u16 length);
716void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
715void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); 717void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
716 718
717bool bnx2x_tlv_supported(u16 tlvtype); 719bool bnx2x_tlv_supported(u16 tlvtype);
@@ -731,7 +733,7 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
731void bnx2x_vfpf_close_vf(struct bnx2x *bp); 733void bnx2x_vfpf_close_vf(struct bnx2x *bp);
732int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); 734int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
733int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); 735int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
734int bnx2x_vfpf_set_mac(struct bnx2x *bp); 736int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
735int bnx2x_vfpf_set_mcast(struct net_device *dev); 737int bnx2x_vfpf_set_mcast(struct net_device *dev);
736int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 738int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
737 739
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
750} 752}
751 753
752enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 754enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
753void bnx2x_vf_map_doorbells(struct bnx2x *bp); 755void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
754int bnx2x_vf_pci_alloc(struct bnx2x *bp); 756int bnx2x_vf_pci_alloc(struct bnx2x *bp);
755void bnx2x_enable_sriov(struct bnx2x *bp); 757int bnx2x_enable_sriov(struct bnx2x *bp);
758void bnx2x_disable_sriov(struct bnx2x *bp);
756static inline int bnx2x_vf_headroom(struct bnx2x *bp) 759static inline int bnx2x_vf_headroom(struct bnx2x *bp)
757{ 760{
758 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; 761 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
759} 762}
763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
765int bnx2x_open_epilog(struct bnx2x *bp);
760 766
761#else /* CONFIG_BNX2X_SRIOV */ 767#else /* CONFIG_BNX2X_SRIOV */
762 768
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
779static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 785static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
780 int num_vfs_param) {return 0; } 786 int num_vfs_param) {return 0; }
781static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} 787static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
782static inline void bnx2x_enable_sriov(struct bnx2x *bp) {} 788static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
789static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
783static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, 790static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
784 u8 tx_count, u8 rx_count) {return 0; } 791 u8 tx_count, u8 rx_count) {return 0; }
785static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } 792static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -787,7 +794,8 @@ static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
787static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} 794static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
788static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } 795static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
789static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } 796static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
790static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; } 797static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
798 u8 vf_qid, bool set) {return 0; }
791static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } 799static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
792static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } 800static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
793static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } 801static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
@@ -802,8 +810,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
802 return PFVF_BULLETIN_UNCHANGED; 810 return PFVF_BULLETIN_UNCHANGED;
803} 811}
804 812
805static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; } 813static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
814{
815 return NULL;
816}
817
806static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 818static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
819static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
820static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
821static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
807 822
808#endif /* CONFIG_BNX2X_SRIOV */ 823#endif /* CONFIG_BNX2X_SRIOV */
809#endif /* bnx2x_sriov.h */ 824#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2e..2ca3d94fcec2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1547 } 1547 }
1548} 1548}
1549 1549
1550void bnx2x_memset_stats(struct bnx2x *bp)
1551{
1552 int i;
1553
1554 /* function stats */
1555 for_each_queue(bp, i) {
1556 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1557
1558 memset(&fp_stats->old_tclient, 0,
1559 sizeof(fp_stats->old_tclient));
1560 memset(&fp_stats->old_uclient, 0,
1561 sizeof(fp_stats->old_uclient));
1562 memset(&fp_stats->old_xclient, 0,
1563 sizeof(fp_stats->old_xclient));
1564 if (bp->stats_init) {
1565 memset(&fp_stats->eth_q_stats, 0,
1566 sizeof(fp_stats->eth_q_stats));
1567 memset(&fp_stats->eth_q_stats_old, 0,
1568 sizeof(fp_stats->eth_q_stats_old));
1569 }
1570 }
1571
1572 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1573
1574 if (bp->stats_init) {
1575 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1576 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1577 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1578 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1579 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1580 }
1581
1582 bp->stats_state = STATS_STATE_DISABLED;
1583
1584 if (bp->port.pmf && bp->port.port_stx)
1585 bnx2x_port_stats_base_init(bp);
1586
1587 /* mark the end of statistics initializiation */
1588 bp->stats_init = false;
1589}
1590
1550void bnx2x_stats_init(struct bnx2x *bp) 1591void bnx2x_stats_init(struct bnx2x *bp)
1551{ 1592{
1552 int /*abs*/port = BP_PORT(bp); 1593 int /*abs*/port = BP_PORT(bp);
1553 int mb_idx = BP_FW_MB_IDX(bp); 1594 int mb_idx = BP_FW_MB_IDX(bp);
1554 int i;
1555 1595
1556 bp->stats_pending = 0; 1596 bp->stats_pending = 0;
1557 bp->executer_idx = 0; 1597 bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
1587 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); 1627 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1588 } 1628 }
1589 1629
1590 /* function stats */
1591 for_each_queue(bp, i) {
1592 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1593
1594 memset(&fp_stats->old_tclient, 0,
1595 sizeof(fp_stats->old_tclient));
1596 memset(&fp_stats->old_uclient, 0,
1597 sizeof(fp_stats->old_uclient));
1598 memset(&fp_stats->old_xclient, 0,
1599 sizeof(fp_stats->old_xclient));
1600 if (bp->stats_init) {
1601 memset(&fp_stats->eth_q_stats, 0,
1602 sizeof(fp_stats->eth_q_stats));
1603 memset(&fp_stats->eth_q_stats_old, 0,
1604 sizeof(fp_stats->eth_q_stats_old));
1605 }
1606 }
1607
1608 /* Prepare statistics ramrod data */ 1630 /* Prepare statistics ramrod data */
1609 bnx2x_prep_fw_stats_req(bp); 1631 bnx2x_prep_fw_stats_req(bp);
1610 1632
1611 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); 1633 /* Clean SP from previous statistics */
1612 if (bp->stats_init) { 1634 if (bp->stats_init) {
1613 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1614 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1615 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1616 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1617 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1618
1619 /* Clean SP from previous statistics */
1620 if (bp->func_stx) { 1635 if (bp->func_stx) {
1621 memset(bnx2x_sp(bp, func_stats), 0, 1636 memset(bnx2x_sp(bp, func_stats), 0,
1622 sizeof(struct host_func_stats)); 1637 sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
1626 } 1641 }
1627 } 1642 }
1628 1643
1629 bp->stats_state = STATS_STATE_DISABLED; 1644 bnx2x_memset_stats(bp);
1630
1631 if (bp->port.pmf && bp->port.port_stx)
1632 bnx2x_port_stats_base_init(bp);
1633
1634 /* mark the end of statistics initializiation */
1635 bp->stats_init = false;
1636} 1645}
1637 1646
1638void bnx2x_save_statistics(struct bnx2x *bp) 1647void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad5..d117f472816c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
540/* forward */ 540/* forward */
541struct bnx2x; 541struct bnx2x;
542 542
543void bnx2x_memset_stats(struct bnx2x *bp);
543void bnx2x_stats_init(struct bnx2x *bp); 544void bnx2x_stats_init(struct bnx2x *bp);
544
545void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 545void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
546 546
547/** 547/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d60..928b074d7d80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
36void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, 36void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
37 u16 type, u16 length) 37 u16 type, u16 length)
38{ 38{
39 mutex_lock(&bp->vf2pf_mutex);
40
39 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", 41 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
40 type); 42 type);
41 43
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
49 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); 51 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
50} 52}
51 53
54/* releases the mailbox */
55void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
56{
57 DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
58 first_tlv->tl.type);
59
60 mutex_unlock(&bp->vf2pf_mutex);
61}
62
52/* list the types and lengths of the tlvs on the buffer */ 63/* list the types and lengths of the tlvs on the buffer */
53void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) 64void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
54{ 65{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
181 /* clear mailbox and prep first tlv */ 192 /* clear mailbox and prep first tlv */
182 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); 193 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
183 194
184 if (bnx2x_get_vf_id(bp, &vf_id)) 195 if (bnx2x_get_vf_id(bp, &vf_id)) {
185 return -EAGAIN; 196 rc = -EAGAIN;
197 goto out;
198 }
186 199
187 req->vfdev_info.vf_id = vf_id; 200 req->vfdev_info.vf_id = vf_id;
188 req->vfdev_info.vf_os = 0; 201 req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
213 226
214 /* PF timeout */ 227 /* PF timeout */
215 if (rc) 228 if (rc)
216 return rc; 229 goto out;
217 230
218 /* copy acquire response from buffer to bp */ 231 /* copy acquire response from buffer to bp */
219 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); 232 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
253 /* PF reports error */ 266 /* PF reports error */
254 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", 267 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
255 bp->acquire_resp.hdr.status); 268 bp->acquire_resp.hdr.status);
256 return -EAGAIN; 269 rc = -EAGAIN;
270 goto out;
257 } 271 }
258 } 272 }
259 273
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
279 bp->acquire_resp.resc.current_mac_addr, 293 bp->acquire_resp.resc.current_mac_addr,
280 ETH_ALEN); 294 ETH_ALEN);
281 295
282 return 0; 296out:
297 bnx2x_vfpf_finalize(bp, &req->first_tlv);
298 return rc;
283} 299}
284 300
285int bnx2x_vfpf_release(struct bnx2x *bp) 301int bnx2x_vfpf_release(struct bnx2x *bp)
286{ 302{
287 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; 303 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
288 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 304 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
289 u32 rc = 0, vf_id; 305 u32 rc, vf_id;
290 306
291 /* clear mailbox and prep first tlv */ 307 /* clear mailbox and prep first tlv */
292 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); 308 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
293 309
294 if (bnx2x_get_vf_id(bp, &vf_id)) 310 if (bnx2x_get_vf_id(bp, &vf_id)) {
295 return -EAGAIN; 311 rc = -EAGAIN;
312 goto out;
313 }
296 314
297 req->vf_id = vf_id; 315 req->vf_id = vf_id;
298 316
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
308 326
309 if (rc) 327 if (rc)
310 /* PF timeout */ 328 /* PF timeout */
311 return rc; 329 goto out;
330
312 if (resp->hdr.status == PFVF_STATUS_SUCCESS) { 331 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
313 /* PF released us */ 332 /* PF released us */
314 DP(BNX2X_MSG_SP, "vf released\n"); 333 DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
316 /* PF reports error */ 335 /* PF reports error */
317 BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", 336 BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
318 resp->hdr.status); 337 resp->hdr.status);
319 return -EAGAIN; 338 rc = -EAGAIN;
339 goto out;
320 } 340 }
341out:
342 bnx2x_vfpf_finalize(bp, &req->first_tlv);
321 343
322 return 0; 344 return rc;
323} 345}
324 346
325/* Tell PF about SB addresses */ 347/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
350 372
351 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 373 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
352 if (rc) 374 if (rc)
353 return rc; 375 goto out;
354 376
355 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 377 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
356 BNX2X_ERR("INIT VF failed: %d. Breaking...\n", 378 BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
357 resp->hdr.status); 379 resp->hdr.status);
358 return -EAGAIN; 380 rc = -EAGAIN;
381 goto out;
359 } 382 }
360 383
361 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); 384 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
362 return 0; 385out:
386 bnx2x_vfpf_finalize(bp, &req->first_tlv);
387
388 return rc;
363} 389}
364 390
365/* CLOSE VF - opposite to INIT_VF */ 391/* CLOSE VF - opposite to INIT_VF */
@@ -380,6 +406,9 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
380 for_each_queue(bp, i) 406 for_each_queue(bp, i)
381 bnx2x_vfpf_teardown_queue(bp, i); 407 bnx2x_vfpf_teardown_queue(bp, i);
382 408
409 /* remove mac */
410 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
411
383 /* clear mailbox and prep first tlv */ 412 /* clear mailbox and prep first tlv */
384 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); 413 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
385 414
@@ -401,6 +430,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
401 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", 430 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
402 resp->hdr.status); 431 resp->hdr.status);
403 432
433 bnx2x_vfpf_finalize(bp, &req->first_tlv);
434
404free_irq: 435free_irq:
405 /* Disable HW interrupts, NAPI */ 436 /* Disable HW interrupts, NAPI */
406 bnx2x_netif_stop(bp, 0); 437 bnx2x_netif_stop(bp, 0);
@@ -435,7 +466,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
435 /* calculate queue flags */ 466 /* calculate queue flags */
436 flags |= VFPF_QUEUE_FLG_STATS; 467 flags |= VFPF_QUEUE_FLG_STATS;
437 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; 468 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
438 flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
439 flags |= VFPF_QUEUE_FLG_VLAN; 469 flags |= VFPF_QUEUE_FLG_VLAN;
440 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 470 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
441 471
@@ -486,8 +516,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
486 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 516 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
487 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", 517 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
488 fp_idx, resp->hdr.status); 518 fp_idx, resp->hdr.status);
489 return -EINVAL; 519 rc = -EINVAL;
490 } 520 }
521
522 bnx2x_vfpf_finalize(bp, &req->first_tlv);
523
491 return rc; 524 return rc;
492} 525}
493 526
@@ -515,41 +548,46 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
515 if (rc) { 548 if (rc) {
516 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, 549 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
517 rc); 550 rc);
518 return rc; 551 goto out;
519 } 552 }
520 553
521 /* PF failed the transaction */ 554 /* PF failed the transaction */
522 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 555 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
523 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, 556 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
524 resp->hdr.status); 557 resp->hdr.status);
525 return -EINVAL; 558 rc = -EINVAL;
526 } 559 }
527 560
528 return 0; 561out:
562 bnx2x_vfpf_finalize(bp, &req->first_tlv);
563 return rc;
529} 564}
530 565
531/* request pf to add a mac for the vf */ 566/* request pf to add a mac for the vf */
532int bnx2x_vfpf_set_mac(struct bnx2x *bp) 567int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
533{ 568{
534 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; 569 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
535 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 570 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
536 int rc; 571 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
572 int rc = 0;
537 573
538 /* clear mailbox and prep first tlv */ 574 /* clear mailbox and prep first tlv */
539 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, 575 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
540 sizeof(*req)); 576 sizeof(*req));
541 577
542 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; 578 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
543 req->vf_qid = 0; 579 req->vf_qid = vf_qid;
544 req->n_mac_vlan_filters = 1; 580 req->n_mac_vlan_filters = 1;
545 req->filters[0].flags = 581
546 VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC; 582 req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
583 if (set)
584 req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
547 585
548 /* sample bulletin board for new mac */ 586 /* sample bulletin board for new mac */
549 bnx2x_sample_bulletin(bp); 587 bnx2x_sample_bulletin(bp);
550 588
551 /* copy mac from device to request */ 589 /* copy mac from device to request */
552 memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN); 590 memcpy(req->filters[0].mac, addr, ETH_ALEN);
553 591
554 /* add list termination tlv */ 592 /* add list termination tlv */
555 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, 593 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
@@ -562,7 +600,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
562 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 600 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
563 if (rc) { 601 if (rc) {
564 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); 602 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
565 return rc; 603 goto out;
566 } 604 }
567 605
568 /* failure may mean PF was configured with a new mac for us */ 606 /* failure may mean PF was configured with a new mac for us */
@@ -570,6 +608,9 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
570 DP(BNX2X_MSG_IOV, 608 DP(BNX2X_MSG_IOV,
571 "vfpf SET MAC failed. Check bulletin board for new posts\n"); 609 "vfpf SET MAC failed. Check bulletin board for new posts\n");
572 610
611 /* copy mac from bulletin to device */
612 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
613
573 /* check if bulletin board was updated */ 614 /* check if bulletin board was updated */
574 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { 615 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
575 /* copy mac from device to request */ 616 /* copy mac from device to request */
@@ -587,8 +628,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
587 628
588 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 629 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
589 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); 630 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
590 return -EINVAL; 631 rc = -EINVAL;
591 } 632 }
633out:
634 bnx2x_vfpf_finalize(bp, &req->first_tlv);
592 635
593 return 0; 636 return 0;
594} 637}
@@ -643,14 +686,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
643 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 686 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
644 if (rc) { 687 if (rc) {
645 BNX2X_ERR("Sending a message failed: %d\n", rc); 688 BNX2X_ERR("Sending a message failed: %d\n", rc);
646 return rc; 689 goto out;
647 } 690 }
648 691
649 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 692 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
650 BNX2X_ERR("Set Rx mode/multicast failed: %d\n", 693 BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
651 resp->hdr.status); 694 resp->hdr.status);
652 return -EINVAL; 695 rc = -EINVAL;
653 } 696 }
697out:
698 bnx2x_vfpf_finalize(bp, &req->first_tlv);
654 699
655 return 0; 700 return 0;
656} 701}
@@ -689,7 +734,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
689 break; 734 break;
690 default: 735 default:
691 BNX2X_ERR("BAD rx mode (%d)\n", mode); 736 BNX2X_ERR("BAD rx mode (%d)\n", mode);
692 return -EINVAL; 737 rc = -EINVAL;
738 goto out;
693 } 739 }
694 740
695 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; 741 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +754,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
708 754
709 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 755 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
710 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); 756 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
711 return -EINVAL; 757 rc = -EINVAL;
712 } 758 }
759out:
760 bnx2x_vfpf_finalize(bp, &req->first_tlv);
713 761
714 return rc; 762 return rc;
715} 763}
@@ -1004,7 +1052,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1004} 1052}
1005 1053
1006/* convert MBX queue-flags to standard SP queue-flags */ 1054/* convert MBX queue-flags to standard SP queue-flags */
1007static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, 1055static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1008 unsigned long *sp_q_flags) 1056 unsigned long *sp_q_flags)
1009{ 1057{
1010 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) 1058 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1063,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
1015 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); 1063 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1016 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) 1064 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1017 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); 1065 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1018 if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
1019 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1020 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN) 1066 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1021 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); 1067 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1022 if (mbx_q_flags & VFPF_QUEUE_FLG_COS) 1068 if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1071,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
1025 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); 1071 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1026 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) 1072 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1027 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); 1073 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1074
1075 /* outer vlan removal is set according to the PF's multi fuction mode */
1076 if (IS_MF_SD(bp))
1077 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1028} 1078}
1029 1079
1030static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, 1080static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1125,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1075 init_p->tx.hc_rate = setup_q->txq.hc_rate; 1125 init_p->tx.hc_rate = setup_q->txq.hc_rate;
1076 init_p->tx.sb_cq_index = setup_q->txq.sb_index; 1126 init_p->tx.sb_cq_index = setup_q->txq.sb_index;
1077 1127
1078 bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, 1128 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1079 &init_p->tx.flags); 1129 &init_p->tx.flags);
1080 1130
1081 /* tx setup - flags */ 1131 /* tx setup - flags */
1082 bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, 1132 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1083 &setup_p->flags); 1133 &setup_p->flags);
1084 1134
1085 /* tx setup - general, nothing */ 1135 /* tx setup - general, nothing */
@@ -1107,11 +1157,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1107 /* rx init */ 1157 /* rx init */
1108 init_p->rx.hc_rate = setup_q->rxq.hc_rate; 1158 init_p->rx.hc_rate = setup_q->rxq.hc_rate;
1109 init_p->rx.sb_cq_index = setup_q->rxq.sb_index; 1159 init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
1110 bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, 1160 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1111 &init_p->rx.flags); 1161 &init_p->rx.flags);
1112 1162
1113 /* rx setup - flags */ 1163 /* rx setup - flags */
1114 bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, 1164 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1115 &setup_p->flags); 1165 &setup_p->flags);
1116 1166
1117 /* rx setup - general */ 1167 /* rx setup - general */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00d..41708faab575 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
328#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address 328#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
329 * is available for it 329 * is available for it
330 */ 330 */
331#define VLAN_VALID 1 /* when set, the vf should not access
332 * the vfpf channel
333 */
331 334
332 u8 mac[ETH_ALEN]; 335 u8 mac[ETH_ALEN];
333 u8 padding[2]; 336 u8 mac_padding[2];
337
338 u16 vlan;
339 u8 vlan_padding[6];
334}; 340};
335 341
336union pf_vf_bulletin { 342union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
353 CHANNEL_TLV_LIST_END, 359 CHANNEL_TLV_LIST_END,
354 CHANNEL_TLV_FLR, 360 CHANNEL_TLV_FLR,
355 CHANNEL_TLV_PF_SET_MAC, 361 CHANNEL_TLV_PF_SET_MAC,
362 CHANNEL_TLV_PF_SET_VLAN,
356 CHANNEL_TLV_MAX 363 CHANNEL_TLV_MAX
357}; 364};
358 365
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 149a3a038491..40649a8bf390 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5544,8 +5544,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5544 5544
5545 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5545 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5546 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5546 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5547 if (CNIC_SUPPORTS_FCOE(cp)) 5547 if (CNIC_SUPPORTS_FCOE(cp)) {
5548 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5548 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5549 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5550 }
5549 5551
5550 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5552 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5551 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; 5553 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0c9367a0f57d..ec9bb9ad4bb3 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -195,6 +195,7 @@ struct cnic_eth_dev {
195 u32 max_fcoe_conn; 195 u32 max_fcoe_conn;
196 u32 max_rdma_conn; 196 u32 max_rdma_conn;
197 u32 fcoe_init_cid; 197 u32 fcoe_init_cid;
198 u32 max_fcoe_exchanges;
198 u32 fcoe_wwn_port_name_hi; 199 u32 fcoe_wwn_port_name_hi;
199 u32 fcoe_wwn_port_name_lo; 200 u32 fcoe_wwn_port_name_lo;
200 u32 fcoe_wwn_node_name_hi; 201 u32 fcoe_wwn_node_name_hi;
@@ -313,6 +314,8 @@ struct cnic_dev {
313 int max_fcoe_conn; 314 int max_fcoe_conn;
314 int max_rdma_conn; 315 int max_rdma_conn;
315 316
317 int max_fcoe_exchanges;
318
316 union drv_info_to_mcp *stats_addr; 319 union drv_info_to_mcp *stats_addr;
317 struct fcoe_capabilities *fcoe_cap; 320 struct fcoe_capabilities *fcoe_cap;
318 321
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375cb..e80bfb60c3ef 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
831 sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + 831 sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
832 SMP_CACHE_BYTES * 2 + 832 SMP_CACHE_BYTES * 2 +
833 NET_IP_ALIGN); 833 NET_IP_ALIGN);
834 if (sb_new == NULL) { 834 if (sb_new == NULL)
835 pr_info("%s: sk_buff allocation failed\n",
836 d->sbdma_eth->sbm_dev->name);
837 return -ENOBUFS; 835 return -ENOBUFS;
838 }
839 836
840 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); 837 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
841 } 838 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba7..728d42ab2a76 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 130 97#define TG3_MIN_NUM 131
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "February 14, 2013" 100#define DRV_MODULE_RELDATE "April 09, 2013"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
212#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) 212#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 213
214#define FIRMWARE_TG3 "tigon/tg3.bin" 214#define FIRMWARE_TG3 "tigon/tg3.bin"
215#define FIRMWARE_TG357766 "tigon/tg357766.bin"
215#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" 216#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
216#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" 217#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
217 218
@@ -1873,6 +1874,20 @@ static void tg3_link_report(struct tg3 *tp)
1873 tp->link_up = netif_carrier_ok(tp->dev); 1874 tp->link_up = netif_carrier_ok(tp->dev);
1874} 1875}
1875 1876
1877static u32 tg3_decode_flowctrl_1000T(u32 adv)
1878{
1879 u32 flowctrl = 0;
1880
1881 if (adv & ADVERTISE_PAUSE_CAP) {
1882 flowctrl |= FLOW_CTRL_RX;
1883 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 flowctrl |= FLOW_CTRL_TX;
1885 } else if (adv & ADVERTISE_PAUSE_ASYM)
1886 flowctrl |= FLOW_CTRL_TX;
1887
1888 return flowctrl;
1889}
1890
1876static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1891static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1877{ 1892{
1878 u16 miireg; 1893 u16 miireg;
@@ -1889,6 +1904,20 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1889 return miireg; 1904 return miireg;
1890} 1905}
1891 1906
1907static u32 tg3_decode_flowctrl_1000X(u32 adv)
1908{
1909 u32 flowctrl = 0;
1910
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1917
1918 return flowctrl;
1919}
1920
1892static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1921static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1893{ 1922{
1894 u8 cap = 0; 1923 u8 cap = 0;
@@ -2199,7 +2228,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2199 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); 2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2200} 2229}
2201 2230
2202static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) 2231static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2203{ 2232{
2204 u32 phy; 2233 u32 phy;
2205 2234
@@ -2291,7 +2320,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
2291 tg3_phy_toggle_auxctl_smdsp(tp, false); 2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2292} 2321}
2293 2322
2294static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) 2323static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2295{ 2324{
2296 u32 val; 2325 u32 val;
2297 2326
@@ -2301,7 +2330,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2301 tp->setlpicnt = 0; 2330 tp->setlpicnt = 0;
2302 2331
2303 if (tp->link_config.autoneg == AUTONEG_ENABLE && 2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2304 current_link_up == 1 && 2333 current_link_up &&
2305 tp->link_config.active_duplex == DUPLEX_FULL && 2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2306 (tp->link_config.active_speed == SPEED_100 || 2335 (tp->link_config.active_speed == SPEED_100 ||
2307 tp->link_config.active_speed == SPEED_1000)) { 2336 tp->link_config.active_speed == SPEED_1000)) {
@@ -2323,7 +2352,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2323 } 2352 }
2324 2353
2325 if (!tp->setlpicnt) { 2354 if (!tp->setlpicnt) {
2326 if (current_link_up == 1 && 2355 if (current_link_up &&
2327 !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2328 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2329 tg3_phy_toggle_auxctl_smdsp(tp, false); 2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
@@ -2530,6 +2559,13 @@ static void tg3_carrier_off(struct tg3 *tp)
2530 tp->link_up = false; 2559 tp->link_up = false;
2531} 2560}
2532 2561
2562static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563{
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2567}
2568
2533/* This will reset the tigon3 PHY if there is no valid 2569/* This will reset the tigon3 PHY if there is no valid
2534 * link unless the FORCE argument is non-zero. 2570 * link unless the FORCE argument is non-zero.
2535 */ 2571 */
@@ -2669,7 +2705,7 @@ out:
2669 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) 2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2670 tg3_phydsp_write(tp, 0xffb, 0x4000); 2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2671 2707
2672 tg3_phy_toggle_automdix(tp, 1); 2708 tg3_phy_toggle_automdix(tp, true);
2673 tg3_phy_set_wirespeed(tp); 2709 tg3_phy_set_wirespeed(tp);
2674 return 0; 2710 return 0;
2675} 2711}
@@ -2925,6 +2961,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2925{ 2961{
2926 u32 val; 2962 u32 val;
2927 2963
2964 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2965 return;
2966
2928 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 2967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2929 if (tg3_asic_rev(tp) == ASIC_REV_5704) { 2968 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2930 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); 2969 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
@@ -3448,11 +3487,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3448#define TX_CPU_SCRATCH_SIZE 0x04000 3487#define TX_CPU_SCRATCH_SIZE 0x04000
3449 3488
3450/* tp->lock is held. */ 3489/* tp->lock is held. */
3451static int tg3_halt_cpu(struct tg3 *tp, u32 offset) 3490static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3452{ 3491{
3453 int i; 3492 int i;
3493 const int iters = 10000;
3494
3495 for (i = 0; i < iters; i++) {
3496 tw32(cpu_base + CPU_STATE, 0xffffffff);
3497 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3498 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3499 break;
3500 }
3501
3502 return (i == iters) ? -EBUSY : 0;
3503}
3504
3505/* tp->lock is held. */
3506static int tg3_rxcpu_pause(struct tg3 *tp)
3507{
3508 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3509
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3512 udelay(10);
3513
3514 return rc;
3515}
3516
3517/* tp->lock is held. */
3518static int tg3_txcpu_pause(struct tg3 *tp)
3519{
3520 return tg3_pause_cpu(tp, TX_CPU_BASE);
3521}
3522
3523/* tp->lock is held. */
3524static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3525{
3526 tw32(cpu_base + CPU_STATE, 0xffffffff);
3527 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3528}
3454 3529
3455 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3530/* tp->lock is held. */
3531static void tg3_rxcpu_resume(struct tg3 *tp)
3532{
3533 tg3_resume_cpu(tp, RX_CPU_BASE);
3534}
3535
3536/* tp->lock is held. */
3537static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3538{
3539 int rc;
3540
3541 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3456 3542
3457 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3458 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3544 u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3546,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3460 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3546 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3461 return 0; 3547 return 0;
3462 } 3548 }
3463 if (offset == RX_CPU_BASE) { 3549 if (cpu_base == RX_CPU_BASE) {
3464 for (i = 0; i < 10000; i++) { 3550 rc = tg3_rxcpu_pause(tp);
3465 tw32(offset + CPU_STATE, 0xffffffff);
3466 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3467 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3468 break;
3469 }
3470
3471 tw32(offset + CPU_STATE, 0xffffffff);
3472 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3473 udelay(10);
3474 } else { 3551 } else {
3475 /* 3552 /*
3476 * There is only an Rx CPU for the 5750 derivative in the 3553 * There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3556,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3479 if (tg3_flag(tp, IS_SSB_CORE)) 3556 if (tg3_flag(tp, IS_SSB_CORE))
3480 return 0; 3557 return 0;
3481 3558
3482 for (i = 0; i < 10000; i++) { 3559 rc = tg3_txcpu_pause(tp);
3483 tw32(offset + CPU_STATE, 0xffffffff);
3484 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3485 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3486 break;
3487 }
3488 } 3560 }
3489 3561
3490 if (i >= 10000) { 3562 if (rc) {
3491 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3563 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3492 __func__, offset == RX_CPU_BASE ? "RX" : "TX"); 3564 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3493 return -ENODEV; 3565 return -ENODEV;
3494 } 3566 }
3495 3567
@@ -3499,19 +3571,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3499 return 0; 3571 return 0;
3500} 3572}
3501 3573
3502struct fw_info { 3574static int tg3_fw_data_len(struct tg3 *tp,
3503 unsigned int fw_base; 3575 const struct tg3_firmware_hdr *fw_hdr)
3504 unsigned int fw_len; 3576{
3505 const __be32 *fw_data; 3577 int fw_len;
3506}; 3578
3579 /* Non fragmented firmware have one firmware header followed by a
3580 * contiguous chunk of data to be written. The length field in that
3581 * header is not the length of data to be written but the complete
3582 * length of the bss. The data length is determined based on
3583 * tp->fw->size minus headers.
3584 *
3585 * Fragmented firmware have a main header followed by multiple
3586 * fragments. Each fragment is identical to non fragmented firmware
3587 * with a firmware header followed by a contiguous chunk of data. In
3588 * the main header, the length field is unused and set to 0xffffffff.
3589 * In each fragment header the length is the entire size of that
3590 * fragment i.e. fragment data + header length. Data length is
3591 * therefore length field in the header minus TG3_FW_HDR_LEN.
3592 */
3593 if (tp->fw_len == 0xffffffff)
3594 fw_len = be32_to_cpu(fw_hdr->len);
3595 else
3596 fw_len = tp->fw->size;
3597
3598 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3599}
3507 3600
3508/* tp->lock is held. */ 3601/* tp->lock is held. */
3509static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3602static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3510 u32 cpu_scratch_base, int cpu_scratch_size, 3603 u32 cpu_scratch_base, int cpu_scratch_size,
3511 struct fw_info *info) 3604 const struct tg3_firmware_hdr *fw_hdr)
3512{ 3605{
3513 int err, lock_err, i; 3606 int err, i;
3514 void (*write_op)(struct tg3 *, u32, u32); 3607 void (*write_op)(struct tg3 *, u32, u32);
3608 int total_len = tp->fw->size;
3515 3609
3516 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3610 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3517 netdev_err(tp->dev, 3611 netdev_err(tp->dev,
@@ -3520,30 +3614,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3520 return -EINVAL; 3614 return -EINVAL;
3521 } 3615 }
3522 3616
3523 if (tg3_flag(tp, 5705_PLUS)) 3617 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3524 write_op = tg3_write_mem; 3618 write_op = tg3_write_mem;
3525 else 3619 else
3526 write_op = tg3_write_indirect_reg32; 3620 write_op = tg3_write_indirect_reg32;
3527 3621
3528 /* It is possible that bootcode is still loading at this point. 3622 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3529 * Get the nvram lock first before halting the cpu. 3623 /* It is possible that bootcode is still loading at this point.
3530 */ 3624 * Get the nvram lock first before halting the cpu.
3531 lock_err = tg3_nvram_lock(tp); 3625 */
3532 err = tg3_halt_cpu(tp, cpu_base); 3626 int lock_err = tg3_nvram_lock(tp);
3533 if (!lock_err) 3627 err = tg3_halt_cpu(tp, cpu_base);
3534 tg3_nvram_unlock(tp); 3628 if (!lock_err)
3535 if (err) 3629 tg3_nvram_unlock(tp);
3536 goto out; 3630 if (err)
3631 goto out;
3537 3632
3538 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3633 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3539 write_op(tp, cpu_scratch_base + i, 0); 3634 write_op(tp, cpu_scratch_base + i, 0);
3540 tw32(cpu_base + CPU_STATE, 0xffffffff); 3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3541 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); 3636 tw32(cpu_base + CPU_MODE,
3542 for (i = 0; i < (info->fw_len / sizeof(u32)); i++) 3637 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3543 write_op(tp, (cpu_scratch_base + 3638 } else {
3544 (info->fw_base & 0xffff) + 3639 /* Subtract additional main header for fragmented firmware and
3545 (i * sizeof(u32))), 3640 * advance to the first fragment
3546 be32_to_cpu(info->fw_data[i])); 3641 */
3642 total_len -= TG3_FW_HDR_LEN;
3643 fw_hdr++;
3644 }
3645
3646 do {
3647 u32 *fw_data = (u32 *)(fw_hdr + 1);
3648 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3649 write_op(tp, cpu_scratch_base +
3650 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3651 (i * sizeof(u32)),
3652 be32_to_cpu(fw_data[i]));
3653
3654 total_len -= be32_to_cpu(fw_hdr->len);
3655
3656 /* Advance to next fragment */
3657 fw_hdr = (struct tg3_firmware_hdr *)
3658 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3659 } while (total_len > 0);
3547 3660
3548 err = 0; 3661 err = 0;
3549 3662
@@ -3552,13 +3665,33 @@ out:
3552} 3665}
3553 3666
3554/* tp->lock is held. */ 3667/* tp->lock is held. */
3668static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3669{
3670 int i;
3671 const int iters = 5;
3672
3673 tw32(cpu_base + CPU_STATE, 0xffffffff);
3674 tw32_f(cpu_base + CPU_PC, pc);
3675
3676 for (i = 0; i < iters; i++) {
3677 if (tr32(cpu_base + CPU_PC) == pc)
3678 break;
3679 tw32(cpu_base + CPU_STATE, 0xffffffff);
3680 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3681 tw32_f(cpu_base + CPU_PC, pc);
3682 udelay(1000);
3683 }
3684
3685 return (i == iters) ? -EBUSY : 0;
3686}
3687
3688/* tp->lock is held. */
3555static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3689static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3556{ 3690{
3557 struct fw_info info; 3691 const struct tg3_firmware_hdr *fw_hdr;
3558 const __be32 *fw_data; 3692 int err;
3559 int err, i;
3560 3693
3561 fw_data = (void *)tp->fw->data; 3694 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3562 3695
3563 /* Firmware blob starts with version numbers, followed by 3696 /* Firmware blob starts with version numbers, followed by
3564 start address and length. We are setting complete length. 3697 start address and length. We are setting complete length.
@@ -3566,60 +3699,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3566 Remainder is the blob to be loaded contiguously 3699 Remainder is the blob to be loaded contiguously
3567 from start address. */ 3700 from start address. */
3568 3701
3569 info.fw_base = be32_to_cpu(fw_data[1]);
3570 info.fw_len = tp->fw->size - 12;
3571 info.fw_data = &fw_data[3];
3572
3573 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3702 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3574 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3703 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3575 &info); 3704 fw_hdr);
3576 if (err) 3705 if (err)
3577 return err; 3706 return err;
3578 3707
3579 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3708 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3580 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3709 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3581 &info); 3710 fw_hdr);
3582 if (err) 3711 if (err)
3583 return err; 3712 return err;
3584 3713
3585 /* Now startup only the RX cpu. */ 3714 /* Now startup only the RX cpu. */
3586 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3715 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3587 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); 3716 be32_to_cpu(fw_hdr->base_addr));
3588 3717 if (err) {
3589 for (i = 0; i < 5; i++) {
3590 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3591 break;
3592 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3593 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3594 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3595 udelay(1000);
3596 }
3597 if (i >= 5) {
3598 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3718 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3599 "should be %08x\n", __func__, 3719 "should be %08x\n", __func__,
3600 tr32(RX_CPU_BASE + CPU_PC), info.fw_base); 3720 tr32(RX_CPU_BASE + CPU_PC),
3721 be32_to_cpu(fw_hdr->base_addr));
3601 return -ENODEV; 3722 return -ENODEV;
3602 } 3723 }
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3724
3604 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); 3725 tg3_rxcpu_resume(tp);
3605 3726
3606 return 0; 3727 return 0;
3607} 3728}
3608 3729
3730static int tg3_validate_rxcpu_state(struct tg3 *tp)
3731{
3732 const int iters = 1000;
3733 int i;
3734 u32 val;
3735
3736 /* Wait for boot code to complete initialization and enter service
3737 * loop. It is then safe to download service patches
3738 */
3739 for (i = 0; i < iters; i++) {
3740 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3741 break;
3742
3743 udelay(10);
3744 }
3745
3746 if (i == iters) {
3747 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3748 return -EBUSY;
3749 }
3750
3751 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3752 if (val & 0xff) {
3753 netdev_warn(tp->dev,
3754 "Other patches exist. Not downloading EEE patch\n");
3755 return -EEXIST;
3756 }
3757
3758 return 0;
3759}
3760
3761/* tp->lock is held. */
3762static void tg3_load_57766_firmware(struct tg3 *tp)
3763{
3764 struct tg3_firmware_hdr *fw_hdr;
3765
3766 if (!tg3_flag(tp, NO_NVRAM))
3767 return;
3768
3769 if (tg3_validate_rxcpu_state(tp))
3770 return;
3771
3772 if (!tp->fw)
3773 return;
3774
3775 /* This firmware blob has a different format than older firmware
3776 * releases as given below. The main difference is we have fragmented
3777 * data to be written to non-contiguous locations.
3778 *
3779 * In the beginning we have a firmware header identical to other
3780 * firmware which consists of version, base addr and length. The length
3781 * here is unused and set to 0xffffffff.
3782 *
3783 * This is followed by a series of firmware fragments which are
3784 * individually identical to previous firmware. i.e. they have the
3785 * firmware header and followed by data for that fragment. The version
3786 * field of the individual fragment header is unused.
3787 */
3788
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3791 return;
3792
3793 if (tg3_rxcpu_pause(tp))
3794 return;
3795
3796 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3797 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3798
3799 tg3_rxcpu_resume(tp);
3800}
3801
3609/* tp->lock is held. */ 3802/* tp->lock is held. */
3610static int tg3_load_tso_firmware(struct tg3 *tp) 3803static int tg3_load_tso_firmware(struct tg3 *tp)
3611{ 3804{
3612 struct fw_info info; 3805 const struct tg3_firmware_hdr *fw_hdr;
3613 const __be32 *fw_data;
3614 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3806 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3615 int err, i; 3807 int err;
3616 3808
3617 if (tg3_flag(tp, HW_TSO_1) || 3809 if (!tg3_flag(tp, FW_TSO))
3618 tg3_flag(tp, HW_TSO_2) ||
3619 tg3_flag(tp, HW_TSO_3))
3620 return 0; 3810 return 0;
3621 3811
3622 fw_data = (void *)tp->fw->data; 3812 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3623 3813
3624 /* Firmware blob starts with version numbers, followed by 3814 /* Firmware blob starts with version numbers, followed by
3625 start address and length. We are setting complete length. 3815 start address and length. We are setting complete length.
@@ -3627,10 +3817,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
3627 Remainder is the blob to be loaded contiguously 3817 Remainder is the blob to be loaded contiguously
3628 from start address. */ 3818 from start address. */
3629 3819
3630 info.fw_base = be32_to_cpu(fw_data[1]);
3631 cpu_scratch_size = tp->fw_len; 3820 cpu_scratch_size = tp->fw_len;
3632 info.fw_len = tp->fw->size - 12;
3633 info.fw_data = &fw_data[3];
3634 3821
3635 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3822 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3636 cpu_base = RX_CPU_BASE; 3823 cpu_base = RX_CPU_BASE;
@@ -3643,36 +3830,28 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
3643 3830
3644 err = tg3_load_firmware_cpu(tp, cpu_base, 3831 err = tg3_load_firmware_cpu(tp, cpu_base,
3645 cpu_scratch_base, cpu_scratch_size, 3832 cpu_scratch_base, cpu_scratch_size,
3646 &info); 3833 fw_hdr);
3647 if (err) 3834 if (err)
3648 return err; 3835 return err;
3649 3836
3650 /* Now startup the cpu. */ 3837 /* Now startup the cpu. */
3651 tw32(cpu_base + CPU_STATE, 0xffffffff); 3838 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3652 tw32_f(cpu_base + CPU_PC, info.fw_base); 3839 be32_to_cpu(fw_hdr->base_addr));
3653 3840 if (err) {
3654 for (i = 0; i < 5; i++) {
3655 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3656 break;
3657 tw32(cpu_base + CPU_STATE, 0xffffffff);
3658 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3659 tw32_f(cpu_base + CPU_PC, info.fw_base);
3660 udelay(1000);
3661 }
3662 if (i >= 5) {
3663 netdev_err(tp->dev, 3841 netdev_err(tp->dev,
3664 "%s fails to set CPU PC, is %08x should be %08x\n", 3842 "%s fails to set CPU PC, is %08x should be %08x\n",
3665 __func__, tr32(cpu_base + CPU_PC), info.fw_base); 3843 __func__, tr32(cpu_base + CPU_PC),
3844 be32_to_cpu(fw_hdr->base_addr));
3666 return -ENODEV; 3845 return -ENODEV;
3667 } 3846 }
3668 tw32(cpu_base + CPU_STATE, 0xffffffff); 3847
3669 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3848 tg3_resume_cpu(tp, cpu_base);
3670 return 0; 3849 return 0;
3671} 3850}
3672 3851
3673 3852
3674/* tp->lock is held. */ 3853/* tp->lock is held. */
3675static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) 3854static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3676{ 3855{
3677 u32 addr_high, addr_low; 3856 u32 addr_high, addr_low;
3678 int i; 3857 int i;
@@ -3735,7 +3914,7 @@ static int tg3_power_up(struct tg3 *tp)
3735 return err; 3914 return err;
3736} 3915}
3737 3916
3738static int tg3_setup_phy(struct tg3 *, int); 3917static int tg3_setup_phy(struct tg3 *, bool);
3739 3918
3740static int tg3_power_down_prepare(struct tg3 *tp) 3919static int tg3_power_down_prepare(struct tg3 *tp)
3741{ 3920{
@@ -3807,7 +3986,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
3807 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; 3986 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3808 3987
3809 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) 3988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3810 tg3_setup_phy(tp, 0); 3989 tg3_setup_phy(tp, false);
3811 } 3990 }
3812 3991
3813 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
@@ -3848,7 +4027,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
3848 4027
3849 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3850 mac_mode = MAC_MODE_PORT_MODE_GMII; 4029 mac_mode = MAC_MODE_PORT_MODE_GMII;
3851 else 4030 else if (tp->phy_flags &
4031 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4032 if (tp->link_config.active_speed == SPEED_1000)
4033 mac_mode = MAC_MODE_PORT_MODE_GMII;
4034 else
4035 mac_mode = MAC_MODE_PORT_MODE_MII;
4036 } else
3852 mac_mode = MAC_MODE_PORT_MODE_MII; 4037 mac_mode = MAC_MODE_PORT_MODE_MII;
3853 4038
3854 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 4039 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
@@ -4102,12 +4287,16 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
4102 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4287 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4103 u32 adv, fc; 4288 u32 adv, fc;
4104 4289
4105 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 4290 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4291 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4106 adv = ADVERTISED_10baseT_Half | 4292 adv = ADVERTISED_10baseT_Half |
4107 ADVERTISED_10baseT_Full; 4293 ADVERTISED_10baseT_Full;
4108 if (tg3_flag(tp, WOL_SPEED_100MB)) 4294 if (tg3_flag(tp, WOL_SPEED_100MB))
4109 adv |= ADVERTISED_100baseT_Half | 4295 adv |= ADVERTISED_100baseT_Half |
4110 ADVERTISED_100baseT_Full; 4296 ADVERTISED_100baseT_Full;
4297 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4298 adv |= ADVERTISED_1000baseT_Half |
4299 ADVERTISED_1000baseT_Full;
4111 4300
4112 fc = FLOW_CTRL_TX | FLOW_CTRL_RX; 4301 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4113 } else { 4302 } else {
@@ -4121,6 +4310,15 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
4121 4310
4122 tg3_phy_autoneg_cfg(tp, adv, fc); 4311 tg3_phy_autoneg_cfg(tp, adv, fc);
4123 4312
4313 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4314 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4315 /* Normally during power down we want to autonegotiate
4316 * the lowest possible speed for WOL. However, to avoid
4317 * link flap, we leave it untouched.
4318 */
4319 return;
4320 }
4321
4124 tg3_writephy(tp, MII_BMCR, 4322 tg3_writephy(tp, MII_BMCR,
4125 BMCR_ANENABLE | BMCR_ANRESTART); 4323 BMCR_ANENABLE | BMCR_ANRESTART);
4126 } else { 4324 } else {
@@ -4177,6 +4375,103 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
4177 } 4375 }
4178} 4376}
4179 4377
4378static int tg3_phy_pull_config(struct tg3 *tp)
4379{
4380 int err;
4381 u32 val;
4382
4383 err = tg3_readphy(tp, MII_BMCR, &val);
4384 if (err)
4385 goto done;
4386
4387 if (!(val & BMCR_ANENABLE)) {
4388 tp->link_config.autoneg = AUTONEG_DISABLE;
4389 tp->link_config.advertising = 0;
4390 tg3_flag_clear(tp, PAUSE_AUTONEG);
4391
4392 err = -EIO;
4393
4394 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4395 case 0:
4396 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4397 goto done;
4398
4399 tp->link_config.speed = SPEED_10;
4400 break;
4401 case BMCR_SPEED100:
4402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4403 goto done;
4404
4405 tp->link_config.speed = SPEED_100;
4406 break;
4407 case BMCR_SPEED1000:
4408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4409 tp->link_config.speed = SPEED_1000;
4410 break;
4411 }
4412 /* Fall through */
4413 default:
4414 goto done;
4415 }
4416
4417 if (val & BMCR_FULLDPLX)
4418 tp->link_config.duplex = DUPLEX_FULL;
4419 else
4420 tp->link_config.duplex = DUPLEX_HALF;
4421
4422 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4423
4424 err = 0;
4425 goto done;
4426 }
4427
4428 tp->link_config.autoneg = AUTONEG_ENABLE;
4429 tp->link_config.advertising = ADVERTISED_Autoneg;
4430 tg3_flag_set(tp, PAUSE_AUTONEG);
4431
4432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4433 u32 adv;
4434
4435 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4436 if (err)
4437 goto done;
4438
4439 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4440 tp->link_config.advertising |= adv | ADVERTISED_TP;
4441
4442 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4443 } else {
4444 tp->link_config.advertising |= ADVERTISED_FIBRE;
4445 }
4446
4447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4448 u32 adv;
4449
4450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4451 err = tg3_readphy(tp, MII_CTRL1000, &val);
4452 if (err)
4453 goto done;
4454
4455 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4456 } else {
4457 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4458 if (err)
4459 goto done;
4460
4461 adv = tg3_decode_flowctrl_1000X(val);
4462 tp->link_config.flowctrl = adv;
4463
4464 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4465 adv = mii_adv_to_ethtool_adv_x(val);
4466 }
4467
4468 tp->link_config.advertising |= adv;
4469 }
4470
4471done:
4472 return err;
4473}
4474
4180static int tg3_init_5401phy_dsp(struct tg3 *tp) 4475static int tg3_init_5401phy_dsp(struct tg3 *tp)
4181{ 4476{
4182 int err; 4477 int err;
@@ -4196,6 +4491,32 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
4196 return err; 4491 return err;
4197} 4492}
4198 4493
4494static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4495{
4496 u32 val;
4497 u32 tgtadv = 0;
4498 u32 advertising = tp->link_config.advertising;
4499
4500 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4501 return true;
4502
4503 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4504 return false;
4505
4506 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4507
4508
4509 if (advertising & ADVERTISED_100baseT_Full)
4510 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4511 if (advertising & ADVERTISED_1000baseT_Full)
4512 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4513
4514 if (val != tgtadv)
4515 return false;
4516
4517 return true;
4518}
4519
4199static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) 4520static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4200{ 4521{
4201 u32 advmsk, tgtadv, advertising; 4522 u32 advmsk, tgtadv, advertising;
@@ -4262,7 +4583,7 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4262 return true; 4583 return true;
4263} 4584}
4264 4585
4265static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) 4586static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4266{ 4587{
4267 if (curr_link_up != tp->link_up) { 4588 if (curr_link_up != tp->link_up) {
4268 if (curr_link_up) { 4589 if (curr_link_up) {
@@ -4280,23 +4601,28 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4280 return false; 4601 return false;
4281} 4602}
4282 4603
4283static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 4604static void tg3_clear_mac_status(struct tg3 *tp)
4284{ 4605{
4285 int current_link_up; 4606 tw32(MAC_EVENT, 0);
4607
4608 tw32_f(MAC_STATUS,
4609 MAC_STATUS_SYNC_CHANGED |
4610 MAC_STATUS_CFG_CHANGED |
4611 MAC_STATUS_MI_COMPLETION |
4612 MAC_STATUS_LNKSTATE_CHANGED);
4613 udelay(40);
4614}
4615
4616static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4617{
4618 bool current_link_up;
4286 u32 bmsr, val; 4619 u32 bmsr, val;
4287 u32 lcl_adv, rmt_adv; 4620 u32 lcl_adv, rmt_adv;
4288 u16 current_speed; 4621 u16 current_speed;
4289 u8 current_duplex; 4622 u8 current_duplex;
4290 int i, err; 4623 int i, err;
4291 4624
4292 tw32(MAC_EVENT, 0); 4625 tg3_clear_mac_status(tp);
4293
4294 tw32_f(MAC_STATUS,
4295 (MAC_STATUS_SYNC_CHANGED |
4296 MAC_STATUS_CFG_CHANGED |
4297 MAC_STATUS_MI_COMPLETION |
4298 MAC_STATUS_LNKSTATE_CHANGED));
4299 udelay(40);
4300 4626
4301 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { 4627 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4302 tw32_f(MAC_MI_MODE, 4628 tw32_f(MAC_MI_MODE,
@@ -4316,7 +4642,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4316 tg3_readphy(tp, MII_BMSR, &bmsr); 4642 tg3_readphy(tp, MII_BMSR, &bmsr);
4317 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4643 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4318 !(bmsr & BMSR_LSTATUS)) 4644 !(bmsr & BMSR_LSTATUS))
4319 force_reset = 1; 4645 force_reset = true;
4320 } 4646 }
4321 if (force_reset) 4647 if (force_reset)
4322 tg3_phy_reset(tp); 4648 tg3_phy_reset(tp);
@@ -4380,7 +4706,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4380 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 4706 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4381 } 4707 }
4382 4708
4383 current_link_up = 0; 4709 current_link_up = false;
4384 current_speed = SPEED_UNKNOWN; 4710 current_speed = SPEED_UNKNOWN;
4385 current_duplex = DUPLEX_UNKNOWN; 4711 current_duplex = DUPLEX_UNKNOWN;
4386 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; 4712 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
@@ -4439,21 +4765,31 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4439 tp->link_config.active_duplex = current_duplex; 4765 tp->link_config.active_duplex = current_duplex;
4440 4766
4441 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 4767 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4768 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4769
4442 if ((bmcr & BMCR_ANENABLE) && 4770 if ((bmcr & BMCR_ANENABLE) &&
4771 eee_config_ok &&
4443 tg3_phy_copper_an_config_ok(tp, &lcl_adv) && 4772 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4444 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) 4773 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4445 current_link_up = 1; 4774 current_link_up = true;
4775
4776 /* EEE settings changes take effect only after a phy
4777 * reset. If we have skipped a reset due to Link Flap
4778 * Avoidance being enabled, do it now.
4779 */
4780 if (!eee_config_ok &&
4781 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4782 !force_reset)
4783 tg3_phy_reset(tp);
4446 } else { 4784 } else {
4447 if (!(bmcr & BMCR_ANENABLE) && 4785 if (!(bmcr & BMCR_ANENABLE) &&
4448 tp->link_config.speed == current_speed && 4786 tp->link_config.speed == current_speed &&
4449 tp->link_config.duplex == current_duplex && 4787 tp->link_config.duplex == current_duplex) {
4450 tp->link_config.flowctrl == 4788 current_link_up = true;
4451 tp->link_config.active_flowctrl) {
4452 current_link_up = 1;
4453 } 4789 }
4454 } 4790 }
4455 4791
4456 if (current_link_up == 1 && 4792 if (current_link_up &&
4457 tp->link_config.active_duplex == DUPLEX_FULL) { 4793 tp->link_config.active_duplex == DUPLEX_FULL) {
4458 u32 reg, bit; 4794 u32 reg, bit;
4459 4795
@@ -4473,11 +4809,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4473 } 4809 }
4474 4810
4475relink: 4811relink:
4476 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 4812 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4477 tg3_phy_copper_begin(tp); 4813 tg3_phy_copper_begin(tp);
4478 4814
4479 if (tg3_flag(tp, ROBOSWITCH)) { 4815 if (tg3_flag(tp, ROBOSWITCH)) {
4480 current_link_up = 1; 4816 current_link_up = true;
4481 /* FIXME: when BCM5325 switch is used use 100 MBit/s */ 4817 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4482 current_speed = SPEED_1000; 4818 current_speed = SPEED_1000;
4483 current_duplex = DUPLEX_FULL; 4819 current_duplex = DUPLEX_FULL;
@@ -4488,11 +4824,11 @@ relink:
4488 tg3_readphy(tp, MII_BMSR, &bmsr); 4824 tg3_readphy(tp, MII_BMSR, &bmsr);
4489 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || 4825 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4490 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) 4826 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4491 current_link_up = 1; 4827 current_link_up = true;
4492 } 4828 }
4493 4829
4494 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 4830 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4495 if (current_link_up == 1) { 4831 if (current_link_up) {
4496 if (tp->link_config.active_speed == SPEED_100 || 4832 if (tp->link_config.active_speed == SPEED_100 ||
4497 tp->link_config.active_speed == SPEED_10) 4833 tp->link_config.active_speed == SPEED_10)
4498 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 4834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -4528,7 +4864,7 @@ relink:
4528 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 4864 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4529 4865
4530 if (tg3_asic_rev(tp) == ASIC_REV_5700) { 4866 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4531 if (current_link_up == 1 && 4867 if (current_link_up &&
4532 tg3_5700_link_polarity(tp, tp->link_config.active_speed)) 4868 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4533 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 4869 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4534 else 4870 else
@@ -4559,7 +4895,7 @@ relink:
4559 udelay(40); 4895 udelay(40);
4560 4896
4561 if (tg3_asic_rev(tp) == ASIC_REV_5700 && 4897 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4562 current_link_up == 1 && 4898 current_link_up &&
4563 tp->link_config.active_speed == SPEED_1000 && 4899 tp->link_config.active_speed == SPEED_1000 &&
4564 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { 4900 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4565 udelay(120); 4901 udelay(120);
@@ -4999,19 +5335,19 @@ static void tg3_init_bcm8002(struct tg3 *tp)
4999 tg3_writephy(tp, 0x10, 0x8011); 5335 tg3_writephy(tp, 0x10, 0x8011);
5000} 5336}
5001 5337
5002static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) 5338static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5003{ 5339{
5004 u16 flowctrl; 5340 u16 flowctrl;
5341 bool current_link_up;
5005 u32 sg_dig_ctrl, sg_dig_status; 5342 u32 sg_dig_ctrl, sg_dig_status;
5006 u32 serdes_cfg, expected_sg_dig_ctrl; 5343 u32 serdes_cfg, expected_sg_dig_ctrl;
5007 int workaround, port_a; 5344 int workaround, port_a;
5008 int current_link_up;
5009 5345
5010 serdes_cfg = 0; 5346 serdes_cfg = 0;
5011 expected_sg_dig_ctrl = 0; 5347 expected_sg_dig_ctrl = 0;
5012 workaround = 0; 5348 workaround = 0;
5013 port_a = 1; 5349 port_a = 1;
5014 current_link_up = 0; 5350 current_link_up = false;
5015 5351
5016 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && 5352 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5017 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { 5353 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
@@ -5042,7 +5378,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5042 } 5378 }
5043 if (mac_status & MAC_STATUS_PCS_SYNCED) { 5379 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5044 tg3_setup_flow_control(tp, 0, 0); 5380 tg3_setup_flow_control(tp, 0, 0);
5045 current_link_up = 1; 5381 current_link_up = true;
5046 } 5382 }
5047 goto out; 5383 goto out;
5048 } 5384 }
@@ -5063,7 +5399,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5063 MAC_STATUS_RCVD_CFG)) == 5399 MAC_STATUS_RCVD_CFG)) ==
5064 MAC_STATUS_PCS_SYNCED)) { 5400 MAC_STATUS_PCS_SYNCED)) {
5065 tp->serdes_counter--; 5401 tp->serdes_counter--;
5066 current_link_up = 1; 5402 current_link_up = true;
5067 goto out; 5403 goto out;
5068 } 5404 }
5069restart_autoneg: 5405restart_autoneg:
@@ -5098,7 +5434,7 @@ restart_autoneg:
5098 mii_adv_to_ethtool_adv_x(remote_adv); 5434 mii_adv_to_ethtool_adv_x(remote_adv);
5099 5435
5100 tg3_setup_flow_control(tp, local_adv, remote_adv); 5436 tg3_setup_flow_control(tp, local_adv, remote_adv);
5101 current_link_up = 1; 5437 current_link_up = true;
5102 tp->serdes_counter = 0; 5438 tp->serdes_counter = 0;
5103 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 5439 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5104 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { 5440 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
@@ -5126,7 +5462,7 @@ restart_autoneg:
5126 if ((mac_status & MAC_STATUS_PCS_SYNCED) && 5462 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5127 !(mac_status & MAC_STATUS_RCVD_CFG)) { 5463 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5128 tg3_setup_flow_control(tp, 0, 0); 5464 tg3_setup_flow_control(tp, 0, 0);
5129 current_link_up = 1; 5465 current_link_up = true;
5130 tp->phy_flags |= 5466 tp->phy_flags |=
5131 TG3_PHYFLG_PARALLEL_DETECT; 5467 TG3_PHYFLG_PARALLEL_DETECT;
5132 tp->serdes_counter = 5468 tp->serdes_counter =
@@ -5144,9 +5480,9 @@ out:
5144 return current_link_up; 5480 return current_link_up;
5145} 5481}
5146 5482
5147static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) 5483static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5148{ 5484{
5149 int current_link_up = 0; 5485 bool current_link_up = false;
5150 5486
5151 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) 5487 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5152 goto out; 5488 goto out;
@@ -5173,7 +5509,7 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5173 5509
5174 tg3_setup_flow_control(tp, local_adv, remote_adv); 5510 tg3_setup_flow_control(tp, local_adv, remote_adv);
5175 5511
5176 current_link_up = 1; 5512 current_link_up = true;
5177 } 5513 }
5178 for (i = 0; i < 30; i++) { 5514 for (i = 0; i < 30; i++) {
5179 udelay(20); 5515 udelay(20);
@@ -5188,15 +5524,15 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5188 } 5524 }
5189 5525
5190 mac_status = tr32(MAC_STATUS); 5526 mac_status = tr32(MAC_STATUS);
5191 if (current_link_up == 0 && 5527 if (!current_link_up &&
5192 (mac_status & MAC_STATUS_PCS_SYNCED) && 5528 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5193 !(mac_status & MAC_STATUS_RCVD_CFG)) 5529 !(mac_status & MAC_STATUS_RCVD_CFG))
5194 current_link_up = 1; 5530 current_link_up = true;
5195 } else { 5531 } else {
5196 tg3_setup_flow_control(tp, 0, 0); 5532 tg3_setup_flow_control(tp, 0, 0);
5197 5533
5198 /* Forcing 1000FD link up. */ 5534 /* Forcing 1000FD link up. */
5199 current_link_up = 1; 5535 current_link_up = true;
5200 5536
5201 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); 5537 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5202 udelay(40); 5538 udelay(40);
@@ -5209,13 +5545,13 @@ out:
5209 return current_link_up; 5545 return current_link_up;
5210} 5546}
5211 5547
5212static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) 5548static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5213{ 5549{
5214 u32 orig_pause_cfg; 5550 u32 orig_pause_cfg;
5215 u16 orig_active_speed; 5551 u16 orig_active_speed;
5216 u8 orig_active_duplex; 5552 u8 orig_active_duplex;
5217 u32 mac_status; 5553 u32 mac_status;
5218 int current_link_up; 5554 bool current_link_up;
5219 int i; 5555 int i;
5220 5556
5221 orig_pause_cfg = tp->link_config.active_flowctrl; 5557 orig_pause_cfg = tp->link_config.active_flowctrl;
@@ -5252,7 +5588,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5252 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 5588 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5253 udelay(40); 5589 udelay(40);
5254 5590
5255 current_link_up = 0; 5591 current_link_up = false;
5256 tp->link_config.rmt_adv = 0; 5592 tp->link_config.rmt_adv = 0;
5257 mac_status = tr32(MAC_STATUS); 5593 mac_status = tr32(MAC_STATUS);
5258 5594
@@ -5277,7 +5613,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5277 5613
5278 mac_status = tr32(MAC_STATUS); 5614 mac_status = tr32(MAC_STATUS);
5279 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { 5615 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5280 current_link_up = 0; 5616 current_link_up = false;
5281 if (tp->link_config.autoneg == AUTONEG_ENABLE && 5617 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5282 tp->serdes_counter == 0) { 5618 tp->serdes_counter == 0) {
5283 tw32_f(MAC_MODE, (tp->mac_mode | 5619 tw32_f(MAC_MODE, (tp->mac_mode |
@@ -5287,7 +5623,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5287 } 5623 }
5288 } 5624 }
5289 5625
5290 if (current_link_up == 1) { 5626 if (current_link_up) {
5291 tp->link_config.active_speed = SPEED_1000; 5627 tp->link_config.active_speed = SPEED_1000;
5292 tp->link_config.active_duplex = DUPLEX_FULL; 5628 tp->link_config.active_duplex = DUPLEX_FULL;
5293 tw32(MAC_LED_CTRL, (tp->led_ctrl | 5629 tw32(MAC_LED_CTRL, (tp->led_ctrl |
@@ -5312,33 +5648,63 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5312 return 0; 5648 return 0;
5313} 5649}
5314 5650
5315static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) 5651static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5316{ 5652{
5317 int current_link_up, err = 0; 5653 int err = 0;
5318 u32 bmsr, bmcr; 5654 u32 bmsr, bmcr;
5319 u16 current_speed; 5655 u16 current_speed = SPEED_UNKNOWN;
5320 u8 current_duplex; 5656 u8 current_duplex = DUPLEX_UNKNOWN;
5321 u32 local_adv, remote_adv; 5657 bool current_link_up = false;
5658 u32 local_adv, remote_adv, sgsr;
5659
5660 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5661 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5662 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5663 (sgsr & SERDES_TG3_SGMII_MODE)) {
5664
5665 if (force_reset)
5666 tg3_phy_reset(tp);
5667
5668 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5669
5670 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5671 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5672 } else {
5673 current_link_up = true;
5674 if (sgsr & SERDES_TG3_SPEED_1000) {
5675 current_speed = SPEED_1000;
5676 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5677 } else if (sgsr & SERDES_TG3_SPEED_100) {
5678 current_speed = SPEED_100;
5679 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5680 } else {
5681 current_speed = SPEED_10;
5682 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5683 }
5684
5685 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5686 current_duplex = DUPLEX_FULL;
5687 else
5688 current_duplex = DUPLEX_HALF;
5689 }
5690
5691 tw32_f(MAC_MODE, tp->mac_mode);
5692 udelay(40);
5693
5694 tg3_clear_mac_status(tp);
5695
5696 goto fiber_setup_done;
5697 }
5322 5698
5323 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 5699 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5324 tw32_f(MAC_MODE, tp->mac_mode); 5700 tw32_f(MAC_MODE, tp->mac_mode);
5325 udelay(40); 5701 udelay(40);
5326 5702
5327 tw32(MAC_EVENT, 0); 5703 tg3_clear_mac_status(tp);
5328
5329 tw32_f(MAC_STATUS,
5330 (MAC_STATUS_SYNC_CHANGED |
5331 MAC_STATUS_CFG_CHANGED |
5332 MAC_STATUS_MI_COMPLETION |
5333 MAC_STATUS_LNKSTATE_CHANGED));
5334 udelay(40);
5335 5704
5336 if (force_reset) 5705 if (force_reset)
5337 tg3_phy_reset(tp); 5706 tg3_phy_reset(tp);
5338 5707
5339 current_link_up = 0;
5340 current_speed = SPEED_UNKNOWN;
5341 current_duplex = DUPLEX_UNKNOWN;
5342 tp->link_config.rmt_adv = 0; 5708 tp->link_config.rmt_adv = 0;
5343 5709
5344 err |= tg3_readphy(tp, MII_BMSR, &bmsr); 5710 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -5424,7 +5790,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5424 5790
5425 if (bmsr & BMSR_LSTATUS) { 5791 if (bmsr & BMSR_LSTATUS) {
5426 current_speed = SPEED_1000; 5792 current_speed = SPEED_1000;
5427 current_link_up = 1; 5793 current_link_up = true;
5428 if (bmcr & BMCR_FULLDPLX) 5794 if (bmcr & BMCR_FULLDPLX)
5429 current_duplex = DUPLEX_FULL; 5795 current_duplex = DUPLEX_FULL;
5430 else 5796 else
@@ -5451,12 +5817,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5451 } else if (!tg3_flag(tp, 5780_CLASS)) { 5817 } else if (!tg3_flag(tp, 5780_CLASS)) {
5452 /* Link is up via parallel detect */ 5818 /* Link is up via parallel detect */
5453 } else { 5819 } else {
5454 current_link_up = 0; 5820 current_link_up = false;
5455 } 5821 }
5456 } 5822 }
5457 } 5823 }
5458 5824
5459 if (current_link_up == 1 && current_duplex == DUPLEX_FULL) 5825fiber_setup_done:
5826 if (current_link_up && current_duplex == DUPLEX_FULL)
5460 tg3_setup_flow_control(tp, local_adv, remote_adv); 5827 tg3_setup_flow_control(tp, local_adv, remote_adv);
5461 5828
5462 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 5829 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
@@ -5535,7 +5902,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
5535 } 5902 }
5536} 5903}
5537 5904
5538static int tg3_setup_phy(struct tg3 *tp, int force_reset) 5905static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5539{ 5906{
5540 u32 val; 5907 u32 val;
5541 int err; 5908 int err;
@@ -5625,10 +5992,13 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5625 5992
5626 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 5993 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5627 SOF_TIMESTAMPING_RX_SOFTWARE | 5994 SOF_TIMESTAMPING_RX_SOFTWARE |
5628 SOF_TIMESTAMPING_SOFTWARE | 5995 SOF_TIMESTAMPING_SOFTWARE;
5629 SOF_TIMESTAMPING_TX_HARDWARE | 5996
5630 SOF_TIMESTAMPING_RX_HARDWARE | 5997 if (tg3_flag(tp, PTP_CAPABLE)) {
5631 SOF_TIMESTAMPING_RAW_HARDWARE; 5998 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
5999 SOF_TIMESTAMPING_RX_HARDWARE |
6000 SOF_TIMESTAMPING_RAW_HARDWARE;
6001 }
5632 6002
5633 if (tp->ptp_clock) 6003 if (tp->ptp_clock)
5634 info->phc_index = ptp_clock_index(tp->ptp_clock); 6004 info->phc_index = ptp_clock_index(tp->ptp_clock);
@@ -6348,7 +6718,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
6348 6718
6349 if (desc->type_flags & RXD_FLAG_VLAN && 6719 if (desc->type_flags & RXD_FLAG_VLAN &&
6350 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 6720 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6351 __vlan_hwaccel_put_tag(skb, 6721 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6352 desc->err_vlan & RXD_VLAN_MASK); 6722 desc->err_vlan & RXD_VLAN_MASK);
6353 6723
6354 napi_gro_receive(&tnapi->napi, skb); 6724 napi_gro_receive(&tnapi->napi, skb);
@@ -6436,7 +6806,7 @@ static void tg3_poll_link(struct tg3 *tp)
6436 MAC_STATUS_LNKSTATE_CHANGED)); 6806 MAC_STATUS_LNKSTATE_CHANGED));
6437 udelay(40); 6807 udelay(40);
6438 } else 6808 } else
6439 tg3_setup_phy(tp, 0); 6809 tg3_setup_phy(tp, false);
6440 spin_unlock(&tp->lock); 6810 spin_unlock(&tp->lock);
6441 } 6811 }
6442 } 6812 }
@@ -7533,7 +7903,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7533 u32 val, bmcr, mac_mode, ptest = 0; 7903 u32 val, bmcr, mac_mode, ptest = 0;
7534 7904
7535 tg3_phy_toggle_apd(tp, false); 7905 tg3_phy_toggle_apd(tp, false);
7536 tg3_phy_toggle_automdix(tp, 0); 7906 tg3_phy_toggle_automdix(tp, false);
7537 7907
7538 if (extlpbk && tg3_phy_set_extloopbk(tp)) 7908 if (extlpbk && tg3_phy_set_extloopbk(tp))
7539 return -EIO; 7909 return -EIO;
@@ -7641,7 +8011,7 @@ static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7641 spin_lock_bh(&tp->lock); 8011 spin_lock_bh(&tp->lock);
7642 tg3_mac_loopback(tp, false); 8012 tg3_mac_loopback(tp, false);
7643 /* Force link status check */ 8013 /* Force link status check */
7644 tg3_setup_phy(tp, 1); 8014 tg3_setup_phy(tp, true);
7645 spin_unlock_bh(&tp->lock); 8015 spin_unlock_bh(&tp->lock);
7646 netdev_info(dev, "Internal MAC loopback mode disabled.\n"); 8016 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7647 } 8017 }
@@ -8039,11 +8409,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8039 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8409 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8040 TG3_RX_RCB_RING_BYTES(tp), 8410 TG3_RX_RCB_RING_BYTES(tp),
8041 &tnapi->rx_rcb_mapping, 8411 &tnapi->rx_rcb_mapping,
8042 GFP_KERNEL); 8412 GFP_KERNEL | __GFP_ZERO);
8043 if (!tnapi->rx_rcb) 8413 if (!tnapi->rx_rcb)
8044 goto err_out; 8414 goto err_out;
8045
8046 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8047 } 8415 }
8048 8416
8049 return 0; 8417 return 0;
@@ -8093,12 +8461,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8093 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8461 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8094 sizeof(struct tg3_hw_stats), 8462 sizeof(struct tg3_hw_stats),
8095 &tp->stats_mapping, 8463 &tp->stats_mapping,
8096 GFP_KERNEL); 8464 GFP_KERNEL | __GFP_ZERO);
8097 if (!tp->hw_stats) 8465 if (!tp->hw_stats)
8098 goto err_out; 8466 goto err_out;
8099 8467
8100 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8101
8102 for (i = 0; i < tp->irq_cnt; i++) { 8468 for (i = 0; i < tp->irq_cnt; i++) {
8103 struct tg3_napi *tnapi = &tp->napi[i]; 8469 struct tg3_napi *tnapi = &tp->napi[i];
8104 struct tg3_hw_status *sblk; 8470 struct tg3_hw_status *sblk;
@@ -8106,11 +8472,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8106 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8472 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8107 TG3_HW_STATUS_SIZE, 8473 TG3_HW_STATUS_SIZE,
8108 &tnapi->status_mapping, 8474 &tnapi->status_mapping,
8109 GFP_KERNEL); 8475 GFP_KERNEL | __GFP_ZERO);
8110 if (!tnapi->hw_status) 8476 if (!tnapi->hw_status)
8111 goto err_out; 8477 goto err_out;
8112 8478
8113 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8114 sblk = tnapi->hw_status; 8479 sblk = tnapi->hw_status;
8115 8480
8116 if (tg3_flag(tp, ENABLE_RSS)) { 8481 if (tg3_flag(tp, ENABLE_RSS)) {
@@ -8157,7 +8522,7 @@ err_out:
8157/* To stop a block, clear the enable bit and poll till it 8522/* To stop a block, clear the enable bit and poll till it
8158 * clears. tp->lock is held. 8523 * clears. tp->lock is held.
8159 */ 8524 */
8160static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) 8525static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8161{ 8526{
8162 unsigned int i; 8527 unsigned int i;
8163 u32 val; 8528 u32 val;
@@ -8201,7 +8566,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
8201} 8566}
8202 8567
8203/* tp->lock is held. */ 8568/* tp->lock is held. */
8204static int tg3_abort_hw(struct tg3 *tp, int silent) 8569static int tg3_abort_hw(struct tg3 *tp, bool silent)
8205{ 8570{
8206 int i, err; 8571 int i, err;
8207 8572
@@ -8561,6 +8926,9 @@ static int tg3_chip_reset(struct tg3 *tp)
8561 8926
8562 /* Reprobe ASF enable state. */ 8927 /* Reprobe ASF enable state. */
8563 tg3_flag_clear(tp, ENABLE_ASF); 8928 tg3_flag_clear(tp, ENABLE_ASF);
8929 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8930 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8931
8564 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); 8932 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8565 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 8933 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8566 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 8934 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -8572,6 +8940,12 @@ static int tg3_chip_reset(struct tg3 *tp)
8572 tp->last_event_jiffies = jiffies; 8940 tp->last_event_jiffies = jiffies;
8573 if (tg3_flag(tp, 5750_PLUS)) 8941 if (tg3_flag(tp, 5750_PLUS))
8574 tg3_flag_set(tp, ASF_NEW_HANDSHAKE); 8942 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8943
8944 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8945 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8946 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8947 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8948 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8575 } 8949 }
8576 } 8950 }
8577 8951
@@ -8582,7 +8956,7 @@ static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8582static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); 8956static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8583 8957
8584/* tp->lock is held. */ 8958/* tp->lock is held. */
8585static int tg3_halt(struct tg3 *tp, int kind, int silent) 8959static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8586{ 8960{
8587 int err; 8961 int err;
8588 8962
@@ -8593,7 +8967,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
8593 tg3_abort_hw(tp, silent); 8967 tg3_abort_hw(tp, silent);
8594 err = tg3_chip_reset(tp); 8968 err = tg3_chip_reset(tp);
8595 8969
8596 __tg3_set_mac_addr(tp, 0); 8970 __tg3_set_mac_addr(tp, false);
8597 8971
8598 tg3_write_sig_legacy(tp, kind); 8972 tg3_write_sig_legacy(tp, kind);
8599 tg3_write_sig_post_reset(tp, kind); 8973 tg3_write_sig_post_reset(tp, kind);
@@ -8617,7 +8991,8 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
8617{ 8991{
8618 struct tg3 *tp = netdev_priv(dev); 8992 struct tg3 *tp = netdev_priv(dev);
8619 struct sockaddr *addr = p; 8993 struct sockaddr *addr = p;
8620 int err = 0, skip_mac_1 = 0; 8994 int err = 0;
8995 bool skip_mac_1 = false;
8621 8996
8622 if (!is_valid_ether_addr(addr->sa_data)) 8997 if (!is_valid_ether_addr(addr->sa_data))
8623 return -EADDRNOTAVAIL; 8998 return -EADDRNOTAVAIL;
@@ -8638,7 +9013,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
8638 /* Skip MAC addr 1 if ASF is using it. */ 9013 /* Skip MAC addr 1 if ASF is using it. */
8639 if ((addr0_high != addr1_high || addr0_low != addr1_low) && 9014 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8640 !(addr1_high == 0 && addr1_low == 0)) 9015 !(addr1_high == 0 && addr1_low == 0))
8641 skip_mac_1 = 1; 9016 skip_mac_1 = true;
8642 } 9017 }
8643 spin_lock_bh(&tp->lock); 9018 spin_lock_bh(&tp->lock);
8644 __tg3_set_mac_addr(tp, skip_mac_1); 9019 __tg3_set_mac_addr(tp, skip_mac_1);
@@ -9057,7 +9432,7 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9057} 9432}
9058 9433
9059/* tp->lock is held. */ 9434/* tp->lock is held. */
9060static int tg3_reset_hw(struct tg3 *tp, int reset_phy) 9435static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9061{ 9436{
9062 u32 val, rdmac_mode; 9437 u32 val, rdmac_mode;
9063 int i, err, limit; 9438 int i, err, limit;
@@ -9106,6 +9481,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9106 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 9481 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9107 } 9482 }
9108 9483
9484 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9485 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9486 tg3_phy_pull_config(tp);
9487 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9488 }
9489
9109 if (reset_phy) 9490 if (reset_phy)
9110 tg3_phy_reset(tp); 9491 tg3_phy_reset(tp);
9111 9492
@@ -9444,7 +9825,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9444 tg3_rings_reset(tp); 9825 tg3_rings_reset(tp);
9445 9826
9446 /* Initialize MAC address and backoff seed. */ 9827 /* Initialize MAC address and backoff seed. */
9447 __tg3_set_mac_addr(tp, 0); 9828 __tg3_set_mac_addr(tp, false);
9448 9829
9449 /* MTU + ethernet header + FCS + optional VLAN tag */ 9830 /* MTU + ethernet header + FCS + optional VLAN tag */
9450 tw32(MAC_RX_MTU_SIZE, 9831 tw32(MAC_RX_MTU_SIZE,
@@ -9781,6 +10162,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9781 return err; 10162 return err;
9782 } 10163 }
9783 10164
10165 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10166 /* Ignore any errors for the firmware download. If download
10167 * fails, the device will operate with EEE disabled
10168 */
10169 tg3_load_57766_firmware(tp);
10170 }
10171
9784 if (tg3_flag(tp, TSO_CAPABLE)) { 10172 if (tg3_flag(tp, TSO_CAPABLE)) {
9785 err = tg3_load_tso_firmware(tp); 10173 err = tg3_load_tso_firmware(tp);
9786 if (err) 10174 if (err)
@@ -9888,7 +10276,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9888 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 10276 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9889 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 10277 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9890 10278
9891 err = tg3_setup_phy(tp, 0); 10279 err = tg3_setup_phy(tp, false);
9892 if (err) 10280 if (err)
9893 return err; 10281 return err;
9894 10282
@@ -9968,7 +10356,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9968/* Called at device open time to get the chip ready for 10356/* Called at device open time to get the chip ready for
9969 * packet processing. Invoked with tp->lock held. 10357 * packet processing. Invoked with tp->lock held.
9970 */ 10358 */
9971static int tg3_init_hw(struct tg3 *tp, int reset_phy) 10359static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
9972{ 10360{
9973 tg3_switch_clocks(tp); 10361 tg3_switch_clocks(tp);
9974 10362
@@ -10229,7 +10617,7 @@ static void tg3_timer(unsigned long __opaque)
10229 phy_event = 1; 10617 phy_event = 1;
10230 10618
10231 if (phy_event) 10619 if (phy_event)
10232 tg3_setup_phy(tp, 0); 10620 tg3_setup_phy(tp, false);
10233 } else if (tg3_flag(tp, POLL_SERDES)) { 10621 } else if (tg3_flag(tp, POLL_SERDES)) {
10234 u32 mac_stat = tr32(MAC_STATUS); 10622 u32 mac_stat = tr32(MAC_STATUS);
10235 int need_setup = 0; 10623 int need_setup = 0;
@@ -10252,7 +10640,7 @@ static void tg3_timer(unsigned long __opaque)
10252 tw32_f(MAC_MODE, tp->mac_mode); 10640 tw32_f(MAC_MODE, tp->mac_mode);
10253 udelay(40); 10641 udelay(40);
10254 } 10642 }
10255 tg3_setup_phy(tp, 0); 10643 tg3_setup_phy(tp, false);
10256 } 10644 }
10257 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 10645 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10258 tg3_flag(tp, 5780_CLASS)) { 10646 tg3_flag(tp, 5780_CLASS)) {
@@ -10338,7 +10726,7 @@ static void tg3_timer_stop(struct tg3 *tp)
10338/* Restart hardware after configuration changes, self-test, etc. 10726/* Restart hardware after configuration changes, self-test, etc.
10339 * Invoked with tp->lock held. 10727 * Invoked with tp->lock held.
10340 */ 10728 */
10341static int tg3_restart_hw(struct tg3 *tp, int reset_phy) 10729static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10342 __releases(tp->lock) 10730 __releases(tp->lock)
10343 __acquires(tp->lock) 10731 __acquires(tp->lock)
10344{ 10732{
@@ -10388,7 +10776,7 @@ static void tg3_reset_task(struct work_struct *work)
10388 } 10776 }
10389 10777
10390 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 10778 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10391 err = tg3_init_hw(tp, 1); 10779 err = tg3_init_hw(tp, true);
10392 if (err) 10780 if (err)
10393 goto out; 10781 goto out;
10394 10782
@@ -10558,7 +10946,7 @@ static int tg3_test_msi(struct tg3 *tp)
10558 tg3_full_lock(tp, 1); 10946 tg3_full_lock(tp, 1);
10559 10947
10560 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 10948 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10561 err = tg3_init_hw(tp, 1); 10949 err = tg3_init_hw(tp, true);
10562 10950
10563 tg3_full_unlock(tp); 10951 tg3_full_unlock(tp);
10564 10952
@@ -10570,7 +10958,7 @@ static int tg3_test_msi(struct tg3 *tp)
10570 10958
10571static int tg3_request_firmware(struct tg3 *tp) 10959static int tg3_request_firmware(struct tg3 *tp)
10572{ 10960{
10573 const __be32 *fw_data; 10961 const struct tg3_firmware_hdr *fw_hdr;
10574 10962
10575 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 10963 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10576 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 10964 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10966,15 @@ static int tg3_request_firmware(struct tg3 *tp)
10578 return -ENOENT; 10966 return -ENOENT;
10579 } 10967 }
10580 10968
10581 fw_data = (void *)tp->fw->data; 10969 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10582 10970
10583 /* Firmware blob starts with version numbers, followed by 10971 /* Firmware blob starts with version numbers, followed by
10584 * start address and _full_ length including BSS sections 10972 * start address and _full_ length including BSS sections
10585 * (which must be longer than the actual data, of course 10973 * (which must be longer than the actual data, of course
10586 */ 10974 */
10587 10975
10588 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 10976 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10589 if (tp->fw_len < (tp->fw->size - 12)) { 10977 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10590 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 10978 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10591 tp->fw_len, tp->fw_needed); 10979 tp->fw_len, tp->fw_needed);
10592 release_firmware(tp->fw); 10980 release_firmware(tp->fw);
@@ -10885,7 +11273,15 @@ static int tg3_open(struct net_device *dev)
10885 11273
10886 if (tp->fw_needed) { 11274 if (tp->fw_needed) {
10887 err = tg3_request_firmware(tp); 11275 err = tg3_request_firmware(tp);
10888 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11276 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11277 if (err) {
11278 netdev_warn(tp->dev, "EEE capability disabled\n");
11279 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11280 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11281 netdev_warn(tp->dev, "EEE capability restored\n");
11282 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11283 }
11284 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10889 if (err) 11285 if (err)
10890 return err; 11286 return err;
10891 } else if (err) { 11287 } else if (err) {
@@ -10910,7 +11306,9 @@ static int tg3_open(struct net_device *dev)
10910 11306
10911 tg3_full_unlock(tp); 11307 tg3_full_unlock(tp);
10912 11308
10913 err = tg3_start(tp, true, true, true); 11309 err = tg3_start(tp,
11310 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11311 true, true);
10914 if (err) { 11312 if (err) {
10915 tg3_frob_aux_power(tp, false); 11313 tg3_frob_aux_power(tp, false);
10916 pci_set_power_state(tp->pdev, PCI_D3hot); 11314 pci_set_power_state(tp->pdev, PCI_D3hot);
@@ -11416,8 +11814,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11416 tp->link_config.duplex = cmd->duplex; 11814 tp->link_config.duplex = cmd->duplex;
11417 } 11815 }
11418 11816
11817 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11818
11819 tg3_warn_mgmt_link_flap(tp);
11820
11419 if (netif_running(dev)) 11821 if (netif_running(dev))
11420 tg3_setup_phy(tp, 1); 11822 tg3_setup_phy(tp, true);
11421 11823
11422 tg3_full_unlock(tp); 11824 tg3_full_unlock(tp);
11423 11825
@@ -11494,6 +11896,8 @@ static int tg3_nway_reset(struct net_device *dev)
11494 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11896 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11495 return -EINVAL; 11897 return -EINVAL;
11496 11898
11899 tg3_warn_mgmt_link_flap(tp);
11900
11497 if (tg3_flag(tp, USE_PHYLIB)) { 11901 if (tg3_flag(tp, USE_PHYLIB)) {
11498 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 11902 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11499 return -EAGAIN; 11903 return -EAGAIN;
@@ -11571,7 +11975,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
11571 11975
11572 if (netif_running(dev)) { 11976 if (netif_running(dev)) {
11573 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11977 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11574 err = tg3_restart_hw(tp, 1); 11978 err = tg3_restart_hw(tp, false);
11575 if (!err) 11979 if (!err)
11576 tg3_netif_start(tp); 11980 tg3_netif_start(tp);
11577 } 11981 }
@@ -11606,6 +12010,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
11606 struct tg3 *tp = netdev_priv(dev); 12010 struct tg3 *tp = netdev_priv(dev);
11607 int err = 0; 12011 int err = 0;
11608 12012
12013 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12014 tg3_warn_mgmt_link_flap(tp);
12015
11609 if (tg3_flag(tp, USE_PHYLIB)) { 12016 if (tg3_flag(tp, USE_PHYLIB)) {
11610 u32 newadv; 12017 u32 newadv;
11611 struct phy_device *phydev; 12018 struct phy_device *phydev;
@@ -11692,7 +12099,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
11692 12099
11693 if (netif_running(dev)) { 12100 if (netif_running(dev)) {
11694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 12101 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11695 err = tg3_restart_hw(tp, 1); 12102 err = tg3_restart_hw(tp, false);
11696 if (!err) 12103 if (!err)
11697 tg3_netif_start(tp); 12104 tg3_netif_start(tp);
11698 } 12105 }
@@ -11700,6 +12107,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
11700 tg3_full_unlock(tp); 12107 tg3_full_unlock(tp);
11701 } 12108 }
11702 12109
12110 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12111
11703 return err; 12112 return err;
11704} 12113}
11705 12114
@@ -12760,7 +13169,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12760 goto done; 13169 goto done;
12761 } 13170 }
12762 13171
12763 err = tg3_reset_hw(tp, 1); 13172 err = tg3_reset_hw(tp, true);
12764 if (err) { 13173 if (err) {
12765 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13174 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12766 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; 13175 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
@@ -12927,7 +13336,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12927 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 13336 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12928 if (netif_running(dev)) { 13337 if (netif_running(dev)) {
12929 tg3_flag_set(tp, INIT_COMPLETE); 13338 tg3_flag_set(tp, INIT_COMPLETE);
12930 err2 = tg3_restart_hw(tp, 1); 13339 err2 = tg3_restart_hw(tp, true);
12931 if (!err2) 13340 if (!err2)
12932 tg3_netif_start(tp); 13341 tg3_netif_start(tp);
12933 } 13342 }
@@ -13244,7 +13653,8 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13244static int tg3_change_mtu(struct net_device *dev, int new_mtu) 13653static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13245{ 13654{
13246 struct tg3 *tp = netdev_priv(dev); 13655 struct tg3 *tp = netdev_priv(dev);
13247 int err, reset_phy = 0; 13656 int err;
13657 bool reset_phy = false;
13248 13658
13249 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 13659 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13250 return -EINVAL; 13660 return -EINVAL;
@@ -13271,7 +13681,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13271 * breaks all requests to 256 bytes. 13681 * breaks all requests to 256 bytes.
13272 */ 13682 */
13273 if (tg3_asic_rev(tp) == ASIC_REV_57766) 13683 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13274 reset_phy = 1; 13684 reset_phy = true;
13275 13685
13276 err = tg3_restart_hw(tp, reset_phy); 13686 err = tg3_restart_hw(tp, reset_phy);
13277 13687
@@ -13837,6 +14247,12 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
13837 case FLASH_5762_EEPROM_LD: 14247 case FLASH_5762_EEPROM_LD:
13838 nvmpinstrp = FLASH_5720_EEPROM_LD; 14248 nvmpinstrp = FLASH_5720_EEPROM_LD;
13839 break; 14249 break;
14250 case FLASH_5720VENDOR_M_ST_M45PE20:
14251 /* This pinstrap supports multiple sizes, so force it
14252 * to read the actual size from location 0xf0.
14253 */
14254 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14255 break;
13840 } 14256 }
13841 } 14257 }
13842 14258
@@ -14289,14 +14705,18 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14289 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 14705 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14290 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 14706 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14291 14707
14292 if (tg3_flag(tp, PCI_EXPRESS) && 14708 if (tg3_flag(tp, PCI_EXPRESS)) {
14293 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14294 !tg3_flag(tp, 57765_PLUS)) {
14295 u32 cfg3; 14709 u32 cfg3;
14296 14710
14297 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 14711 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14298 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) 14712 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14713 !tg3_flag(tp, 57765_PLUS) &&
14714 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14299 tg3_flag_set(tp, ASPM_WORKAROUND); 14715 tg3_flag_set(tp, ASPM_WORKAROUND);
14716 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14717 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14718 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14719 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14300 } 14720 }
14301 14721
14302 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 14722 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
@@ -14450,6 +14870,12 @@ static int tg3_phy_probe(struct tg3 *tp)
14450 } 14870 }
14451 } 14871 }
14452 14872
14873 if (!tg3_flag(tp, ENABLE_ASF) &&
14874 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14875 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14876 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14877 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14878
14453 if (tg3_flag(tp, USE_PHYLIB)) 14879 if (tg3_flag(tp, USE_PHYLIB))
14454 return tg3_phy_init(tp); 14880 return tg3_phy_init(tp);
14455 14881
@@ -14515,6 +14941,7 @@ static int tg3_phy_probe(struct tg3 *tp)
14515 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 14941 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14516 (tg3_asic_rev(tp) == ASIC_REV_5719 || 14942 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14517 tg3_asic_rev(tp) == ASIC_REV_5720 || 14943 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14944 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14518 tg3_asic_rev(tp) == ASIC_REV_5762 || 14945 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14519 (tg3_asic_rev(tp) == ASIC_REV_5717 && 14946 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14520 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 14947 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -14524,7 +14951,8 @@ static int tg3_phy_probe(struct tg3 *tp)
14524 14951
14525 tg3_phy_init_link_config(tp); 14952 tg3_phy_init_link_config(tp);
14526 14953
14527 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 14954 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14955 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14528 !tg3_flag(tp, ENABLE_APE) && 14956 !tg3_flag(tp, ENABLE_APE) &&
14529 !tg3_flag(tp, ENABLE_ASF)) { 14957 !tg3_flag(tp, ENABLE_ASF)) {
14530 u32 bmsr, dummy; 14958 u32 bmsr, dummy;
@@ -15300,7 +15728,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15300 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15728 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15301 tg3_asic_rev(tp) != ASIC_REV_5701 && 15729 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15302 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 15730 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15303 tg3_flag_set(tp, TSO_BUG); 15731 tg3_flag_set(tp, FW_TSO);
15732 tg3_flag_set(tp, TSO_BUG);
15304 if (tg3_asic_rev(tp) == ASIC_REV_5705) 15733 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15305 tp->fw_needed = FIRMWARE_TG3TSO5; 15734 tp->fw_needed = FIRMWARE_TG3TSO5;
15306 else 15735 else
@@ -15311,7 +15740,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15311 if (tg3_flag(tp, HW_TSO_1) || 15740 if (tg3_flag(tp, HW_TSO_1) ||
15312 tg3_flag(tp, HW_TSO_2) || 15741 tg3_flag(tp, HW_TSO_2) ||
15313 tg3_flag(tp, HW_TSO_3) || 15742 tg3_flag(tp, HW_TSO_3) ||
15314 tp->fw_needed) { 15743 tg3_flag(tp, FW_TSO)) {
15315 /* For firmware TSO, assume ASF is disabled. 15744 /* For firmware TSO, assume ASF is disabled.
15316 * We'll disable TSO later if we discover ASF 15745 * We'll disable TSO later if we discover ASF
15317 * is enabled in tg3_get_eeprom_hw_cfg(). 15746 * is enabled in tg3_get_eeprom_hw_cfg().
@@ -15326,6 +15755,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15326 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 15755 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15327 tp->fw_needed = FIRMWARE_TG3; 15756 tp->fw_needed = FIRMWARE_TG3;
15328 15757
15758 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15759 tp->fw_needed = FIRMWARE_TG357766;
15760
15329 tp->irq_max = 1; 15761 tp->irq_max = 1;
15330 15762
15331 if (tg3_flag(tp, 5750_PLUS)) { 15763 if (tg3_flag(tp, 5750_PLUS)) {
@@ -15598,7 +16030,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15598 */ 16030 */
15599 tg3_get_eeprom_hw_cfg(tp); 16031 tg3_get_eeprom_hw_cfg(tp);
15600 16032
15601 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { 16033 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15602 tg3_flag_clear(tp, TSO_CAPABLE); 16034 tg3_flag_clear(tp, TSO_CAPABLE);
15603 tg3_flag_clear(tp, TSO_BUG); 16035 tg3_flag_clear(tp, TSO_BUG);
15604 tp->fw_needed = NULL; 16036 tp->fw_needed = NULL;
@@ -15786,6 +16218,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15786 udelay(50); 16218 udelay(50);
15787 tg3_nvram_init(tp); 16219 tg3_nvram_init(tp);
15788 16220
16221 /* If the device has an NVRAM, no need to load patch firmware */
16222 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16223 !tg3_flag(tp, NO_NVRAM))
16224 tp->fw_needed = NULL;
16225
15789 grc_misc_cfg = tr32(GRC_MISC_CFG); 16226 grc_misc_cfg = tr32(GRC_MISC_CFG);
15790 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 16227 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15791 16228
@@ -16144,7 +16581,7 @@ out:
16144} 16581}
16145 16582
16146static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, 16583static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16147 int size, int to_device) 16584 int size, bool to_device)
16148{ 16585{
16149 struct tg3_internal_buffer_desc test_desc; 16586 struct tg3_internal_buffer_desc test_desc;
16150 u32 sram_dma_descs; 16587 u32 sram_dma_descs;
@@ -16344,7 +16781,7 @@ static int tg3_test_dma(struct tg3 *tp)
16344 p[i] = i; 16781 p[i] = i;
16345 16782
16346 /* Send the buffer to the chip. */ 16783 /* Send the buffer to the chip. */
16347 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); 16784 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16348 if (ret) { 16785 if (ret) {
16349 dev_err(&tp->pdev->dev, 16786 dev_err(&tp->pdev->dev,
16350 "%s: Buffer write failed. err = %d\n", 16787 "%s: Buffer write failed. err = %d\n",
@@ -16367,7 +16804,7 @@ static int tg3_test_dma(struct tg3 *tp)
16367 } 16804 }
16368#endif 16805#endif
16369 /* Now read it back. */ 16806 /* Now read it back. */
16370 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); 16807 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16371 if (ret) { 16808 if (ret) {
16372 dev_err(&tp->pdev->dev, "%s: Buffer read failed. " 16809 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16373 "err = %d\n", __func__, ret); 16810 "err = %d\n", __func__, ret);
@@ -16763,7 +17200,7 @@ static int tg3_init_one(struct pci_dev *pdev,
16763 17200
16764 tg3_init_bufmgr_config(tp); 17201 tg3_init_bufmgr_config(tp);
16765 17202
16766 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 17203 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
16767 17204
16768 /* 5700 B0 chips do not support checksumming correctly due 17205 /* 5700 B0 chips do not support checksumming correctly due
16769 * to hardware bugs. 17206 * to hardware bugs.
@@ -17048,7 +17485,7 @@ static int tg3_suspend(struct device *device)
17048 tg3_full_lock(tp, 0); 17485 tg3_full_lock(tp, 0);
17049 17486
17050 tg3_flag_set(tp, INIT_COMPLETE); 17487 tg3_flag_set(tp, INIT_COMPLETE);
17051 err2 = tg3_restart_hw(tp, 1); 17488 err2 = tg3_restart_hw(tp, true);
17052 if (err2) 17489 if (err2)
17053 goto out; 17490 goto out;
17054 17491
@@ -17082,7 +17519,8 @@ static int tg3_resume(struct device *device)
17082 tg3_full_lock(tp, 0); 17519 tg3_full_lock(tp, 0);
17083 17520
17084 tg3_flag_set(tp, INIT_COMPLETE); 17521 tg3_flag_set(tp, INIT_COMPLETE);
17085 err = tg3_restart_hw(tp, 1); 17522 err = tg3_restart_hw(tp,
17523 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17086 if (err) 17524 if (err)
17087 goto out; 17525 goto out;
17088 17526
@@ -17098,15 +17536,9 @@ out:
17098 17536
17099 return err; 17537 return err;
17100} 17538}
17539#endif /* CONFIG_PM_SLEEP */
17101 17540
17102static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); 17541static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17103#define TG3_PM_OPS (&tg3_pm_ops)
17104
17105#else
17106
17107#define TG3_PM_OPS NULL
17108
17109#endif /* CONFIG_PM_SLEEP */
17110 17542
17111/** 17543/**
17112 * tg3_io_error_detected - called when PCI error is detected 17544 * tg3_io_error_detected - called when PCI error is detected
@@ -17221,7 +17653,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
17221 17653
17222 tg3_full_lock(tp, 0); 17654 tg3_full_lock(tp, 0);
17223 tg3_flag_set(tp, INIT_COMPLETE); 17655 tg3_flag_set(tp, INIT_COMPLETE);
17224 err = tg3_restart_hw(tp, 1); 17656 err = tg3_restart_hw(tp, true);
17225 if (err) { 17657 if (err) {
17226 tg3_full_unlock(tp); 17658 tg3_full_unlock(tp);
17227 netdev_err(netdev, "Cannot restart hardware after reset.\n"); 17659 netdev_err(netdev, "Cannot restart hardware after reset.\n");
@@ -17254,7 +17686,7 @@ static struct pci_driver tg3_driver = {
17254 .probe = tg3_init_one, 17686 .probe = tg3_init_one,
17255 .remove = tg3_remove_one, 17687 .remove = tg3_remove_one,
17256 .err_handler = &tg3_err_handler, 17688 .err_handler = &tg3_err_handler,
17257 .driver.pm = TG3_PM_OPS, 17689 .driver.pm = &tg3_pm_ops,
17258}; 17690};
17259 17691
17260static int __init tg3_init(void) 17692static int __init tg3_init(void)
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d6..9b2d3ac2474a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2198,6 +2198,8 @@
2198 2198
2199#define NIC_SRAM_DATA_CFG_3 0x00000d3c 2199#define NIC_SRAM_DATA_CFG_3 0x00000d3c
2200#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 2200#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
2201#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000
2202#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000
2201 2203
2202#define NIC_SRAM_DATA_CFG_4 0x00000d60 2204#define NIC_SRAM_DATA_CFG_4 0x00000d60
2203#define NIC_SRAM_GMII_MODE 0x00000002 2205#define NIC_SRAM_GMII_MODE 0x00000002
@@ -2222,6 +2224,12 @@
2222#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 2224#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
2223#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 2225#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
2224 2226
2227#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
2228#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
2229#define TG3_57766_FW_BASE_ADDR 0x00030000
2230#define TG3_57766_FW_HANDSHAKE 0x0003fccc
2231#define TG3_SBROM_IN_SERVICE_LOOP 0x51
2232
2225#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 2233#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
2226#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 2234#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
2227#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 2235#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -2365,6 +2373,13 @@
2365#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b 2373#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
2366#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 2374#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
2367 2375
2376/* Serdes PHY Register Definitions */
2377#define SERDES_TG3_1000X_STATUS 0x14
2378#define SERDES_TG3_SGMII_MODE 0x0001
2379#define SERDES_TG3_LINK_UP 0x0002
2380#define SERDES_TG3_FULL_DUPLEX 0x0004
2381#define SERDES_TG3_SPEED_100 0x0008
2382#define SERDES_TG3_SPEED_1000 0x0010
2368 2383
2369/* APE registers. Accessible through BAR1 */ 2384/* APE registers. Accessible through BAR1 */
2370#define TG3_APE_GPIO_MSG 0x0008 2385#define TG3_APE_GPIO_MSG 0x0008
@@ -3009,17 +3024,18 @@ enum TG3_FLAGS {
3009 TG3_FLAG_JUMBO_CAPABLE, 3024 TG3_FLAG_JUMBO_CAPABLE,
3010 TG3_FLAG_CHIP_RESETTING, 3025 TG3_FLAG_CHIP_RESETTING,
3011 TG3_FLAG_INIT_COMPLETE, 3026 TG3_FLAG_INIT_COMPLETE,
3012 TG3_FLAG_TSO_BUG,
3013 TG3_FLAG_MAX_RXPEND_64, 3027 TG3_FLAG_MAX_RXPEND_64,
3014 TG3_FLAG_TSO_CAPABLE,
3015 TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ 3028 TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
3016 TG3_FLAG_ASF_NEW_HANDSHAKE, 3029 TG3_FLAG_ASF_NEW_HANDSHAKE,
3017 TG3_FLAG_HW_AUTONEG, 3030 TG3_FLAG_HW_AUTONEG,
3018 TG3_FLAG_IS_NIC, 3031 TG3_FLAG_IS_NIC,
3019 TG3_FLAG_FLASH, 3032 TG3_FLAG_FLASH,
3033 TG3_FLAG_FW_TSO,
3020 TG3_FLAG_HW_TSO_1, 3034 TG3_FLAG_HW_TSO_1,
3021 TG3_FLAG_HW_TSO_2, 3035 TG3_FLAG_HW_TSO_2,
3022 TG3_FLAG_HW_TSO_3, 3036 TG3_FLAG_HW_TSO_3,
3037 TG3_FLAG_TSO_CAPABLE,
3038 TG3_FLAG_TSO_BUG,
3023 TG3_FLAG_ICH_WORKAROUND, 3039 TG3_FLAG_ICH_WORKAROUND,
3024 TG3_FLAG_1SHOT_MSI, 3040 TG3_FLAG_1SHOT_MSI,
3025 TG3_FLAG_NO_FWARE_REPORTED, 3041 TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3080,13 @@ enum TG3_FLAGS {
3064 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ 3080 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
3065}; 3081};
3066 3082
3083struct tg3_firmware_hdr {
3084 __be32 version; /* unused for fragments */
3085 __be32 base_addr;
3086 __be32 len;
3087};
3088#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
3089
3067struct tg3 { 3090struct tg3 {
3068 /* begin "general, frequently-used members" cacheline section */ 3091 /* begin "general, frequently-used members" cacheline section */
3069 3092
@@ -3267,6 +3290,7 @@ struct tg3 {
3267#define TG3_PHYFLG_IS_LOW_POWER 0x00000001 3290#define TG3_PHYFLG_IS_LOW_POWER 0x00000001
3268#define TG3_PHYFLG_IS_CONNECTED 0x00000002 3291#define TG3_PHYFLG_IS_CONNECTED 0x00000002
3269#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004 3292#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004
3293#define TG3_PHYFLG_USER_CONFIGURED 0x00000008
3270#define TG3_PHYFLG_PHY_SERDES 0x00000010 3294#define TG3_PHYFLG_PHY_SERDES 0x00000010
3271#define TG3_PHYFLG_MII_SERDES 0x00000020 3295#define TG3_PHYFLG_MII_SERDES 0x00000020
3272#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ 3296#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \
@@ -3284,6 +3308,8 @@ struct tg3 {
3284#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 3308#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
3285#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 3309#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
3286#define TG3_PHYFLG_EEE_CAP 0x00040000 3310#define TG3_PHYFLG_EEE_CAP 0x00040000
3311#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
3312#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
3287#define TG3_PHYFLG_MDIX_STATE 0x00200000 3313#define TG3_PHYFLG_MDIX_STATE 0x00200000
3288 3314
3289 u32 led_ctrl; 3315 u32 led_ctrl;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521b..f2b73ffa9122 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); 76static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); 77static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); 78static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 79static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
80 u32 boot_param); 80 u32 boot_param);
81static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 81static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, 82static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f20..ce4a030d3d0c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
610 rcb->rxq->rx_bytes += length; 610 rcb->rxq->rx_bytes += length;
611 611
612 if (flags & BNA_CQ_EF_VLAN) 612 if (flags & BNA_CQ_EF_VLAN)
613 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); 613 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
614 614
615 if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) 615 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
616 napi_gro_frags(&rx_ctrl->napi); 616 napi_gro_frags(&rx_ctrl->napi);
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
1264 mem_info->mdl[i].len = mem_info->len; 1264 mem_info->mdl[i].len = mem_info->len;
1265 mem_info->mdl[i].kva = 1265 mem_info->mdl[i].kva =
1266 dma_alloc_coherent(&bnad->pcidev->dev, 1266 dma_alloc_coherent(&bnad->pcidev->dev,
1267 mem_info->len, &dma_pa, 1267 mem_info->len, &dma_pa,
1268 GFP_KERNEL); 1268 GFP_KERNEL);
1269
1270 if (mem_info->mdl[i].kva == NULL) 1269 if (mem_info->mdl[i].kva == NULL)
1271 goto err_return; 1270 goto err_return;
1272 1271
@@ -3069,8 +3068,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
3069} 3068}
3070 3069
3071static int 3070static int
3072bnad_vlan_rx_add_vid(struct net_device *netdev, 3071bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3073 unsigned short vid)
3074{ 3072{
3075 struct bnad *bnad = netdev_priv(netdev); 3073 struct bnad *bnad = netdev_priv(netdev);
3076 unsigned long flags; 3074 unsigned long flags;
@@ -3091,8 +3089,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
3091} 3089}
3092 3090
3093static int 3091static int
3094bnad_vlan_rx_kill_vid(struct net_device *netdev, 3092bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3095 unsigned short vid)
3096{ 3093{
3097 struct bnad *bnad = netdev_priv(netdev); 3094 struct bnad *bnad = netdev_priv(netdev);
3098 unsigned long flags; 3095 unsigned long flags;
@@ -3171,14 +3168,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
3171 3168
3172 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 3169 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3173 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3170 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3174 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; 3171 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
3175 3172
3176 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | 3173 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3177 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3174 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3178 NETIF_F_TSO | NETIF_F_TSO6; 3175 NETIF_F_TSO | NETIF_F_TSO6;
3179 3176
3180 netdev->features |= netdev->hw_features | 3177 netdev->features |= netdev->hw_features |
3181 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3178 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3182 3179
3183 if (using_dac) 3180 if (using_dac)
3184 netdev->features |= NETIF_F_HIGHDMA; 3181 netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb46..cc9a185f0abb 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
47 int i; 47 int i;
48 48
49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
50 MAX_RX_DESCR * sizeof(struct macb_dma_desc), 50 (MAX_RX_DESCR *
51 &lp->rx_ring_dma, GFP_KERNEL); 51 sizeof(struct macb_dma_desc)),
52 if (!lp->rx_ring) { 52 &lp->rx_ring_dma, GFP_KERNEL);
53 netdev_err(dev, "unable to alloc rx ring DMA buffer\n"); 53 if (!lp->rx_ring)
54 return -ENOMEM; 54 return -ENOMEM;
55 }
56 55
57 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 56 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
58 MAX_RX_DESCR * MAX_RBUFF_SZ, 57 MAX_RX_DESCR * MAX_RBUFF_SZ,
59 &lp->rx_buffers_dma, GFP_KERNEL); 58 &lp->rx_buffers_dma, GFP_KERNEL);
60 if (!lp->rx_buffers) { 59 if (!lp->rx_buffers) {
61 netdev_err(dev, "unable to alloc rx data DMA buffer\n");
62
63 dma_free_coherent(&lp->pdev->dev, 60 dma_free_coherent(&lp->pdev->dev,
64 MAX_RX_DESCR * sizeof(struct macb_dma_desc), 61 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
65 lp->rx_ring, lp->rx_ring_dma); 62 lp->rx_ring, lp->rx_ring_dma);
66 lp->rx_ring = NULL; 63 lp->rx_ring = NULL;
67 return -ENOMEM; 64 return -ENOMEM;
68 } 65 }
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
209 netif_rx(skb); 206 netif_rx(skb);
210 } else { 207 } else {
211 lp->stats.rx_dropped++; 208 lp->stats.rx_dropped++;
212 netdev_notice(dev, "Memory squeeze, dropping packet.\n");
213 } 209 }
214 210
215 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 211 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -303,42 +299,7 @@ static const struct of_device_id at91ether_dt_ids[] = {
303 { .compatible = "cdns,emac" }, 299 { .compatible = "cdns,emac" },
304 { /* sentinel */ } 300 { /* sentinel */ }
305}; 301};
306
307MODULE_DEVICE_TABLE(of, at91ether_dt_ids); 302MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
308
309static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
310{
311 struct device_node *np = pdev->dev.of_node;
312
313 if (np)
314 return of_get_phy_mode(np);
315
316 return -ENODEV;
317}
318
319static int at91ether_get_hwaddr_dt(struct macb *bp)
320{
321 struct device_node *np = bp->pdev->dev.of_node;
322
323 if (np) {
324 const char *mac = of_get_mac_address(np);
325 if (mac) {
326 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
327 return 0;
328 }
329 }
330
331 return -ENODEV;
332}
333#else
334static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
335{
336 return -ENODEV;
337}
338static int at91ether_get_hwaddr_dt(struct macb *bp)
339{
340 return -ENODEV;
341}
342#endif 303#endif
343 304
344/* Detect MAC & PHY and perform ethernet interface initialization */ 305/* Detect MAC & PHY and perform ethernet interface initialization */
@@ -352,6 +313,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
352 struct macb *lp; 313 struct macb *lp;
353 int res; 314 int res;
354 u32 reg; 315 u32 reg;
316 const char *mac;
355 317
356 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 318 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
357 if (!regs) 319 if (!regs)
@@ -403,11 +365,13 @@ static int __init at91ether_probe(struct platform_device *pdev)
403 platform_set_drvdata(pdev, dev); 365 platform_set_drvdata(pdev, dev);
404 SET_NETDEV_DEV(dev, &pdev->dev); 366 SET_NETDEV_DEV(dev, &pdev->dev);
405 367
406 res = at91ether_get_hwaddr_dt(lp); 368 mac = of_get_mac_address(pdev->dev.of_node);
407 if (res < 0) 369 if (mac)
370 memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
371 else
408 macb_get_hwaddr(lp); 372 macb_get_hwaddr(lp);
409 373
410 res = at91ether_get_phy_mode_dt(pdev); 374 res = of_get_phy_mode(pdev->dev.of_node);
411 if (res < 0) { 375 if (res < 0) {
412 if (board_data && board_data->is_rmii) 376 if (board_data && board_data->is_rmii)
413 lp->phy_interface = PHY_INTERFACE_MODE_RMII; 377 lp->phy_interface = PHY_INTERFACE_MODE_RMII;
@@ -430,7 +394,8 @@ static int __init at91ether_probe(struct platform_device *pdev)
430 if (res) 394 if (res)
431 goto err_disable_clock; 395 goto err_disable_clock;
432 396
433 if (macb_mii_init(lp) != 0) 397 res = macb_mii_init(lp);
398 if (res)
434 goto err_out_unregister_netdev; 399 goto err_out_unregister_netdev;
435 400
436 /* will be enabled in open() */ 401 /* will be enabled in open() */
@@ -519,18 +484,7 @@ static struct platform_driver at91ether_driver = {
519 }, 484 },
520}; 485};
521 486
522static int __init at91ether_init(void) 487module_platform_driver_probe(at91ether_driver, at91ether_probe);
523{
524 return platform_driver_probe(&at91ether_driver, at91ether_probe);
525}
526
527static void __exit at91ether_exit(void)
528{
529 platform_driver_unregister(&at91ether_driver);
530}
531
532module_init(at91ether_init)
533module_exit(at91ether_exit)
534 488
535MODULE_LICENSE("GPL"); 489MODULE_LICENSE("GPL");
536MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); 490MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfdc..6be513deb17f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
485 status = macb_readl(bp, TSR); 485 status = macb_readl(bp, TSR);
486 macb_writel(bp, TSR, status); 486 macb_writel(bp, TSR, status);
487 487
488 macb_writel(bp, ISR, MACB_BIT(TCOMP));
489
488 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 490 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
489 (unsigned long)status); 491 (unsigned long)status);
490 492
@@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
736 * now. 738 * now.
737 */ 739 */
738 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 740 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
741 macb_writel(bp, ISR, MACB_BIT(RCOMP));
739 742
740 if (napi_schedule_prep(&bp->napi)) { 743 if (napi_schedule_prep(&bp->napi)) {
741 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 744 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp)
1054 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); 1057 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
1055 dmacfg |= GEM_BF(FBLDO, 16); 1058 dmacfg |= GEM_BF(FBLDO, 16);
1056 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1059 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1060 dmacfg &= ~GEM_BIT(ENDIA);
1057 gem_writel(bp, DMACFG, dmacfg); 1061 gem_writel(bp, DMACFG, dmacfg);
1058 } 1062 }
1059} 1063}
@@ -1472,41 +1476,7 @@ static const struct of_device_id macb_dt_ids[] = {
1472 { .compatible = "cdns,gem" }, 1476 { .compatible = "cdns,gem" },
1473 { /* sentinel */ } 1477 { /* sentinel */ }
1474}; 1478};
1475
1476MODULE_DEVICE_TABLE(of, macb_dt_ids); 1479MODULE_DEVICE_TABLE(of, macb_dt_ids);
1477
1478static int macb_get_phy_mode_dt(struct platform_device *pdev)
1479{
1480 struct device_node *np = pdev->dev.of_node;
1481
1482 if (np)
1483 return of_get_phy_mode(np);
1484
1485 return -ENODEV;
1486}
1487
1488static int macb_get_hwaddr_dt(struct macb *bp)
1489{
1490 struct device_node *np = bp->pdev->dev.of_node;
1491 if (np) {
1492 const char *mac = of_get_mac_address(np);
1493 if (mac) {
1494 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
1495 return 0;
1496 }
1497 }
1498
1499 return -ENODEV;
1500}
1501#else
1502static int macb_get_phy_mode_dt(struct platform_device *pdev)
1503{
1504 return -ENODEV;
1505}
1506static int macb_get_hwaddr_dt(struct macb *bp)
1507{
1508 return -ENODEV;
1509}
1510#endif 1480#endif
1511 1481
1512static int __init macb_probe(struct platform_device *pdev) 1482static int __init macb_probe(struct platform_device *pdev)
@@ -1519,6 +1489,7 @@ static int __init macb_probe(struct platform_device *pdev)
1519 u32 config; 1489 u32 config;
1520 int err = -ENXIO; 1490 int err = -ENXIO;
1521 struct pinctrl *pinctrl; 1491 struct pinctrl *pinctrl;
1492 const char *mac;
1522 1493
1523 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1494 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1524 if (!regs) { 1495 if (!regs) {
@@ -1557,14 +1528,14 @@ static int __init macb_probe(struct platform_device *pdev)
1557 dev_err(&pdev->dev, "failed to get macb_clk\n"); 1528 dev_err(&pdev->dev, "failed to get macb_clk\n");
1558 goto err_out_free_dev; 1529 goto err_out_free_dev;
1559 } 1530 }
1560 clk_enable(bp->pclk); 1531 clk_prepare_enable(bp->pclk);
1561 1532
1562 bp->hclk = clk_get(&pdev->dev, "hclk"); 1533 bp->hclk = clk_get(&pdev->dev, "hclk");
1563 if (IS_ERR(bp->hclk)) { 1534 if (IS_ERR(bp->hclk)) {
1564 dev_err(&pdev->dev, "failed to get hclk\n"); 1535 dev_err(&pdev->dev, "failed to get hclk\n");
1565 goto err_out_put_pclk; 1536 goto err_out_put_pclk;
1566 } 1537 }
1567 clk_enable(bp->hclk); 1538 clk_prepare_enable(bp->hclk);
1568 1539
1569 bp->regs = ioremap(regs->start, resource_size(regs)); 1540 bp->regs = ioremap(regs->start, resource_size(regs));
1570 if (!bp->regs) { 1541 if (!bp->regs) {
@@ -1592,11 +1563,13 @@ static int __init macb_probe(struct platform_device *pdev)
1592 config |= macb_dbw(bp); 1563 config |= macb_dbw(bp);
1593 macb_writel(bp, NCFGR, config); 1564 macb_writel(bp, NCFGR, config);
1594 1565
1595 err = macb_get_hwaddr_dt(bp); 1566 mac = of_get_mac_address(pdev->dev.of_node);
1596 if (err < 0) 1567 if (mac)
1568 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
1569 else
1597 macb_get_hwaddr(bp); 1570 macb_get_hwaddr(bp);
1598 1571
1599 err = macb_get_phy_mode_dt(pdev); 1572 err = of_get_phy_mode(pdev->dev.of_node);
1600 if (err < 0) { 1573 if (err < 0) {
1601 pdata = pdev->dev.platform_data; 1574 pdata = pdev->dev.platform_data;
1602 if (pdata && pdata->is_rmii) 1575 if (pdata && pdata->is_rmii)
@@ -1629,9 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
1629 goto err_out_free_irq; 1602 goto err_out_free_irq;
1630 } 1603 }
1631 1604
1632 if (macb_mii_init(bp) != 0) { 1605 err = macb_mii_init(bp);
1606 if (err)
1633 goto err_out_unregister_netdev; 1607 goto err_out_unregister_netdev;
1634 }
1635 1608
1636 platform_set_drvdata(pdev, dev); 1609 platform_set_drvdata(pdev, dev);
1637 1610
@@ -1654,9 +1627,9 @@ err_out_free_irq:
1654err_out_iounmap: 1627err_out_iounmap:
1655 iounmap(bp->regs); 1628 iounmap(bp->regs);
1656err_out_disable_clocks: 1629err_out_disable_clocks:
1657 clk_disable(bp->hclk); 1630 clk_disable_unprepare(bp->hclk);
1658 clk_put(bp->hclk); 1631 clk_put(bp->hclk);
1659 clk_disable(bp->pclk); 1632 clk_disable_unprepare(bp->pclk);
1660err_out_put_pclk: 1633err_out_put_pclk:
1661 clk_put(bp->pclk); 1634 clk_put(bp->pclk);
1662err_out_free_dev: 1635err_out_free_dev:
@@ -1683,9 +1656,9 @@ static int __exit macb_remove(struct platform_device *pdev)
1683 unregister_netdev(dev); 1656 unregister_netdev(dev);
1684 free_irq(dev->irq, dev); 1657 free_irq(dev->irq, dev);
1685 iounmap(bp->regs); 1658 iounmap(bp->regs);
1686 clk_disable(bp->hclk); 1659 clk_disable_unprepare(bp->hclk);
1687 clk_put(bp->hclk); 1660 clk_put(bp->hclk);
1688 clk_disable(bp->pclk); 1661 clk_disable_unprepare(bp->pclk);
1689 clk_put(bp->pclk); 1662 clk_put(bp->pclk);
1690 free_netdev(dev); 1663 free_netdev(dev);
1691 platform_set_drvdata(pdev, NULL); 1664 platform_set_drvdata(pdev, NULL);
@@ -1703,8 +1676,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1703 netif_carrier_off(netdev); 1676 netif_carrier_off(netdev);
1704 netif_device_detach(netdev); 1677 netif_device_detach(netdev);
1705 1678
1706 clk_disable(bp->hclk); 1679 clk_disable_unprepare(bp->hclk);
1707 clk_disable(bp->pclk); 1680 clk_disable_unprepare(bp->pclk);
1708 1681
1709 return 0; 1682 return 0;
1710} 1683}
@@ -1714,8 +1687,8 @@ static int macb_resume(struct platform_device *pdev)
1714 struct net_device *netdev = platform_get_drvdata(pdev); 1687 struct net_device *netdev = platform_get_drvdata(pdev);
1715 struct macb *bp = netdev_priv(netdev); 1688 struct macb *bp = netdev_priv(netdev);
1716 1689
1717 clk_enable(bp->pclk); 1690 clk_prepare_enable(bp->pclk);
1718 clk_enable(bp->hclk); 1691 clk_prepare_enable(bp->hclk);
1719 1692
1720 netif_device_attach(netdev); 1693 netif_device_attach(netdev);
1721 1694
@@ -1737,18 +1710,7 @@ static struct platform_driver macb_driver = {
1737 }, 1710 },
1738}; 1711};
1739 1712
1740static int __init macb_init(void) 1713module_platform_driver_probe(macb_driver, macb_probe);
1741{
1742 return platform_driver_probe(&macb_driver, macb_probe);
1743}
1744
1745static void __exit macb_exit(void)
1746{
1747 platform_driver_unregister(&macb_driver);
1748}
1749
1750module_init(macb_init);
1751module_exit(macb_exit);
1752 1714
1753MODULE_LICENSE("GPL"); 1715MODULE_LICENSE("GPL");
1754MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 1716MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 570908b93578..993d70380688 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -173,6 +173,8 @@
173/* Bitfields in DMACFG. */ 173/* Bitfields in DMACFG. */
174#define GEM_FBLDO_OFFSET 0 174#define GEM_FBLDO_OFFSET 0
175#define GEM_FBLDO_SIZE 5 175#define GEM_FBLDO_SIZE 5
176#define GEM_ENDIA_OFFSET 7
177#define GEM_ENDIA_SIZE 1
176#define GEM_RXBMS_OFFSET 8 178#define GEM_RXBMS_OFFSET 8
177#define GEM_RXBMS_SIZE 2 179#define GEM_RXBMS_SIZE 2
178#define GEM_TXPBMS_OFFSET 10 180#define GEM_TXPBMS_OFFSET 10
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b0ebc9f6d55e..4a1f2fa812ab 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1482,7 +1482,7 @@ static int xgmac_set_features(struct net_device *dev, netdev_features_t features
1482 u32 ctrl; 1482 u32 ctrl;
1483 struct xgmac_priv *priv = netdev_priv(dev); 1483 struct xgmac_priv *priv = netdev_priv(dev);
1484 void __iomem *ioaddr = priv->base; 1484 void __iomem *ioaddr = priv->base;
1485 u32 changed = dev->features ^ features; 1485 netdev_features_t changed = dev->features ^ features;
1486 1486
1487 if (!(changed & NETIF_F_RXCSUM)) 1487 if (!(changed & NETIF_F_RXCSUM))
1488 return 0; 1488 return 0;
@@ -1886,12 +1886,9 @@ static int xgmac_resume(struct device *dev)
1886 1886
1887 return 0; 1887 return 0;
1888} 1888}
1889#endif /* CONFIG_PM_SLEEP */
1889 1890
1890static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); 1891static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
1891#define XGMAC_PM_OPS (&xgmac_pm_ops)
1892#else
1893#define XGMAC_PM_OPS NULL
1894#endif /* CONFIG_PM_SLEEP */
1895 1892
1896static const struct of_device_id xgmac_of_match[] = { 1893static const struct of_device_id xgmac_of_match[] = {
1897 { .compatible = "calxeda,hb-xgmac", }, 1894 { .compatible = "calxeda,hb-xgmac", },
@@ -1906,7 +1903,7 @@ static struct platform_driver xgmac_driver = {
1906 }, 1903 },
1907 .probe = xgmac_probe, 1904 .probe = xgmac_probe,
1908 .remove = xgmac_remove, 1905 .remove = xgmac_remove,
1909 .driver.pm = XGMAC_PM_OPS, 1906 .driver.pm = &xgmac_pm_ops,
1910}; 1907};
1911 1908
1912module_platform_driver(xgmac_driver); 1909module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 20d2085f61c5..9624cfe7df57 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -856,10 +856,10 @@ static netdev_features_t t1_fix_features(struct net_device *dev,
856 * Since there is no support for separate rx/tx vlan accel 856 * Since there is no support for separate rx/tx vlan accel
857 * enable/disable make sure tx flag is always in same state as rx. 857 * enable/disable make sure tx flag is always in same state as rx.
858 */ 858 */
859 if (features & NETIF_F_HW_VLAN_RX) 859 if (features & NETIF_F_HW_VLAN_CTAG_RX)
860 features |= NETIF_F_HW_VLAN_TX; 860 features |= NETIF_F_HW_VLAN_CTAG_TX;
861 else 861 else
862 features &= ~NETIF_F_HW_VLAN_TX; 862 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
863 863
864 return features; 864 return features;
865} 865}
@@ -869,7 +869,7 @@ static int t1_set_features(struct net_device *dev, netdev_features_t features)
869 netdev_features_t changed = dev->features ^ features; 869 netdev_features_t changed = dev->features ^ features;
870 struct adapter *adapter = dev->ml_priv; 870 struct adapter *adapter = dev->ml_priv;
871 871
872 if (changed & NETIF_F_HW_VLAN_RX) 872 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
873 t1_vlan_mode(adapter, features); 873 t1_vlan_mode(adapter, features);
874 874
875 return 0; 875 return 0;
@@ -1085,8 +1085,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1085 netdev->features |= NETIF_F_HIGHDMA; 1085 netdev->features |= NETIF_F_HIGHDMA;
1086 if (vlan_tso_capable(adapter)) { 1086 if (vlan_tso_capable(adapter)) {
1087 netdev->features |= 1087 netdev->features |=
1088 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1088 NETIF_F_HW_VLAN_CTAG_TX |
1089 netdev->hw_features |= NETIF_F_HW_VLAN_RX; 1089 NETIF_F_HW_VLAN_CTAG_RX;
1090 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1090 1091
1091 /* T204: disable TSO */ 1092 /* T204: disable TSO */
1092 if (!(is_T2(adapter)) || bi->port_number != 4) { 1093 if (!(is_T2(adapter)) || bi->port_number != 4) {
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 482976925154..8061fb0ef7ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -734,7 +734,7 @@ void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
734{ 734{
735 struct sge *sge = adapter->sge; 735 struct sge *sge = adapter->sge;
736 736
737 if (features & NETIF_F_HW_VLAN_RX) 737 if (features & NETIF_F_HW_VLAN_CTAG_RX)
738 sge->sge_control |= F_VLAN_XTRACT; 738 sge->sge_control |= F_VLAN_XTRACT;
739 else 739 else
740 sge->sge_control &= ~F_VLAN_XTRACT; 740 sge->sge_control &= ~F_VLAN_XTRACT;
@@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
835 struct sk_buff *skb; 835 struct sk_buff *skb;
836 dma_addr_t mapping; 836 dma_addr_t mapping;
837 837
838 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); 838 skb = dev_alloc_skb(q->rx_buffer_size);
839 if (!skb) 839 if (!skb)
840 break; 840 break;
841 841
@@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1046 const struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1046 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1047 1047
1048 if (len < copybreak) { 1048 if (len < copybreak) {
1049 skb = alloc_skb(len + 2, GFP_ATOMIC); 1049 skb = netdev_alloc_skb_ip_align(NULL, len);
1050 if (!skb) 1050 if (!skb)
1051 goto use_orig_buf; 1051 goto use_orig_buf;
1052 1052
1053 skb_reserve(skb, 2); /* align IP header */
1054 skb_put(skb, len); 1053 skb_put(skb, len);
1055 pci_dma_sync_single_for_cpu(pdev, 1054 pci_dma_sync_single_for_cpu(pdev,
1056 dma_unmap_addr(ce, dma_addr), 1055 dma_unmap_addr(ce, dma_addr),
@@ -1387,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1387 1386
1388 if (p->vlan_valid) { 1387 if (p->vlan_valid) {
1389 st->vlan_xtract++; 1388 st->vlan_xtract++;
1390 __vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); 1389 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
1391 } 1390 }
1392 netif_receive_skb(skb); 1391 netif_receive_skb(skb);
1393} 1392}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2b5e62193cea..71497e835f42 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1181,14 +1181,15 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1181 1181
1182 if (adapter->params.rev > 0) { 1182 if (adapter->params.rev > 0) {
1183 t3_set_vlan_accel(adapter, 1 << pi->port_id, 1183 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1184 features & NETIF_F_HW_VLAN_RX); 1184 features & NETIF_F_HW_VLAN_CTAG_RX);
1185 } else { 1185 } else {
1186 /* single control for all ports */ 1186 /* single control for all ports */
1187 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; 1187 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1188 1188
1189 for_each_port(adapter, i) 1189 for_each_port(adapter, i)
1190 have_vlans |= 1190 have_vlans |=
1191 adapter->port[i]->features & NETIF_F_HW_VLAN_RX; 1191 adapter->port[i]->features &
1192 NETIF_F_HW_VLAN_CTAG_RX;
1192 1193
1193 t3_set_vlan_accel(adapter, 1, have_vlans); 1194 t3_set_vlan_accel(adapter, 1, have_vlans);
1194 } 1195 }
@@ -2563,10 +2564,10 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
2563 * Since there is no support for separate rx/tx vlan accel 2564 * Since there is no support for separate rx/tx vlan accel
2564 * enable/disable make sure tx flag is always in same state as rx. 2565 * enable/disable make sure tx flag is always in same state as rx.
2565 */ 2566 */
2566 if (features & NETIF_F_HW_VLAN_RX) 2567 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2567 features |= NETIF_F_HW_VLAN_TX; 2568 features |= NETIF_F_HW_VLAN_CTAG_TX;
2568 else 2569 else
2569 features &= ~NETIF_F_HW_VLAN_TX; 2570 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2570 2571
2571 return features; 2572 return features;
2572} 2573}
@@ -2575,7 +2576,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2575{ 2576{
2576 netdev_features_t changed = dev->features ^ features; 2577 netdev_features_t changed = dev->features ^ features;
2577 2578
2578 if (changed & NETIF_F_HW_VLAN_RX) 2579 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2579 cxgb_vlan_mode(dev, features); 2580 cxgb_vlan_mode(dev, features);
2580 2581
2581 return 0; 2582 return 0;
@@ -3288,8 +3289,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3288 netdev->mem_start = mmio_start; 3289 netdev->mem_start = mmio_start;
3289 netdev->mem_end = mmio_start + mmio_len - 1; 3290 netdev->mem_end = mmio_start + mmio_len - 1;
3290 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 3291 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3291 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; 3292 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3292 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; 3293 netdev->features |= netdev->hw_features |
3294 NETIF_F_HW_VLAN_CTAG_TX;
3293 netdev->vlan_features |= netdev->features & VLAN_FEAT; 3295 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3294 if (pci_using_dac) 3296 if (pci_using_dac)
3295 netdev->features |= NETIF_F_HIGHDMA; 3297 netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 4232767862b5..0c96e5fe99cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
185 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) { 185 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
186 rcu_read_lock(); 186 rcu_read_lock();
187 if (vlan && vlan != VLAN_VID_MASK) { 187 if (vlan && vlan != VLAN_VID_MASK) {
188 dev = __vlan_find_dev_deep(dev, vlan); 188 dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
189 } else if (netif_is_bond_slave(dev)) { 189 } else if (netif_is_bond_slave(dev)) {
190 struct net_device *upper_dev; 190 struct net_device *upper_dev;
191 191
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 9d67eb794c4b..f12e6b85a653 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2030 2030
2031 if (p->vlan_valid) { 2031 if (p->vlan_valid) {
2032 qs->port_stats[SGE_PSTAT_VLANEX]++; 2032 qs->port_stats[SGE_PSTAT_VLANEX]++;
2033 __vlan_hwaccel_put_tag(skb, ntohs(p->vlan)); 2033 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2034 } 2034 }
2035 if (rq->polling) { 2035 if (rq->polling) {
2036 if (lro) 2036 if (lro)
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2132 2132
2133 if (cpl->vlan_valid) { 2133 if (cpl->vlan_valid) {
2134 qs->port_stats[SGE_PSTAT_VLANEX]++; 2134 qs->port_stats[SGE_PSTAT_VLANEX]++;
2135 __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan)); 2135 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2136 } 2136 }
2137 napi_gro_frags(&qs->napi); 2137 napi_gro_frags(&qs->napi);
2138} 2138}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5f..681804b30a3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
54#define FW_VERSION_MINOR 1 54#define FW_VERSION_MINOR 1
55#define FW_VERSION_MICRO 0 55#define FW_VERSION_MICRO 0
56 56
57#define FW_VERSION_MAJOR_T5 0
58#define FW_VERSION_MINOR_T5 0
59#define FW_VERSION_MICRO_T5 0
60
57#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 61#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
58 62
59enum { 63enum {
@@ -66,7 +70,9 @@ enum {
66enum { 70enum {
67 MEM_EDC0, 71 MEM_EDC0,
68 MEM_EDC1, 72 MEM_EDC1,
69 MEM_MC 73 MEM_MC,
74 MEM_MC0 = MEM_MC,
75 MEM_MC1
70}; 76};
71 77
72enum { 78enum {
@@ -74,8 +80,10 @@ enum {
74 MEMWIN0_BASE = 0x1b800, 80 MEMWIN0_BASE = 0x1b800,
75 MEMWIN1_APERTURE = 32768, 81 MEMWIN1_APERTURE = 32768,
76 MEMWIN1_BASE = 0x28000, 82 MEMWIN1_BASE = 0x28000,
83 MEMWIN1_BASE_T5 = 0x52000,
77 MEMWIN2_APERTURE = 65536, 84 MEMWIN2_APERTURE = 65536,
78 MEMWIN2_BASE = 0x30000, 85 MEMWIN2_BASE = 0x30000,
86 MEMWIN2_BASE_T5 = 0x54000,
79}; 87};
80 88
81enum dev_master { 89enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
431 spinlock_t db_lock; 439 spinlock_t db_lock;
432 int db_disabled; 440 int db_disabled;
433 unsigned short db_pidx; 441 unsigned short db_pidx;
442 u64 udb;
434}; 443};
435 444
436struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ 445struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
504 513
505struct l2t_data; 514struct l2t_data;
506 515
516#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
517#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
518#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
519
520#define CHELSIO_T4 0x4
521#define CHELSIO_T5 0x5
522
523enum chip_type {
524 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
525 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
526 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
527 T4_FIRST_REV = T4_A1,
528 T4_LAST_REV = T4_A3,
529
530 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
531 T5_FIRST_REV = T5_A1,
532 T5_LAST_REV = T5_A1,
533};
534
535#ifdef CONFIG_PCI_IOV
536
537/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
538 * Configuration initialization for T5 only has SR-IOV functionality enabled
539 * on PF0-3 in order to simplify everything.
540 */
541#define NUM_OF_PF_WITH_SRIOV 4
542
543#endif
544
507struct adapter { 545struct adapter {
508 void __iomem *regs; 546 void __iomem *regs;
547 void __iomem *bar2;
509 struct pci_dev *pdev; 548 struct pci_dev *pdev;
510 struct device *pdev_dev; 549 struct device *pdev_dev;
511 unsigned int mbox; 550 unsigned int mbox;
512 unsigned int fn; 551 unsigned int fn;
513 unsigned int flags; 552 unsigned int flags;
553 enum chip_type chip;
514 554
515 int msg_enable; 555 int msg_enable;
516 556
@@ -673,6 +713,16 @@ enum {
673 VLAN_REWRITE 713 VLAN_REWRITE
674}; 714};
675 715
716static inline int is_t5(enum chip_type chip)
717{
718 return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
719}
720
721static inline int is_t4(enum chip_type chip)
722{
723 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
724}
725
676static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) 726static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
677{ 727{
678 return readl(adap->regs + reg_addr); 728 return readl(adap->regs + reg_addr);
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
858 int start, int n, const u16 *rspq, unsigned int nrspq); 908 int start, int n, const u16 *rspq, unsigned int nrspq);
859int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 909int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
860 unsigned int flags); 910 unsigned int flags);
861int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); 911int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
912 u64 *parity);
862int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 913int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
863 u64 *parity); 914 u64 *parity);
864 915
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd81..c59ec3ddaa66 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
68#include "t4fw_api.h" 68#include "t4fw_api.h"
69#include "l2t.h" 69#include "l2t.h"
70 70
71#define DRV_VERSION "1.3.0-ko" 71#define DRV_VERSION "2.0.0-ko"
72#define DRV_DESC "Chelsio T4 Network Driver" 72#define DRV_DESC "Chelsio T4/T5 Network Driver"
73 73
74/* 74/*
75 * Max interrupt hold-off timer value in us. Queues fall back to this value 75 * Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
229 CH_DEVICE(0x440a, 4), 229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4), 230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4), 231 CH_DEVICE(0x440e, 4),
232 CH_DEVICE(0x5001, 4),
233 CH_DEVICE(0x5002, 4),
234 CH_DEVICE(0x5003, 4),
235 CH_DEVICE(0x5004, 4),
236 CH_DEVICE(0x5005, 4),
237 CH_DEVICE(0x5006, 4),
238 CH_DEVICE(0x5007, 4),
239 CH_DEVICE(0x5008, 4),
240 CH_DEVICE(0x5009, 4),
241 CH_DEVICE(0x500A, 4),
242 CH_DEVICE(0x500B, 4),
243 CH_DEVICE(0x500C, 4),
244 CH_DEVICE(0x500D, 4),
245 CH_DEVICE(0x500E, 4),
246 CH_DEVICE(0x500F, 4),
247 CH_DEVICE(0x5010, 4),
248 CH_DEVICE(0x5011, 4),
249 CH_DEVICE(0x5012, 4),
250 CH_DEVICE(0x5013, 4),
251 CH_DEVICE(0x5401, 4),
252 CH_DEVICE(0x5402, 4),
253 CH_DEVICE(0x5403, 4),
254 CH_DEVICE(0x5404, 4),
255 CH_DEVICE(0x5405, 4),
256 CH_DEVICE(0x5406, 4),
257 CH_DEVICE(0x5407, 4),
258 CH_DEVICE(0x5408, 4),
259 CH_DEVICE(0x5409, 4),
260 CH_DEVICE(0x540A, 4),
261 CH_DEVICE(0x540B, 4),
262 CH_DEVICE(0x540C, 4),
263 CH_DEVICE(0x540D, 4),
264 CH_DEVICE(0x540E, 4),
265 CH_DEVICE(0x540F, 4),
266 CH_DEVICE(0x5410, 4),
267 CH_DEVICE(0x5411, 4),
268 CH_DEVICE(0x5412, 4),
269 CH_DEVICE(0x5413, 4),
232 { 0, } 270 { 0, }
233}; 271};
234 272
235#define FW_FNAME "cxgb4/t4fw.bin" 273#define FW_FNAME "cxgb4/t4fw.bin"
274#define FW5_FNAME "cxgb4/t5fw.bin"
236#define FW_CFNAME "cxgb4/t4-config.txt" 275#define FW_CFNAME "cxgb4/t4-config.txt"
276#define FW5_CFNAME "cxgb4/t5-config.txt"
237 277
238MODULE_DESCRIPTION(DRV_DESC); 278MODULE_DESCRIPTION(DRV_DESC);
239MODULE_AUTHOR("Chelsio Communications"); 279MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
241MODULE_VERSION(DRV_VERSION); 281MODULE_VERSION(DRV_VERSION);
242MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 282MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243MODULE_FIRMWARE(FW_FNAME); 283MODULE_FIRMWARE(FW_FNAME);
284MODULE_FIRMWARE(FW5_FNAME);
244 285
245/* 286/*
246 * Normally we're willing to become the firmware's Master PF but will be happy 287 * Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
319module_param(vf_acls, bool, 0644); 360module_param(vf_acls, bool, 0644);
320MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); 361MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
321 362
322static unsigned int num_vf[4]; 363/* Configure the number of PCI-E Virtual Function which are to be instantiated
364 * on SR-IOV Capable Physical Functions.
365 */
366static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
323 367
324module_param_array(num_vf, uint, NULL, 0644); 368module_param_array(num_vf, uint, NULL, 0644);
325MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 369MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
@@ -515,7 +559,7 @@ static int link_start(struct net_device *dev)
515 * that step explicitly. 559 * that step explicitly.
516 */ 560 */
517 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, 561 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
518 !!(dev->features & NETIF_F_HW_VLAN_RX), true); 562 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
519 if (ret == 0) { 563 if (ret == 0) {
520 ret = t4_change_mac(pi->adapter, mb, pi->viid, 564 ret = t4_change_mac(pi->adapter, mb, pi->viid,
521 pi->xact_addr_filt, dev->dev_addr, true, 565 pi->xact_addr_filt, dev->dev_addr, true,
@@ -601,6 +645,21 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
601 u8 opcode = ((const struct rss_header *)rsp)->opcode; 645 u8 opcode = ((const struct rss_header *)rsp)->opcode;
602 646
603 rsp++; /* skip RSS header */ 647 rsp++; /* skip RSS header */
648
649 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
650 */
651 if (unlikely(opcode == CPL_FW4_MSG &&
652 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
653 rsp++;
654 opcode = ((const struct rss_header *)rsp)->opcode;
655 rsp++;
656 if (opcode != CPL_SGE_EGR_UPDATE) {
657 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
658 , opcode);
659 goto out;
660 }
661 }
662
604 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 663 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
605 const struct cpl_sge_egr_update *p = (void *)rsp; 664 const struct cpl_sge_egr_update *p = (void *)rsp;
606 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 665 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
@@ -635,6 +694,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
635 } else 694 } else
636 dev_err(q->adap->pdev_dev, 695 dev_err(q->adap->pdev_dev,
637 "unexpected CPL %#x on FW event queue\n", opcode); 696 "unexpected CPL %#x on FW event queue\n", opcode);
697out:
638 return 0; 698 return 0;
639} 699}
640 700
@@ -652,6 +712,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
652{ 712{
653 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); 713 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
654 714
715 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
716 */
717 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
718 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
719 rsp += 2;
720
655 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { 721 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
656 rxq->stats.nomem++; 722 rxq->stats.nomem++;
657 return -1; 723 return -1;
@@ -1002,21 +1068,36 @@ freeout: t4_free_sge_resources(adap);
1002static int upgrade_fw(struct adapter *adap) 1068static int upgrade_fw(struct adapter *adap)
1003{ 1069{
1004 int ret; 1070 int ret;
1005 u32 vers; 1071 u32 vers, exp_major;
1006 const struct fw_hdr *hdr; 1072 const struct fw_hdr *hdr;
1007 const struct firmware *fw; 1073 const struct firmware *fw;
1008 struct device *dev = adap->pdev_dev; 1074 struct device *dev = adap->pdev_dev;
1075 char *fw_file_name;
1009 1076
1010 ret = request_firmware(&fw, FW_FNAME, dev); 1077 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1078 case CHELSIO_T4:
1079 fw_file_name = FW_FNAME;
1080 exp_major = FW_VERSION_MAJOR;
1081 break;
1082 case CHELSIO_T5:
1083 fw_file_name = FW5_FNAME;
1084 exp_major = FW_VERSION_MAJOR_T5;
1085 break;
1086 default:
1087 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1088 return -EINVAL;
1089 }
1090
1091 ret = request_firmware(&fw, fw_file_name, dev);
1011 if (ret < 0) { 1092 if (ret < 0) {
1012 dev_err(dev, "unable to load firmware image " FW_FNAME 1093 dev_err(dev, "unable to load firmware image %s, error %d\n",
1013 ", error %d\n", ret); 1094 fw_file_name, ret);
1014 return ret; 1095 return ret;
1015 } 1096 }
1016 1097
1017 hdr = (const struct fw_hdr *)fw->data; 1098 hdr = (const struct fw_hdr *)fw->data;
1018 vers = ntohl(hdr->fw_ver); 1099 vers = ntohl(hdr->fw_ver);
1019 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { 1100 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1020 ret = -EINVAL; /* wrong major version, won't do */ 1101 ret = -EINVAL; /* wrong major version, won't do */
1021 goto out; 1102 goto out;
1022 } 1103 }
@@ -1024,18 +1105,15 @@ static int upgrade_fw(struct adapter *adap)
1024 /* 1105 /*
1025 * If the flash FW is unusable or we found something newer, load it. 1106 * If the flash FW is unusable or we found something newer, load it.
1026 */ 1107 */
1027 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || 1108 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1028 vers > adap->params.fw_vers) { 1109 vers > adap->params.fw_vers) {
1029 dev_info(dev, "upgrading firmware ...\n"); 1110 dev_info(dev, "upgrading firmware ...\n");
1030 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 1111 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1031 /*force=*/false); 1112 /*force=*/false);
1032 if (!ret) 1113 if (!ret)
1033 dev_info(dev, "firmware successfully upgraded to " 1114 dev_info(dev,
1034 FW_FNAME " (%d.%d.%d.%d)\n", 1115 "firmware upgraded to version %pI4 from %s\n",
1035 FW_HDR_FW_VER_MAJOR_GET(vers), 1116 &hdr->fw_ver, fw_file_name);
1036 FW_HDR_FW_VER_MINOR_GET(vers),
1037 FW_HDR_FW_VER_MICRO_GET(vers),
1038 FW_HDR_FW_VER_BUILD_GET(vers));
1039 else 1117 else
1040 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); 1118 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1041 } else { 1119 } else {
@@ -1308,6 +1386,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
1308 "VLANinsertions ", 1386 "VLANinsertions ",
1309 "GROpackets ", 1387 "GROpackets ",
1310 "GROmerged ", 1388 "GROmerged ",
1389 "WriteCoalSuccess ",
1390 "WriteCoalFail ",
1311}; 1391};
1312 1392
1313static int get_sset_count(struct net_device *dev, int sset) 1393static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1401,15 @@ static int get_sset_count(struct net_device *dev, int sset)
1321} 1401}
1322 1402
1323#define T4_REGMAP_SIZE (160 * 1024) 1403#define T4_REGMAP_SIZE (160 * 1024)
1404#define T5_REGMAP_SIZE (332 * 1024)
1324 1405
1325static int get_regs_len(struct net_device *dev) 1406static int get_regs_len(struct net_device *dev)
1326{ 1407{
1327 return T4_REGMAP_SIZE; 1408 struct adapter *adap = netdev2adap(dev);
1409 if (is_t4(adap->chip))
1410 return T4_REGMAP_SIZE;
1411 else
1412 return T5_REGMAP_SIZE;
1328} 1413}
1329 1414
1330static int get_eeprom_len(struct net_device *dev) 1415static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1483,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1398{ 1483{
1399 struct port_info *pi = netdev_priv(dev); 1484 struct port_info *pi = netdev_priv(dev);
1400 struct adapter *adapter = pi->adapter; 1485 struct adapter *adapter = pi->adapter;
1486 u32 val1, val2;
1401 1487
1402 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); 1488 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1403 1489
1404 data += sizeof(struct port_stats) / sizeof(u64); 1490 data += sizeof(struct port_stats) / sizeof(u64);
1405 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1491 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1492 data += sizeof(struct queue_port_stats) / sizeof(u64);
1493 if (!is_t4(adapter->chip)) {
1494 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1495 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1496 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1497 *data = val1 - val2;
1498 data++;
1499 *data = val2;
1500 data++;
1501 } else {
1502 memset(data, 0, 2 * sizeof(u64));
1503 *data += 2;
1504 }
1406} 1505}
1407 1506
1408/* 1507/*
@@ -1413,7 +1512,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1413 */ 1512 */
1414static inline unsigned int mk_adap_vers(const struct adapter *ap) 1513static inline unsigned int mk_adap_vers(const struct adapter *ap)
1415{ 1514{
1416 return 4 | (ap->params.rev << 10) | (1 << 16); 1515 return CHELSIO_CHIP_VERSION(ap->chip) |
1516 (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
1417} 1517}
1418 1518
1419static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, 1519static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -1428,7 +1528,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1428static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 1528static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1429 void *buf) 1529 void *buf)
1430{ 1530{
1431 static const unsigned int reg_ranges[] = { 1531 static const unsigned int t4_reg_ranges[] = {
1432 0x1008, 0x1108, 1532 0x1008, 0x1108,
1433 0x1180, 0x11b4, 1533 0x1180, 0x11b4,
1434 0x11fc, 0x123c, 1534 0x11fc, 0x123c,
@@ -1648,13 +1748,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1648 0x27e00, 0x27e04 1748 0x27e00, 0x27e04
1649 }; 1749 };
1650 1750
1751 static const unsigned int t5_reg_ranges[] = {
1752 0x1008, 0x1148,
1753 0x1180, 0x11b4,
1754 0x11fc, 0x123c,
1755 0x1280, 0x173c,
1756 0x1800, 0x18fc,
1757 0x3000, 0x3028,
1758 0x3060, 0x30d8,
1759 0x30e0, 0x30fc,
1760 0x3140, 0x357c,
1761 0x35a8, 0x35cc,
1762 0x35ec, 0x35ec,
1763 0x3600, 0x5624,
1764 0x56cc, 0x575c,
1765 0x580c, 0x5814,
1766 0x5890, 0x58bc,
1767 0x5940, 0x59dc,
1768 0x59fc, 0x5a18,
1769 0x5a60, 0x5a9c,
1770 0x5b9c, 0x5bfc,
1771 0x6000, 0x6040,
1772 0x6058, 0x614c,
1773 0x7700, 0x7798,
1774 0x77c0, 0x78fc,
1775 0x7b00, 0x7c54,
1776 0x7d00, 0x7efc,
1777 0x8dc0, 0x8de0,
1778 0x8df8, 0x8e84,
1779 0x8ea0, 0x8f84,
1780 0x8fc0, 0x90f8,
1781 0x9400, 0x9470,
1782 0x9600, 0x96f4,
1783 0x9800, 0x9808,
1784 0x9820, 0x983c,
1785 0x9850, 0x9864,
1786 0x9c00, 0x9c6c,
1787 0x9c80, 0x9cec,
1788 0x9d00, 0x9d6c,
1789 0x9d80, 0x9dec,
1790 0x9e00, 0x9e6c,
1791 0x9e80, 0x9eec,
1792 0x9f00, 0x9f6c,
1793 0x9f80, 0xa020,
1794 0xd004, 0xd03c,
1795 0xdfc0, 0xdfe0,
1796 0xe000, 0x11088,
1797 0x1109c, 0x1117c,
1798 0x11190, 0x11204,
1799 0x19040, 0x1906c,
1800 0x19078, 0x19080,
1801 0x1908c, 0x19124,
1802 0x19150, 0x191b0,
1803 0x191d0, 0x191e8,
1804 0x19238, 0x19290,
1805 0x193f8, 0x19474,
1806 0x19490, 0x194cc,
1807 0x194f0, 0x194f8,
1808 0x19c00, 0x19c60,
1809 0x19c94, 0x19e10,
1810 0x19e50, 0x19f34,
1811 0x19f40, 0x19f50,
1812 0x19f90, 0x19fe4,
1813 0x1a000, 0x1a06c,
1814 0x1a0b0, 0x1a120,
1815 0x1a128, 0x1a138,
1816 0x1a190, 0x1a1c4,
1817 0x1a1fc, 0x1a1fc,
1818 0x1e008, 0x1e00c,
1819 0x1e040, 0x1e04c,
1820 0x1e284, 0x1e290,
1821 0x1e2c0, 0x1e2c0,
1822 0x1e2e0, 0x1e2e0,
1823 0x1e300, 0x1e384,
1824 0x1e3c0, 0x1e3c8,
1825 0x1e408, 0x1e40c,
1826 0x1e440, 0x1e44c,
1827 0x1e684, 0x1e690,
1828 0x1e6c0, 0x1e6c0,
1829 0x1e6e0, 0x1e6e0,
1830 0x1e700, 0x1e784,
1831 0x1e7c0, 0x1e7c8,
1832 0x1e808, 0x1e80c,
1833 0x1e840, 0x1e84c,
1834 0x1ea84, 0x1ea90,
1835 0x1eac0, 0x1eac0,
1836 0x1eae0, 0x1eae0,
1837 0x1eb00, 0x1eb84,
1838 0x1ebc0, 0x1ebc8,
1839 0x1ec08, 0x1ec0c,
1840 0x1ec40, 0x1ec4c,
1841 0x1ee84, 0x1ee90,
1842 0x1eec0, 0x1eec0,
1843 0x1eee0, 0x1eee0,
1844 0x1ef00, 0x1ef84,
1845 0x1efc0, 0x1efc8,
1846 0x1f008, 0x1f00c,
1847 0x1f040, 0x1f04c,
1848 0x1f284, 0x1f290,
1849 0x1f2c0, 0x1f2c0,
1850 0x1f2e0, 0x1f2e0,
1851 0x1f300, 0x1f384,
1852 0x1f3c0, 0x1f3c8,
1853 0x1f408, 0x1f40c,
1854 0x1f440, 0x1f44c,
1855 0x1f684, 0x1f690,
1856 0x1f6c0, 0x1f6c0,
1857 0x1f6e0, 0x1f6e0,
1858 0x1f700, 0x1f784,
1859 0x1f7c0, 0x1f7c8,
1860 0x1f808, 0x1f80c,
1861 0x1f840, 0x1f84c,
1862 0x1fa84, 0x1fa90,
1863 0x1fac0, 0x1fac0,
1864 0x1fae0, 0x1fae0,
1865 0x1fb00, 0x1fb84,
1866 0x1fbc0, 0x1fbc8,
1867 0x1fc08, 0x1fc0c,
1868 0x1fc40, 0x1fc4c,
1869 0x1fe84, 0x1fe90,
1870 0x1fec0, 0x1fec0,
1871 0x1fee0, 0x1fee0,
1872 0x1ff00, 0x1ff84,
1873 0x1ffc0, 0x1ffc8,
1874 0x30000, 0x30030,
1875 0x30100, 0x30144,
1876 0x30190, 0x301d0,
1877 0x30200, 0x30318,
1878 0x30400, 0x3052c,
1879 0x30540, 0x3061c,
1880 0x30800, 0x30834,
1881 0x308c0, 0x30908,
1882 0x30910, 0x309ac,
1883 0x30a00, 0x30a04,
1884 0x30a0c, 0x30a2c,
1885 0x30a44, 0x30a50,
1886 0x30a74, 0x30c24,
1887 0x30d08, 0x30d14,
1888 0x30d1c, 0x30d20,
1889 0x30d3c, 0x30d50,
1890 0x31200, 0x3120c,
1891 0x31220, 0x31220,
1892 0x31240, 0x31240,
1893 0x31600, 0x31600,
1894 0x31608, 0x3160c,
1895 0x31a00, 0x31a1c,
1896 0x31e04, 0x31e20,
1897 0x31e38, 0x31e3c,
1898 0x31e80, 0x31e80,
1899 0x31e88, 0x31ea8,
1900 0x31eb0, 0x31eb4,
1901 0x31ec8, 0x31ed4,
1902 0x31fb8, 0x32004,
1903 0x32208, 0x3223c,
1904 0x32600, 0x32630,
1905 0x32a00, 0x32abc,
1906 0x32b00, 0x32b70,
1907 0x33000, 0x33048,
1908 0x33060, 0x3309c,
1909 0x330f0, 0x33148,
1910 0x33160, 0x3319c,
1911 0x331f0, 0x332e4,
1912 0x332f8, 0x333e4,
1913 0x333f8, 0x33448,
1914 0x33460, 0x3349c,
1915 0x334f0, 0x33548,
1916 0x33560, 0x3359c,
1917 0x335f0, 0x336e4,
1918 0x336f8, 0x337e4,
1919 0x337f8, 0x337fc,
1920 0x33814, 0x33814,
1921 0x3382c, 0x3382c,
1922 0x33880, 0x3388c,
1923 0x338e8, 0x338ec,
1924 0x33900, 0x33948,
1925 0x33960, 0x3399c,
1926 0x339f0, 0x33ae4,
1927 0x33af8, 0x33b10,
1928 0x33b28, 0x33b28,
1929 0x33b3c, 0x33b50,
1930 0x33bf0, 0x33c10,
1931 0x33c28, 0x33c28,
1932 0x33c3c, 0x33c50,
1933 0x33cf0, 0x33cfc,
1934 0x34000, 0x34030,
1935 0x34100, 0x34144,
1936 0x34190, 0x341d0,
1937 0x34200, 0x34318,
1938 0x34400, 0x3452c,
1939 0x34540, 0x3461c,
1940 0x34800, 0x34834,
1941 0x348c0, 0x34908,
1942 0x34910, 0x349ac,
1943 0x34a00, 0x34a04,
1944 0x34a0c, 0x34a2c,
1945 0x34a44, 0x34a50,
1946 0x34a74, 0x34c24,
1947 0x34d08, 0x34d14,
1948 0x34d1c, 0x34d20,
1949 0x34d3c, 0x34d50,
1950 0x35200, 0x3520c,
1951 0x35220, 0x35220,
1952 0x35240, 0x35240,
1953 0x35600, 0x35600,
1954 0x35608, 0x3560c,
1955 0x35a00, 0x35a1c,
1956 0x35e04, 0x35e20,
1957 0x35e38, 0x35e3c,
1958 0x35e80, 0x35e80,
1959 0x35e88, 0x35ea8,
1960 0x35eb0, 0x35eb4,
1961 0x35ec8, 0x35ed4,
1962 0x35fb8, 0x36004,
1963 0x36208, 0x3623c,
1964 0x36600, 0x36630,
1965 0x36a00, 0x36abc,
1966 0x36b00, 0x36b70,
1967 0x37000, 0x37048,
1968 0x37060, 0x3709c,
1969 0x370f0, 0x37148,
1970 0x37160, 0x3719c,
1971 0x371f0, 0x372e4,
1972 0x372f8, 0x373e4,
1973 0x373f8, 0x37448,
1974 0x37460, 0x3749c,
1975 0x374f0, 0x37548,
1976 0x37560, 0x3759c,
1977 0x375f0, 0x376e4,
1978 0x376f8, 0x377e4,
1979 0x377f8, 0x377fc,
1980 0x37814, 0x37814,
1981 0x3782c, 0x3782c,
1982 0x37880, 0x3788c,
1983 0x378e8, 0x378ec,
1984 0x37900, 0x37948,
1985 0x37960, 0x3799c,
1986 0x379f0, 0x37ae4,
1987 0x37af8, 0x37b10,
1988 0x37b28, 0x37b28,
1989 0x37b3c, 0x37b50,
1990 0x37bf0, 0x37c10,
1991 0x37c28, 0x37c28,
1992 0x37c3c, 0x37c50,
1993 0x37cf0, 0x37cfc,
1994 0x38000, 0x38030,
1995 0x38100, 0x38144,
1996 0x38190, 0x381d0,
1997 0x38200, 0x38318,
1998 0x38400, 0x3852c,
1999 0x38540, 0x3861c,
2000 0x38800, 0x38834,
2001 0x388c0, 0x38908,
2002 0x38910, 0x389ac,
2003 0x38a00, 0x38a04,
2004 0x38a0c, 0x38a2c,
2005 0x38a44, 0x38a50,
2006 0x38a74, 0x38c24,
2007 0x38d08, 0x38d14,
2008 0x38d1c, 0x38d20,
2009 0x38d3c, 0x38d50,
2010 0x39200, 0x3920c,
2011 0x39220, 0x39220,
2012 0x39240, 0x39240,
2013 0x39600, 0x39600,
2014 0x39608, 0x3960c,
2015 0x39a00, 0x39a1c,
2016 0x39e04, 0x39e20,
2017 0x39e38, 0x39e3c,
2018 0x39e80, 0x39e80,
2019 0x39e88, 0x39ea8,
2020 0x39eb0, 0x39eb4,
2021 0x39ec8, 0x39ed4,
2022 0x39fb8, 0x3a004,
2023 0x3a208, 0x3a23c,
2024 0x3a600, 0x3a630,
2025 0x3aa00, 0x3aabc,
2026 0x3ab00, 0x3ab70,
2027 0x3b000, 0x3b048,
2028 0x3b060, 0x3b09c,
2029 0x3b0f0, 0x3b148,
2030 0x3b160, 0x3b19c,
2031 0x3b1f0, 0x3b2e4,
2032 0x3b2f8, 0x3b3e4,
2033 0x3b3f8, 0x3b448,
2034 0x3b460, 0x3b49c,
2035 0x3b4f0, 0x3b548,
2036 0x3b560, 0x3b59c,
2037 0x3b5f0, 0x3b6e4,
2038 0x3b6f8, 0x3b7e4,
2039 0x3b7f8, 0x3b7fc,
2040 0x3b814, 0x3b814,
2041 0x3b82c, 0x3b82c,
2042 0x3b880, 0x3b88c,
2043 0x3b8e8, 0x3b8ec,
2044 0x3b900, 0x3b948,
2045 0x3b960, 0x3b99c,
2046 0x3b9f0, 0x3bae4,
2047 0x3baf8, 0x3bb10,
2048 0x3bb28, 0x3bb28,
2049 0x3bb3c, 0x3bb50,
2050 0x3bbf0, 0x3bc10,
2051 0x3bc28, 0x3bc28,
2052 0x3bc3c, 0x3bc50,
2053 0x3bcf0, 0x3bcfc,
2054 0x3c000, 0x3c030,
2055 0x3c100, 0x3c144,
2056 0x3c190, 0x3c1d0,
2057 0x3c200, 0x3c318,
2058 0x3c400, 0x3c52c,
2059 0x3c540, 0x3c61c,
2060 0x3c800, 0x3c834,
2061 0x3c8c0, 0x3c908,
2062 0x3c910, 0x3c9ac,
2063 0x3ca00, 0x3ca04,
2064 0x3ca0c, 0x3ca2c,
2065 0x3ca44, 0x3ca50,
2066 0x3ca74, 0x3cc24,
2067 0x3cd08, 0x3cd14,
2068 0x3cd1c, 0x3cd20,
2069 0x3cd3c, 0x3cd50,
2070 0x3d200, 0x3d20c,
2071 0x3d220, 0x3d220,
2072 0x3d240, 0x3d240,
2073 0x3d600, 0x3d600,
2074 0x3d608, 0x3d60c,
2075 0x3da00, 0x3da1c,
2076 0x3de04, 0x3de20,
2077 0x3de38, 0x3de3c,
2078 0x3de80, 0x3de80,
2079 0x3de88, 0x3dea8,
2080 0x3deb0, 0x3deb4,
2081 0x3dec8, 0x3ded4,
2082 0x3dfb8, 0x3e004,
2083 0x3e208, 0x3e23c,
2084 0x3e600, 0x3e630,
2085 0x3ea00, 0x3eabc,
2086 0x3eb00, 0x3eb70,
2087 0x3f000, 0x3f048,
2088 0x3f060, 0x3f09c,
2089 0x3f0f0, 0x3f148,
2090 0x3f160, 0x3f19c,
2091 0x3f1f0, 0x3f2e4,
2092 0x3f2f8, 0x3f3e4,
2093 0x3f3f8, 0x3f448,
2094 0x3f460, 0x3f49c,
2095 0x3f4f0, 0x3f548,
2096 0x3f560, 0x3f59c,
2097 0x3f5f0, 0x3f6e4,
2098 0x3f6f8, 0x3f7e4,
2099 0x3f7f8, 0x3f7fc,
2100 0x3f814, 0x3f814,
2101 0x3f82c, 0x3f82c,
2102 0x3f880, 0x3f88c,
2103 0x3f8e8, 0x3f8ec,
2104 0x3f900, 0x3f948,
2105 0x3f960, 0x3f99c,
2106 0x3f9f0, 0x3fae4,
2107 0x3faf8, 0x3fb10,
2108 0x3fb28, 0x3fb28,
2109 0x3fb3c, 0x3fb50,
2110 0x3fbf0, 0x3fc10,
2111 0x3fc28, 0x3fc28,
2112 0x3fc3c, 0x3fc50,
2113 0x3fcf0, 0x3fcfc,
2114 0x40000, 0x4000c,
2115 0x40040, 0x40068,
2116 0x40080, 0x40144,
2117 0x40180, 0x4018c,
2118 0x40200, 0x40298,
2119 0x402ac, 0x4033c,
2120 0x403f8, 0x403fc,
2121 0x41300, 0x413c4,
2122 0x41400, 0x4141c,
2123 0x41480, 0x414d0,
2124 0x44000, 0x44078,
2125 0x440c0, 0x44278,
2126 0x442c0, 0x44478,
2127 0x444c0, 0x44678,
2128 0x446c0, 0x44878,
2129 0x448c0, 0x449fc,
2130 0x45000, 0x45068,
2131 0x45080, 0x45084,
2132 0x450a0, 0x450b0,
2133 0x45200, 0x45268,
2134 0x45280, 0x45284,
2135 0x452a0, 0x452b0,
2136 0x460c0, 0x460e4,
2137 0x47000, 0x4708c,
2138 0x47200, 0x47250,
2139 0x47400, 0x47420,
2140 0x47600, 0x47618,
2141 0x47800, 0x47814,
2142 0x48000, 0x4800c,
2143 0x48040, 0x48068,
2144 0x48080, 0x48144,
2145 0x48180, 0x4818c,
2146 0x48200, 0x48298,
2147 0x482ac, 0x4833c,
2148 0x483f8, 0x483fc,
2149 0x49300, 0x493c4,
2150 0x49400, 0x4941c,
2151 0x49480, 0x494d0,
2152 0x4c000, 0x4c078,
2153 0x4c0c0, 0x4c278,
2154 0x4c2c0, 0x4c478,
2155 0x4c4c0, 0x4c678,
2156 0x4c6c0, 0x4c878,
2157 0x4c8c0, 0x4c9fc,
2158 0x4d000, 0x4d068,
2159 0x4d080, 0x4d084,
2160 0x4d0a0, 0x4d0b0,
2161 0x4d200, 0x4d268,
2162 0x4d280, 0x4d284,
2163 0x4d2a0, 0x4d2b0,
2164 0x4e0c0, 0x4e0e4,
2165 0x4f000, 0x4f08c,
2166 0x4f200, 0x4f250,
2167 0x4f400, 0x4f420,
2168 0x4f600, 0x4f618,
2169 0x4f800, 0x4f814,
2170 0x50000, 0x500cc,
2171 0x50400, 0x50400,
2172 0x50800, 0x508cc,
2173 0x50c00, 0x50c00,
2174 0x51000, 0x5101c,
2175 0x51300, 0x51308,
2176 };
2177
1651 int i; 2178 int i;
1652 struct adapter *ap = netdev2adap(dev); 2179 struct adapter *ap = netdev2adap(dev);
2180 static const unsigned int *reg_ranges;
2181 int arr_size = 0, buf_size = 0;
2182
2183 if (is_t4(ap->chip)) {
2184 reg_ranges = &t4_reg_ranges[0];
2185 arr_size = ARRAY_SIZE(t4_reg_ranges);
2186 buf_size = T4_REGMAP_SIZE;
2187 } else {
2188 reg_ranges = &t5_reg_ranges[0];
2189 arr_size = ARRAY_SIZE(t5_reg_ranges);
2190 buf_size = T5_REGMAP_SIZE;
2191 }
1653 2192
1654 regs->version = mk_adap_vers(ap); 2193 regs->version = mk_adap_vers(ap);
1655 2194
1656 memset(buf, 0, T4_REGMAP_SIZE); 2195 memset(buf, 0, buf_size);
1657 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2196 for (i = 0; i < arr_size; i += 2)
1658 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); 2197 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1659} 2198}
1660 2199
@@ -2205,14 +2744,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2205 netdev_features_t changed = dev->features ^ features; 2744 netdev_features_t changed = dev->features ^ features;
2206 int err; 2745 int err;
2207 2746
2208 if (!(changed & NETIF_F_HW_VLAN_RX)) 2747 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2209 return 0; 2748 return 0;
2210 2749
2211 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, 2750 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2212 -1, -1, -1, 2751 -1, -1, -1,
2213 !!(features & NETIF_F_HW_VLAN_RX), true); 2752 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2214 if (unlikely(err)) 2753 if (unlikely(err))
2215 dev->features = features ^ NETIF_F_HW_VLAN_RX; 2754 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2216 return err; 2755 return err;
2217} 2756}
2218 2757
@@ -2363,8 +2902,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2363 int ret, ofst; 2902 int ret, ofst;
2364 __be32 data[16]; 2903 __be32 data[16];
2365 2904
2366 if (mem == MEM_MC) 2905 if ((mem == MEM_MC) || (mem == MEM_MC1))
2367 ret = t4_mc_read(adap, pos, data, NULL); 2906 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2368 else 2907 else
2369 ret = t4_edc_read(adap, mem, pos, data, NULL); 2908 ret = t4_edc_read(adap, mem, pos, data, NULL);
2370 if (ret) 2909 if (ret)
@@ -2405,18 +2944,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
2405static int setup_debugfs(struct adapter *adap) 2944static int setup_debugfs(struct adapter *adap)
2406{ 2945{
2407 int i; 2946 int i;
2947 u32 size;
2408 2948
2409 if (IS_ERR_OR_NULL(adap->debugfs_root)) 2949 if (IS_ERR_OR_NULL(adap->debugfs_root))
2410 return -1; 2950 return -1;
2411 2951
2412 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); 2952 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2413 if (i & EDRAM0_ENABLE) 2953 if (i & EDRAM0_ENABLE) {
2414 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); 2954 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2415 if (i & EDRAM1_ENABLE) 2955 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2416 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); 2956 }
2417 if (i & EXT_MEM_ENABLE) 2957 if (i & EDRAM1_ENABLE) {
2418 add_debugfs_mem(adap, "mc", MEM_MC, 2958 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2419 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); 2959 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2960 }
2961 if (is_t4(adap->chip)) {
2962 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2963 if (i & EXT_MEM_ENABLE)
2964 add_debugfs_mem(adap, "mc", MEM_MC,
2965 EXT_MEM_SIZE_GET(size));
2966 } else {
2967 if (i & EXT_MEM_ENABLE) {
2968 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2969 add_debugfs_mem(adap, "mc0", MEM_MC0,
2970 EXT_MEM_SIZE_GET(size));
2971 }
2972 if (i & EXT_MEM1_ENABLE) {
2973 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2974 add_debugfs_mem(adap, "mc1", MEM_MC1,
2975 EXT_MEM_SIZE_GET(size));
2976 }
2977 }
2420 if (adap->l2t) 2978 if (adap->l2t)
2421 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, 2979 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2422 &t4_l2t_fops); 2980 &t4_l2t_fops);
@@ -2747,10 +3305,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
2747unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) 3305unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2748{ 3306{
2749 struct adapter *adap = netdev2adap(dev); 3307 struct adapter *adap = netdev2adap(dev);
2750 u32 v; 3308 u32 v1, v2, lp_count, hp_count;
2751 3309
2752 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3310 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2753 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v); 3311 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3312 if (is_t4(adap->chip)) {
3313 lp_count = G_LP_COUNT(v1);
3314 hp_count = G_HP_COUNT(v1);
3315 } else {
3316 lp_count = G_LP_COUNT_T5(v1);
3317 hp_count = G_HP_COUNT_T5(v2);
3318 }
3319 return lpfifo ? lp_count : hp_count;
2754} 3320}
2755EXPORT_SYMBOL(cxgb4_dbfifo_count); 3321EXPORT_SYMBOL(cxgb4_dbfifo_count);
2756 3322
@@ -2853,6 +3419,25 @@ out:
2853} 3419}
2854EXPORT_SYMBOL(cxgb4_sync_txq_pidx); 3420EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2855 3421
3422void cxgb4_disable_db_coalescing(struct net_device *dev)
3423{
3424 struct adapter *adap;
3425
3426 adap = netdev2adap(dev);
3427 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3428 F_NOCOALESCE);
3429}
3430EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3431
3432void cxgb4_enable_db_coalescing(struct net_device *dev)
3433{
3434 struct adapter *adap;
3435
3436 adap = netdev2adap(dev);
3437 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3438}
3439EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3440
2856static struct pci_driver cxgb4_driver; 3441static struct pci_driver cxgb4_driver;
2857 3442
2858static void check_neigh_update(struct neighbour *neigh) 3443static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3473,23 @@ static struct notifier_block cxgb4_netevent_nb = {
2888 3473
2889static void drain_db_fifo(struct adapter *adap, int usecs) 3474static void drain_db_fifo(struct adapter *adap, int usecs)
2890{ 3475{
2891 u32 v; 3476 u32 v1, v2, lp_count, hp_count;
2892 3477
2893 do { 3478 do {
3479 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3480 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3481 if (is_t4(adap->chip)) {
3482 lp_count = G_LP_COUNT(v1);
3483 hp_count = G_HP_COUNT(v1);
3484 } else {
3485 lp_count = G_LP_COUNT_T5(v1);
3486 hp_count = G_HP_COUNT_T5(v2);
3487 }
3488
3489 if (lp_count == 0 && hp_count == 0)
3490 break;
2894 set_current_state(TASK_UNINTERRUPTIBLE); 3491 set_current_state(TASK_UNINTERRUPTIBLE);
2895 schedule_timeout(usecs_to_jiffies(usecs)); 3492 schedule_timeout(usecs_to_jiffies(usecs));
2896 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2897 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2898 break;
2899 } while (1); 3493 } while (1);
2900} 3494}
2901 3495
@@ -3004,24 +3598,62 @@ static void process_db_drop(struct work_struct *work)
3004 3598
3005 adap = container_of(work, struct adapter, db_drop_task); 3599 adap = container_of(work, struct adapter, db_drop_task);
3006 3600
3601 if (is_t4(adap->chip)) {
3602 disable_dbs(adap);
3603 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3604 drain_db_fifo(adap, 1);
3605 recover_all_queues(adap);
3606 enable_dbs(adap);
3607 } else {
3608 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3609 u16 qid = (dropped_db >> 15) & 0x1ffff;
3610 u16 pidx_inc = dropped_db & 0x1fff;
3611 unsigned int s_qpp;
3612 unsigned short udb_density;
3613 unsigned long qpshift;
3614 int page;
3615 u32 udb;
3616
3617 dev_warn(adap->pdev_dev,
3618 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3619 dropped_db, qid,
3620 (dropped_db >> 14) & 1,
3621 (dropped_db >> 13) & 1,
3622 pidx_inc);
3623
3624 drain_db_fifo(adap, 1);
3625
3626 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3627 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3628 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3629 qpshift = PAGE_SHIFT - ilog2(udb_density);
3630 udb = qid << qpshift;
3631 udb &= PAGE_MASK;
3632 page = udb / PAGE_SIZE;
3633 udb += (qid - (page * udb_density)) * 128;
3634
3635 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3636
3637 /* Re-enable BAR2 WC */
3638 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3639 }
3640
3007 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); 3641 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3008 disable_dbs(adap);
3009 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3010 drain_db_fifo(adap, 1);
3011 recover_all_queues(adap);
3012 enable_dbs(adap);
3013} 3642}
3014 3643
3015void t4_db_full(struct adapter *adap) 3644void t4_db_full(struct adapter *adap)
3016{ 3645{
3017 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3646 if (is_t4(adap->chip)) {
3018 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3647 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3019 queue_work(workq, &adap->db_full_task); 3648 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3649 queue_work(workq, &adap->db_full_task);
3650 }
3020} 3651}
3021 3652
3022void t4_db_dropped(struct adapter *adap) 3653void t4_db_dropped(struct adapter *adap)
3023{ 3654{
3024 queue_work(workq, &adap->db_drop_task); 3655 if (is_t4(adap->chip))
3656 queue_work(workq, &adap->db_drop_task);
3025} 3657}
3026 3658
3027static void uld_attach(struct adapter *adap, unsigned int uld) 3659static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3566,17 +4198,27 @@ void t4_fatal_err(struct adapter *adap)
3566 4198
3567static void setup_memwin(struct adapter *adap) 4199static void setup_memwin(struct adapter *adap)
3568{ 4200{
3569 u32 bar0; 4201 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
3570 4202
3571 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ 4203 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4204 if (is_t4(adap->chip)) {
4205 mem_win0_base = bar0 + MEMWIN0_BASE;
4206 mem_win1_base = bar0 + MEMWIN1_BASE;
4207 mem_win2_base = bar0 + MEMWIN2_BASE;
4208 } else {
4209 /* For T5, only relative offset inside the PCIe BAR is passed */
4210 mem_win0_base = MEMWIN0_BASE;
4211 mem_win1_base = MEMWIN1_BASE_T5;
4212 mem_win2_base = MEMWIN2_BASE_T5;
4213 }
3572 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), 4214 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3573 (bar0 + MEMWIN0_BASE) | BIR(0) | 4215 mem_win0_base | BIR(0) |
3574 WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 4216 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3575 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), 4217 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3576 (bar0 + MEMWIN1_BASE) | BIR(0) | 4218 mem_win1_base | BIR(0) |
3577 WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 4219 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3578 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 4220 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3579 (bar0 + MEMWIN2_BASE) | BIR(0) | 4221 mem_win2_base | BIR(0) |
3580 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 4222 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3581} 4223}
3582 4224
@@ -3745,6 +4387,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3745 unsigned long mtype = 0, maddr = 0; 4387 unsigned long mtype = 0, maddr = 0;
3746 u32 finiver, finicsum, cfcsum; 4388 u32 finiver, finicsum, cfcsum;
3747 int ret, using_flash; 4389 int ret, using_flash;
4390 char *fw_config_file, fw_config_file_path[256];
3748 4391
3749 /* 4392 /*
3750 * Reset device if necessary. 4393 * Reset device if necessary.
@@ -3761,7 +4404,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3761 * then use that. Otherwise, use the configuration file stored 4404 * then use that. Otherwise, use the configuration file stored
3762 * in the adapter flash ... 4405 * in the adapter flash ...
3763 */ 4406 */
3764 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev); 4407 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4408 case CHELSIO_T4:
4409 fw_config_file = FW_CFNAME;
4410 break;
4411 case CHELSIO_T5:
4412 fw_config_file = FW5_CFNAME;
4413 break;
4414 default:
4415 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4416 adapter->pdev->device);
4417 ret = -EINVAL;
4418 goto bye;
4419 }
4420
4421 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3765 if (ret < 0) { 4422 if (ret < 0) {
3766 using_flash = 1; 4423 using_flash = 1;
3767 mtype = FW_MEMTYPE_CF_FLASH; 4424 mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4534,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3877 if (ret < 0) 4534 if (ret < 0)
3878 goto bye; 4535 goto bye;
3879 4536
4537 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
3880 /* 4538 /*
3881 * Return successfully and note that we're operating with parameters 4539 * Return successfully and note that we're operating with parameters
3882 * not supplied by the driver, rather than from hard-wired 4540 * not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4545,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3887 "Configuration File %s, version %#x, computed checksum %#x\n", 4545 "Configuration File %s, version %#x, computed checksum %#x\n",
3888 (using_flash 4546 (using_flash
3889 ? "in device FLASH" 4547 ? "in device FLASH"
3890 : "/lib/firmware/" FW_CFNAME), 4548 : fw_config_file_path),
3891 finiver, cfcsum); 4549 finiver, cfcsum);
3892 return 0; 4550 return 0;
3893 4551
@@ -4354,6 +5012,15 @@ static int adap_init0(struct adapter *adap)
4354 adap->tids.aftid_end = val[1]; 5012 adap->tids.aftid_end = val[1];
4355 } 5013 }
4356 5014
5015 /* If we're running on newer firmware, let it know that we're
5016 * prepared to deal with encapsulated CPL messages. Older
5017 * firmware won't understand this and we'll just get
5018 * unencapsulated messages ...
5019 */
5020 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5021 val[0] = 1;
5022 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5023
4357 /* 5024 /*
4358 * Get device capabilities so we can determine what resources we need 5025 * Get device capabilities so we can determine what resources we need
4359 * to manage. 5026 * to manage.
@@ -4814,7 +5481,8 @@ static void print_port_info(const struct net_device *dev)
4814 sprintf(bufp, "BASE-%s", base[pi->port_type]); 5481 sprintf(bufp, "BASE-%s", base[pi->port_type]);
4815 5482
4816 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 5483 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4817 adap->params.vpd.id, adap->params.rev, buf, 5484 adap->params.vpd.id,
5485 CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
4818 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 5486 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4819 (adap->flags & USING_MSIX) ? " MSI-X" : 5487 (adap->flags & USING_MSIX) ? " MSI-X" :
4820 (adap->flags & USING_MSI) ? " MSI" : ""); 5488 (adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5522,11 @@ static void free_some_resources(struct adapter *adapter)
4854#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 5522#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4855#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 5523#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4856 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 5524 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5525#define SEGMENT_SIZE 128
4857 5526
4858static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 5527static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4859{ 5528{
4860 int func, i, err; 5529 int func, i, err, s_qpp, qpp, num_seg;
4861 struct port_info *pi; 5530 struct port_info *pi;
4862 bool highdma = false; 5531 bool highdma = false;
4863 struct adapter *adapter = NULL; 5532 struct adapter *adapter = NULL;
@@ -4934,7 +5603,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4934 5603
4935 err = t4_prep_adapter(adapter); 5604 err = t4_prep_adapter(adapter);
4936 if (err) 5605 if (err)
4937 goto out_unmap_bar; 5606 goto out_unmap_bar0;
5607
5608 if (!is_t4(adapter->chip)) {
5609 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5610 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5611 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5612 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5613
5614 /* Each segment size is 128B. Write coalescing is enabled only
5615 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
5616 * queue is less no of segments that can be accommodated in
5617 * a page size.
5618 */
5619 if (qpp > num_seg) {
5620 dev_err(&pdev->dev,
5621 "Incorrect number of egress queues per page\n");
5622 err = -EINVAL;
5623 goto out_unmap_bar0;
5624 }
5625 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5626 pci_resource_len(pdev, 2));
5627 if (!adapter->bar2) {
5628 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5629 err = -ENOMEM;
5630 goto out_unmap_bar0;
5631 }
5632 }
5633
4938 setup_memwin(adapter); 5634 setup_memwin(adapter);
4939 err = adap_init0(adapter); 5635 err = adap_init0(adapter);
4940 setup_memwin_rdma(adapter); 5636 setup_memwin_rdma(adapter);
@@ -4963,7 +5659,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4963 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | 5659 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4964 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5660 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4965 NETIF_F_RXCSUM | NETIF_F_RXHASH | 5661 NETIF_F_RXCSUM | NETIF_F_RXHASH |
4966 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 5662 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
4967 if (highdma) 5663 if (highdma)
4968 netdev->hw_features |= NETIF_F_HIGHDMA; 5664 netdev->hw_features |= NETIF_F_HIGHDMA;
4969 netdev->features |= netdev->hw_features; 5665 netdev->features |= netdev->hw_features;
@@ -5063,6 +5759,9 @@ sriov:
5063 out_free_dev: 5759 out_free_dev:
5064 free_some_resources(adapter); 5760 free_some_resources(adapter);
5065 out_unmap_bar: 5761 out_unmap_bar:
5762 if (!is_t4(adapter->chip))
5763 iounmap(adapter->bar2);
5764 out_unmap_bar0:
5066 iounmap(adapter->regs); 5765 iounmap(adapter->regs);
5067 out_free_adapter: 5766 out_free_adapter:
5068 kfree(adapter); 5767 kfree(adapter);
@@ -5113,6 +5812,8 @@ static void remove_one(struct pci_dev *pdev)
5113 5812
5114 free_some_resources(adapter); 5813 free_some_resources(adapter);
5115 iounmap(adapter->regs); 5814 iounmap(adapter->regs);
5815 if (!is_t4(adapter->chip))
5816 iounmap(adapter->bar2);
5116 kfree(adapter); 5817 kfree(adapter);
5117 pci_disable_pcie_error_reporting(pdev); 5818 pci_disable_pcie_error_reporting(pdev);
5118 pci_disable_device(pdev); 5819 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2de..4faf4d067ee7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
269 unsigned int skb_len, unsigned int pull_len); 269 unsigned int skb_len, unsigned int pull_len);
270int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); 270int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
271int cxgb4_flush_eq_cache(struct net_device *dev); 271int cxgb4_flush_eq_cache(struct net_device *dev);
272void cxgb4_disable_db_coalescing(struct net_device *dev);
273void cxgb4_enable_db_coalescing(struct net_device *dev);
274
272#endif /* !__CXGB4_OFLD_H */ 275#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588b..2bfbb206b35a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
506 506
507static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) 507static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
508{ 508{
509 u32 val;
509 if (q->pend_cred >= 8) { 510 if (q->pend_cred >= 8) {
511 val = PIDX(q->pend_cred / 8);
512 if (!is_t4(adap->chip))
513 val |= DBTYPE(1);
510 wmb(); 514 wmb();
511 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | 515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
512 QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); 516 QID(q->cntxt_id) | val);
513 q->pend_cred &= 7; 517 q->pend_cred &= 7;
514 } 518 }
515} 519}
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
812 *end = 0; 816 *end = 0;
813} 817}
814 818
819/* This function copies 64 byte coalesced work request to
820 * memory mapped BAR2 space(user space writes).
821 * For coalesced WR SGE, fetches data from the FIFO instead of from Host.
822 */
823static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
824{
825 int count = 8;
826
827 while (count) {
828 writeq(*src, dst);
829 src++;
830 dst++;
831 count--;
832 }
833}
834
815/** 835/**
816 * ring_tx_db - check and potentially ring a Tx queue's doorbell 836 * ring_tx_db - check and potentially ring a Tx queue's doorbell
817 * @adap: the adapter 837 * @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
822 */ 842 */
823static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 843static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
824{ 844{
845 unsigned int *wr, index;
846
825 wmb(); /* write descriptors before telling HW */ 847 wmb(); /* write descriptors before telling HW */
826 spin_lock(&q->db_lock); 848 spin_lock(&q->db_lock);
827 if (!q->db_disabled) { 849 if (!q->db_disabled) {
828 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 850 if (is_t4(adap->chip)) {
829 QID(q->cntxt_id) | PIDX(n)); 851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
852 QID(q->cntxt_id) | PIDX(n));
853 } else {
854 if (n == 1) {
855 index = q->pidx ? (q->pidx - 1) : (q->size - 1);
856 wr = (unsigned int *)&q->desc[index];
857 cxgb_pio_copy((u64 __iomem *)
858 (adap->bar2 + q->udb + 64),
859 (u64 *)wr);
860 } else
861 writel(n, adap->bar2 + q->udb + 8);
862 wmb();
863 }
830 } 864 }
831 q->db_pidx = q->pidx; 865 q->db_pidx = q->pidx;
832 spin_unlock(&q->db_lock); 866 spin_unlock(&q->db_lock);
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1555 const struct pkt_gl *gl) 1589 const struct pkt_gl *gl)
1556{ 1590{
1557 struct sk_buff *skb; 1591 struct sk_buff *skb;
1558 struct cpl_trace_pkt *p;
1559 1592
1560 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 1593 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1561 if (unlikely(!skb)) { 1594 if (unlikely(!skb)) {
@@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1563 return 0; 1596 return 0;
1564 } 1597 }
1565 1598
1566 p = (struct cpl_trace_pkt *)skb->data; 1599 if (is_t4(adap->chip))
1567 __skb_pull(skb, sizeof(*p)); 1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1601 else
1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1603
1568 skb_reset_mac_header(skb); 1604 skb_reset_mac_header(skb);
1569 skb->protocol = htons(0xffff); 1605 skb->protocol = htons(0xffff);
1570 skb->dev = adap->port[0]; 1606 skb->dev = adap->port[0];
@@ -1597,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1597 skb->rxhash = (__force u32)pkt->rsshdr.hash_val; 1633 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1598 1634
1599 if (unlikely(pkt->vlan_ex)) { 1635 if (unlikely(pkt->vlan_ex)) {
1600 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); 1636 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1601 rxq->stats.vlan_ex++; 1637 rxq->stats.vlan_ex++;
1602 } 1638 }
1603 ret = napi_gro_frags(&rxq->rspq.napi); 1639 ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1625 const struct cpl_rx_pkt *pkt; 1661 const struct cpl_rx_pkt *pkt;
1626 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1627 struct sge *s = &q->adap->sge; 1663 struct sge *s = &q->adap->sge;
1664 int cpl_trace_pkt = is_t4(q->adap->chip) ?
1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1628 1666
1629 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1630 return handle_trace_pkt(q->adap, si); 1668 return handle_trace_pkt(q->adap, si);
1631 1669
1632 pkt = (const struct cpl_rx_pkt *)rsp; 1670 pkt = (const struct cpl_rx_pkt *)rsp;
@@ -1667,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1667 skb_checksum_none_assert(skb); 1705 skb_checksum_none_assert(skb);
1668 1706
1669 if (unlikely(pkt->vlan_ex)) { 1707 if (unlikely(pkt->vlan_ex)) {
1670 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan)); 1708 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1671 rxq->stats.vlan_ex++; 1709 rxq->stats.vlan_ex++;
1672 } 1710 }
1673 netif_receive_skb(skb); 1711 netif_receive_skb(skb);
@@ -2143,11 +2181,27 @@ err:
2143 2181
2144static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2145{ 2183{
2184 q->cntxt_id = id;
2185 if (!is_t4(adap->chip)) {
2186 unsigned int s_qpp;
2187 unsigned short udb_density;
2188 unsigned long qpshift;
2189 int page;
2190
2191 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
2192 udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
2193 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
2194 qpshift = PAGE_SHIFT - ilog2(udb_density);
2195 q->udb = q->cntxt_id << qpshift;
2196 q->udb &= PAGE_MASK;
2197 page = q->udb / PAGE_SIZE;
2198 q->udb += (q->cntxt_id - (page * udb_density)) * 128;
2199 }
2200
2146 q->in_use = 0; 2201 q->in_use = 0;
2147 q->cidx = q->pidx = 0; 2202 q->cidx = q->pidx = 0;
2148 q->stops = q->restarts = 0; 2203 q->stops = q->restarts = 0;
2149 q->stat = (void *)&q->desc[q->size]; 2204 q->stat = (void *)&q->desc[q->size];
2150 q->cntxt_id = id;
2151 spin_lock_init(&q->db_lock); 2205 spin_lock_init(&q->db_lock);
2152 adap->sge.egr_map[id - adap->sge.egr_start] = q; 2206 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2153} 2207}
@@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap)
2587 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2588 * and generate an interrupt when this occurs so we can recover. 2642 * and generate an interrupt when this occurs so we can recover.
2589 */ 2643 */
2590 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, 2644 if (is_t4(adap->chip)) {
2591 V_HP_INT_THRESH(M_HP_INT_THRESH) | 2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2592 V_LP_INT_THRESH(M_LP_INT_THRESH), 2646 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2593 V_HP_INT_THRESH(dbfifo_int_thresh) | 2647 V_LP_INT_THRESH(M_LP_INT_THRESH),
2594 V_LP_INT_THRESH(dbfifo_int_thresh)); 2648 V_HP_INT_THRESH(dbfifo_int_thresh) |
2649 V_LP_INT_THRESH(dbfifo_int_thresh));
2650 } else {
2651 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2652 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2653 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2654 t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2655 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2656 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2657 }
2595 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, 2658 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2596 F_ENABLE_DROP); 2659 F_ENABLE_DROP);
2597 2660
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8049268ce0f2..d02d4e8c4417 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
282 * t4_mc_read - read from MC through backdoor accesses 282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter 283 * @adap: the adapter
284 * @addr: address of first byte requested 284 * @addr: address of first byte requested
285 * @idx: which MC to access
285 * @data: 64 bytes of data containing the requested address 286 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word 287 * @ecc: where to store the corresponding 64-bit ECC word
287 * 288 *
@@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
289 * that covers the requested address @addr. If @parity is not %NULL it 290 * that covers the requested address @addr. If @parity is not %NULL it
290 * is assigned the 64-bit ECC word for the read data. 291 * is assigned the 64-bit ECC word for the read data.
291 */ 292 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) 293int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
293{ 294{
294 int i; 295 int i;
296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
297 u32 mc_bist_status_rdata, mc_bist_data_pattern;
298
299 if (is_t4(adap->chip)) {
300 mc_bist_cmd = MC_BIST_CMD;
301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
302 mc_bist_cmd_len = MC_BIST_CMD_LEN;
303 mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
304 mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
305 } else {
306 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
307 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
308 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
309 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
310 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
311 }
295 312
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) 313 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
297 return -EBUSY; 314 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); 315 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64); 316 t4_write_reg(adap, mc_bist_cmd_len, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); 317 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | 318 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1)); 319 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); 320 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
304 if (i) 321 if (i)
305 return i; 322 return i;
306 323
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) 324#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
308 325
309 for (i = 15; i >= 0; i--) 326 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); 327 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
@@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 346int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{ 347{
331 int i; 348 int i;
349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
351
352 if (is_t4(adap->chip)) {
353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
356 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
357 idx);
358 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
359 idx);
360 } else {
361 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
362 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
363 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
364 edc_bist_cmd_data_pattern =
365 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
366 edc_bist_status_rdata =
367 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
368 }
332 369
333 idx *= EDC_STRIDE; 370 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY; 371 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); 372 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); 373 t4_write_reg(adap, edc_bist_cmd_len, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); 374 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx, 375 t4_write_reg(adap, edc_bist_cmd,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); 376 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); 377 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
342 if (i) 378 if (i)
343 return i; 379 return i;
344 380
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) 381#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
346 382
347 for (i = 15; i >= 0; i--) 383 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); 384 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
@@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) 402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{ 403{
368 int i; 404 int i;
405 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
369 406
370 /* 407 /*
371 * Setup offset into PCIE memory window. Address must be a 408 * Setup offset into PCIE memory window. Address must be a
@@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
374 * values.) 411 * values.)
375 */ 412 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, 413 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1)); 414 (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 415 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379 416
380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ 417 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
@@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
410 __be32 *buf, int dir) 447 __be32 *buf, int dir)
411{ 448{
412 u32 pos, start, end, offset, memoffset; 449 u32 pos, start, end, offset, memoffset;
450 u32 edc_size, mc_size;
413 int ret = 0; 451 int ret = 0;
414 __be32 *data; 452 __be32 *data;
415 453
@@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
423 if (!data) 461 if (!data)
424 return -ENOMEM; 462 return -ENOMEM;
425 463
426 /* 464 /* Offset into the region of memory which is being accessed
427 * Offset into the region of memory which is being accessed
428 * MEM_EDC0 = 0 465 * MEM_EDC0 = 0
429 * MEM_EDC1 = 1 466 * MEM_EDC1 = 1
430 * MEM_MC = 2 467 * MEM_MC = 2 -- T4
468 * MEM_MC0 = 2 -- For T5
469 * MEM_MC1 = 3 -- For T5
431 */ 470 */
432 memoffset = (mtype * (5 * 1024 * 1024)); 471 edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
472 if (mtype != MEM_MC1)
473 memoffset = (mtype * (edc_size * 1024 * 1024));
474 else {
475 mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
476 MA_EXT_MEMORY_BAR));
477 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
478 }
433 479
434 /* Determine the PCIE_MEM_ACCESS_OFFSET */ 480 /* Determine the PCIE_MEM_ACCESS_OFFSET */
435 addr = addr + memoffset; 481 addr = addr + memoffset;
@@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
497} 543}
498 544
499#define EEPROM_STAT_ADDR 0x7bfc 545#define EEPROM_STAT_ADDR 0x7bfc
500#define VPD_LEN 512
501#define VPD_BASE 0x400 546#define VPD_BASE 0x400
502#define VPD_BASE_OLD 0 547#define VPD_BASE_OLD 0
548#define VPD_LEN 1024
503 549
504/** 550/**
505 * t4_seeprom_wp - enable/disable EEPROM write protection 551 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter)
856{ 902{
857 u32 api_vers[2]; 903 u32 api_vers[2];
858 int ret, major, minor, micro; 904 int ret, major, minor, micro;
905 int exp_major, exp_minor, exp_micro;
859 906
860 ret = get_fw_version(adapter, &adapter->params.fw_vers); 907 ret = get_fw_version(adapter, &adapter->params.fw_vers);
861 if (!ret) 908 if (!ret)
@@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter)
870 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); 917 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
871 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); 918 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
872 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); 919 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
920
921 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
922 case CHELSIO_T4:
923 exp_major = FW_VERSION_MAJOR;
924 exp_minor = FW_VERSION_MINOR;
925 exp_micro = FW_VERSION_MICRO;
926 break;
927 case CHELSIO_T5:
928 exp_major = FW_VERSION_MAJOR_T5;
929 exp_minor = FW_VERSION_MINOR_T5;
930 exp_micro = FW_VERSION_MICRO_T5;
931 break;
932 default:
933 dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
934 adapter->chip);
935 return -EINVAL;
936 }
937
873 memcpy(adapter->params.api_vers, api_vers, 938 memcpy(adapter->params.api_vers, api_vers,
874 sizeof(adapter->params.api_vers)); 939 sizeof(adapter->params.api_vers));
875 940
876 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ 941 if (major != exp_major) { /* major mismatch - fail */
877 dev_err(adapter->pdev_dev, 942 dev_err(adapter->pdev_dev,
878 "card FW has major version %u, driver wants %u\n", 943 "card FW has major version %u, driver wants %u\n",
879 major, FW_VERSION_MAJOR); 944 major, exp_major);
880 return -EINVAL; 945 return -EINVAL;
881 } 946 }
882 947
883 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) 948 if (minor == exp_minor && micro == exp_micro)
884 return 0; /* perfect match */ 949 return 0; /* perfect match */
885 950
886 /* Minor/micro version mismatch. Report it but often it's OK. */ 951 /* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter)
1246 { 0 } 1311 { 0 }
1247 }; 1312 };
1248 1313
1314 static struct intr_info t5_pcie_intr_info[] = {
1315 { MSTGRPPERR, "Master Response Read Queue parity error",
1316 -1, 1 },
1317 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1318 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1319 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1320 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1321 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1322 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1323 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1324 -1, 1 },
1325 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1326 -1, 1 },
1327 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1328 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1329 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1330 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1331 { DREQWRPERR, "PCI DMA channel write request parity error",
1332 -1, 1 },
1333 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1334 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1335 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1336 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1337 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1338 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1339 { FIDPERR, "PCI FID parity error", -1, 1 },
1340 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1341 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1342 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1343 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1344 -1, 1 },
1345 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1346 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1347 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1348 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1349 { READRSPERR, "Outbound read error", -1, 0 },
1350 { 0 }
1351 };
1352
1249 int fat; 1353 int fat;
1250 1354
1251 fat = t4_handle_intr_status(adapter, 1355 fat = t4_handle_intr_status(adapter,
@@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
1254 t4_handle_intr_status(adapter, 1358 t4_handle_intr_status(adapter,
1255 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1359 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1256 pcie_port_intr_info) + 1360 pcie_port_intr_info) +
1257 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); 1361 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1362 is_t4(adapter->chip) ?
1363 pcie_intr_info : t5_pcie_intr_info);
1364
1258 if (fat) 1365 if (fat)
1259 t4_fatal_err(adapter); 1366 t4_fatal_err(adapter);
1260} 1367}
@@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap)
1664 */ 1771 */
1665static void xgmac_intr_handler(struct adapter *adap, int port) 1772static void xgmac_intr_handler(struct adapter *adap, int port)
1666{ 1773{
1667 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 1774 u32 v, int_cause_reg;
1775
1776 if (is_t4(adap->chip))
1777 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1778 else
1779 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1780
1781 v = t4_read_reg(adap, int_cause_reg);
1668 1782
1669 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1783 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1670 if (!v) 1784 if (!v)
@@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2126 u32 bgmap = get_mps_bg_map(adap, idx); 2240 u32 bgmap = get_mps_bg_map(adap, idx);
2127 2241
2128#define GET_STAT(name) \ 2242#define GET_STAT(name) \
2129 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) 2243 t4_read_reg64(adap, \
2244 (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2245 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2130#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2246#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2131 2247
2132 p->tx_octets = GET_STAT(TX_PORT_BYTES); 2248 p->tx_octets = GET_STAT(TX_PORT_BYTES);
@@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2205void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 2321void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2206 const u8 *addr) 2322 const u8 *addr)
2207{ 2323{
2324 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2325
2326 if (is_t4(adap->chip)) {
2327 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2328 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2329 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2330 } else {
2331 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2332 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2333 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2334 }
2335
2208 if (addr) { 2336 if (addr) {
2209 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), 2337 t4_write_reg(adap, mag_id_reg_l,
2210 (addr[2] << 24) | (addr[3] << 16) | 2338 (addr[2] << 24) | (addr[3] << 16) |
2211 (addr[4] << 8) | addr[5]); 2339 (addr[4] << 8) | addr[5]);
2212 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), 2340 t4_write_reg(adap, mag_id_reg_h,
2213 (addr[0] << 8) | addr[1]); 2341 (addr[0] << 8) | addr[1]);
2214 } 2342 }
2215 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, 2343 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2216 addr ? MAGICEN : 0); 2344 addr ? MAGICEN : 0);
2217} 2345}
2218 2346
@@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2235 u64 mask0, u64 mask1, unsigned int crc, bool enable) 2363 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2236{ 2364{
2237 int i; 2365 int i;
2366 u32 port_cfg_reg;
2367
2368 if (is_t4(adap->chip))
2369 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2370 else
2371 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2238 2372
2239 if (!enable) { 2373 if (!enable) {
2240 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 2374 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2241 PATEN, 0);
2242 return 0; 2375 return 0;
2243 } 2376 }
2244 if (map > 0xff) 2377 if (map > 0xff)
2245 return -EINVAL; 2378 return -EINVAL;
2246 2379
2247#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) 2380#define EPIO_REG(name) \
2381 (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2382 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2248 2383
2249 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2384 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2250 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 2385 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2322 * @addr: address of first byte requested aligned on 32b. 2457 * @addr: address of first byte requested aligned on 32b.
2323 * @data: len bytes to hold the data read 2458 * @data: len bytes to hold the data read
2324 * @len: amount of data to read from window. Must be <= 2459 * @len: amount of data to read from window. Must be <=
2325 * MEMWIN0_APERATURE after adjusting for 16B alignment 2460 * MEMWIN0_APERATURE after adjusting for 16B for T4 and
2326 * requirements of the the memory window. 2461 * 128B for T5 alignment requirements of the the memory window.
2327 * 2462 *
2328 * Read len bytes of data from MC starting at @addr. 2463 * Read len bytes of data from MC starting at @addr.
2329 */ 2464 */
2330int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) 2465int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2331{ 2466{
2332 int i; 2467 int i, off;
2333 int off; 2468 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
2334 2469
2335 /* 2470 /* Align on a 2KB boundary.
2336 * Align on a 16B boundary.
2337 */ 2471 */
2338 off = addr & 15; 2472 off = addr & MEMWIN0_APERTURE;
2339 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2473 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2340 return -EINVAL; 2474 return -EINVAL;
2341 2475
2342 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); 2476 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
2477 (addr & ~MEMWIN0_APERTURE) | win_pf);
2343 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2478 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2344 2479
2345 for (i = 0; i < len; i += 4) 2480 for (i = 0; i < len; i += 4)
@@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3162 int i, ret; 3297 int i, ret;
3163 struct fw_vi_mac_cmd c; 3298 struct fw_vi_mac_cmd c;
3164 struct fw_vi_mac_exact *p; 3299 struct fw_vi_mac_exact *p;
3300 unsigned int max_naddr = is_t4(adap->chip) ?
3301 NUM_MPS_CLS_SRAM_L_INSTANCES :
3302 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3165 3303
3166 if (naddr > 7) 3304 if (naddr > 7)
3167 return -EINVAL; 3305 return -EINVAL;
@@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3187 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3325 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3188 3326
3189 if (idx) 3327 if (idx)
3190 idx[i] = index >= NEXACT_MAC ? 0xffff : index; 3328 idx[i] = index >= max_naddr ? 0xffff : index;
3191 if (index < NEXACT_MAC) 3329 if (index < max_naddr)
3192 ret++; 3330 ret++;
3193 else if (hash) 3331 else if (hash)
3194 *hash |= (1ULL << hash_mac_addr(addr[i])); 3332 *hash |= (1ULL << hash_mac_addr(addr[i]));
@@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3221 int ret, mode; 3359 int ret, mode;
3222 struct fw_vi_mac_cmd c; 3360 struct fw_vi_mac_cmd c;
3223 struct fw_vi_mac_exact *p = c.u.exact; 3361 struct fw_vi_mac_exact *p = c.u.exact;
3362 unsigned int max_mac_addr = is_t4(adap->chip) ?
3363 NUM_MPS_CLS_SRAM_L_INSTANCES :
3364 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3224 3365
3225 if (idx < 0) /* new allocation */ 3366 if (idx < 0) /* new allocation */
3226 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 3367 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3238 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3379 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3239 if (ret == 0) { 3380 if (ret == 0) {
3240 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3381 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3241 if (ret >= NEXACT_MAC) 3382 if (ret >= max_mac_addr)
3242 ret = -ENOMEM; 3383 ret = -ENOMEM;
3243 } 3384 }
3244 return ret; 3385 return ret;
@@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap)
3547 */ 3688 */
3548int t4_prep_adapter(struct adapter *adapter) 3689int t4_prep_adapter(struct adapter *adapter)
3549{ 3690{
3550 int ret; 3691 int ret, ver;
3692 uint16_t device_id;
3551 3693
3552 ret = t4_wait_dev_ready(adapter); 3694 ret = t4_wait_dev_ready(adapter);
3553 if (ret < 0) 3695 if (ret < 0)
@@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter)
3562 return ret; 3704 return ret;
3563 } 3705 }
3564 3706
3707 /* Retrieve adapter's device ID
3708 */
3709 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3710 ver = device_id >> 12;
3711 switch (ver) {
3712 case CHELSIO_T4:
3713 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
3714 adapter->params.rev);
3715 break;
3716 case CHELSIO_T5:
3717 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
3718 adapter->params.rev);
3719 break;
3720 default:
3721 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3722 device_id);
3723 return -EINVAL;
3724 }
3725
3726 /* Reassign the updated revision field */
3727 adapter->params.rev = adapter->chip;
3728
3565 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3729 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3566 3730
3567 /* 3731 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f534ed7e10e9..1d1623be9f1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
47 TCB_SIZE = 128, /* TCB size */ 47 TCB_SIZE = 128, /* TCB size */
48 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
49 NCCTRL_WIN = 32, /* # of congestion control windows */ 49 NCCTRL_WIN = 32, /* # of congestion control windows */
50 NEXACT_MAC = 336, /* # of exact MAC address filters */
51 L2T_SIZE = 4096, /* # of L2T entries */ 50 L2T_SIZE = 4096, /* # of L2T entries */
52 MBOX_LEN = 64, /* mailbox size in bytes */ 51 MBOX_LEN = 64, /* mailbox size in bytes */
53 TRACE_LEN = 112, /* length of trace data and mask */ 52 TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 261d17703adc..01d484441200 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -74,6 +74,7 @@ enum {
74 CPL_PASS_ESTABLISH = 0x41, 74 CPL_PASS_ESTABLISH = 0x41,
75 CPL_RX_DATA_DDP = 0x42, 75 CPL_RX_DATA_DDP = 0x42,
76 CPL_PASS_ACCEPT_REQ = 0x44, 76 CPL_PASS_ACCEPT_REQ = 0x44,
77 CPL_TRACE_PKT_T5 = 0x48,
77 78
78 CPL_RDMA_READ_REQ = 0x60, 79 CPL_RDMA_READ_REQ = 0x60,
79 80
@@ -157,6 +158,7 @@ union opcode_tid {
157}; 158};
158 159
159#define CPL_OPCODE(x) ((x) << 24) 160#define CPL_OPCODE(x) ((x) << 24)
161#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
160#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) 162#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
161#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) 163#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
162#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) 164#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
@@ -287,6 +289,23 @@ struct cpl_act_open_req {
287 __be32 opt2; 289 __be32 opt2;
288}; 290};
289 291
292#define S_FILTER_TUPLE 24
293#define M_FILTER_TUPLE 0xFFFFFFFFFF
294#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
295#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
296struct cpl_t5_act_open_req {
297 WR_HDR;
298 union opcode_tid ot;
299 __be16 local_port;
300 __be16 peer_port;
301 __be32 local_ip;
302 __be32 peer_ip;
303 __be64 opt0;
304 __be32 rsvd;
305 __be32 opt2;
306 __be64 params;
307};
308
290struct cpl_act_open_req6 { 309struct cpl_act_open_req6 {
291 WR_HDR; 310 WR_HDR;
292 union opcode_tid ot; 311 union opcode_tid ot;
@@ -566,6 +585,11 @@ struct cpl_rx_pkt {
566#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 585#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
567#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 586#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
568 587
588#define S_RX_T5_ETHHDR_LEN 0
589#define M_RX_T5_ETHHDR_LEN 0x3F
590#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
591#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
592
569#define S_RX_MACIDX 8 593#define S_RX_MACIDX 8
570#define M_RX_MACIDX 0x1FF 594#define M_RX_MACIDX 0x1FF
571#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 595#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
@@ -612,6 +636,28 @@ struct cpl_trace_pkt {
612 __be64 tstamp; 636 __be64 tstamp;
613}; 637};
614 638
639struct cpl_t5_trace_pkt {
640 __u8 opcode;
641 __u8 intf;
642#if defined(__LITTLE_ENDIAN_BITFIELD)
643 __u8 runt:4;
644 __u8 filter_hit:4;
645 __u8:6;
646 __u8 err:1;
647 __u8 trunc:1;
648#else
649 __u8 filter_hit:4;
650 __u8 runt:4;
651 __u8 trunc:1;
652 __u8 err:1;
653 __u8:6;
654#endif
655 __be16 rsvd;
656 __be16 len;
657 __be64 tstamp;
658 __be64 rsvd1;
659};
660
615struct cpl_l2t_write_req { 661struct cpl_l2t_write_req {
616 WR_HDR; 662 WR_HDR;
617 union opcode_tid ot; 663 union opcode_tid ot;
@@ -643,6 +689,15 @@ struct cpl_sge_egr_update {
643 __be16 pidx; 689 __be16 pidx;
644}; 690};
645 691
692/* cpl_fw*.type values */
693enum {
694 FW_TYPE_CMD_RPL = 0,
695 FW_TYPE_WR_RPL = 1,
696 FW_TYPE_CQE = 2,
697 FW_TYPE_OFLD_CONNECTION_WR_RPL = 3,
698 FW_TYPE_RSSCPL = 4,
699};
700
646struct cpl_fw4_pld { 701struct cpl_fw4_pld {
647 u8 opcode; 702 u8 opcode;
648 u8 rsvd0[3]; 703 u8 rsvd0[3];
@@ -692,6 +747,7 @@ enum {
692 FW6_TYPE_WR_RPL = 1, 747 FW6_TYPE_WR_RPL = 1,
693 FW6_TYPE_CQE = 2, 748 FW6_TYPE_CQE = 2,
694 FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3, 749 FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
750 FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
695}; 751};
696 752
697struct cpl_fw6_msg_ofld_connection_wr_rpl { 753struct cpl_fw6_msg_ofld_connection_wr_rpl {
@@ -742,4 +798,12 @@ struct ulp_mem_io {
742#define ULP_MEMIO_LOCK(x) ((x) << 31) 798#define ULP_MEMIO_LOCK(x) ((x) << 31)
743}; 799};
744 800
801#define S_T5_ULP_MEMIO_IMM 23
802#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
803#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
804
805#define S_T5_ULP_MEMIO_ORDER 22
806#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
807#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
808
745#endif /* __T4_MSG_H */ 809#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 83ec5f7844ac..ef146c0ba481 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -68,9 +68,14 @@
68#define QID_SHIFT 15 68#define QID_SHIFT 15
69#define QID(x) ((x) << QID_SHIFT) 69#define QID(x) ((x) << QID_SHIFT)
70#define DBPRIO(x) ((x) << 14) 70#define DBPRIO(x) ((x) << 14)
71#define DBTYPE(x) ((x) << 13)
71#define PIDX_MASK 0x00003fffU 72#define PIDX_MASK 0x00003fffU
72#define PIDX_SHIFT 0 73#define PIDX_SHIFT 0
73#define PIDX(x) ((x) << PIDX_SHIFT) 74#define PIDX(x) ((x) << PIDX_SHIFT)
75#define S_PIDX_T5 0
76#define M_PIDX_T5 0x1fffU
77#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
78
74 79
75#define SGE_PF_GTS 0x4 80#define SGE_PF_GTS 0x4
76#define INGRESSQID_MASK 0xffff0000U 81#define INGRESSQID_MASK 0xffff0000U
@@ -152,6 +157,8 @@
152#define QUEUESPERPAGEPF0_MASK 0x0000000fU 157#define QUEUESPERPAGEPF0_MASK 0x0000000fU
153#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) 158#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
154 159
160#define QUEUESPERPAGEPF1 4
161
155#define SGE_INT_CAUSE1 0x1024 162#define SGE_INT_CAUSE1 0x1024
156#define SGE_INT_CAUSE2 0x1030 163#define SGE_INT_CAUSE2 0x1030
157#define SGE_INT_CAUSE3 0x103c 164#define SGE_INT_CAUSE3 0x103c
@@ -234,6 +241,10 @@
234#define SGE_DOORBELL_CONTROL 0x10a8 241#define SGE_DOORBELL_CONTROL 0x10a8
235#define ENABLE_DROP (1 << 13) 242#define ENABLE_DROP (1 << 13)
236 243
244#define S_NOCOALESCE 26
245#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
246#define F_NOCOALESCE V_NOCOALESCE(1U)
247
237#define SGE_TIMER_VALUE_0_AND_1 0x10b8 248#define SGE_TIMER_VALUE_0_AND_1 0x10b8
238#define TIMERVALUE0_MASK 0xffff0000U 249#define TIMERVALUE0_MASK 0xffff0000U
239#define TIMERVALUE0_SHIFT 16 250#define TIMERVALUE0_SHIFT 16
@@ -272,17 +283,36 @@
272#define S_HP_INT_THRESH 28 283#define S_HP_INT_THRESH 28
273#define M_HP_INT_THRESH 0xfU 284#define M_HP_INT_THRESH 0xfU
274#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) 285#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
286#define S_LP_INT_THRESH_T5 18
287#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
288#define M_LP_COUNT_T5 0x3ffffU
289#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
275#define M_HP_COUNT 0x7ffU 290#define M_HP_COUNT 0x7ffU
276#define S_HP_COUNT 16 291#define S_HP_COUNT 16
277#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) 292#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
278#define S_LP_INT_THRESH 12 293#define S_LP_INT_THRESH 12
279#define M_LP_INT_THRESH 0xfU 294#define M_LP_INT_THRESH 0xfU
295#define M_LP_INT_THRESH_T5 0xfffU
280#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) 296#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
281#define M_LP_COUNT 0x7ffU 297#define M_LP_COUNT 0x7ffU
282#define S_LP_COUNT 0 298#define S_LP_COUNT 0
283#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) 299#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
284#define A_SGE_DBFIFO_STATUS 0x10a4 300#define A_SGE_DBFIFO_STATUS 0x10a4
285 301
302#define SGE_STAT_TOTAL 0x10e4
303#define SGE_STAT_MATCH 0x10e8
304
305#define SGE_STAT_CFG 0x10ec
306#define S_STATSOURCE_T5 9
307#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
308
309#define SGE_DBFIFO_STATUS2 0x1118
310#define M_HP_COUNT_T5 0x3ffU
311#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
312#define S_HP_INT_THRESH_T5 10
313#define M_HP_INT_THRESH_T5 0xfU
314#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
315
286#define S_ENABLE_DROP 13 316#define S_ENABLE_DROP 13
287#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) 317#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
288#define F_ENABLE_DROP V_ENABLE_DROP(1U) 318#define F_ENABLE_DROP V_ENABLE_DROP(1U)
@@ -331,8 +361,27 @@
331#define MSIADDRHPERR 0x00000002U 361#define MSIADDRHPERR 0x00000002U
332#define MSIADDRLPERR 0x00000001U 362#define MSIADDRLPERR 0x00000001U
333 363
364#define READRSPERR 0x20000000U
365#define TRGT1GRPPERR 0x10000000U
366#define IPSOTPERR 0x08000000U
367#define IPRXDATAGRPPERR 0x02000000U
368#define IPRXHDRGRPPERR 0x01000000U
369#define MAGRPPERR 0x00400000U
370#define VFIDPERR 0x00200000U
371#define HREQWRPERR 0x00010000U
372#define DREQWRPERR 0x00002000U
373#define MSTTAGQPERR 0x00000400U
374#define PIOREQGRPPERR 0x00000100U
375#define PIOCPLGRPPERR 0x00000080U
376#define MSIXSTIPERR 0x00000004U
377#define MSTTIMEOUTPERR 0x00000002U
378#define MSTGRPPERR 0x00000001U
379
334#define PCIE_NONFAT_ERR 0x3010 380#define PCIE_NONFAT_ERR 0x3010
335#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 381#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
382#define S_PCIEOFST 10
383#define M_PCIEOFST 0x3fffffU
384#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
336#define PCIEOFST_MASK 0xfffffc00U 385#define PCIEOFST_MASK 0xfffffc00U
337#define BIR_MASK 0x00000300U 386#define BIR_MASK 0x00000300U
338#define BIR_SHIFT 8 387#define BIR_SHIFT 8
@@ -342,6 +391,9 @@
342#define WINDOW(x) ((x) << WINDOW_SHIFT) 391#define WINDOW(x) ((x) << WINDOW_SHIFT)
343#define PCIE_MEM_ACCESS_OFFSET 0x306c 392#define PCIE_MEM_ACCESS_OFFSET 0x306c
344 393
394#define S_PFNUM 0
395#define V_PFNUM(x) ((x) << S_PFNUM)
396
345#define PCIE_FW 0x30b8 397#define PCIE_FW 0x30b8
346#define PCIE_FW_ERR 0x80000000U 398#define PCIE_FW_ERR 0x80000000U
347#define PCIE_FW_INIT 0x40000000U 399#define PCIE_FW_INIT 0x40000000U
@@ -407,12 +459,18 @@
407 459
408#define MC_BIST_STATUS_RDATA 0x7688 460#define MC_BIST_STATUS_RDATA 0x7688
409 461
462#define MA_EDRAM0_BAR 0x77c0
463#define MA_EDRAM1_BAR 0x77c4
464#define EDRAM_SIZE_MASK 0xfffU
465#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
466
410#define MA_EXT_MEMORY_BAR 0x77c8 467#define MA_EXT_MEMORY_BAR 0x77c8
411#define EXT_MEM_SIZE_MASK 0x00000fffU 468#define EXT_MEM_SIZE_MASK 0x00000fffU
412#define EXT_MEM_SIZE_SHIFT 0 469#define EXT_MEM_SIZE_SHIFT 0
413#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) 470#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
414 471
415#define MA_TARGET_MEM_ENABLE 0x77d8 472#define MA_TARGET_MEM_ENABLE 0x77d8
473#define EXT_MEM1_ENABLE 0x00000010U
416#define EXT_MEM_ENABLE 0x00000004U 474#define EXT_MEM_ENABLE 0x00000004U
417#define EDRAM1_ENABLE 0x00000002U 475#define EDRAM1_ENABLE 0x00000002U
418#define EDRAM0_ENABLE 0x00000001U 476#define EDRAM0_ENABLE 0x00000001U
@@ -431,6 +489,7 @@
431#define MA_PCIE_FW 0x30b8 489#define MA_PCIE_FW 0x30b8
432#define MA_PARITY_ERROR_STATUS 0x77f4 490#define MA_PARITY_ERROR_STATUS 0x77f4
433 491
492#define MA_EXT_MEMORY1_BAR 0x7808
434#define EDC_0_BASE_ADDR 0x7900 493#define EDC_0_BASE_ADDR 0x7900
435 494
436#define EDC_BIST_CMD 0x7904 495#define EDC_BIST_CMD 0x7904
@@ -801,6 +860,15 @@
801#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c 860#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
802#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 861#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
803#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 862#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
863#define MAC_PORT_CFG2 0x818
864#define MAC_PORT_MAGIC_MACID_LO 0x824
865#define MAC_PORT_MAGIC_MACID_HI 0x828
866#define MAC_PORT_EPIO_DATA0 0x8c0
867#define MAC_PORT_EPIO_DATA1 0x8c4
868#define MAC_PORT_EPIO_DATA2 0x8c8
869#define MAC_PORT_EPIO_DATA3 0x8cc
870#define MAC_PORT_EPIO_OP 0x8d0
871
804#define MPS_CMN_CTL 0x9000 872#define MPS_CMN_CTL 0x9000
805#define NUMPORTS_MASK 0x00000003U 873#define NUMPORTS_MASK 0x00000003U
806#define NUMPORTS_SHIFT 0 874#define NUMPORTS_SHIFT 0
@@ -1063,6 +1131,7 @@
1063#define ADDRESS_SHIFT 0 1131#define ADDRESS_SHIFT 0
1064#define ADDRESS(x) ((x) << ADDRESS_SHIFT) 1132#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
1065 1133
1134#define MAC_PORT_INT_CAUSE 0x8dc
1066#define XGMAC_PORT_INT_CAUSE 0x10dc 1135#define XGMAC_PORT_INT_CAUSE 0x10dc
1067 1136
1068#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 1137#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
@@ -1101,4 +1170,33 @@
1101#define V_PORT(x) ((x) << S_PORT) 1170#define V_PORT(x) ((x) << S_PORT)
1102#define F_PORT V_PORT(1U) 1171#define F_PORT V_PORT(1U)
1103 1172
1173#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1174#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1175
1176#define T5_PORT0_BASE 0x30000
1177#define T5_PORT_STRIDE 0x4000
1178#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
1179#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
1180
1181#define MC_0_BASE_ADDR 0x40000
1182#define MC_1_BASE_ADDR 0x48000
1183#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
1184#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
1185
1186#define MC_P_BIST_CMD 0x41400
1187#define MC_P_BIST_CMD_ADDR 0x41404
1188#define MC_P_BIST_CMD_LEN 0x41408
1189#define MC_P_BIST_DATA_PATTERN 0x4140c
1190#define MC_P_BIST_STATUS_RDATA 0x41488
1191#define EDC_T50_BASE_ADDR 0x50000
1192#define EDC_H_BIST_CMD 0x50004
1193#define EDC_H_BIST_CMD_ADDR 0x50008
1194#define EDC_H_BIST_CMD_LEN 0x5000c
1195#define EDC_H_BIST_DATA_PATTERN 0x50010
1196#define EDC_H_BIST_STATUS_RDATA 0x50028
1197
1198#define EDC_T51_BASE_ADDR 0x50800
1199#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1200#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1201
1104#endif /* __T4_REGS_H */ 1202#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a0dcccd846c9..d1c755f78aaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr {
574 __be16 vlantci; 574 __be16 vlantci;
575}; 575};
576 576
577#define FW_CMD_MAX_TIMEOUT 3000 577#define FW_CMD_MAX_TIMEOUT 10000
578 578
579/* 579/*
580 * If a host driver does a HELLO and discovers that there's already a MASTER 580 * If a host driver does a HELLO and discovers that there's already a MASTER
@@ -973,7 +973,9 @@ enum fw_params_param_pfvf {
973 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, 973 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
974 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, 974 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
975 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, 975 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
976 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E 976 FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
977 FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
978 FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
977}; 979};
978 980
979/* 981/*
@@ -1758,6 +1760,25 @@ enum fw_port_module_type {
1758 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK 1760 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
1759}; 1761};
1760 1762
1763enum fw_port_mod_sub_type {
1764 FW_PORT_MOD_SUB_TYPE_NA,
1765 FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
1766 FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
1767 FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
1768 FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
1769 FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
1770 FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
1771
1772 /* The following will never been in the VPD. They are TWINAX cable
1773 * lengths decoded from SFP+ module i2c PROMs. These should
1774 * almost certainly go somewhere else ...
1775 */
1776 FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
1777 FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
1778 FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
1779 FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
1780};
1781
1761/* port stats */ 1782/* port stats */
1762#define FW_NUM_PORT_STATS 50 1783#define FW_NUM_PORT_STATS 50
1763#define FW_NUM_PORT_TX_STATS 23 1784#define FW_NUM_PORT_TX_STATS 23
@@ -2123,11 +2144,11 @@ struct fw_hdr {
2123 u8 intfver_ri; 2144 u8 intfver_ri;
2124 u8 intfver_iscsipdu; 2145 u8 intfver_iscsipdu;
2125 u8 intfver_iscsi; 2146 u8 intfver_iscsi;
2147 u8 intfver_fcoepdu;
2126 u8 intfver_fcoe; 2148 u8 intfver_fcoe;
2127 u8 reserved2; 2149 __u32 reserved2;
2128 __u32 reserved3; 2150 __u32 reserved3;
2129 __u32 reserved4; 2151 __u32 reserved4;
2130 __u32 reserved5;
2131 __be32 flags; 2152 __be32 flags;
2132 __be32 reserved6[23]; 2153 __be32 reserved6[23];
2133}; 2154};
@@ -2137,6 +2158,17 @@ struct fw_hdr {
2137#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) 2158#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
2138#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) 2159#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
2139 2160
2161enum fw_hdr_intfver {
2162 FW_HDR_INTFVER_NIC = 0x00,
2163 FW_HDR_INTFVER_VNIC = 0x00,
2164 FW_HDR_INTFVER_OFLD = 0x00,
2165 FW_HDR_INTFVER_RI = 0x00,
2166 FW_HDR_INTFVER_ISCSIPDU = 0x00,
2167 FW_HDR_INTFVER_ISCSI = 0x00,
2168 FW_HDR_INTFVER_FCOEPDU = 0x00,
2169 FW_HDR_INTFVER_FCOE = 0x00,
2170};
2171
2140enum fw_hdr_flags { 2172enum fw_hdr_flags {
2141 FW_HDR_FLAGS_RESET_HALT = 0x00000001, 2173 FW_HDR_FLAGS_RESET_HALT = 0x00000001,
2142}; 2174};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..be5c7ef6ca93 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,6 +344,7 @@ struct adapter {
344 unsigned long registered_device_map; 344 unsigned long registered_device_map;
345 unsigned long open_device_map; 345 unsigned long open_device_map;
346 unsigned long flags; 346 unsigned long flags;
347 enum chip_type chip;
347 struct adapter_params params; 348 struct adapter_params params;
348 349
349 /* queue and interrupt resources */ 350 /* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 56b46ab2d4c5..40c22e7de15c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -54,8 +54,8 @@
54/* 54/*
55 * Generic information about the driver. 55 * Generic information about the driver.
56 */ 56 */
57#define DRV_VERSION "1.0.0" 57#define DRV_VERSION "2.0.0-ko"
58#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver" 58#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
59 59
60/* 60/*
61 * Module Parameters. 61 * Module Parameters.
@@ -409,6 +409,20 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
409 break; 409 break;
410 } 410 }
411 411
412 case CPL_FW4_MSG: {
413 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
414 */
415 const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
416 opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
417 if (opcode != CPL_SGE_EGR_UPDATE) {
418 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
419 , opcode);
420 break;
421 }
422 cpl = (void *)p;
423 /*FALLTHROUGH*/
424 }
425
412 case CPL_SGE_EGR_UPDATE: { 426 case CPL_SGE_EGR_UPDATE: {
413 /* 427 /*
414 * We've received an Egress Queue Status Update message. We 428 * We've received an Egress Queue Status Update message. We
@@ -1050,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1050 /* 1064 /*
1051 * Chip version 4, revision 0x3f (cxgb4vf). 1065 * Chip version 4, revision 0x3f (cxgb4vf).
1052 */ 1066 */
1053 return 4 | (0x3f << 10); 1067 return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
1054} 1068}
1055 1069
1056/* 1070/*
@@ -1100,10 +1114,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1100 * Since there is no support for separate rx/tx vlan accel 1114 * Since there is no support for separate rx/tx vlan accel
1101 * enable/disable make sure tx flag is always in same state as rx. 1115 * enable/disable make sure tx flag is always in same state as rx.
1102 */ 1116 */
1103 if (features & NETIF_F_HW_VLAN_RX) 1117 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1104 features |= NETIF_F_HW_VLAN_TX; 1118 features |= NETIF_F_HW_VLAN_CTAG_TX;
1105 else 1119 else
1106 features &= ~NETIF_F_HW_VLAN_TX; 1120 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1107 1121
1108 return features; 1122 return features;
1109} 1123}
@@ -1114,9 +1128,9 @@ static int cxgb4vf_set_features(struct net_device *dev,
1114 struct port_info *pi = netdev_priv(dev); 1128 struct port_info *pi = netdev_priv(dev);
1115 netdev_features_t changed = dev->features ^ features; 1129 netdev_features_t changed = dev->features ^ features;
1116 1130
1117 if (changed & NETIF_F_HW_VLAN_RX) 1131 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1118 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, 1132 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1119 features & NETIF_F_HW_VLAN_TX, 0); 1133 features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1120 1134
1121 return 0; 1135 return 0;
1122} 1136}
@@ -2072,6 +2086,7 @@ static int adap_init0(struct adapter *adapter)
2072 struct sge *s = &adapter->sge; 2086 struct sge *s = &adapter->sge;
2073 unsigned int ethqsets; 2087 unsigned int ethqsets;
2074 int err; 2088 int err;
2089 u32 param, val = 0;
2075 2090
2076 /* 2091 /*
2077 * Wait for the device to become ready before proceeding ... 2092 * Wait for the device to become ready before proceeding ...
@@ -2099,6 +2114,15 @@ static int adap_init0(struct adapter *adapter)
2099 return err; 2114 return err;
2100 } 2115 }
2101 2116
2117 switch (adapter->pdev->device >> 12) {
2118 case CHELSIO_T4:
2119 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2120 break;
2121 case CHELSIO_T5:
2122 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
2123 break;
2124 }
2125
2102 /* 2126 /*
2103 * Grab basic operational parameters. These will predominantly have 2127 * Grab basic operational parameters. These will predominantly have
2104 * been set up by the Physical Function Driver or will be hard coded 2128 * been set up by the Physical Function Driver or will be hard coded
@@ -2144,6 +2168,16 @@ static int adap_init0(struct adapter *adapter)
2144 return err; 2168 return err;
2145 } 2169 }
2146 2170
2171 /* If we're running on newer firmware, let it know that we're
2172 * prepared to deal with encapsulated CPL messages. Older
2173 * firmware won't understand this and we'll just get
2174 * unencapsulated messages ...
2175 */
2176 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
2177 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2178 val = 1;
2179 (void) t4vf_set_params(adapter, 1, &param, &val);
2180
2147 /* 2181 /*
2148 * Retrieve our RX interrupt holdoff timer values and counter 2182 * Retrieve our RX interrupt holdoff timer values and counter
2149 * threshold values from the SGE parameters. 2183 * threshold values from the SGE parameters.
@@ -2614,11 +2648,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2614 2648
2615 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | 2649 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2616 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2650 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2617 NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; 2651 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2618 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | 2652 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2619 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2653 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2620 NETIF_F_HIGHDMA; 2654 NETIF_F_HIGHDMA;
2621 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX; 2655 netdev->features = netdev->hw_features |
2656 NETIF_F_HW_VLAN_CTAG_TX;
2622 if (pci_using_dac) 2657 if (pci_using_dac)
2623 netdev->features |= NETIF_F_HIGHDMA; 2658 netdev->features |= NETIF_F_HIGHDMA;
2624 2659
@@ -2888,6 +2923,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
2888 CH_DEVICE(0x480a, 0), /* T404-bt */ 2923 CH_DEVICE(0x480a, 0), /* T404-bt */
2889 CH_DEVICE(0x480d, 0), /* T480-cr */ 2924 CH_DEVICE(0x480d, 0), /* T480-cr */
2890 CH_DEVICE(0x480e, 0), /* T440-lp-cr */ 2925 CH_DEVICE(0x480e, 0), /* T440-lp-cr */
2926 CH_DEVICE(0x5800, 0), /* T580-dbg */
2927 CH_DEVICE(0x5801, 0), /* T520-cr */
2928 CH_DEVICE(0x5802, 0), /* T522-cr */
2929 CH_DEVICE(0x5803, 0), /* T540-cr */
2930 CH_DEVICE(0x5804, 0), /* T520-bch */
2931 CH_DEVICE(0x5805, 0), /* T540-bch */
2932 CH_DEVICE(0x5806, 0), /* T540-ch */
2933 CH_DEVICE(0x5807, 0), /* T520-so */
2934 CH_DEVICE(0x5808, 0), /* T520-cx */
2935 CH_DEVICE(0x5809, 0), /* T520-bt */
2936 CH_DEVICE(0x580a, 0), /* T504-bt */
2937 CH_DEVICE(0x580b, 0), /* T520-sr */
2938 CH_DEVICE(0x580c, 0), /* T504-bt */
2939 CH_DEVICE(0x580d, 0), /* T580-cr */
2940 CH_DEVICE(0x580e, 0), /* T540-lp-cr */
2941 CH_DEVICE(0x580f, 0), /* Amsterdam */
2942 CH_DEVICE(0x5810, 0), /* T580-lp-cr */
2943 CH_DEVICE(0x5811, 0), /* T520-lp-cr */
2944 CH_DEVICE(0x5812, 0), /* T560-cr */
2945 CH_DEVICE(0x5813, 0), /* T580-cr */
2891 { 0, } 2946 { 0, }
2892}; 2947};
2893 2948
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9488032d6d2d..df296af20bd5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
528 */ 528 */
529static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) 529static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
530{ 530{
531 u32 val;
532
531 /* 533 /*
532 * The SGE keeps track of its Producer and Consumer Indices in terms 534 * The SGE keeps track of its Producer and Consumer Indices in terms
533 * of Egress Queue Units so we can only tell it about integral numbers 535 * of Egress Queue Units so we can only tell it about integral numbers
534 * of multiples of Free List Entries per Egress Queue Units ... 536 * of multiples of Free List Entries per Egress Queue Units ...
535 */ 537 */
536 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 538 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
540 if (!is_t4(adapter->chip))
541 val |= DBTYPE(1);
537 wmb(); 542 wmb();
538 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
539 DBPRIO(1) | 544 DBPRIO(1) |
540 QID(fl->cntxt_id) | 545 QID(fl->cntxt_id) | val);
541 PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
542 fl->pend_cred %= FL_PER_EQ_UNIT; 546 fl->pend_cred %= FL_PER_EQ_UNIT;
543 } 547 }
544} 548}
@@ -1478,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1478 skb_record_rx_queue(skb, rxq->rspq.idx); 1482 skb_record_rx_queue(skb, rxq->rspq.idx);
1479 1483
1480 if (pkt->vlan_ex) { 1484 if (pkt->vlan_ex) {
1481 __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); 1485 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1486 be16_to_cpu(pkt->vlan));
1482 rxq->stats.vlan_ex++; 1487 rxq->stats.vlan_ex++;
1483 } 1488 }
1484 ret = napi_gro_frags(&rxq->rspq.napi); 1489 ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1547,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1547 1552
1548 if (pkt->vlan_ex) { 1553 if (pkt->vlan_ex) {
1549 rxq->stats.vlan_ex++; 1554 rxq->stats.vlan_ex++;
1550 __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan)); 1555 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
1551 } 1556 }
1552 1557
1553 netif_receive_skb(skb); 1558 netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 283f9d0d37fd..53cbfed21d0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -38,6 +38,25 @@
38 38
39#include "../cxgb4/t4fw_api.h" 39#include "../cxgb4/t4fw_api.h"
40 40
41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
42#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
44
45#define CHELSIO_T4 0x4
46#define CHELSIO_T5 0x5
47
48enum chip_type {
49 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
50 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
51 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
52 T4_FIRST_REV = T4_A1,
53 T4_LAST_REV = T4_A3,
54
55 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
56 T5_FIRST_REV = T5_A1,
57 T5_LAST_REV = T5_A1,
58};
59
41/* 60/*
42 * The "len16" field of a Firmware Command Structure ... 61 * The "len16" field of a Firmware Command Structure ...
43 */ 62 */
@@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
232 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); 251 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
233} 252}
234 253
254static inline int is_t4(enum chip_type chip)
255{
256 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
257}
258
235int t4vf_wait_dev_ready(struct adapter *); 259int t4vf_wait_dev_ready(struct adapter *);
236int t4vf_port_init(struct adapter *, int); 260int t4vf_port_init(struct adapter *, int);
237 261
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 7127c7b9efde..9f96dc3bb112 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1027 unsigned nfilters = 0; 1027 unsigned nfilters = 0;
1028 unsigned int rem = naddr; 1028 unsigned int rem = naddr;
1029 struct fw_vi_mac_cmd cmd, rpl; 1029 struct fw_vi_mac_cmd cmd, rpl;
1030 unsigned int max_naddr = is_t4(adapter->chip) ?
1031 NUM_MPS_CLS_SRAM_L_INSTANCES :
1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1030 1033
1031 if (naddr > FW_CLS_TCAM_NUM_ENTRIES) 1034 if (naddr > max_naddr)
1032 return -EINVAL; 1035 return -EINVAL;
1033 1036
1034 for (offset = 0; offset < naddr; /**/) { 1037 for (offset = 0; offset < naddr; /**/) {
@@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1069 1072
1070 if (idx) 1073 if (idx)
1071 idx[offset+i] = 1074 idx[offset+i] =
1072 (index >= FW_CLS_TCAM_NUM_ENTRIES 1075 (index >= max_naddr
1073 ? 0xffff 1076 ? 0xffff
1074 : index); 1077 : index);
1075 if (index < FW_CLS_TCAM_NUM_ENTRIES) 1078 if (index < max_naddr)
1076 nfilters++; 1079 nfilters++;
1077 else if (hash) 1080 else if (hash)
1078 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 1081 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1118 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1119 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1120 u.exact[1]), 16); 1123 u.exact[1]), 16);
1124 unsigned int max_naddr = is_t4(adapter->chip) ?
1125 NUM_MPS_CLS_SRAM_L_INSTANCES :
1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1121 1127
1122 /* 1128 /*
1123 * If this is a new allocation, determine whether it should be 1129 * If this is a new allocation, determine whether it should be
@@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1140 if (ret == 0) { 1146 if (ret == 0) {
1141 p = &rpl.u.exact[0]; 1147 p = &rpl.u.exact[0];
1142 ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); 1148 ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
1143 if (ret >= FW_CLS_TCAM_NUM_ENTRIES) 1149 if (ret >= max_naddr)
1144 ret = -ENOMEM; 1150 ret = -ENOMEM;
1145 } 1151 }
1146 return ret; 1152 return ret;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 138446957786..19f642a45f40 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -101,23 +101,6 @@ static char version[] __initdata =
101 * them to system IRQ numbers. This mapping is card specific and is set to 101 * them to system IRQ numbers. This mapping is card specific and is set to
102 * the configuration of the Cirrus Eval board for this chip. 102 * the configuration of the Cirrus Eval board for this chip.
103 */ 103 */
104#if defined(CONFIG_MACH_IXDP2351)
105#define CS89x0_NONISA_IRQ
106static unsigned int netcard_portlist[] __used __initdata = {
107 IXDP2351_VIRT_CS8900_BASE, 0
108};
109static unsigned int cs8900_irq_map[] = {
110 IRQ_IXDP2351_CS8900, 0, 0, 0
111};
112#elif defined(CONFIG_ARCH_IXDP2X01)
113#define CS89x0_NONISA_IRQ
114static unsigned int netcard_portlist[] __used __initdata = {
115 IXDP2X01_CS8900_VIRT_BASE, 0
116};
117static unsigned int cs8900_irq_map[] = {
118 IRQ_IXDP2X01_CS8900, 0, 0, 0
119};
120#else
121#ifndef CONFIG_CS89x0_PLATFORM 104#ifndef CONFIG_CS89x0_PLATFORM
122static unsigned int netcard_portlist[] __used __initdata = { 105static unsigned int netcard_portlist[] __used __initdata = {
123 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 106 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
@@ -127,7 +110,6 @@ static unsigned int cs8900_irq_map[] = {
127 10, 11, 12, 5 110 10, 11, 12, 5
128}; 111};
129#endif 112#endif
130#endif
131 113
132#if DEBUGGING 114#if DEBUGGING
133static unsigned int net_debug = DEBUGGING; 115static unsigned int net_debug = DEBUGGING;
@@ -210,32 +192,6 @@ static int __init media_fn(char *str)
210__setup("cs89x0_media=", media_fn); 192__setup("cs89x0_media=", media_fn);
211#endif 193#endif
212 194
213#if defined(CONFIG_MACH_IXDP2351)
214static u16
215readword(unsigned long base_addr, int portno)
216{
217 return __raw_readw(base_addr + (portno << 1));
218}
219
220static void
221writeword(unsigned long base_addr, int portno, u16 value)
222{
223 __raw_writew(value, base_addr + (portno << 1));
224}
225#elif defined(CONFIG_ARCH_IXDP2X01)
226static u16
227readword(unsigned long base_addr, int portno)
228{
229 return __raw_readl(base_addr + (portno << 1));
230}
231
232static void
233writeword(unsigned long base_addr, int portno, u16 value)
234{
235 __raw_writel(value, base_addr + (portno << 1));
236}
237#endif
238
239static void readwords(struct net_local *lp, int portno, void *buf, int length) 195static void readwords(struct net_local *lp, int portno, void *buf, int length)
240{ 196{
241 u8 *buf8 = (u8 *)buf; 197 u8 *buf8 = (u8 *)buf;
@@ -478,9 +434,6 @@ dma_rx(struct net_device *dev)
478 /* Malloc up new buffer. */ 434 /* Malloc up new buffer. */
479 skb = netdev_alloc_skb(dev, length + 2); 435 skb = netdev_alloc_skb(dev, length + 2);
480 if (skb == NULL) { 436 if (skb == NULL) {
481 /* I don't think we want to do this to a stressed system */
482 cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
483 dev->name);
484 dev->stats.rx_dropped++; 437 dev->stats.rx_dropped++;
485 438
486 /* AKPM: advance bp to the next frame */ 439 /* AKPM: advance bp to the next frame */
@@ -731,9 +684,6 @@ net_rx(struct net_device *dev)
731 /* Malloc up new buffer. */ 684 /* Malloc up new buffer. */
732 skb = netdev_alloc_skb(dev, length + 2); 685 skb = netdev_alloc_skb(dev, length + 2);
733 if (skb == NULL) { 686 if (skb == NULL) {
734#if 0 /* Again, this seems a cruel thing to do */
735 pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
736#endif
737 dev->stats.rx_dropped++; 687 dev->stats.rx_dropped++;
738 return; 688 return;
739 } 689 }
@@ -908,7 +858,7 @@ net_open(struct net_device *dev)
908 goto bad_out; 858 goto bad_out;
909 } 859 }
910 } else { 860 } else {
911#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM) 861#if !defined(CONFIG_CS89x0_PLATFORM)
912 if (((1 << dev->irq) & lp->irq_map) == 0) { 862 if (((1 << dev->irq) & lp->irq_map) == 0) {
913 pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", 863 pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
914 dev->name, dev->irq, lp->irq_map); 864 dev->name, dev->irq, lp->irq_map);
@@ -1321,9 +1271,7 @@ static const struct net_device_ops net_ops = {
1321static void __init reset_chip(struct net_device *dev) 1271static void __init reset_chip(struct net_device *dev)
1322{ 1272{
1323#if !defined(CONFIG_MACH_MX31ADS) 1273#if !defined(CONFIG_MACH_MX31ADS)
1324#if !defined(CS89x0_NONISA_IRQ)
1325 struct net_local *lp = netdev_priv(dev); 1274 struct net_local *lp = netdev_priv(dev);
1326#endif /* CS89x0_NONISA_IRQ */
1327 int reset_start_time; 1275 int reset_start_time;
1328 1276
1329 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); 1277 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1331,7 +1279,6 @@ static void __init reset_chip(struct net_device *dev)
1331 /* wait 30 ms */ 1279 /* wait 30 ms */
1332 msleep(30); 1280 msleep(30);
1333 1281
1334#if !defined(CS89x0_NONISA_IRQ)
1335 if (lp->chip_type != CS8900) { 1282 if (lp->chip_type != CS8900) {
1336 /* Hardware problem requires PNP registers to be reconfigured after a reset */ 1283 /* Hardware problem requires PNP registers to be reconfigured after a reset */
1337 iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT); 1284 iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
@@ -1344,7 +1291,6 @@ static void __init reset_chip(struct net_device *dev)
1344 iowrite8((dev->mem_start >> 8) & 0xff, 1291 iowrite8((dev->mem_start >> 8) & 0xff,
1345 lp->virt_addr + DATA_PORT + 1); 1292 lp->virt_addr + DATA_PORT + 1);
1346 } 1293 }
1347#endif /* CS89x0_NONISA_IRQ */
1348 1294
1349 /* Wait until the chip is reset */ 1295 /* Wait until the chip is reset */
1350 reset_start_time = jiffies; 1296 reset_start_time = jiffies;
@@ -1579,9 +1525,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
1579 i = lp->isa_config & INT_NO_MASK; 1525 i = lp->isa_config & INT_NO_MASK;
1580#ifndef CONFIG_CS89x0_PLATFORM 1526#ifndef CONFIG_CS89x0_PLATFORM
1581 if (lp->chip_type == CS8900) { 1527 if (lp->chip_type == CS8900) {
1582#ifdef CS89x0_NONISA_IRQ
1583 i = cs8900_irq_map[0];
1584#else
1585 /* Translate the IRQ using the IRQ mapping table. */ 1528 /* Translate the IRQ using the IRQ mapping table. */
1586 if (i >= ARRAY_SIZE(cs8900_irq_map)) 1529 if (i >= ARRAY_SIZE(cs8900_irq_map))
1587 pr_err("invalid ISA interrupt number %d\n", i); 1530 pr_err("invalid ISA interrupt number %d\n", i);
@@ -1599,7 +1542,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
1599 lp->irq_map = ((irq_map_buff[0] >> 8) | 1542 lp->irq_map = ((irq_map_buff[0] >> 8) |
1600 (irq_map_buff[1] << 8)); 1543 (irq_map_buff[1] << 8));
1601 } 1544 }
1602#endif
1603 } 1545 }
1604#endif 1546#endif
1605 if (!dev->irq) 1547 if (!dev->irq)
@@ -1978,18 +1920,6 @@ static struct platform_driver cs89x0_driver = {
1978 .remove = cs89x0_platform_remove, 1920 .remove = cs89x0_platform_remove,
1979}; 1921};
1980 1922
1981static int __init cs89x0_init(void) 1923module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1982{
1983 return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
1984}
1985
1986module_init(cs89x0_init);
1987
1988static void __exit cs89x0_cleanup(void)
1989{
1990 platform_driver_unregister(&cs89x0_driver);
1991}
1992
1993module_exit(cs89x0_cleanup);
1994 1924
1995#endif /* CONFIG_CS89x0_PLATFORM */ 1925#endif /* CONFIG_CS89x0_PLATFORM */
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 354cbb78ed50..67b0388b6e68 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = {
887 }, 887 },
888}; 888};
889 889
890static int __init ep93xx_eth_init_module(void) 890module_platform_driver(ep93xx_eth_driver);
891{
892 printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
893 return platform_driver_register(&ep93xx_eth_driver);
894}
895
896static void __exit ep93xx_eth_cleanup_module(void)
897{
898 platform_driver_unregister(&ep93xx_eth_driver);
899}
900 891
901module_init(ep93xx_eth_init_module);
902module_exit(ep93xx_eth_cleanup_module);
903MODULE_LICENSE("GPL"); 892MODULE_LICENSE("GPL");
904MODULE_ALIAS("platform:ep93xx-eth"); 893MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index bf0fc56dba19..4b6e5695b263 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,7 +212,7 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
212} 212}
213 213
214/* rtnl lock is held */ 214/* rtnl lock is held */
215int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 215int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
216{ 216{
217 struct enic *enic = netdev_priv(netdev); 217 struct enic *enic = netdev_priv(netdev);
218 int err; 218 int err;
@@ -225,7 +225,7 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
225} 225}
226 226
227/* rtnl lock is held */ 227/* rtnl lock is held */
228int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 228int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
229{ 229{
230 struct enic *enic = netdev_priv(netdev); 230 struct enic *enic = netdev_priv(netdev);
231 int err; 231 int err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index da1cba3c410e..08bded051b93 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
46 int broadcast, int promisc, int allmulti); 46 int broadcast, int promisc, int allmulti);
47int enic_dev_add_addr(struct enic *enic, u8 *addr); 47int enic_dev_add_addr(struct enic *enic, u8 *addr);
48int enic_dev_del_addr(struct enic *enic, u8 *addr); 48int enic_dev_del_addr(struct enic *enic, u8 *addr);
49int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 49int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
50int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 50int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
51int enic_dev_notify_unset(struct enic *enic); 51int enic_dev_notify_unset(struct enic *enic);
52int enic_dev_hang_notify(struct enic *enic); 52int enic_dev_hang_notify(struct enic *enic);
53int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); 53int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ec1a233622c6..635f55992d7e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1300 } 1300 }
1301 1301
1302 if (vlan_stripped) 1302 if (vlan_stripped)
1303 __vlan_hwaccel_put_tag(skb, vlan_tci); 1303 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1304 1304
1305 if (netdev->features & NETIF_F_GRO) 1305 if (netdev->features & NETIF_F_GRO)
1306 napi_gro_receive(&enic->napi[q_number], skb); 1306 napi_gro_receive(&enic->napi[q_number], skb);
@@ -2496,9 +2496,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2496 netdev->watchdog_timeo = 2 * HZ; 2496 netdev->watchdog_timeo = 2 * HZ;
2497 netdev->ethtool_ops = &enic_ethtool_ops; 2497 netdev->ethtool_ops = &enic_ethtool_ops;
2498 2498
2499 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2499 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2500 if (ENIC_SETTING(enic, LOOP)) { 2500 if (ENIC_SETTING(enic, LOOP)) {
2501 netdev->features &= ~NETIF_F_HW_VLAN_TX; 2501 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2502 enic->loop_enable = 1; 2502 enic->loop_enable = 1;
2503 enic->loop_tag = enic->config.loop_tag; 2503 enic->loop_tag = enic->config.loop_tag;
2504 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); 2504 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 605b22283be1..97455c573db5 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -308,6 +308,9 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
308 308
309 if (status & STAT_ERROR) { 309 if (status & STAT_ERROR) {
310 err = (int)readq(&devcmd->args[0]); 310 err = (int)readq(&devcmd->args[0]);
311 if (err == ERR_EINVAL &&
312 cmd == CMD_CAPABILITY)
313 return err;
311 if (err != ERR_ECMDUNKNOWN || 314 if (err != ERR_ECMDUNKNOWN ||
312 cmd != CMD_CAPABILITY) 315 cmd != CMD_CAPABILITY)
313 pr_err("Error %d devcmd %d\n", 316 pr_err("Error %d devcmd %d\n",
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9eada8e86078..9105465b2a1a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1693,22 +1693,7 @@ static struct platform_driver dm9000_driver = {
1693 .remove = dm9000_drv_remove, 1693 .remove = dm9000_drv_remove,
1694}; 1694};
1695 1695
1696static int __init 1696module_platform_driver(dm9000_driver);
1697dm9000_init(void)
1698{
1699 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1700
1701 return platform_driver_register(&dm9000_driver);
1702}
1703
1704static void __exit
1705dm9000_cleanup(void)
1706{
1707 platform_driver_unregister(&dm9000_driver);
1708}
1709
1710module_init(dm9000_init);
1711module_exit(dm9000_cleanup);
1712 1697
1713MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); 1698MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1714MODULE_DESCRIPTION("Davicom DM9000 network driver"); 1699MODULE_DESCRIPTION("Davicom DM9000 network driver");
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 88feced9a629..cdbcd1643141 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
236 private->rx_buffer = dma_alloc_coherent(d, 8192, 236 private->rx_buffer = dma_alloc_coherent(d, 8192,
237 &private->rx_dma_handle, 237 &private->rx_dma_handle,
238 GFP_KERNEL); 238 GFP_KERNEL);
239 if (private->rx_buffer == NULL) { 239 if (private->rx_buffer == NULL)
240 pr_err("%s: no memory for rx buffer\n", __func__);
241 goto rx_buf_fail; 240 goto rx_buf_fail;
242 } 241
243 private->tx_buffer = dma_alloc_coherent(d, 8192, 242 private->tx_buffer = dma_alloc_coherent(d, 8192,
244 &private->tx_dma_handle, 243 &private->tx_dma_handle,
245 GFP_KERNEL); 244 GFP_KERNEL);
246 if (private->tx_buffer == NULL) { 245 if (private->tx_buffer == NULL)
247 pr_err("%s: no memory for tx buffer\n", __func__);
248 goto tx_buf_fail; 246 goto tx_buf_fail;
249 }
250 247
251 SET_NETDEV_DEV(dev, &pdev->dev); 248 SET_NETDEV_DEV(dev, &pdev->dev);
252 249
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 110d26f4c602..afa8e3af2c4d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -580,12 +580,9 @@ alloc_list (struct net_device *dev)
580 580
581 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); 581 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
582 np->rx_skbuff[i] = skb; 582 np->rx_skbuff[i] = skb;
583 if (skb == NULL) { 583 if (skb == NULL)
584 printk (KERN_ERR
585 "%s: alloc_list: allocate Rx buffer error! ",
586 dev->name);
587 break; 584 break;
588 } 585
589 /* Rubicon now supports 40 bits of addressing space. */ 586 /* Rubicon now supports 40 bits of addressing space. */
590 np->rx_ring[i].fraginfo = 587 np->rx_ring[i].fraginfo =
591 cpu_to_le64 ( pci_map_single ( 588 cpu_to_le64 ( pci_map_single (
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 29aff55f2eea..234ce6f07544 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -214,6 +214,7 @@ struct be_tx_stats {
214}; 214};
215 215
216struct be_tx_obj { 216struct be_tx_obj {
217 u32 db_offset;
217 struct be_queue_info q; 218 struct be_queue_info q;
218 struct be_queue_info cq; 219 struct be_queue_info cq;
219 /* Remember the skbs that were transmitted */ 220 /* Remember the skbs that were transmitted */
@@ -292,7 +293,7 @@ struct be_drv_stats {
292 u32 rx_in_range_errors; 293 u32 rx_in_range_errors;
293 u32 rx_out_range_errors; 294 u32 rx_out_range_errors;
294 u32 rx_frame_too_long; 295 u32 rx_frame_too_long;
295 u32 rx_address_mismatch_drops; 296 u32 rx_address_filtered;
296 u32 rx_dropped_too_small; 297 u32 rx_dropped_too_small;
297 u32 rx_dropped_too_short; 298 u32 rx_dropped_too_short;
298 u32 rx_dropped_header_too_small; 299 u32 rx_dropped_header_too_small;
@@ -328,6 +329,7 @@ enum vf_state {
328#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 329#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
329#define BE_UC_PMAC_COUNT 30 330#define BE_UC_PMAC_COUNT 30
330#define BE_VF_UC_PMAC_COUNT 2 331#define BE_VF_UC_PMAC_COUNT 2
332#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
331 333
332struct phy_info { 334struct phy_info {
333 u8 transceiver; 335 u8 transceiver;
@@ -434,6 +436,8 @@ struct be_adapter {
434 u8 wol_cap; 436 u8 wol_cap;
435 bool wol; 437 bool wol;
436 u32 uc_macs; /* Count of secondary UC MAC programmed */ 438 u32 uc_macs; /* Count of secondary UC MAC programmed */
439 u16 asic_rev;
440 u16 qnq_vid;
437 u32 msg_enable; 441 u32 msg_enable;
438 int be_get_temp_freq; 442 int be_get_temp_freq;
439 u16 max_mcast_mac; 443 u16 max_mcast_mac;
@@ -445,6 +449,7 @@ struct be_adapter {
445 u16 max_event_queues; 449 u16 max_event_queues;
446 u32 if_cap_flags; 450 u32 if_cap_flags;
447 u8 pf_number; 451 u8 pf_number;
452 u64 rss_flags;
448}; 453};
449 454
450#define be_physfn(adapter) (!adapter->virtfn) 455#define be_physfn(adapter) (!adapter->virtfn)
@@ -648,6 +653,11 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
648 } 653 }
649} 654}
650 655
656static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
657{
658 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
659}
660
651extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 661extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
652 u16 num_popped); 662 u16 num_popped);
653extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); 663extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3c9b4f12e3e5..25d3290b8cac 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -263,6 +263,27 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
263 } 263 }
264} 264}
265 265
266static void be_async_dbg_evt_process(struct be_adapter *adapter,
267 u32 trailer, struct be_mcc_compl *cmp)
268{
269 u8 event_type = 0;
270 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
271
272 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
273 ASYNC_TRAILER_EVENT_TYPE_MASK;
274
275 switch (event_type) {
276 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
277 if (evt->valid)
278 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
279 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
280 break;
281 default:
282 dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
283 break;
284 }
285}
286
266static inline bool is_link_state_evt(u32 trailer) 287static inline bool is_link_state_evt(u32 trailer)
267{ 288{
268 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 289 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
@@ -277,6 +298,13 @@ static inline bool is_grp5_evt(u32 trailer)
277 ASYNC_EVENT_CODE_GRP_5); 298 ASYNC_EVENT_CODE_GRP_5);
278} 299}
279 300
301static inline bool is_dbg_evt(u32 trailer)
302{
303 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
304 ASYNC_TRAILER_EVENT_CODE_MASK) ==
305 ASYNC_EVENT_CODE_QNQ);
306}
307
280static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 308static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
281{ 309{
282 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 310 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
@@ -325,6 +353,9 @@ int be_process_mcc(struct be_adapter *adapter)
325 else if (is_grp5_evt(compl->flags)) 353 else if (is_grp5_evt(compl->flags))
326 be_async_grp5_evt_process(adapter, 354 be_async_grp5_evt_process(adapter,
327 compl->flags, compl); 355 compl->flags, compl);
356 else if (is_dbg_evt(compl->flags))
357 be_async_dbg_evt_process(adapter,
358 compl->flags, compl);
328 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 359 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
329 status = be_mcc_compl_process(adapter, compl); 360 status = be_mcc_compl_process(adapter, compl);
330 atomic_dec(&mcc_obj->q.used); 361 atomic_dec(&mcc_obj->q.used);
@@ -687,10 +718,8 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
687 if (!mccq->created) 718 if (!mccq->created)
688 return NULL; 719 return NULL;
689 720
690 if (atomic_read(&mccq->used) >= mccq->len) { 721 if (atomic_read(&mccq->used) >= mccq->len)
691 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
692 return NULL; 722 return NULL;
693 }
694 723
695 wrb = queue_head_node(mccq); 724 wrb = queue_head_node(mccq);
696 queue_head_inc(mccq); 725 queue_head_inc(mccq);
@@ -1022,6 +1051,7 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1022 1051
1023 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 1052 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
1024 req->async_event_bitmap[0] = cpu_to_le32(0x00000022); 1053 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1054 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
1025 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1055 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1026 1056
1027 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1057 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1095,15 +1125,14 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
1095 return status; 1125 return status;
1096} 1126}
1097 1127
1098int be_cmd_txq_create(struct be_adapter *adapter, 1128int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1099 struct be_queue_info *txq,
1100 struct be_queue_info *cq)
1101{ 1129{
1102 struct be_mcc_wrb *wrb; 1130 struct be_mcc_wrb *wrb;
1103 struct be_cmd_req_eth_tx_create *req; 1131 struct be_cmd_req_eth_tx_create *req;
1132 struct be_queue_info *txq = &txo->q;
1133 struct be_queue_info *cq = &txo->cq;
1104 struct be_dma_mem *q_mem = &txq->dma_mem; 1134 struct be_dma_mem *q_mem = &txq->dma_mem;
1105 void *ctxt; 1135 int status, ver = 0;
1106 int status;
1107 1136
1108 spin_lock_bh(&adapter->mcc_lock); 1137 spin_lock_bh(&adapter->mcc_lock);
1109 1138
@@ -1114,34 +1143,37 @@ int be_cmd_txq_create(struct be_adapter *adapter,
1114 } 1143 }
1115 1144
1116 req = embedded_payload(wrb); 1145 req = embedded_payload(wrb);
1117 ctxt = &req->context;
1118 1146
1119 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1147 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1120 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); 1148 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1121 1149
1122 if (lancer_chip(adapter)) { 1150 if (lancer_chip(adapter)) {
1123 req->hdr.version = 1; 1151 req->hdr.version = 1;
1124 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, 1152 req->if_id = cpu_to_le16(adapter->if_handle);
1125 adapter->if_handle); 1153 } else if (BEx_chip(adapter)) {
1154 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1155 req->hdr.version = 2;
1156 } else { /* For SH */
1157 req->hdr.version = 2;
1126 } 1158 }
1127 1159
1128 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1160 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1129 req->ulp_num = BE_ULP1_NUM; 1161 req->ulp_num = BE_ULP1_NUM;
1130 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1162 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1131 1163 req->cq_id = cpu_to_le16(cq->id);
1132 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, 1164 req->queue_size = be_encoded_q_len(txq->len);
1133 be_encoded_q_len(txq->len));
1134 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1135 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1136
1137 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1138
1139 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1165 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1140 1166
1167 ver = req->hdr.version;
1168
1141 status = be_mcc_notify_wait(adapter); 1169 status = be_mcc_notify_wait(adapter);
1142 if (!status) { 1170 if (!status) {
1143 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); 1171 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1144 txq->id = le16_to_cpu(resp->cid); 1172 txq->id = le16_to_cpu(resp->cid);
1173 if (ver == 2)
1174 txo->db_offset = le32_to_cpu(resp->db_offset);
1175 else
1176 txo->db_offset = DB_TXULP1_OFFSET;
1145 txq->created = true; 1177 txq->created = true;
1146 } 1178 }
1147 1179
@@ -1834,7 +1866,7 @@ err:
1834 1866
1835/* Uses mbox */ 1867/* Uses mbox */
1836int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, 1868int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1837 u32 *mode, u32 *caps) 1869 u32 *mode, u32 *caps, u16 *asic_rev)
1838{ 1870{
1839 struct be_mcc_wrb *wrb; 1871 struct be_mcc_wrb *wrb;
1840 struct be_cmd_req_query_fw_cfg *req; 1872 struct be_cmd_req_query_fw_cfg *req;
@@ -1855,6 +1887,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1855 *port_num = le32_to_cpu(resp->phys_port); 1887 *port_num = le32_to_cpu(resp->phys_port);
1856 *mode = le32_to_cpu(resp->function_mode); 1888 *mode = le32_to_cpu(resp->function_mode);
1857 *caps = le32_to_cpu(resp->function_caps); 1889 *caps = le32_to_cpu(resp->function_caps);
1890 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1858 } 1891 }
1859 1892
1860 mutex_unlock(&adapter->mbox_lock); 1893 mutex_unlock(&adapter->mbox_lock);
@@ -1897,7 +1930,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1897 return status; 1930 return status;
1898} 1931}
1899 1932
1900int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) 1933int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1934 u32 rss_hash_opts, u16 table_size)
1901{ 1935{
1902 struct be_mcc_wrb *wrb; 1936 struct be_mcc_wrb *wrb;
1903 struct be_cmd_req_rss_config *req; 1937 struct be_cmd_req_rss_config *req;
@@ -1916,16 +1950,12 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1916 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 1950 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1917 1951
1918 req->if_id = cpu_to_le32(adapter->if_handle); 1952 req->if_id = cpu_to_le32(adapter->if_handle);
1919 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | 1953 req->enable_rss = cpu_to_le16(rss_hash_opts);
1920 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); 1954 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1921 1955
1922 if (lancer_chip(adapter) || skyhawk_chip(adapter)) { 1956 if (lancer_chip(adapter) || skyhawk_chip(adapter))
1923 req->hdr.version = 1; 1957 req->hdr.version = 1;
1924 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1925 RSS_ENABLE_UDP_IPV6);
1926 }
1927 1958
1928 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1929 memcpy(req->cpu_table, rsstable, table_size); 1959 memcpy(req->cpu_table, rsstable, table_size);
1930 memcpy(req->hash, myhash, sizeof(myhash)); 1960 memcpy(req->hash, myhash, sizeof(myhash));
1931 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 1961 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
@@ -2343,7 +2373,6 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2343{ 2373{
2344 struct be_mcc_wrb *wrb; 2374 struct be_mcc_wrb *wrb;
2345 struct be_cmd_req_seeprom_read *req; 2375 struct be_cmd_req_seeprom_read *req;
2346 struct be_sge *sge;
2347 int status; 2376 int status;
2348 2377
2349 spin_lock_bh(&adapter->mcc_lock); 2378 spin_lock_bh(&adapter->mcc_lock);
@@ -2354,7 +2383,6 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2354 goto err; 2383 goto err;
2355 } 2384 }
2356 req = nonemb_cmd->va; 2385 req = nonemb_cmd->va;
2357 sge = nonembedded_sgl(wrb);
2358 2386
2359 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2387 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2360 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2388 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
@@ -2461,6 +2489,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2461 struct mgmt_controller_attrib *attribs; 2489 struct mgmt_controller_attrib *attribs;
2462 struct be_dma_mem attribs_cmd; 2490 struct be_dma_mem attribs_cmd;
2463 2491
2492 if (mutex_lock_interruptible(&adapter->mbox_lock))
2493 return -1;
2494
2464 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2495 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2465 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2496 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2466 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2497 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
@@ -2468,12 +2499,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2468 if (!attribs_cmd.va) { 2499 if (!attribs_cmd.va) {
2469 dev_err(&adapter->pdev->dev, 2500 dev_err(&adapter->pdev->dev,
2470 "Memory allocation failure\n"); 2501 "Memory allocation failure\n");
2471 return -ENOMEM; 2502 status = -ENOMEM;
2503 goto err;
2472 } 2504 }
2473 2505
2474 if (mutex_lock_interruptible(&adapter->mbox_lock))
2475 return -1;
2476
2477 wrb = wrb_from_mbox(adapter); 2506 wrb = wrb_from_mbox(adapter);
2478 if (!wrb) { 2507 if (!wrb) {
2479 status = -EBUSY; 2508 status = -EBUSY;
@@ -2493,8 +2522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2493 2522
2494err: 2523err:
2495 mutex_unlock(&adapter->mbox_lock); 2524 mutex_unlock(&adapter->mbox_lock);
2496 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, 2525 if (attribs_cmd.va)
2497 attribs_cmd.dma); 2526 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2527 attribs_cmd.va, attribs_cmd.dma);
2498 return status; 2528 return status;
2499} 2529}
2500 2530
@@ -2667,10 +2697,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2667 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2697 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2668 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2698 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2669 &cmd.dma, GFP_KERNEL); 2699 &cmd.dma, GFP_KERNEL);
2670 if (!cmd.va) { 2700 if (!cmd.va)
2671 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2672 return -ENOMEM; 2701 return -ENOMEM;
2673 }
2674 2702
2675 spin_lock_bh(&adapter->mcc_lock); 2703 spin_lock_bh(&adapter->mcc_lock);
2676 2704
@@ -2794,6 +2822,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2794 CMD_SUBSYSTEM_ETH)) 2822 CMD_SUBSYSTEM_ETH))
2795 return -EPERM; 2823 return -EPERM;
2796 2824
2825 if (mutex_lock_interruptible(&adapter->mbox_lock))
2826 return -1;
2827
2797 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2828 memset(&cmd, 0, sizeof(struct be_dma_mem));
2798 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 2829 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2799 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2830 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2801,12 +2832,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2801 if (!cmd.va) { 2832 if (!cmd.va) {
2802 dev_err(&adapter->pdev->dev, 2833 dev_err(&adapter->pdev->dev,
2803 "Memory allocation failure\n"); 2834 "Memory allocation failure\n");
2804 return -ENOMEM; 2835 status = -ENOMEM;
2836 goto err;
2805 } 2837 }
2806 2838
2807 if (mutex_lock_interruptible(&adapter->mbox_lock))
2808 return -1;
2809
2810 wrb = wrb_from_mbox(adapter); 2839 wrb = wrb_from_mbox(adapter);
2811 if (!wrb) { 2840 if (!wrb) {
2812 status = -EBUSY; 2841 status = -EBUSY;
@@ -2837,7 +2866,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2837 } 2866 }
2838err: 2867err:
2839 mutex_unlock(&adapter->mbox_lock); 2868 mutex_unlock(&adapter->mbox_lock);
2840 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2869 if (cmd.va)
2870 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2841 return status; 2871 return status;
2842 2872
2843} 2873}
@@ -2942,14 +2972,15 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2942 int i; 2972 int i;
2943 2973
2944 for (i = 0; i < desc_count; i++) { 2974 for (i = 0; i < desc_count; i++) {
2945 desc->desc_len = RESOURCE_DESC_SIZE; 2975 desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
2946 if (((void *)desc + desc->desc_len) > 2976 if (((void *)desc + desc->desc_len) >
2947 (void *)(buf + max_buf_size)) { 2977 (void *)(buf + max_buf_size)) {
2948 desc = NULL; 2978 desc = NULL;
2949 break; 2979 break;
2950 } 2980 }
2951 2981
2952 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID) 2982 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
2983 desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
2953 break; 2984 break;
2954 2985
2955 desc = (void *)desc + desc->desc_len; 2986 desc = (void *)desc + desc->desc_len;
@@ -2969,16 +3000,18 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
2969 int status; 3000 int status;
2970 struct be_dma_mem cmd; 3001 struct be_dma_mem cmd;
2971 3002
3003 if (mutex_lock_interruptible(&adapter->mbox_lock))
3004 return -1;
3005
2972 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3006 memset(&cmd, 0, sizeof(struct be_dma_mem));
2973 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3007 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2974 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3008 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2975 &cmd.dma); 3009 &cmd.dma);
2976 if (!cmd.va) { 3010 if (!cmd.va) {
2977 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3011 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2978 return -ENOMEM; 3012 status = -ENOMEM;
3013 goto err;
2979 } 3014 }
2980 if (mutex_lock_interruptible(&adapter->mbox_lock))
2981 return -1;
2982 3015
2983 wrb = wrb_from_mbox(adapter); 3016 wrb = wrb_from_mbox(adapter);
2984 if (!wrb) { 3017 if (!wrb) {
@@ -2992,6 +3025,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
2992 OPCODE_COMMON_GET_FUNC_CONFIG, 3025 OPCODE_COMMON_GET_FUNC_CONFIG,
2993 cmd.size, wrb, &cmd); 3026 cmd.size, wrb, &cmd);
2994 3027
3028 if (skyhawk_chip(adapter))
3029 req->hdr.version = 1;
3030
2995 status = be_mbox_notify_wait(adapter); 3031 status = be_mbox_notify_wait(adapter);
2996 if (!status) { 3032 if (!status) {
2997 struct be_cmd_resp_get_func_config *resp = cmd.va; 3033 struct be_cmd_resp_get_func_config *resp = cmd.va;
@@ -3018,28 +3054,46 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
3018 } 3054 }
3019err: 3055err:
3020 mutex_unlock(&adapter->mbox_lock); 3056 mutex_unlock(&adapter->mbox_lock);
3021 pci_free_consistent(adapter->pdev, cmd.size, 3057 if (cmd.va)
3022 cmd.va, cmd.dma); 3058 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3023 return status; 3059 return status;
3024} 3060}
3025 3061
3026 /* Uses sync mcc */ 3062/* Uses mbox */
3027int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, 3063int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3028 u8 domain) 3064 u8 domain, struct be_dma_mem *cmd)
3029{ 3065{
3030 struct be_mcc_wrb *wrb; 3066 struct be_mcc_wrb *wrb;
3031 struct be_cmd_req_get_profile_config *req; 3067 struct be_cmd_req_get_profile_config *req;
3032 int status; 3068 int status;
3033 struct be_dma_mem cmd;
3034 3069
3035 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3070 if (mutex_lock_interruptible(&adapter->mbox_lock))
3036 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3071 return -1;
3037 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3072 wrb = wrb_from_mbox(adapter);
3038 &cmd.dma); 3073
3039 if (!cmd.va) { 3074 req = cmd->va;
3040 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3075 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3041 return -ENOMEM; 3076 OPCODE_COMMON_GET_PROFILE_CONFIG,
3042 } 3077 cmd->size, wrb, cmd);
3078
3079 req->type = ACTIVE_PROFILE_TYPE;
3080 req->hdr.domain = domain;
3081 if (!lancer_chip(adapter))
3082 req->hdr.version = 1;
3083
3084 status = be_mbox_notify_wait(adapter);
3085
3086 mutex_unlock(&adapter->mbox_lock);
3087 return status;
3088}
3089
3090/* Uses sync mcc */
3091int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3092 u8 domain, struct be_dma_mem *cmd)
3093{
3094 struct be_mcc_wrb *wrb;
3095 struct be_cmd_req_get_profile_config *req;
3096 int status;
3043 3097
3044 spin_lock_bh(&adapter->mcc_lock); 3098 spin_lock_bh(&adapter->mcc_lock);
3045 3099
@@ -3049,16 +3103,47 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3049 goto err; 3103 goto err;
3050 } 3104 }
3051 3105
3052 req = cmd.va; 3106 req = cmd->va;
3053
3054 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3107 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3055 OPCODE_COMMON_GET_PROFILE_CONFIG, 3108 OPCODE_COMMON_GET_PROFILE_CONFIG,
3056 cmd.size, wrb, &cmd); 3109 cmd->size, wrb, cmd);
3057 3110
3058 req->type = ACTIVE_PROFILE_TYPE; 3111 req->type = ACTIVE_PROFILE_TYPE;
3059 req->hdr.domain = domain; 3112 req->hdr.domain = domain;
3113 if (!lancer_chip(adapter))
3114 req->hdr.version = 1;
3060 3115
3061 status = be_mcc_notify_wait(adapter); 3116 status = be_mcc_notify_wait(adapter);
3117
3118err:
3119 spin_unlock_bh(&adapter->mcc_lock);
3120 return status;
3121}
3122
3123/* Uses sync mcc, if MCCQ is already created otherwise mbox */
3124int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3125 u16 *txq_count, u8 domain)
3126{
3127 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3128 struct be_dma_mem cmd;
3129 int status;
3130
3131 memset(&cmd, 0, sizeof(struct be_dma_mem));
3132 if (!lancer_chip(adapter))
3133 cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
3134 else
3135 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3136 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3137 &cmd.dma);
3138 if (!cmd.va) {
3139 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3140 return -ENOMEM;
3141 }
3142
3143 if (!mccq->created)
3144 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3145 else
3146 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3062 if (!status) { 3147 if (!status) {
3063 struct be_cmd_resp_get_profile_config *resp = cmd.va; 3148 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3064 u32 desc_count = le32_to_cpu(resp->desc_count); 3149 u32 desc_count = le32_to_cpu(resp->desc_count);
@@ -3071,12 +3156,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3071 status = -EINVAL; 3156 status = -EINVAL;
3072 goto err; 3157 goto err;
3073 } 3158 }
3074 *cap_flags = le32_to_cpu(desc->cap_flags); 3159 if (cap_flags)
3160 *cap_flags = le32_to_cpu(desc->cap_flags);
3161 if (txq_count)
3162 *txq_count = le32_to_cpu(desc->txq_count);
3075 } 3163 }
3076err: 3164err:
3077 spin_unlock_bh(&adapter->mcc_lock); 3165 if (cmd.va)
3078 pci_free_consistent(adapter->pdev, cmd.size, 3166 pci_free_consistent(adapter->pdev, cmd.size,
3079 cmd.va, cmd.dma); 3167 cmd.va, cmd.dma);
3080 return status; 3168 return status;
3081} 3169}
3082 3170
@@ -3105,7 +3193,7 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3105 req->hdr.domain = domain; 3193 req->hdr.domain = domain;
3106 req->desc_count = cpu_to_le32(1); 3194 req->desc_count = cpu_to_le32(1);
3107 3195
3108 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID; 3196 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3109 req->nic_desc.desc_len = RESOURCE_DESC_SIZE; 3197 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3110 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV); 3198 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3111 req->nic_desc.pf_num = adapter->pf_number; 3199 req->nic_desc.pf_num = adapter->pf_number;
@@ -3202,6 +3290,31 @@ err:
3202 return status; 3290 return status;
3203} 3291}
3204 3292
3293int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3294{
3295 struct be_mcc_wrb *wrb;
3296 struct be_cmd_req_intr_set *req;
3297 int status;
3298
3299 if (mutex_lock_interruptible(&adapter->mbox_lock))
3300 return -1;
3301
3302 wrb = wrb_from_mbox(adapter);
3303
3304 req = embedded_payload(wrb);
3305
3306 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3307 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3308 wrb, NULL);
3309
3310 req->intr_enabled = intr_enable;
3311
3312 status = be_mbox_notify_wait(adapter);
3313
3314 mutex_unlock(&adapter->mbox_lock);
3315 return status;
3316}
3317
3205int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3318int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3206 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3319 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3207{ 3320{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 96970860c915..a855668e0cc5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -84,6 +84,9 @@ struct be_mcc_compl {
84#define ASYNC_EVENT_QOS_SPEED 0x1 84#define ASYNC_EVENT_QOS_SPEED 0x1
85#define ASYNC_EVENT_COS_PRIORITY 0x2 85#define ASYNC_EVENT_COS_PRIORITY 0x2
86#define ASYNC_EVENT_PVID_STATE 0x3 86#define ASYNC_EVENT_PVID_STATE 0x3
87#define ASYNC_EVENT_CODE_QNQ 0x6
88#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
89
87struct be_async_event_trailer { 90struct be_async_event_trailer {
88 u32 code; 91 u32 code;
89}; 92};
@@ -144,6 +147,16 @@ struct be_async_event_grp5_pvid_state {
144 struct be_async_event_trailer trailer; 147 struct be_async_event_trailer trailer;
145} __packed; 148} __packed;
146 149
150/* async event indicating outer VLAN tag in QnQ */
151struct be_async_event_qnq {
152 u8 valid; /* Indicates if outer VLAN is valid */
153 u8 rsvd0;
154 u16 vlan_tag;
155 u32 event_tag;
156 u8 rsvd1[4];
157 struct be_async_event_trailer trailer;
158} __packed;
159
147struct be_mcc_mailbox { 160struct be_mcc_mailbox {
148 struct be_mcc_wrb wrb; 161 struct be_mcc_wrb wrb;
149 struct be_mcc_compl compl; 162 struct be_mcc_compl compl;
@@ -188,6 +201,7 @@ struct be_mcc_mailbox {
188#define OPCODE_COMMON_GET_BEACON_STATE 70 201#define OPCODE_COMMON_GET_BEACON_STATE 70
189#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 202#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
190#define OPCODE_COMMON_GET_PORT_NAME 77 203#define OPCODE_COMMON_GET_PORT_NAME 77
204#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
191#define OPCODE_COMMON_GET_PHY_DETAILS 102 205#define OPCODE_COMMON_GET_PHY_DETAILS 102
192#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 206#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
193#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 207#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -473,46 +487,27 @@ struct be_cmd_resp_mcc_create {
473#define BE_ETH_TX_RING_TYPE_STANDARD 2 487#define BE_ETH_TX_RING_TYPE_STANDARD 2
474#define BE_ULP1_NUM 1 488#define BE_ULP1_NUM 1
475 489
476/* Pseudo amap definition in which each bit of the actual structure is defined
477 * as a byte: used to calculate offset/shift/mask of each field */
478struct amap_tx_context {
479 u8 if_id[16]; /* dword 0 */
480 u8 tx_ring_size[4]; /* dword 0 */
481 u8 rsvd1[26]; /* dword 0 */
482 u8 pci_func_id[8]; /* dword 1 */
483 u8 rsvd2[9]; /* dword 1 */
484 u8 ctx_valid; /* dword 1 */
485 u8 cq_id_send[16]; /* dword 2 */
486 u8 rsvd3[16]; /* dword 2 */
487 u8 rsvd4[32]; /* dword 3 */
488 u8 rsvd5[32]; /* dword 4 */
489 u8 rsvd6[32]; /* dword 5 */
490 u8 rsvd7[32]; /* dword 6 */
491 u8 rsvd8[32]; /* dword 7 */
492 u8 rsvd9[32]; /* dword 8 */
493 u8 rsvd10[32]; /* dword 9 */
494 u8 rsvd11[32]; /* dword 10 */
495 u8 rsvd12[32]; /* dword 11 */
496 u8 rsvd13[32]; /* dword 12 */
497 u8 rsvd14[32]; /* dword 13 */
498 u8 rsvd15[32]; /* dword 14 */
499 u8 rsvd16[32]; /* dword 15 */
500} __packed;
501
502struct be_cmd_req_eth_tx_create { 490struct be_cmd_req_eth_tx_create {
503 struct be_cmd_req_hdr hdr; 491 struct be_cmd_req_hdr hdr;
504 u8 num_pages; 492 u8 num_pages;
505 u8 ulp_num; 493 u8 ulp_num;
506 u8 type; 494 u16 type;
507 u8 bound_port; 495 u16 if_id;
508 u8 context[sizeof(struct amap_tx_context) / 8]; 496 u8 queue_size;
497 u8 rsvd0;
498 u32 rsvd1;
499 u16 cq_id;
500 u16 rsvd2;
501 u32 rsvd3[13];
509 struct phys_addr pages[8]; 502 struct phys_addr pages[8];
510} __packed; 503} __packed;
511 504
512struct be_cmd_resp_eth_tx_create { 505struct be_cmd_resp_eth_tx_create {
513 struct be_cmd_resp_hdr hdr; 506 struct be_cmd_resp_hdr hdr;
514 u16 cid; 507 u16 cid;
515 u16 rsvd0; 508 u16 rid;
509 u32 db_offset;
510 u32 rsvd0[4];
516} __packed; 511} __packed;
517 512
518/******************** Create RxQ ***************************/ 513/******************** Create RxQ ***************************/
@@ -608,8 +603,8 @@ struct be_port_rxf_stats_v0 {
608 u32 rx_in_range_errors; /* dword 10*/ 603 u32 rx_in_range_errors; /* dword 10*/
609 u32 rx_out_range_errors; /* dword 11*/ 604 u32 rx_out_range_errors; /* dword 11*/
610 u32 rx_frame_too_long; /* dword 12*/ 605 u32 rx_frame_too_long; /* dword 12*/
611 u32 rx_address_mismatch_drops; /* dword 13*/ 606 u32 rx_address_filtered; /* dword 13*/
612 u32 rx_vlan_mismatch_drops; /* dword 14*/ 607 u32 rx_vlan_filtered; /* dword 14*/
613 u32 rx_dropped_too_small; /* dword 15*/ 608 u32 rx_dropped_too_small; /* dword 15*/
614 u32 rx_dropped_too_short; /* dword 16*/ 609 u32 rx_dropped_too_short; /* dword 16*/
615 u32 rx_dropped_header_too_small; /* dword 17*/ 610 u32 rx_dropped_header_too_small; /* dword 17*/
@@ -815,8 +810,8 @@ struct lancer_pport_stats {
815 u32 rx_control_frames_unknown_opcode_hi; 810 u32 rx_control_frames_unknown_opcode_hi;
816 u32 rx_in_range_errors; 811 u32 rx_in_range_errors;
817 u32 rx_out_of_range_errors; 812 u32 rx_out_of_range_errors;
818 u32 rx_address_mismatch_drops; 813 u32 rx_address_filtered;
819 u32 rx_vlan_mismatch_drops; 814 u32 rx_vlan_filtered;
820 u32 rx_dropped_too_small; 815 u32 rx_dropped_too_small;
821 u32 rx_dropped_too_short; 816 u32 rx_dropped_too_short;
822 u32 rx_dropped_header_too_small; 817 u32 rx_dropped_header_too_small;
@@ -1066,7 +1061,6 @@ struct be_cmd_resp_modify_eq_delay {
1066} __packed; 1061} __packed;
1067 1062
1068/******************** Get FW Config *******************/ 1063/******************** Get FW Config *******************/
1069#define BE_FUNCTION_CAPS_RSS 0x2
1070/* The HW can come up in either of the following multi-channel modes 1064/* The HW can come up in either of the following multi-channel modes
1071 * based on the skew/IPL. 1065 * based on the skew/IPL.
1072 */ 1066 */
@@ -1109,6 +1103,9 @@ struct be_cmd_resp_query_fw_cfg {
1109#define RSS_ENABLE_UDP_IPV4 0x10 1103#define RSS_ENABLE_UDP_IPV4 0x10
1110#define RSS_ENABLE_UDP_IPV6 0x20 1104#define RSS_ENABLE_UDP_IPV6 0x20
1111 1105
1106#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC)
1107#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3)
1108
1112struct be_cmd_req_rss_config { 1109struct be_cmd_req_rss_config {
1113 struct be_cmd_req_hdr hdr; 1110 struct be_cmd_req_hdr hdr;
1114 u32 if_id; 1111 u32 if_id;
@@ -1592,7 +1589,7 @@ struct be_port_rxf_stats_v1 {
1592 u32 rx_in_range_errors; 1589 u32 rx_in_range_errors;
1593 u32 rx_out_range_errors; 1590 u32 rx_out_range_errors;
1594 u32 rx_frame_too_long; 1591 u32 rx_frame_too_long;
1595 u32 rx_address_mismatch_drops; 1592 u32 rx_address_filtered;
1596 u32 rx_dropped_too_small; 1593 u32 rx_dropped_too_small;
1597 u32 rx_dropped_too_short; 1594 u32 rx_dropped_too_short;
1598 u32 rx_dropped_header_too_small; 1595 u32 rx_dropped_header_too_small;
@@ -1706,9 +1703,11 @@ struct be_cmd_req_set_ext_fat_caps {
1706 struct be_fat_conf_params set_params; 1703 struct be_fat_conf_params set_params;
1707}; 1704};
1708 1705
1709#define RESOURCE_DESC_SIZE 72 1706#define RESOURCE_DESC_SIZE 88
1710#define NIC_RESOURCE_DESC_TYPE_ID 0x41 1707#define NIC_RESOURCE_DESC_TYPE_V0 0x41
1708#define NIC_RESOURCE_DESC_TYPE_V1 0x51
1711#define MAX_RESOURCE_DESC 4 1709#define MAX_RESOURCE_DESC 4
1710#define MAX_RESOURCE_DESC_V1 32
1712 1711
1713/* QOS unit number */ 1712/* QOS unit number */
1714#define QUN 4 1713#define QUN 4
@@ -1755,7 +1754,7 @@ struct be_cmd_req_get_func_config {
1755}; 1754};
1756 1755
1757struct be_cmd_resp_get_func_config { 1756struct be_cmd_resp_get_func_config {
1758 struct be_cmd_req_hdr hdr; 1757 struct be_cmd_resp_hdr hdr;
1759 u32 desc_count; 1758 u32 desc_count;
1760 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE]; 1759 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1761}; 1760};
@@ -1774,6 +1773,12 @@ struct be_cmd_resp_get_profile_config {
1774 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE]; 1773 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1775}; 1774};
1776 1775
1776struct be_cmd_resp_get_profile_config_v1 {
1777 struct be_cmd_req_hdr hdr;
1778 u32 desc_count;
1779 u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
1780};
1781
1777struct be_cmd_req_set_profile_config { 1782struct be_cmd_req_set_profile_config {
1778 struct be_cmd_req_hdr hdr; 1783 struct be_cmd_req_hdr hdr;
1779 u32 rsvd; 1784 u32 rsvd;
@@ -1791,6 +1796,12 @@ struct be_cmd_enable_disable_vf {
1791 u8 rsvd[3]; 1796 u8 rsvd[3];
1792}; 1797};
1793 1798
1799struct be_cmd_req_intr_set {
1800 struct be_cmd_req_hdr hdr;
1801 u8 intr_enabled;
1802 u8 rsvd[3];
1803};
1804
1794static inline bool check_privilege(struct be_adapter *adapter, u32 flags) 1805static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
1795{ 1806{
1796 return flags & adapter->cmd_privileges ? true : false; 1807 return flags & adapter->cmd_privileges ? true : false;
@@ -1834,8 +1845,7 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
1834 struct be_queue_info *mccq, 1845 struct be_queue_info *mccq,
1835 struct be_queue_info *cq); 1846 struct be_queue_info *cq);
1836extern int be_cmd_txq_create(struct be_adapter *adapter, 1847extern int be_cmd_txq_create(struct be_adapter *adapter,
1837 struct be_queue_info *txq, 1848 struct be_tx_obj *txo);
1838 struct be_queue_info *cq);
1839extern int be_cmd_rxq_create(struct be_adapter *adapter, 1849extern int be_cmd_rxq_create(struct be_adapter *adapter,
1840 struct be_queue_info *rxq, u16 cq_id, 1850 struct be_queue_info *rxq, u16 cq_id,
1841 u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); 1851 u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
@@ -1862,11 +1872,11 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
1862 u32 tx_fc, u32 rx_fc); 1872 u32 tx_fc, u32 rx_fc);
1863extern int be_cmd_get_flow_control(struct be_adapter *adapter, 1873extern int be_cmd_get_flow_control(struct be_adapter *adapter,
1864 u32 *tx_fc, u32 *rx_fc); 1874 u32 *tx_fc, u32 *rx_fc);
1865extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, 1875extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1866 u32 *port_num, u32 *function_mode, u32 *function_caps); 1876 u32 *function_mode, u32 *function_caps, u16 *asic_rev);
1867extern int be_cmd_reset_function(struct be_adapter *adapter); 1877extern int be_cmd_reset_function(struct be_adapter *adapter);
1868extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 1878extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1869 u16 table_size); 1879 u32 rss_hash_opts, u16 table_size);
1870extern int be_process_mcc(struct be_adapter *adapter); 1880extern int be_process_mcc(struct be_adapter *adapter);
1871extern int be_cmd_set_beacon_state(struct be_adapter *adapter, 1881extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
1872 u8 port_num, u8 beacon, u8 status, u8 state); 1882 u8 port_num, u8 beacon, u8 status, u8 state);
@@ -1931,10 +1941,11 @@ extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1931extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 1941extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1932extern int be_cmd_get_func_config(struct be_adapter *adapter); 1942extern int be_cmd_get_func_config(struct be_adapter *adapter);
1933extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags, 1943extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
1934 u8 domain); 1944 u16 *txq_count, u8 domain);
1935 1945
1936extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, 1946extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
1937 u8 domain); 1947 u8 domain);
1938extern int be_cmd_get_if_id(struct be_adapter *adapter, 1948extern int be_cmd_get_if_id(struct be_adapter *adapter,
1939 struct be_vf_cfg *vf_cfg, int vf_num); 1949 struct be_vf_cfg *vf_cfg, int vf_num);
1940extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); 1950extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
1951extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 76b302f30c87..5733cde88e2c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -54,7 +54,7 @@ static const struct be_ethtool_stat et_stats[] = {
54 /* Received packets dropped when they don't pass the unicast or 54 /* Received packets dropped when they don't pass the unicast or
55 * multicast address filtering. 55 * multicast address filtering.
56 */ 56 */
57 {DRVSTAT_INFO(rx_address_mismatch_drops)}, 57 {DRVSTAT_INFO(rx_address_filtered)},
58 /* Received packets dropped when IP packet length field is less than 58 /* Received packets dropped when IP packet length field is less than
59 * the IP header length field. 59 * the IP header length field.
60 */ 60 */
@@ -680,7 +680,8 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
680 680
681 if (be_is_wol_supported(adapter)) { 681 if (be_is_wol_supported(adapter)) {
682 wol->supported |= WAKE_MAGIC; 682 wol->supported |= WAKE_MAGIC;
683 wol->wolopts |= WAKE_MAGIC; 683 if (adapter->wol)
684 wol->wolopts |= WAKE_MAGIC;
684 } else 685 } else
685 wol->wolopts = 0; 686 wol->wolopts = 0;
686 memset(&wol->sopass, 0, sizeof(wol->sopass)); 687 memset(&wol->sopass, 0, sizeof(wol->sopass));
@@ -719,10 +720,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
719 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 720 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
720 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, 721 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
721 &ddrdma_cmd.dma, GFP_KERNEL); 722 &ddrdma_cmd.dma, GFP_KERNEL);
722 if (!ddrdma_cmd.va) { 723 if (!ddrdma_cmd.va)
723 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
724 return -ENOMEM; 724 return -ENOMEM;
725 }
726 725
727 for (i = 0; i < 2; i++) { 726 for (i = 0; i < 2; i++) {
728 ret = be_cmd_ddr_dma_test(adapter, pattern[i], 727 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -757,6 +756,12 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
757 int status; 756 int status;
758 u8 link_status = 0; 757 u8 link_status = 0;
759 758
759 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
760 dev_err(&adapter->pdev->dev, "Self test not supported\n");
761 test->flags |= ETH_TEST_FL_FAILED;
762 return;
763 }
764
760 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 765 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
761 766
762 if (test->flags & ETH_TEST_FL_OFFLINE) { 767 if (test->flags & ETH_TEST_FL_OFFLINE) {
@@ -845,11 +850,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
845 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, 850 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
846 &eeprom_cmd.dma, GFP_KERNEL); 851 &eeprom_cmd.dma, GFP_KERNEL);
847 852
848 if (!eeprom_cmd.va) { 853 if (!eeprom_cmd.va)
849 dev_err(&adapter->pdev->dev,
850 "Memory allocation failure. Could not read eeprom\n");
851 return -ENOMEM; 854 return -ENOMEM;
852 }
853 855
854 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); 856 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
855 857
@@ -939,6 +941,159 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
939 return; 941 return;
940} 942}
941 943
944static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
945{
946 u64 data = 0;
947
948 switch (flow_type) {
949 case TCP_V4_FLOW:
950 if (adapter->rss_flags & RSS_ENABLE_IPV4)
951 data |= RXH_IP_DST | RXH_IP_SRC;
952 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
953 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
954 break;
955 case UDP_V4_FLOW:
956 if (adapter->rss_flags & RSS_ENABLE_IPV4)
957 data |= RXH_IP_DST | RXH_IP_SRC;
958 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
959 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
960 break;
961 case TCP_V6_FLOW:
962 if (adapter->rss_flags & RSS_ENABLE_IPV6)
963 data |= RXH_IP_DST | RXH_IP_SRC;
964 if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
965 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
966 break;
967 case UDP_V6_FLOW:
968 if (adapter->rss_flags & RSS_ENABLE_IPV6)
969 data |= RXH_IP_DST | RXH_IP_SRC;
970 if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
971 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
972 break;
973 }
974
975 return data;
976}
977
978static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
979 u32 *rule_locs)
980{
981 struct be_adapter *adapter = netdev_priv(netdev);
982
983 if (!be_multi_rxq(adapter)) {
984 dev_info(&adapter->pdev->dev,
985 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
986 return -EINVAL;
987 }
988
989 switch (cmd->cmd) {
990 case ETHTOOL_GRXFH:
991 cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
992 break;
993 case ETHTOOL_GRXRINGS:
994 cmd->data = adapter->num_rx_qs - 1;
995 break;
996 default:
997 return -EINVAL;
998 }
999
1000 return 0;
1001}
1002
1003static int be_set_rss_hash_opts(struct be_adapter *adapter,
1004 struct ethtool_rxnfc *cmd)
1005{
1006 struct be_rx_obj *rxo;
1007 int status = 0, i, j;
1008 u8 rsstable[128];
1009 u32 rss_flags = adapter->rss_flags;
1010
1011 if (cmd->data != L3_RSS_FLAGS &&
1012 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1013 return -EINVAL;
1014
1015 switch (cmd->flow_type) {
1016 case TCP_V4_FLOW:
1017 if (cmd->data == L3_RSS_FLAGS)
1018 rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1019 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1020 rss_flags |= RSS_ENABLE_IPV4 |
1021 RSS_ENABLE_TCP_IPV4;
1022 break;
1023 case TCP_V6_FLOW:
1024 if (cmd->data == L3_RSS_FLAGS)
1025 rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1026 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1027 rss_flags |= RSS_ENABLE_IPV6 |
1028 RSS_ENABLE_TCP_IPV6;
1029 break;
1030 case UDP_V4_FLOW:
1031 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1032 BEx_chip(adapter))
1033 return -EINVAL;
1034
1035 if (cmd->data == L3_RSS_FLAGS)
1036 rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1037 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1038 rss_flags |= RSS_ENABLE_IPV4 |
1039 RSS_ENABLE_UDP_IPV4;
1040 break;
1041 case UDP_V6_FLOW:
1042 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1043 BEx_chip(adapter))
1044 return -EINVAL;
1045
1046 if (cmd->data == L3_RSS_FLAGS)
1047 rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1048 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1049 rss_flags |= RSS_ENABLE_IPV6 |
1050 RSS_ENABLE_UDP_IPV6;
1051 break;
1052 default:
1053 return -EINVAL;
1054 }
1055
1056 if (rss_flags == adapter->rss_flags)
1057 return status;
1058
1059 if (be_multi_rxq(adapter)) {
1060 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
1061 for_all_rss_queues(adapter, rxo, i) {
1062 if ((j + i) >= 128)
1063 break;
1064 rsstable[j + i] = rxo->rss_id;
1065 }
1066 }
1067 }
1068 status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
1069 if (!status)
1070 adapter->rss_flags = rss_flags;
1071
1072 return status;
1073}
1074
1075static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1076{
1077 struct be_adapter *adapter = netdev_priv(netdev);
1078 int status = 0;
1079
1080 if (!be_multi_rxq(adapter)) {
1081 dev_err(&adapter->pdev->dev,
1082 "ethtool::set_rxnfc: RX flow hashing is disabled\n");
1083 return -EINVAL;
1084 }
1085
1086 switch (cmd->cmd) {
1087 case ETHTOOL_SRXFH:
1088 status = be_set_rss_hash_opts(adapter, cmd);
1089 break;
1090 default:
1091 return -EINVAL;
1092 }
1093
1094 return status;
1095}
1096
942const struct ethtool_ops be_ethtool_ops = { 1097const struct ethtool_ops be_ethtool_ops = {
943 .get_settings = be_get_settings, 1098 .get_settings = be_get_settings,
944 .get_drvinfo = be_get_drvinfo, 1099 .get_drvinfo = be_get_drvinfo,
@@ -962,4 +1117,6 @@ const struct ethtool_ops be_ethtool_ops = {
962 .get_regs = be_get_regs, 1117 .get_regs = be_get_regs,
963 .flash_device = be_do_flash, 1118 .flash_device = be_do_flash,
964 .self_test = be_self_test, 1119 .self_test = be_self_test,
1120 .get_rxnfc = be_get_rxnfc,
1121 .set_rxnfc = be_set_rxnfc,
965}; 1122};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 62dc220695f7..3c1099b47f2a 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -72,6 +72,10 @@
72 */ 72 */
73#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ 73#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
74 74
75/********* PCI Function Capability *********/
76#define BE_FUNCTION_CAPS_RSS 0x2
77#define BE_FUNCTION_CAPS_SUPER_NIC 0x40
78
75/********* Power management (WOL) **********/ 79/********* Power management (WOL) **********/
76#define PCICFG_PM_CONTROL_OFFSET 0x44 80#define PCICFG_PM_CONTROL_OFFSET 0x44
77#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ 81#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
@@ -495,7 +499,8 @@ struct flash_file_hdr_g3 {
495 u32 antidote; 499 u32 antidote;
496 u32 num_imgs; 500 u32 num_imgs;
497 u8 build[24]; 501 u8 build[24];
498 u8 rsvd[32]; 502 u8 asic_type_rev;
503 u8 rsvd[31];
499}; 504};
500 505
501struct flash_section_hdr { 506struct flash_section_hdr {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 2886c9b63f90..4babc8a4a543 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
146 q->entry_size = entry_size; 146 q->entry_size = entry_size;
147 mem->size = len * entry_size; 147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL); 149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va) 150 if (!mem->va)
151 return -ENOMEM; 151 return -ENOMEM;
152 memset(mem->va, 0, mem->size);
153 return 0; 152 return 0;
154} 153}
155 154
156static void be_intr_set(struct be_adapter *adapter, bool enable) 155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
157{ 156{
158 u32 reg, enabled; 157 u32 reg, enabled;
159 158
160 if (adapter->eeh_error)
161 return;
162
163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg); 160 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176} 172}
177 173
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) 190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179{ 191{
180 u32 val = 0; 192 u32 val = 0;
@@ -185,14 +197,15 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
185 iowrite32(val, adapter->db + DB_RQ_OFFSET); 197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
186} 198}
187 199
188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted) 200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
189{ 202{
190 u32 val = 0; 203 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK; 204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; 205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193 206
194 wmb(); 207 wmb();
195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET); 208 iowrite32(val, adapter->db + txo->db_offset);
196} 209}
197 210
198static void be_eq_notify(struct be_adapter *adapter, u16 qid, 211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
@@ -340,9 +353,9 @@ static void populate_be_v0_stats(struct be_adapter *adapter)
340 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow; 353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
341 drvs->rx_dropped_header_too_small = 354 drvs->rx_dropped_header_too_small =
342 port_stats->rx_dropped_header_too_small; 355 port_stats->rx_dropped_header_too_small;
343 drvs->rx_address_mismatch_drops = 356 drvs->rx_address_filtered =
344 port_stats->rx_address_mismatch_drops + 357 port_stats->rx_address_filtered +
345 port_stats->rx_vlan_mismatch_drops; 358 port_stats->rx_vlan_filtered;
346 drvs->rx_alignment_symbol_errors = 359 drvs->rx_alignment_symbol_errors =
347 port_stats->rx_alignment_symbol_errors; 360 port_stats->rx_alignment_symbol_errors;
348 361
@@ -391,7 +404,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
391 port_stats->rx_dropped_header_too_small; 404 port_stats->rx_dropped_header_too_small;
392 drvs->rx_input_fifo_overflow_drop = 405 drvs->rx_input_fifo_overflow_drop =
393 port_stats->rx_input_fifo_overflow_drop; 406 port_stats->rx_input_fifo_overflow_drop;
394 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops; 407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
395 drvs->rx_alignment_symbol_errors = 408 drvs->rx_alignment_symbol_errors =
396 port_stats->rx_alignment_symbol_errors; 409 port_stats->rx_alignment_symbol_errors;
397 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; 410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -432,9 +445,9 @@ static void populate_lancer_stats(struct be_adapter *adapter)
432 drvs->rx_dropped_header_too_small = 445 drvs->rx_dropped_header_too_small =
433 pport_stats->rx_dropped_header_too_small; 446 pport_stats->rx_dropped_header_too_small;
434 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow; 447 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
435 drvs->rx_address_mismatch_drops = 448 drvs->rx_address_filtered =
436 pport_stats->rx_address_mismatch_drops + 449 pport_stats->rx_address_filtered +
437 pport_stats->rx_vlan_mismatch_drops; 450 pport_stats->rx_vlan_filtered;
438 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo; 451 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
439 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow; 452 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
440 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo; 453 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
@@ -626,13 +639,8 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
626 return vlan_tag; 639 return vlan_tag;
627} 640}
628 641
629static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630{
631 return vlan_tx_tag_present(skb) || adapter->pvid;
632}
633
634static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 642static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
635 struct sk_buff *skb, u32 wrb_cnt, u32 len) 643 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
636{ 644{
637 u16 vlan_tag; 645 u16 vlan_tag;
638 646
@@ -659,8 +667,9 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
659 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); 667 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
660 } 668 }
661 669
670 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
662 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); 672 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
663 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt); 673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 674 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
666} 675}
@@ -683,7 +692,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
683} 692}
684 693
685static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 694static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
686 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) 695 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
696 bool skip_hw_vlan)
687{ 697{
688 dma_addr_t busaddr; 698 dma_addr_t busaddr;
689 int i, copied = 0; 699 int i, copied = 0;
@@ -732,7 +742,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
732 queue_head_inc(txq); 742 queue_head_inc(txq);
733 } 743 }
734 744
735 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied); 745 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
736 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 746 be_dws_cpu_to_le(hdr, sizeof(*hdr));
737 747
738 return copied; 748 return copied;
@@ -749,7 +759,8 @@ dma_err:
749} 759}
750 760
751static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, 761static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752 struct sk_buff *skb) 762 struct sk_buff *skb,
763 bool *skip_hw_vlan)
753{ 764{
754 u16 vlan_tag = 0; 765 u16 vlan_tag = 0;
755 766
@@ -759,14 +770,72 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
759 770
760 if (vlan_tx_tag_present(skb)) { 771 if (vlan_tx_tag_present(skb)) {
761 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 772 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762 skb = __vlan_put_tag(skb, vlan_tag); 773 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
763 if (skb) 774 if (skb)
764 skb->vlan_tci = 0; 775 skb->vlan_tci = 0;
765 } 776 }
766 777
778 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
779 if (!vlan_tag)
780 vlan_tag = adapter->pvid;
781 if (skip_hw_vlan)
782 *skip_hw_vlan = true;
783 }
784
785 if (vlan_tag) {
786 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
787 if (unlikely(!skb))
788 return skb;
789
790 skb->vlan_tci = 0;
791 }
792
793 /* Insert the outer VLAN, if any */
794 if (adapter->qnq_vid) {
795 vlan_tag = adapter->qnq_vid;
796 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
797 if (unlikely(!skb))
798 return skb;
799 if (skip_hw_vlan)
800 *skip_hw_vlan = true;
801 }
802
767 return skb; 803 return skb;
768} 804}
769 805
806static bool be_ipv6_exthdr_check(struct sk_buff *skb)
807{
808 struct ethhdr *eh = (struct ethhdr *)skb->data;
809 u16 offset = ETH_HLEN;
810
811 if (eh->h_proto == htons(ETH_P_IPV6)) {
812 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
813
814 offset += sizeof(struct ipv6hdr);
815 if (ip6h->nexthdr != NEXTHDR_TCP &&
816 ip6h->nexthdr != NEXTHDR_UDP) {
817 struct ipv6_opt_hdr *ehdr =
818 (struct ipv6_opt_hdr *) (skb->data + offset);
819
820 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
821 if (ehdr->hdrlen == 0xff)
822 return true;
823 }
824 }
825 return false;
826}
827
828static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
829{
830 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
831}
832
833static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
834{
835 return BE3_chip(adapter) &&
836 be_ipv6_exthdr_check(skb);
837}
838
770static netdev_tx_t be_xmit(struct sk_buff *skb, 839static netdev_tx_t be_xmit(struct sk_buff *skb,
771 struct net_device *netdev) 840 struct net_device *netdev)
772{ 841{
@@ -777,33 +846,64 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
777 u32 wrb_cnt = 0, copied = 0; 846 u32 wrb_cnt = 0, copied = 0;
778 u32 start = txq->head, eth_hdr_len; 847 u32 start = txq->head, eth_hdr_len;
779 bool dummy_wrb, stopped = false; 848 bool dummy_wrb, stopped = false;
849 bool skip_hw_vlan = false;
850 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
780 851
781 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 852 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
782 VLAN_ETH_HLEN : ETH_HLEN; 853 VLAN_ETH_HLEN : ETH_HLEN;
783 854
784 /* HW has a bug which considers padding bytes as legal 855 /* For padded packets, BE HW modifies tot_len field in IP header
785 * and modifies the IPv4 hdr's 'tot_len' field 856 * incorrectly when VLAN tag is inserted by HW.
786 */ 857 */
787 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) && 858 if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
788 is_ipv4_pkt(skb)) {
789 ip = (struct iphdr *)ip_hdr(skb); 859 ip = (struct iphdr *)ip_hdr(skb);
790 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 860 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
791 } 861 }
792 862
863 /* If vlan tag is already inlined in the packet, skip HW VLAN
864 * tagging in UMC mode
865 */
866 if ((adapter->function_mode & UMC_ENABLED) &&
867 veh->h_vlan_proto == htons(ETH_P_8021Q))
868 skip_hw_vlan = true;
869
793 /* HW has a bug wherein it will calculate CSUM for VLAN 870 /* HW has a bug wherein it will calculate CSUM for VLAN
794 * pkts even though it is disabled. 871 * pkts even though it is disabled.
795 * Manually insert VLAN in pkt. 872 * Manually insert VLAN in pkt.
796 */ 873 */
797 if (skb->ip_summed != CHECKSUM_PARTIAL && 874 if (skb->ip_summed != CHECKSUM_PARTIAL &&
798 be_vlan_tag_chk(adapter, skb)) { 875 vlan_tx_tag_present(skb)) {
799 skb = be_insert_vlan_in_pkt(adapter, skb); 876 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
877 if (unlikely(!skb))
878 goto tx_drop;
879 }
880
881 /* HW may lockup when VLAN HW tagging is requested on
882 * certain ipv6 packets. Drop such pkts if the HW workaround to
883 * skip HW tagging is not enabled by FW.
884 */
885 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
886 (adapter->pvid || adapter->qnq_vid) &&
887 !qnq_async_evt_rcvd(adapter)))
888 goto tx_drop;
889
890 /* Manual VLAN tag insertion to prevent:
891 * ASIC lockup when the ASIC inserts VLAN tag into
892 * certain ipv6 packets. Insert VLAN tags in driver,
893 * and set event, completion, vlan bits accordingly
894 * in the Tx WRB.
895 */
896 if (be_ipv6_tx_stall_chk(adapter, skb) &&
897 be_vlan_tag_tx_chk(adapter, skb)) {
898 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
800 if (unlikely(!skb)) 899 if (unlikely(!skb))
801 goto tx_drop; 900 goto tx_drop;
802 } 901 }
803 902
804 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); 903 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
805 904
806 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); 905 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
906 skip_hw_vlan);
807 if (copied) { 907 if (copied) {
808 int gso_segs = skb_shinfo(skb)->gso_segs; 908 int gso_segs = skb_shinfo(skb)->gso_segs;
809 909
@@ -822,7 +922,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
822 stopped = true; 922 stopped = true;
823 } 923 }
824 924
825 be_txq_notify(adapter, txq->id, wrb_cnt); 925 be_txq_notify(adapter, txo, wrb_cnt);
826 926
827 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); 927 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
828 } else { 928 } else {
@@ -891,7 +991,7 @@ set_vlan_promisc:
891 return status; 991 return status;
892} 992}
893 993
894static int be_vlan_add_vid(struct net_device *netdev, u16 vid) 994static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
895{ 995{
896 struct be_adapter *adapter = netdev_priv(netdev); 996 struct be_adapter *adapter = netdev_priv(netdev);
897 int status = 0; 997 int status = 0;
@@ -917,7 +1017,7 @@ ret:
917 return status; 1017 return status;
918} 1018}
919 1019
920static int be_vlan_rem_vid(struct net_device *netdev, u16 vid) 1020static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
921{ 1021{
922 struct be_adapter *adapter = netdev_priv(netdev); 1022 struct be_adapter *adapter = netdev_priv(netdev);
923 int status = 0; 1023 int status = 0;
@@ -1372,7 +1472,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
1372 1472
1373 1473
1374 if (rxcp->vlanf) 1474 if (rxcp->vlanf)
1375 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); 1475 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1376 1476
1377 netif_receive_skb(skb); 1477 netif_receive_skb(skb);
1378} 1478}
@@ -1428,7 +1528,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1428 skb->rxhash = rxcp->rss_hash; 1528 skb->rxhash = rxcp->rss_hash;
1429 1529
1430 if (rxcp->vlanf) 1530 if (rxcp->vlanf)
1431 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); 1531 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1432 1532
1433 napi_gro_frags(napi); 1533 napi_gro_frags(napi);
1434} 1534}
@@ -1958,7 +2058,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
1958 if (status) 2058 if (status)
1959 return status; 2059 return status;
1960 2060
1961 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq); 2061 status = be_cmd_txq_create(adapter, txo);
1962 if (status) 2062 if (status)
1963 return status; 2063 return status;
1964 } 2064 }
@@ -2436,9 +2536,6 @@ static int be_close(struct net_device *netdev)
2436 2536
2437 be_roce_dev_close(adapter); 2537 be_roce_dev_close(adapter);
2438 2538
2439 if (!lancer_chip(adapter))
2440 be_intr_set(adapter, false);
2441
2442 for_all_evt_queues(adapter, eqo, i) 2539 for_all_evt_queues(adapter, eqo, i)
2443 napi_disable(&eqo->napi); 2540 napi_disable(&eqo->napi);
2444 2541
@@ -2500,9 +2597,19 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2500 rsstable[j + i] = rxo->rss_id; 2597 rsstable[j + i] = rxo->rss_id;
2501 } 2598 }
2502 } 2599 }
2503 rc = be_cmd_rss_config(adapter, rsstable, 128); 2600 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2504 if (rc) 2601 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2602
2603 if (!BEx_chip(adapter))
2604 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2605 RSS_ENABLE_UDP_IPV6;
2606
2607 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2608 128);
2609 if (rc) {
2610 adapter->rss_flags = 0;
2505 return rc; 2611 return rc;
2612 }
2506 } 2613 }
2507 2614
2508 /* First time posting */ 2615 /* First time posting */
@@ -2526,9 +2633,6 @@ static int be_open(struct net_device *netdev)
2526 2633
2527 be_irq_register(adapter); 2634 be_irq_register(adapter);
2528 2635
2529 if (!lancer_chip(adapter))
2530 be_intr_set(adapter, true);
2531
2532 for_all_rx_queues(adapter, rxo, i) 2636 for_all_rx_queues(adapter, rxo, i)
2533 be_cq_notify(adapter, rxo->cq.id, true, 0); 2637 be_cq_notify(adapter, rxo->cq.id, true, 0);
2534 2638
@@ -2563,10 +2667,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2563 2667
2564 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2668 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2565 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2669 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2566 GFP_KERNEL); 2670 GFP_KERNEL | __GFP_ZERO);
2567 if (cmd.va == NULL) 2671 if (cmd.va == NULL)
2568 return -1; 2672 return -1;
2569 memset(cmd.va, 0, cmd.size);
2570 2673
2571 if (enable) { 2674 if (enable) {
2572 status = pci_write_config_dword(adapter->pdev, 2675 status = pci_write_config_dword(adapter->pdev,
@@ -2714,7 +2817,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
2714 2817
2715 for_all_vfs(adapter, vf_cfg, vf) { 2818 for_all_vfs(adapter, vf_cfg, vf) {
2716 if (!BE3_chip(adapter)) 2819 if (!BE3_chip(adapter))
2717 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1); 2820 be_cmd_get_profile_config(adapter, &cap_flags,
2821 NULL, vf + 1);
2718 2822
2719 /* If a FW profile exists, then cap_flags are updated */ 2823 /* If a FW profile exists, then cap_flags are updated */
2720 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 2824 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2878,11 +2982,14 @@ static void be_get_resources(struct be_adapter *adapter)
2878 u16 dev_num_vfs; 2982 u16 dev_num_vfs;
2879 int pos, status; 2983 int pos, status;
2880 bool profile_present = false; 2984 bool profile_present = false;
2985 u16 txq_count = 0;
2881 2986
2882 if (!BEx_chip(adapter)) { 2987 if (!BEx_chip(adapter)) {
2883 status = be_cmd_get_func_config(adapter); 2988 status = be_cmd_get_func_config(adapter);
2884 if (!status) 2989 if (!status)
2885 profile_present = true; 2990 profile_present = true;
2991 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2992 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2886 } 2993 }
2887 2994
2888 if (profile_present) { 2995 if (profile_present) {
@@ -2920,7 +3027,9 @@ static void be_get_resources(struct be_adapter *adapter)
2920 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; 3027 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2921 3028
2922 adapter->max_mcast_mac = BE_MAX_MC; 3029 adapter->max_mcast_mac = BE_MAX_MC;
2923 adapter->max_tx_queues = MAX_TX_QS; 3030 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3031 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3032 MAX_TX_QS);
2924 adapter->max_rss_queues = (adapter->be3_native) ? 3033 adapter->max_rss_queues = (adapter->be3_native) ?
2925 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3034 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2926 adapter->max_event_queues = BE3_MAX_RSS_QS; 3035 adapter->max_event_queues = BE3_MAX_RSS_QS;
@@ -2954,7 +3063,8 @@ static int be_get_config(struct be_adapter *adapter)
2954 3063
2955 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, 3064 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2956 &adapter->function_mode, 3065 &adapter->function_mode,
2957 &adapter->function_caps); 3066 &adapter->function_caps,
3067 &adapter->asic_rev);
2958 if (status) 3068 if (status)
2959 goto err; 3069 goto err;
2960 3070
@@ -3215,7 +3325,7 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3215 return 0; 3325 return 0;
3216} 3326}
3217 3327
3218/* For BE2 and BE3 */ 3328/* For BE2, BE3 and BE3-R */
3219static int be_flash_BEx(struct be_adapter *adapter, 3329static int be_flash_BEx(struct be_adapter *adapter,
3220 const struct firmware *fw, 3330 const struct firmware *fw,
3221 struct be_dma_mem *flash_cmd, 3331 struct be_dma_mem *flash_cmd,
@@ -3458,11 +3568,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
3458 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3568 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3459 + LANCER_FW_DOWNLOAD_CHUNK; 3569 + LANCER_FW_DOWNLOAD_CHUNK;
3460 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 3570 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3461 &flash_cmd.dma, GFP_KERNEL); 3571 &flash_cmd.dma, GFP_KERNEL);
3462 if (!flash_cmd.va) { 3572 if (!flash_cmd.va) {
3463 status = -ENOMEM; 3573 status = -ENOMEM;
3464 dev_err(&adapter->pdev->dev,
3465 "Memory allocation failure while flashing\n");
3466 goto lancer_fw_exit; 3574 goto lancer_fw_exit;
3467 } 3575 }
3468 3576
@@ -3530,18 +3638,22 @@ lancer_fw_exit:
3530 3638
3531#define UFI_TYPE2 2 3639#define UFI_TYPE2 2
3532#define UFI_TYPE3 3 3640#define UFI_TYPE3 3
3641#define UFI_TYPE3R 10
3533#define UFI_TYPE4 4 3642#define UFI_TYPE4 4
3534static int be_get_ufi_type(struct be_adapter *adapter, 3643static int be_get_ufi_type(struct be_adapter *adapter,
3535 struct flash_file_hdr_g2 *fhdr) 3644 struct flash_file_hdr_g3 *fhdr)
3536{ 3645{
3537 if (fhdr == NULL) 3646 if (fhdr == NULL)
3538 goto be_get_ufi_exit; 3647 goto be_get_ufi_exit;
3539 3648
3540 if (skyhawk_chip(adapter) && fhdr->build[0] == '4') 3649 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3541 return UFI_TYPE4; 3650 return UFI_TYPE4;
3542 else if (BE3_chip(adapter) && fhdr->build[0] == '3') 3651 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3543 return UFI_TYPE3; 3652 if (fhdr->asic_type_rev == 0x10)
3544 else if (BE2_chip(adapter) && fhdr->build[0] == '2') 3653 return UFI_TYPE3R;
3654 else
3655 return UFI_TYPE3;
3656 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3545 return UFI_TYPE2; 3657 return UFI_TYPE2;
3546 3658
3547be_get_ufi_exit: 3659be_get_ufi_exit:
@@ -3552,7 +3664,6 @@ be_get_ufi_exit:
3552 3664
3553static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 3665static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3554{ 3666{
3555 struct flash_file_hdr_g2 *fhdr;
3556 struct flash_file_hdr_g3 *fhdr3; 3667 struct flash_file_hdr_g3 *fhdr3;
3557 struct image_hdr *img_hdr_ptr = NULL; 3668 struct image_hdr *img_hdr_ptr = NULL;
3558 struct be_dma_mem flash_cmd; 3669 struct be_dma_mem flash_cmd;
@@ -3564,29 +3675,41 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3564 &flash_cmd.dma, GFP_KERNEL); 3675 &flash_cmd.dma, GFP_KERNEL);
3565 if (!flash_cmd.va) { 3676 if (!flash_cmd.va) {
3566 status = -ENOMEM; 3677 status = -ENOMEM;
3567 dev_err(&adapter->pdev->dev,
3568 "Memory allocation failure while flashing\n");
3569 goto be_fw_exit; 3678 goto be_fw_exit;
3570 } 3679 }
3571 3680
3572 p = fw->data; 3681 p = fw->data;
3573 fhdr = (struct flash_file_hdr_g2 *)p; 3682 fhdr3 = (struct flash_file_hdr_g3 *)p;
3574 3683
3575 ufi_type = be_get_ufi_type(adapter, fhdr); 3684 ufi_type = be_get_ufi_type(adapter, fhdr3);
3576 3685
3577 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3578 num_imgs = le32_to_cpu(fhdr3->num_imgs); 3686 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3579 for (i = 0; i < num_imgs; i++) { 3687 for (i = 0; i < num_imgs; i++) {
3580 img_hdr_ptr = (struct image_hdr *)(fw->data + 3688 img_hdr_ptr = (struct image_hdr *)(fw->data +
3581 (sizeof(struct flash_file_hdr_g3) + 3689 (sizeof(struct flash_file_hdr_g3) +
3582 i * sizeof(struct image_hdr))); 3690 i * sizeof(struct image_hdr)));
3583 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) { 3691 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3584 if (ufi_type == UFI_TYPE4) 3692 switch (ufi_type) {
3693 case UFI_TYPE4:
3585 status = be_flash_skyhawk(adapter, fw, 3694 status = be_flash_skyhawk(adapter, fw,
3586 &flash_cmd, num_imgs); 3695 &flash_cmd, num_imgs);
3587 else if (ufi_type == UFI_TYPE3) 3696 break;
3697 case UFI_TYPE3R:
3588 status = be_flash_BEx(adapter, fw, &flash_cmd, 3698 status = be_flash_BEx(adapter, fw, &flash_cmd,
3589 num_imgs); 3699 num_imgs);
3700 break;
3701 case UFI_TYPE3:
3702 /* Do not flash this ufi on BE3-R cards */
3703 if (adapter->asic_rev < 0x10)
3704 status = be_flash_BEx(adapter, fw,
3705 &flash_cmd,
3706 num_imgs);
3707 else {
3708 status = -1;
3709 dev_err(&adapter->pdev->dev,
3710 "Can't load BE3 UFI on BE3R\n");
3711 }
3712 }
3590 } 3713 }
3591 } 3714 }
3592 3715
@@ -3663,12 +3786,12 @@ static void be_netdev_init(struct net_device *netdev)
3663 3786
3664 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 3787 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3665 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 3788 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3666 NETIF_F_HW_VLAN_TX; 3789 NETIF_F_HW_VLAN_CTAG_TX;
3667 if (be_multi_rxq(adapter)) 3790 if (be_multi_rxq(adapter))
3668 netdev->hw_features |= NETIF_F_RXHASH; 3791 netdev->hw_features |= NETIF_F_RXHASH;
3669 3792
3670 netdev->features |= netdev->hw_features | 3793 netdev->features |= netdev->hw_features |
3671 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3794 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3672 3795
3673 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 3796 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3797 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3792,12 +3915,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
3792 3915
3793 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 3916 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3794 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, 3917 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3795 &rx_filter->dma, GFP_KERNEL); 3918 &rx_filter->dma,
3919 GFP_KERNEL | __GFP_ZERO);
3796 if (rx_filter->va == NULL) { 3920 if (rx_filter->va == NULL) {
3797 status = -ENOMEM; 3921 status = -ENOMEM;
3798 goto free_mbox; 3922 goto free_mbox;
3799 } 3923 }
3800 memset(rx_filter->va, 0, rx_filter->size); 3924
3801 mutex_init(&adapter->mbox_lock); 3925 mutex_init(&adapter->mbox_lock);
3802 spin_lock_init(&adapter->mcc_lock); 3926 spin_lock_init(&adapter->mcc_lock);
3803 spin_lock_init(&adapter->mcc_cq_lock); 3927 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3839,10 +3963,9 @@ static int be_stats_init(struct be_adapter *adapter)
3839 cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 3963 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3840 3964
3841 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 3965 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3842 GFP_KERNEL); 3966 GFP_KERNEL | __GFP_ZERO);
3843 if (cmd->va == NULL) 3967 if (cmd->va == NULL)
3844 return -1; 3968 return -1;
3845 memset(cmd->va, 0, cmd->size);
3846 return 0; 3969 return 0;
3847} 3970}
3848 3971
@@ -3854,6 +3977,7 @@ static void be_remove(struct pci_dev *pdev)
3854 return; 3977 return;
3855 3978
3856 be_roce_dev_remove(adapter); 3979 be_roce_dev_remove(adapter);
3980 be_intr_set(adapter, false);
3857 3981
3858 cancel_delayed_work_sync(&adapter->func_recovery_work); 3982 cancel_delayed_work_sync(&adapter->func_recovery_work);
3859 3983
@@ -4108,6 +4232,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4108 4232
4109 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 4233 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4110 if (!status) { 4234 if (!status) {
4235 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4236 if (status < 0) {
4237 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4238 goto free_netdev;
4239 }
4111 netdev->features |= NETIF_F_HIGHDMA; 4240 netdev->features |= NETIF_F_HIGHDMA;
4112 } else { 4241 } else {
4113 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 4242 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
@@ -4132,22 +4261,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4132 goto ctrl_clean; 4261 goto ctrl_clean;
4133 } 4262 }
4134 4263
4135 /* tell fw we're ready to fire cmds */
4136 status = be_cmd_fw_init(adapter);
4137 if (status)
4138 goto ctrl_clean;
4139
4140 if (be_reset_required(adapter)) { 4264 if (be_reset_required(adapter)) {
4141 status = be_cmd_reset_function(adapter); 4265 status = be_cmd_reset_function(adapter);
4142 if (status) 4266 if (status)
4143 goto ctrl_clean; 4267 goto ctrl_clean;
4268
4269 /* Wait for interrupts to quiesce after an FLR */
4270 msleep(100);
4144 } 4271 }
4145 4272
4146 /* The INTR bit may be set in the card when probed by a kdump kernel 4273 /* Allow interrupts for other ULPs running on NIC function */
4147 * after a crash. 4274 be_intr_set(adapter, true);
4148 */ 4275
4149 if (!lancer_chip(adapter)) 4276 /* tell fw we're ready to fire cmds */
4150 be_intr_set(adapter, false); 4277 status = be_cmd_fw_init(adapter);
4278 if (status)
4279 goto ctrl_clean;
4151 4280
4152 status = be_stats_init(adapter); 4281 status = be_stats_init(adapter);
4153 if (status) 4282 if (status)
@@ -4358,12 +4487,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4358 4487
4359 pci_save_state(pdev); 4488 pci_save_state(pdev);
4360 4489
4361 /* tell fw we're ready to fire cmds */ 4490 status = be_cmd_reset_function(adapter);
4362 status = be_cmd_fw_init(adapter);
4363 if (status) 4491 if (status)
4364 goto err; 4492 goto err;
4365 4493
4366 status = be_cmd_reset_function(adapter); 4494 /* tell fw we're ready to fire cmds */
4495 status = be_cmd_fw_init(adapter);
4367 if (status) 4496 if (status)
4368 goto err; 4497 goto err;
4369 4498
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 55d32aa0a093..f3d126dcc104 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index db4ea8081c07..276572998463 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 7c361d1db94c..21b85fb7d05f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
780 780
781 priv->descs = dma_alloc_coherent(priv->dev, 781 priv->descs = dma_alloc_coherent(priv->dev,
782 sizeof(struct ftgmac100_descs), 782 sizeof(struct ftgmac100_descs),
783 &priv->descs_dma_addr, GFP_KERNEL); 783 &priv->descs_dma_addr,
784 GFP_KERNEL | __GFP_ZERO);
784 if (!priv->descs) 785 if (!priv->descs)
785 return -ENOMEM; 786 return -ENOMEM;
786 787
787 memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
788
789 /* initialize RX ring */ 788 /* initialize RX ring */
790 ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); 789 ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
791 790
@@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = {
1350 }, 1349 },
1351}; 1350};
1352 1351
1353/****************************************************************************** 1352module_platform_driver(ftgmac100_driver);
1354 * initialization / finalization
1355 *****************************************************************************/
1356static int __init ftgmac100_init(void)
1357{
1358 pr_info("Loading version " DRV_VERSION " ...\n");
1359 return platform_driver_register(&ftgmac100_driver);
1360}
1361
1362static void __exit ftgmac100_exit(void)
1363{
1364 platform_driver_unregister(&ftgmac100_driver);
1365}
1366
1367module_init(ftgmac100_init);
1368module_exit(ftgmac100_exit);
1369 1353
1370MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1354MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1371MODULE_DESCRIPTION("FTGMAC100 driver"); 1355MODULE_DESCRIPTION("FTGMAC100 driver");
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b5ea8fbd8a76..a6eda8d83138 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
732{ 732{
733 int i; 733 int i;
734 734
735 priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), 735 priv->descs = dma_alloc_coherent(priv->dev,
736 &priv->descs_dma_addr, GFP_KERNEL); 736 sizeof(struct ftmac100_descs),
737 &priv->descs_dma_addr,
738 GFP_KERNEL | __GFP_ZERO);
737 if (!priv->descs) 739 if (!priv->descs)
738 return -ENOMEM; 740 return -ENOMEM;
739 741
740 memset(priv->descs, 0, sizeof(struct ftmac100_descs));
741
742 /* initialize RX ring */ 742 /* initialize RX ring */
743 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); 743 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
744 744
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index b7d58fe6f531..549ce13b92ac 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,7 +2,8 @@
2# Makefile for the Freescale network device drivers. 2# Makefile for the Freescale network device drivers.
3# 3#
4 4
5obj-$(CONFIG_FEC) += fec.o fec_ptp.o 5obj-$(CONFIG_FEC) += fec.o
6fec-objs :=fec_main.o fec_ptp.o
6obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o 7obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
7ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) 8ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
8 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o 9 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index eb4372962839..d44f65bac1d4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -52,6 +52,7 @@
52#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ 52#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
53#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ 53#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
54#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ 54#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
55#define FEC_RACC 0x1C4 /* Receive Accelerator function */
55#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ 56#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
56#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ 57#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
57 58
@@ -164,9 +165,11 @@ struct bufdesc_ex {
164#define BD_ENET_TX_CSL ((ushort)0x0001) 165#define BD_ENET_TX_CSL ((ushort)0x0001)
165#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ 166#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
166 167
167/*enhanced buffer desciptor control/status used by Ethernet transmit*/ 168/*enhanced buffer descriptor control/status used by Ethernet transmit*/
168#define BD_ENET_TX_INT 0x40000000 169#define BD_ENET_TX_INT 0x40000000
169#define BD_ENET_TX_TS 0x20000000 170#define BD_ENET_TX_TS 0x20000000
171#define BD_ENET_TX_PINS 0x10000000
172#define BD_ENET_TX_IINS 0x08000000
170 173
171 174
172/* This device has up to three irqs on some platforms */ 175/* This device has up to three irqs on some platforms */
@@ -190,6 +193,10 @@ struct bufdesc_ex {
190 193
191#define BD_ENET_RX_INT 0x00800000 194#define BD_ENET_RX_INT 0x00800000
192#define BD_ENET_RX_PTP ((ushort)0x0400) 195#define BD_ENET_RX_PTP ((ushort)0x0400)
196#define BD_ENET_RX_ICE 0x00000020
197#define BD_ENET_RX_PCR 0x00000010
198#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
199#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
193 200
194/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and 201/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
195 * tx_bd_base always point to the base of the buffer descriptors. The 202 * tx_bd_base always point to the base of the buffer descriptors. The
@@ -247,6 +254,7 @@ struct fec_enet_private {
247 int pause_flag; 254 int pause_flag;
248 255
249 struct napi_struct napi; 256 struct napi_struct napi;
257 int csum_flags;
250 258
251 struct ptp_clock *ptp_clock; 259 struct ptp_clock *ptp_clock;
252 struct ptp_clock_info ptp_caps; 260 struct ptp_clock_info ptp_caps;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec_main.c
index 73195f643c9c..b9748f14ea78 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,12 +29,17 @@
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/pci.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/delay.h> 33#include <linux/delay.h>
35#include <linux/netdevice.h> 34#include <linux/netdevice.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/in.h>
38#include <linux/ip.h>
39#include <net/ip.h>
40#include <linux/tcp.h>
41#include <linux/udp.h>
42#include <linux/icmp.h>
38#include <linux/spinlock.h> 43#include <linux/spinlock.h>
39#include <linux/workqueue.h> 44#include <linux/workqueue.h>
40#include <linux/bitops.h> 45#include <linux/bitops.h>
@@ -53,11 +58,6 @@
53 58
54#include <asm/cacheflush.h> 59#include <asm/cacheflush.h>
55 60
56#ifndef CONFIG_ARM
57#include <asm/coldfire.h>
58#include <asm/mcfsim.h>
59#endif
60
61#include "fec.h" 61#include "fec.h"
62 62
63#if defined(CONFIG_ARM) 63#if defined(CONFIG_ARM)
@@ -107,6 +107,9 @@ static struct platform_device_id fec_devtype[] = {
107 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 107 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
108 FEC_QUIRK_HAS_BUFDESC_EX, 108 FEC_QUIRK_HAS_BUFDESC_EX,
109 }, { 109 }, {
110 .name = "mvf-fec",
111 .driver_data = FEC_QUIRK_ENET_MAC,
112 }, {
110 /* sentinel */ 113 /* sentinel */
111 } 114 }
112}; 115};
@@ -117,6 +120,7 @@ enum imx_fec_type {
117 IMX27_FEC, /* runs on i.mx27/35/51 */ 120 IMX27_FEC, /* runs on i.mx27/35/51 */
118 IMX28_FEC, 121 IMX28_FEC,
119 IMX6Q_FEC, 122 IMX6Q_FEC,
123 MVF_FEC,
120}; 124};
121 125
122static const struct of_device_id fec_dt_ids[] = { 126static const struct of_device_id fec_dt_ids[] = {
@@ -124,6 +128,7 @@ static const struct of_device_id fec_dt_ids[] = {
124 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, 128 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
125 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, 129 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
126 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 130 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
131 { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
127 { /* sentinel */ } 132 { /* sentinel */ }
128}; 133};
129MODULE_DEVICE_TABLE(of, fec_dt_ids); 134MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -177,6 +182,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
177#define PKT_MINBUF_SIZE 64 182#define PKT_MINBUF_SIZE 64
178#define PKT_MAXBLR_SIZE 1520 183#define PKT_MAXBLR_SIZE 1520
179 184
185/* FEC receive acceleration */
186#define FEC_RACC_IPDIS (1 << 1)
187#define FEC_RACC_PRODIS (1 << 2)
188#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
189
180/* 190/*
181 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 191 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
182 * size bits. Other FEC hardware does not, so we need to take that into 192 * size bits. Other FEC hardware does not, so we need to take that into
@@ -237,6 +247,21 @@ static void *swap_buffer(void *bufaddr, int len)
237 return bufaddr; 247 return bufaddr;
238} 248}
239 249
250static int
251fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
252{
253 /* Only run for packets requiring a checksum. */
254 if (skb->ip_summed != CHECKSUM_PARTIAL)
255 return 0;
256
257 if (unlikely(skb_cow_head(skb, 0)))
258 return -1;
259
260 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
261
262 return 0;
263}
264
240static netdev_tx_t 265static netdev_tx_t
241fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 266fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
242{ 267{
@@ -249,7 +274,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
249 unsigned int index; 274 unsigned int index;
250 275
251 if (!fep->link) { 276 if (!fep->link) {
252 /* Link is down or autonegotiation is in progress. */ 277 /* Link is down or auto-negotiation is in progress. */
253 return NETDEV_TX_BUSY; 278 return NETDEV_TX_BUSY;
254 } 279 }
255 280
@@ -262,10 +287,16 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
262 /* Ooops. All transmit buffers are full. Bail out. 287 /* Ooops. All transmit buffers are full. Bail out.
263 * This should not happen, since ndev->tbusy should be set. 288 * This should not happen, since ndev->tbusy should be set.
264 */ 289 */
265 printk("%s: tx queue full!.\n", ndev->name); 290 netdev_err(ndev, "tx queue full!\n");
266 return NETDEV_TX_BUSY; 291 return NETDEV_TX_BUSY;
267 } 292 }
268 293
294 /* Protocol checksum off-load for TCP and UDP. */
295 if (fec_enet_clear_csum(skb, ndev)) {
296 kfree_skb(skb);
297 return NETDEV_TX_OK;
298 }
299
269 /* Clear all of the status flags */ 300 /* Clear all of the status flags */
270 status &= ~BD_ENET_TX_STATS; 301 status &= ~BD_ENET_TX_STATS;
271 302
@@ -322,8 +353,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
322 ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); 353 ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
323 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 354 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
324 } else { 355 } else {
325
326 ebdp->cbd_esc = BD_ENET_TX_INT; 356 ebdp->cbd_esc = BD_ENET_TX_INT;
357
358 /* Enable protocol checksum flags
359 * We do not bother with the IP Checksum bits as they
360 * are done by the kernel
361 */
362 if (skb->ip_summed == CHECKSUM_PARTIAL)
363 ebdp->cbd_esc |= BD_ENET_TX_PINS;
327 } 364 }
328 } 365 }
329 /* If this was the last BD in the ring, start at the beginning again. */ 366 /* If this was the last BD in the ring, start at the beginning again. */
@@ -403,6 +440,7 @@ fec_restart(struct net_device *ndev, int duplex)
403 const struct platform_device_id *id_entry = 440 const struct platform_device_id *id_entry =
404 platform_get_device_id(fep->pdev); 441 platform_get_device_id(fep->pdev);
405 int i; 442 int i;
443 u32 val;
406 u32 temp_mac[2]; 444 u32 temp_mac[2];
407 u32 rcntl = OPT_FRAME_SIZE | 0x04; 445 u32 rcntl = OPT_FRAME_SIZE | 0x04;
408 u32 ecntl = 0x2; /* ETHEREN */ 446 u32 ecntl = 0x2; /* ETHEREN */
@@ -469,6 +507,14 @@ fec_restart(struct net_device *ndev, int duplex)
469 /* Set MII speed */ 507 /* Set MII speed */
470 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 508 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
471 509
510 /* set RX checksum */
511 val = readl(fep->hwp + FEC_RACC);
512 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
513 val |= FEC_RACC_OPTIONS;
514 else
515 val &= ~FEC_RACC_OPTIONS;
516 writel(val, fep->hwp + FEC_RACC);
517
472 /* 518 /*
473 * The phy interface and speed need to get configured 519 * The phy interface and speed need to get configured
474 * differently on enet-mac. 520 * differently on enet-mac.
@@ -526,7 +572,7 @@ fec_restart(struct net_device *ndev, int duplex)
526 fep->phy_dev && fep->phy_dev->pause)) { 572 fep->phy_dev && fep->phy_dev->pause)) {
527 rcntl |= FEC_ENET_FCE; 573 rcntl |= FEC_ENET_FCE;
528 574
529 /* set FIFO thresh hold parameter to reduce overrun */ 575 /* set FIFO threshold parameter to reduce overrun */
530 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 576 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
531 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 577 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
532 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 578 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
@@ -574,7 +620,7 @@ fec_stop(struct net_device *ndev)
574 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 620 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
575 udelay(10); 621 udelay(10);
576 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 622 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
577 printk("fec_stop : Graceful transmit stop did not complete !\n"); 623 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
578 } 624 }
579 625
580 /* Whack a reset. We should wait for this. */ 626 /* Whack a reset. We should wait for this. */
@@ -672,7 +718,7 @@ fec_enet_tx(struct net_device *ndev)
672 } 718 }
673 719
674 if (status & BD_ENET_TX_READY) 720 if (status & BD_ENET_TX_READY)
675 printk("HEY! Enet xmit interrupt and TX_READY.\n"); 721 netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
676 722
677 /* Deferred means some collisions occurred during transmit, 723 /* Deferred means some collisions occurred during transmit,
678 * but we eventually sent the packet OK. 724 * but we eventually sent the packet OK.
@@ -740,7 +786,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
740 * the last indicator should be set. 786 * the last indicator should be set.
741 */ 787 */
742 if ((status & BD_ENET_RX_LAST) == 0) 788 if ((status & BD_ENET_RX_LAST) == 0)
743 printk("FEC ENET: rcv is not +last\n"); 789 netdev_err(ndev, "rcv is not +last\n");
744 790
745 if (!fep->opened) 791 if (!fep->opened)
746 goto rx_processing_done; 792 goto rx_processing_done;
@@ -791,8 +837,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
791 skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); 837 skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
792 838
793 if (unlikely(!skb)) { 839 if (unlikely(!skb)) {
794 printk("%s: Memory squeeze, dropping packet.\n",
795 ndev->name);
796 ndev->stats.rx_dropped++; 840 ndev->stats.rx_dropped++;
797 } else { 841 } else {
798 skb_reserve(skb, NET_IP_ALIGN); 842 skb_reserve(skb, NET_IP_ALIGN);
@@ -816,6 +860,18 @@ fec_enet_rx(struct net_device *ndev, int budget)
816 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 860 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
817 } 861 }
818 862
863 if (fep->bufdesc_ex &&
864 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
865 struct bufdesc_ex *ebdp =
866 (struct bufdesc_ex *)bdp;
867 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
868 /* don't check it */
869 skb->ip_summed = CHECKSUM_UNNECESSARY;
870 } else {
871 skb_checksum_none_assert(skb);
872 }
873 }
874
819 if (!skb_defer_rx_timestamp(skb)) 875 if (!skb_defer_rx_timestamp(skb))
820 napi_gro_receive(&fep->napi, skb); 876 napi_gro_receive(&fep->napi, skb);
821 } 877 }
@@ -916,7 +972,6 @@ static void fec_get_mac(struct net_device *ndev)
916 */ 972 */
917 iap = macaddr; 973 iap = macaddr;
918 974
919#ifdef CONFIG_OF
920 /* 975 /*
921 * 2) from device tree data 976 * 2) from device tree data
922 */ 977 */
@@ -928,7 +983,6 @@ static void fec_get_mac(struct net_device *ndev)
928 iap = (unsigned char *) mac; 983 iap = (unsigned char *) mac;
929 } 984 }
930 } 985 }
931#endif
932 986
933 /* 987 /*
934 * 3) from flash or fuse (via platform data) 988 * 3) from flash or fuse (via platform data)
@@ -1032,7 +1086,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1032 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1086 usecs_to_jiffies(FEC_MII_TIMEOUT));
1033 if (time_left == 0) { 1087 if (time_left == 0) {
1034 fep->mii_timeout = 1; 1088 fep->mii_timeout = 1;
1035 printk(KERN_ERR "FEC: MDIO read timeout\n"); 1089 netdev_err(fep->netdev, "MDIO read timeout\n");
1036 return -ETIMEDOUT; 1090 return -ETIMEDOUT;
1037 } 1091 }
1038 1092
@@ -1060,7 +1114,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1060 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1114 usecs_to_jiffies(FEC_MII_TIMEOUT));
1061 if (time_left == 0) { 1115 if (time_left == 0) {
1062 fep->mii_timeout = 1; 1116 fep->mii_timeout = 1;
1063 printk(KERN_ERR "FEC: MDIO write timeout\n"); 1117 netdev_err(fep->netdev, "MDIO write timeout\n");
1064 return -ETIMEDOUT; 1118 return -ETIMEDOUT;
1065 } 1119 }
1066 1120
@@ -1100,9 +1154,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1100 } 1154 }
1101 1155
1102 if (phy_id >= PHY_MAX_ADDR) { 1156 if (phy_id >= PHY_MAX_ADDR) {
1103 printk(KERN_INFO 1157 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
1104 "%s: no PHY, assuming direct connection to switch\n",
1105 ndev->name);
1106 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 1158 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
1107 phy_id = 0; 1159 phy_id = 0;
1108 } 1160 }
@@ -1111,7 +1163,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1111 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 1163 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
1112 fep->phy_interface); 1164 fep->phy_interface);
1113 if (IS_ERR(phy_dev)) { 1165 if (IS_ERR(phy_dev)) {
1114 printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name); 1166 netdev_err(ndev, "could not attach to PHY\n");
1115 return PTR_ERR(phy_dev); 1167 return PTR_ERR(phy_dev);
1116 } 1168 }
1117 1169
@@ -1129,11 +1181,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1129 fep->link = 0; 1181 fep->link = 0;
1130 fep->full_duplex = 0; 1182 fep->full_duplex = 0;
1131 1183
1132 printk(KERN_INFO 1184 netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1133 "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 1185 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1134 ndev->name, 1186 fep->phy_dev->irq);
1135 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1136 fep->phy_dev->irq);
1137 1187
1138 return 0; 1188 return 0;
1139} 1189}
@@ -1443,7 +1493,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1443 1493
1444 if (fep->bufdesc_ex) { 1494 if (fep->bufdesc_ex) {
1445 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1495 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1446 ebdp->cbd_esc = BD_ENET_RX_INT; 1496 ebdp->cbd_esc = BD_ENET_TX_INT;
1447 } 1497 }
1448 1498
1449 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 1499 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
@@ -1608,7 +1658,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
1608 * Polled functionality used by netconsole and others in non interrupt mode 1658 * Polled functionality used by netconsole and others in non interrupt mode
1609 * 1659 *
1610 */ 1660 */
1611void fec_poll_controller(struct net_device *dev) 1661static void fec_poll_controller(struct net_device *dev)
1612{ 1662{
1613 int i; 1663 int i;
1614 struct fec_enet_private *fep = netdev_priv(dev); 1664 struct fec_enet_private *fep = netdev_priv(dev);
@@ -1623,6 +1673,33 @@ void fec_poll_controller(struct net_device *dev)
1623} 1673}
1624#endif 1674#endif
1625 1675
1676static int fec_set_features(struct net_device *netdev,
1677 netdev_features_t features)
1678{
1679 struct fec_enet_private *fep = netdev_priv(netdev);
1680 netdev_features_t changed = features ^ netdev->features;
1681
1682 netdev->features = features;
1683
1684 /* Receive checksum has been changed */
1685 if (changed & NETIF_F_RXCSUM) {
1686 if (features & NETIF_F_RXCSUM)
1687 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
1688 else
1689 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
1690
1691 if (netif_running(netdev)) {
1692 fec_stop(netdev);
1693 fec_restart(netdev, fep->phy_dev->duplex);
1694 netif_wake_queue(netdev);
1695 } else {
1696 fec_restart(netdev, fep->phy_dev->duplex);
1697 }
1698 }
1699
1700 return 0;
1701}
1702
1626static const struct net_device_ops fec_netdev_ops = { 1703static const struct net_device_ops fec_netdev_ops = {
1627 .ndo_open = fec_enet_open, 1704 .ndo_open = fec_enet_open,
1628 .ndo_stop = fec_enet_close, 1705 .ndo_stop = fec_enet_close,
@@ -1636,6 +1713,7 @@ static const struct net_device_ops fec_netdev_ops = {
1636#ifdef CONFIG_NET_POLL_CONTROLLER 1713#ifdef CONFIG_NET_POLL_CONTROLLER
1637 .ndo_poll_controller = fec_poll_controller, 1714 .ndo_poll_controller = fec_poll_controller,
1638#endif 1715#endif
1716 .ndo_set_features = fec_set_features,
1639}; 1717};
1640 1718
1641 /* 1719 /*
@@ -1649,11 +1727,9 @@ static int fec_enet_init(struct net_device *ndev)
1649 1727
1650 /* Allocate memory for buffer descriptors. */ 1728 /* Allocate memory for buffer descriptors. */
1651 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 1729 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1652 GFP_KERNEL); 1730 GFP_KERNEL);
1653 if (!cbd_base) { 1731 if (!cbd_base)
1654 printk("FEC: allocate descriptor memory failed?\n");
1655 return -ENOMEM; 1732 return -ENOMEM;
1656 }
1657 1733
1658 memset(cbd_base, 0, PAGE_SIZE); 1734 memset(cbd_base, 0, PAGE_SIZE);
1659 spin_lock_init(&fep->hw_lock); 1735 spin_lock_init(&fep->hw_lock);
@@ -1679,22 +1755,19 @@ static int fec_enet_init(struct net_device *ndev)
1679 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 1755 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
1680 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); 1756 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
1681 1757
1758 /* enable hw accelerator */
1759 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
1760 | NETIF_F_RXCSUM);
1761 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
1762 | NETIF_F_RXCSUM);
1763 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
1764
1682 fec_restart(ndev, 0); 1765 fec_restart(ndev, 0);
1683 1766
1684 return 0; 1767 return 0;
1685} 1768}
1686 1769
1687#ifdef CONFIG_OF 1770#ifdef CONFIG_OF
1688static int fec_get_phy_mode_dt(struct platform_device *pdev)
1689{
1690 struct device_node *np = pdev->dev.of_node;
1691
1692 if (np)
1693 return of_get_phy_mode(np);
1694
1695 return -ENODEV;
1696}
1697
1698static void fec_reset_phy(struct platform_device *pdev) 1771static void fec_reset_phy(struct platform_device *pdev)
1699{ 1772{
1700 int err, phy_reset; 1773 int err, phy_reset;
@@ -1723,11 +1796,6 @@ static void fec_reset_phy(struct platform_device *pdev)
1723 gpio_set_value(phy_reset, 1); 1796 gpio_set_value(phy_reset, 1);
1724} 1797}
1725#else /* CONFIG_OF */ 1798#else /* CONFIG_OF */
1726static int fec_get_phy_mode_dt(struct platform_device *pdev)
1727{
1728 return -ENODEV;
1729}
1730
1731static void fec_reset_phy(struct platform_device *pdev) 1799static void fec_reset_phy(struct platform_device *pdev)
1732{ 1800{
1733 /* 1801 /*
@@ -1758,16 +1826,10 @@ fec_probe(struct platform_device *pdev)
1758 if (!r) 1826 if (!r)
1759 return -ENXIO; 1827 return -ENXIO;
1760 1828
1761 r = request_mem_region(r->start, resource_size(r), pdev->name);
1762 if (!r)
1763 return -EBUSY;
1764
1765 /* Init network device */ 1829 /* Init network device */
1766 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 1830 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1767 if (!ndev) { 1831 if (!ndev)
1768 ret = -ENOMEM; 1832 return -ENOMEM;
1769 goto failed_alloc_etherdev;
1770 }
1771 1833
1772 SET_NETDEV_DEV(ndev, &pdev->dev); 1834 SET_NETDEV_DEV(ndev, &pdev->dev);
1773 1835
@@ -1779,7 +1841,7 @@ fec_probe(struct platform_device *pdev)
1779 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) 1841 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
1780 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 1842 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
1781 1843
1782 fep->hwp = ioremap(r->start, resource_size(r)); 1844 fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
1783 fep->pdev = pdev; 1845 fep->pdev = pdev;
1784 fep->dev_id = dev_id++; 1846 fep->dev_id = dev_id++;
1785 1847
@@ -1792,7 +1854,7 @@ fec_probe(struct platform_device *pdev)
1792 1854
1793 platform_set_drvdata(pdev, ndev); 1855 platform_set_drvdata(pdev, ndev);
1794 1856
1795 ret = fec_get_phy_mode_dt(pdev); 1857 ret = of_get_phy_mode(pdev->dev.of_node);
1796 if (ret < 0) { 1858 if (ret < 0) {
1797 pdata = pdev->dev.platform_data; 1859 pdata = pdev->dev.platform_data;
1798 if (pdata) 1860 if (pdata)
@@ -1882,6 +1944,9 @@ fec_probe(struct platform_device *pdev)
1882 if (ret) 1944 if (ret)
1883 goto failed_register; 1945 goto failed_register;
1884 1946
1947 if (fep->bufdesc_ex && fep->ptp_clock)
1948 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
1949
1885 return 0; 1950 return 0;
1886 1951
1887failed_register: 1952failed_register:
@@ -1901,11 +1966,8 @@ failed_regulator:
1901 clk_disable_unprepare(fep->clk_ptp); 1966 clk_disable_unprepare(fep->clk_ptp);
1902failed_pin: 1967failed_pin:
1903failed_clk: 1968failed_clk:
1904 iounmap(fep->hwp);
1905failed_ioremap: 1969failed_ioremap:
1906 free_netdev(ndev); 1970 free_netdev(ndev);
1907failed_alloc_etherdev:
1908 release_mem_region(r->start, resource_size(r));
1909 1971
1910 return ret; 1972 return ret;
1911} 1973}
@@ -1915,7 +1977,6 @@ fec_drv_remove(struct platform_device *pdev)
1915{ 1977{
1916 struct net_device *ndev = platform_get_drvdata(pdev); 1978 struct net_device *ndev = platform_get_drvdata(pdev);
1917 struct fec_enet_private *fep = netdev_priv(ndev); 1979 struct fec_enet_private *fep = netdev_priv(ndev);
1918 struct resource *r;
1919 int i; 1980 int i;
1920 1981
1921 unregister_netdev(ndev); 1982 unregister_netdev(ndev);
@@ -1931,19 +1992,14 @@ fec_drv_remove(struct platform_device *pdev)
1931 if (irq > 0) 1992 if (irq > 0)
1932 free_irq(irq, ndev); 1993 free_irq(irq, ndev);
1933 } 1994 }
1934 iounmap(fep->hwp);
1935 free_netdev(ndev); 1995 free_netdev(ndev);
1936 1996
1937 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1938 BUG_ON(!r);
1939 release_mem_region(r->start, resource_size(r));
1940
1941 platform_set_drvdata(pdev, NULL); 1997 platform_set_drvdata(pdev, NULL);
1942 1998
1943 return 0; 1999 return 0;
1944} 2000}
1945 2001
1946#ifdef CONFIG_PM 2002#ifdef CONFIG_PM_SLEEP
1947static int 2003static int
1948fec_suspend(struct device *dev) 2004fec_suspend(struct device *dev)
1949{ 2005{
@@ -1975,24 +2031,15 @@ fec_resume(struct device *dev)
1975 2031
1976 return 0; 2032 return 0;
1977} 2033}
2034#endif /* CONFIG_PM_SLEEP */
1978 2035
1979static const struct dev_pm_ops fec_pm_ops = { 2036static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
1980 .suspend = fec_suspend,
1981 .resume = fec_resume,
1982 .freeze = fec_suspend,
1983 .thaw = fec_resume,
1984 .poweroff = fec_suspend,
1985 .restore = fec_resume,
1986};
1987#endif
1988 2037
1989static struct platform_driver fec_driver = { 2038static struct platform_driver fec_driver = {
1990 .driver = { 2039 .driver = {
1991 .name = DRIVER_NAME, 2040 .name = DRIVER_NAME,
1992 .owner = THIS_MODULE, 2041 .owner = THIS_MODULE,
1993#ifdef CONFIG_PM
1994 .pm = &fec_pm_ops, 2042 .pm = &fec_pm_ops,
1995#endif
1996 .of_match_table = fec_dt_ids, 2043 .of_match_table = fec_dt_ids,
1997 }, 2044 },
1998 .id_table = fec_devtype, 2045 .id_table = fec_devtype,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 77943a6a1b8c..9bc15e2365bb 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -14,6 +14,8 @@
14 * 14 *
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
18#include <linux/module.h> 20#include <linux/module.h>
19 21
@@ -858,13 +860,11 @@ static int mpc52xx_fec_probe(struct platform_device *op)
858 /* Reserve FEC control zone */ 860 /* Reserve FEC control zone */
859 rv = of_address_to_resource(np, 0, &mem); 861 rv = of_address_to_resource(np, 0, &mem);
860 if (rv) { 862 if (rv) {
861 printk(KERN_ERR DRIVER_NAME ": " 863 pr_err("Error while parsing device node resource\n");
862 "Error while parsing device node resource\n" );
863 goto err_netdev; 864 goto err_netdev;
864 } 865 }
865 if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) { 866 if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
866 printk(KERN_ERR DRIVER_NAME 867 pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
867 " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
868 (unsigned long)resource_size(&mem), 868 (unsigned long)resource_size(&mem),
869 sizeof(struct mpc52xx_fec)); 869 sizeof(struct mpc52xx_fec));
870 rv = -EINVAL; 870 rv = -EINVAL;
@@ -902,7 +902,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
902 priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); 902 priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
903 903
904 if (!priv->rx_dmatsk || !priv->tx_dmatsk) { 904 if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
905 printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" ); 905 pr_err("Can not init SDMA tasks\n");
906 rv = -ENOMEM; 906 rv = -ENOMEM;
907 goto err_rx_tx_dmatsk; 907 goto err_rx_tx_dmatsk;
908 } 908 }
@@ -982,8 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
982 982
983 /* We're done ! */ 983 /* We're done ! */
984 dev_set_drvdata(&op->dev, ndev); 984 dev_set_drvdata(&op->dev, ndev);
985 printk(KERN_INFO "%s: %s MAC %pM\n", 985 netdev_info(ndev, "%s MAC %pM\n",
986 ndev->name, op->dev.of_node->full_name, ndev->dev_addr); 986 op->dev.of_node->full_name, ndev->dev_addr);
987 987
988 return 0; 988 return 0;
989 989
@@ -1094,7 +1094,7 @@ mpc52xx_fec_init(void)
1094 int ret; 1094 int ret;
1095 ret = platform_driver_register(&mpc52xx_fec_mdio_driver); 1095 ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
1096 if (ret) { 1096 if (ret) {
1097 printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); 1097 pr_err("failed to register mdio driver\n");
1098 return ret; 1098 return ret;
1099 } 1099 }
1100#endif 1100#endif
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 0d8df400a479..25fc960cbf0e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -17,6 +17,8 @@
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/string.h> 24#include <linux/string.h>
@@ -128,7 +130,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
128 130
129 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 131 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
130} 132}
131EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
132 133
133/** 134/**
134 * fec_ptp_adjfreq - adjust ptp cycle frequency 135 * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -319,7 +320,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
319 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 320 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
320 -EFAULT : 0; 321 -EFAULT : 0;
321} 322}
322EXPORT_SYMBOL(fec_ptp_ioctl);
323 323
324/** 324/**
325 * fec_time_keep - call timecounter_read every second to avoid timer overrun 325 * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -381,8 +381,5 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
381 if (IS_ERR(fep->ptp_clock)) { 381 if (IS_ERR(fep->ptp_clock)) {
382 fep->ptp_clock = NULL; 382 fep->ptp_clock = NULL;
383 pr_err("ptp_clock_register failed\n"); 383 pr_err("ptp_clock_register failed\n");
384 } else {
385 pr_info("registered PHC device on %s\n", ndev->name);
386 } 384 }
387} 385}
388EXPORT_SYMBOL(fec_ptp_init);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 46df28893c10..edc120094c34 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
177 received++; 177 received++;
178 netif_receive_skb(skb); 178 netif_receive_skb(skb);
179 } else { 179 } else {
180 dev_warn(fep->dev,
181 "Memory squeeze, dropping packet.\n");
182 fep->stats.rx_dropped++; 180 fep->stats.rx_dropped++;
183 skbn = skb; 181 skbn = skb;
184 } 182 }
@@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
309 received++; 307 received++;
310 netif_rx(skb); 308 netif_rx(skb);
311 } else { 309 } else {
312 dev_warn(fep->dev,
313 "Memory squeeze, dropping packet.\n");
314 fep->stats.rx_dropped++; 310 fep->stats.rx_dropped++;
315 skbn = skb; 311 skbn = skb;
316 } 312 }
@@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev)
505 */ 501 */
506 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 502 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
507 skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); 503 skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
508 if (skb == NULL) { 504 if (skb == NULL)
509 dev_warn(fep->dev,
510 "Memory squeeze, unable to allocate skb\n");
511 break; 505 break;
512 } 506
513 skb_align(skb, ENET_RX_ALIGN); 507 skb_align(skb, ENET_RX_ALIGN);
514 fep->rx_skbuff[i] = skb; 508 fep->rx_skbuff[i] = skb;
515 CBDW_BUFADDR(bdp, 509 CBDW_BUFADDR(bdp,
@@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
593 587
594 /* Alloc new skb */ 588 /* Alloc new skb */
595 new_skb = netdev_alloc_skb(dev, skb->len + 4); 589 new_skb = netdev_alloc_skb(dev, skb->len + 4);
596 if (!new_skb) { 590 if (!new_skb)
597 if (net_ratelimit()) {
598 dev_warn(fep->dev,
599 "Memory squeeze, dropping tx packet.\n");
600 }
601 return NULL; 591 return NULL;
602 }
603 592
604 /* Make sure new skb is properly aligned */ 593 /* Make sure new skb is properly aligned */
605 skb_align(new_skb, 4); 594 skb_align(new_skb, 4);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2c5441d1bf0..2375a01715a0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -132,7 +132,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
132static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
133#endif 133#endif
134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 135static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
137 int amount_pull, struct napi_struct *napi); 137 int amount_pull, struct napi_struct *napi);
138void gfar_halt(struct net_device *dev); 138void gfar_halt(struct net_device *dev);
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
245 245
246 /* Allocate memory for the buffer descriptors */ 246 /* Allocate memory for the buffer descriptors */
247 vaddr = dma_alloc_coherent(dev, 247 vaddr = dma_alloc_coherent(dev,
248 sizeof(struct txbd8) * priv->total_tx_ring_size + 248 (priv->total_tx_ring_size *
249 sizeof(struct rxbd8) * priv->total_rx_ring_size, 249 sizeof(struct txbd8)) +
250 &addr, GFP_KERNEL); 250 (priv->total_rx_ring_size *
251 if (!vaddr) { 251 sizeof(struct rxbd8)),
252 netif_err(priv, ifup, ndev, 252 &addr, GFP_KERNEL);
253 "Could not allocate buffer descriptors!\n"); 253 if (!vaddr)
254 return -ENOMEM; 254 return -ENOMEM;
255 }
256 255
257 for (i = 0; i < priv->num_tx_queues; i++) { 256 for (i = 0; i < priv->num_tx_queues; i++) {
258 tx_queue = priv->tx_queue[i]; 257 tx_queue = priv->tx_queue[i];
@@ -342,7 +341,7 @@ static void gfar_init_mac(struct net_device *ndev)
342 gfar_init_tx_rx_base(priv); 341 gfar_init_tx_rx_base(priv);
343 342
344 /* Configure the coalescing support */ 343 /* Configure the coalescing support */
345 gfar_configure_coalescing(priv, 0xFF, 0xFF); 344 gfar_configure_coalescing_all(priv);
346 345
347 /* set this when rx hw offload (TOE) functions are being used */ 346 /* set this when rx hw offload (TOE) functions are being used */
348 priv->uses_rxfcb = 0; 347 priv->uses_rxfcb = 0;
@@ -387,7 +386,7 @@ static void gfar_init_mac(struct net_device *ndev)
387 priv->uses_rxfcb = 1; 386 priv->uses_rxfcb = 1;
388 } 387 }
389 388
390 if (ndev->features & NETIF_F_HW_VLAN_RX) { 389 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
391 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 390 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
392 priv->uses_rxfcb = 1; 391 priv->uses_rxfcb = 1;
393 } 392 }
@@ -691,7 +690,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
691 } 690 }
692 691
693 for (i = 0; i < priv->num_tx_queues; i++) 692 for (i = 0; i < priv->num_tx_queues; i++)
694 priv->tx_queue[i] = NULL; 693 priv->tx_queue[i] = NULL;
695 for (i = 0; i < priv->num_rx_queues; i++) 694 for (i = 0; i < priv->num_rx_queues; i++)
696 priv->rx_queue[i] = NULL; 695 priv->rx_queue[i] = NULL;
697 696
@@ -1051,8 +1050,9 @@ static int gfar_probe(struct platform_device *ofdev)
1051 } 1050 }
1052 1051
1053 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1052 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1054 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1053 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1055 dev->features |= NETIF_F_HW_VLAN_RX; 1054 NETIF_F_HW_VLAN_CTAG_RX;
1055 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1056 } 1056 }
1057 1057
1058 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1058 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -1817,25 +1817,15 @@ void gfar_start(struct net_device *dev)
1817 dev->trans_start = jiffies; /* prevent tx timeout */ 1817 dev->trans_start = jiffies; /* prevent tx timeout */
1818} 1818}
1819 1819
1820void gfar_configure_coalescing(struct gfar_private *priv, 1820static void gfar_configure_coalescing(struct gfar_private *priv,
1821 unsigned long tx_mask, unsigned long rx_mask) 1821 unsigned long tx_mask, unsigned long rx_mask)
1822{ 1822{
1823 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1823 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1824 u32 __iomem *baddr; 1824 u32 __iomem *baddr;
1825 int i = 0;
1826
1827 /* Backward compatible case ---- even if we enable
1828 * multiple queues, there's only single reg to program
1829 */
1830 gfar_write(&regs->txic, 0);
1831 if (likely(priv->tx_queue[0]->txcoalescing))
1832 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1833
1834 gfar_write(&regs->rxic, 0);
1835 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1836 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1837 1825
1838 if (priv->mode == MQ_MG_MODE) { 1826 if (priv->mode == MQ_MG_MODE) {
1827 int i = 0;
1828
1839 baddr = &regs->txic0; 1829 baddr = &regs->txic0;
1840 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 1830 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1841 gfar_write(baddr + i, 0); 1831 gfar_write(baddr + i, 0);
@@ -1849,9 +1839,25 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1849 if (likely(priv->rx_queue[i]->rxcoalescing)) 1839 if (likely(priv->rx_queue[i]->rxcoalescing))
1850 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1840 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1851 } 1841 }
1842 } else {
1843 /* Backward compatible case -- even if we enable
1844 * multiple queues, there's only single reg to program
1845 */
1846 gfar_write(&regs->txic, 0);
1847 if (likely(priv->tx_queue[0]->txcoalescing))
1848 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1849
1850 gfar_write(&regs->rxic, 0);
1851 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1852 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1852 } 1853 }
1853} 1854}
1854 1855
1856void gfar_configure_coalescing_all(struct gfar_private *priv)
1857{
1858 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1859}
1860
1855static int register_grp_irqs(struct gfar_priv_grp *grp) 1861static int register_grp_irqs(struct gfar_priv_grp *grp)
1856{ 1862{
1857 struct gfar_private *priv = grp->priv; 1863 struct gfar_private *priv = grp->priv;
@@ -1941,7 +1947,7 @@ int startup_gfar(struct net_device *ndev)
1941 1947
1942 phy_start(priv->phydev); 1948 phy_start(priv->phydev);
1943 1949
1944 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1950 gfar_configure_coalescing_all(priv);
1945 1951
1946 return 0; 1952 return 0;
1947 1953
@@ -2343,7 +2349,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2343 local_irq_save(flags); 2349 local_irq_save(flags);
2344 lock_rx_qs(priv); 2350 lock_rx_qs(priv);
2345 2351
2346 if (features & NETIF_F_HW_VLAN_TX) { 2352 if (features & NETIF_F_HW_VLAN_CTAG_TX) {
2347 /* Enable VLAN tag insertion */ 2353 /* Enable VLAN tag insertion */
2348 tempval = gfar_read(&regs->tctrl); 2354 tempval = gfar_read(&regs->tctrl);
2349 tempval |= TCTRL_VLINS; 2355 tempval |= TCTRL_VLINS;
@@ -2355,7 +2361,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2355 gfar_write(&regs->tctrl, tempval); 2361 gfar_write(&regs->tctrl, tempval);
2356 } 2362 }
2357 2363
2358 if (features & NETIF_F_HW_VLAN_RX) { 2364 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2359 /* Enable VLAN tag extraction */ 2365 /* Enable VLAN tag extraction */
2360 tempval = gfar_read(&regs->rctrl); 2366 tempval = gfar_read(&regs->rctrl);
2361 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2367 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
@@ -2469,12 +2475,11 @@ static void gfar_align_skb(struct sk_buff *skb)
2469} 2475}
2470 2476
2471/* Interrupt Handler for Transmit complete */ 2477/* Interrupt Handler for Transmit complete */
2472static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2478static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2473{ 2479{
2474 struct net_device *dev = tx_queue->dev; 2480 struct net_device *dev = tx_queue->dev;
2475 struct netdev_queue *txq; 2481 struct netdev_queue *txq;
2476 struct gfar_private *priv = netdev_priv(dev); 2482 struct gfar_private *priv = netdev_priv(dev);
2477 struct gfar_priv_rx_q *rx_queue = NULL;
2478 struct txbd8 *bdp, *next = NULL; 2483 struct txbd8 *bdp, *next = NULL;
2479 struct txbd8 *lbdp = NULL; 2484 struct txbd8 *lbdp = NULL;
2480 struct txbd8 *base = tx_queue->tx_bd_base; 2485 struct txbd8 *base = tx_queue->tx_bd_base;
@@ -2489,7 +2494,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2489 u32 lstatus; 2494 u32 lstatus;
2490 size_t buflen; 2495 size_t buflen;
2491 2496
2492 rx_queue = priv->rx_queue[tqi];
2493 txq = netdev_get_tx_queue(dev, tqi); 2497 txq = netdev_get_tx_queue(dev, tqi);
2494 bdp = tx_queue->dirty_tx; 2498 bdp = tx_queue->dirty_tx;
2495 skb_dirtytx = tx_queue->skb_dirtytx; 2499 skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2571,8 +2575,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2571 tx_queue->dirty_tx = bdp; 2575 tx_queue->dirty_tx = bdp;
2572 2576
2573 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2577 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2574
2575 return howmany;
2576} 2578}
2577 2579
2578static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2580static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
@@ -2694,8 +2696,6 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2694 struct gfar_private *priv = netdev_priv(dev); 2696 struct gfar_private *priv = netdev_priv(dev);
2695 struct rxfcb *fcb = NULL; 2697 struct rxfcb *fcb = NULL;
2696 2698
2697 gro_result_t ret;
2698
2699 /* fcb is at the beginning if exists */ 2699 /* fcb is at the beginning if exists */
2700 fcb = (struct rxfcb *)skb->data; 2700 fcb = (struct rxfcb *)skb->data;
2701 2701
@@ -2725,19 +2725,17 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2725 /* Tell the skb what kind of packet this is */ 2725 /* Tell the skb what kind of packet this is */
2726 skb->protocol = eth_type_trans(skb, dev); 2726 skb->protocol = eth_type_trans(skb, dev);
2727 2727
2728 /* There's need to check for NETIF_F_HW_VLAN_RX here. 2728 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2729 * Even if vlan rx accel is disabled, on some chips 2729 * Even if vlan rx accel is disabled, on some chips
2730 * RXFCB_VLN is pseudo randomly set. 2730 * RXFCB_VLN is pseudo randomly set.
2731 */ 2731 */
2732 if (dev->features & NETIF_F_HW_VLAN_RX && 2732 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2733 fcb->flags & RXFCB_VLN) 2733 fcb->flags & RXFCB_VLN)
2734 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2734 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
2735 2735
2736 /* Send the packet up the stack */ 2736 /* Send the packet up the stack */
2737 ret = napi_gro_receive(napi, skb); 2737 napi_gro_receive(napi, skb);
2738 2738
2739 if (unlikely(GRO_DROP == ret))
2740 atomic64_inc(&priv->extra_stats.kernel_dropped);
2741} 2739}
2742 2740
2743/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2741/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2835,62 +2833,82 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2835 struct gfar __iomem *regs = gfargrp->regs; 2833 struct gfar __iomem *regs = gfargrp->regs;
2836 struct gfar_priv_tx_q *tx_queue = NULL; 2834 struct gfar_priv_tx_q *tx_queue = NULL;
2837 struct gfar_priv_rx_q *rx_queue = NULL; 2835 struct gfar_priv_rx_q *rx_queue = NULL;
2838 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; 2836 int work_done = 0, work_done_per_q = 0;
2839 int tx_cleaned = 0, i, left_over_budget = budget; 2837 int i, budget_per_q = 0;
2840 unsigned long serviced_queues = 0; 2838 int has_tx_work;
2841 int num_queues = 0; 2839 unsigned long rstat_rxf;
2842 2840 int num_act_queues;
2843 num_queues = gfargrp->num_rx_queues;
2844 budget_per_queue = budget/num_queues;
2845 2841
2846 /* Clear IEVENT, so interrupts aren't called again 2842 /* Clear IEVENT, so interrupts aren't called again
2847 * because of the packets that have already arrived 2843 * because of the packets that have already arrived
2848 */ 2844 */
2849 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2845 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2850 2846
2851 while (num_queues && left_over_budget) { 2847 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2852 budget_per_queue = left_over_budget/num_queues; 2848
2853 left_over_budget = 0; 2849 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2850 if (num_act_queues)
2851 budget_per_q = budget/num_act_queues;
2852
2853 while (1) {
2854 has_tx_work = 0;
2855 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2856 tx_queue = priv->tx_queue[i];
2857 /* run Tx cleanup to completion */
2858 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2859 gfar_clean_tx_ring(tx_queue);
2860 has_tx_work = 1;
2861 }
2862 }
2854 2863
2855 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2864 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2856 if (test_bit(i, &serviced_queues)) 2865 /* skip queue if not active */
2866 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2857 continue; 2867 continue;
2868
2858 rx_queue = priv->rx_queue[i]; 2869 rx_queue = priv->rx_queue[i];
2859 tx_queue = priv->tx_queue[rx_queue->qindex]; 2870 work_done_per_q =
2860 2871 gfar_clean_rx_ring(rx_queue, budget_per_q);
2861 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2872 work_done += work_done_per_q;
2862 rx_cleaned_per_queue = 2873
2863 gfar_clean_rx_ring(rx_queue, budget_per_queue); 2874 /* finished processing this queue */
2864 rx_cleaned += rx_cleaned_per_queue; 2875 if (work_done_per_q < budget_per_q) {
2865 if (rx_cleaned_per_queue < budget_per_queue) { 2876 /* clear active queue hw indication */
2866 left_over_budget = left_over_budget + 2877 gfar_write(&regs->rstat,
2867 (budget_per_queue - 2878 RSTAT_CLEAR_RXF0 >> i);
2868 rx_cleaned_per_queue); 2879 rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
2869 set_bit(i, &serviced_queues); 2880 num_act_queues--;
2870 num_queues--; 2881
2882 if (!num_act_queues)
2883 break;
2884 /* recompute budget per Rx queue */
2885 budget_per_q =
2886 (budget - work_done) / num_act_queues;
2871 } 2887 }
2872 } 2888 }
2873 }
2874 2889
2875 if (tx_cleaned) 2890 if (work_done >= budget)
2876 return budget; 2891 break;
2877 2892
2878 if (rx_cleaned < budget) { 2893 if (!num_act_queues && !has_tx_work) {
2879 napi_complete(napi);
2880 2894
2881 /* Clear the halt bit in RSTAT */ 2895 napi_complete(napi);
2882 gfar_write(&regs->rstat, gfargrp->rstat);
2883 2896
2884 gfar_write(&regs->imask, IMASK_DEFAULT); 2897 /* Clear the halt bit in RSTAT */
2898 gfar_write(&regs->rstat, gfargrp->rstat);
2885 2899
2886 /* If we are coalescing interrupts, update the timer 2900 gfar_write(&regs->imask, IMASK_DEFAULT);
2887 * Otherwise, clear it 2901
2888 */ 2902 /* If we are coalescing interrupts, update the timer
2889 gfar_configure_coalescing(priv, gfargrp->rx_bit_map, 2903 * Otherwise, clear it
2890 gfargrp->tx_bit_map); 2904 */
2905 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2906 gfargrp->tx_bit_map);
2907 break;
2908 }
2891 } 2909 }
2892 2910
2893 return rx_cleaned; 2911 return work_done;
2894} 2912}
2895 2913
2896#ifdef CONFIG_NET_POLL_CONTROLLER 2914#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 63a28d294e20..04b552cd419d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -291,7 +291,9 @@ extern const char gfar_driver_version[];
291#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) 291#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
292 292
293 293
294#define RSTAT_CLEAR_RHALT 0x00800000 294#define RSTAT_CLEAR_RHALT 0x00800000
295#define RSTAT_CLEAR_RXF0 0x00000080
296#define RSTAT_RXF_MASK 0x000000ff
295 297
296#define TCTRL_IPCSEN 0x00004000 298#define TCTRL_IPCSEN 0x00004000
297#define TCTRL_TUCSEN 0x00002000 299#define TCTRL_TUCSEN 0x00002000
@@ -627,7 +629,6 @@ struct rmon_mib
627}; 629};
628 630
629struct gfar_extra_stats { 631struct gfar_extra_stats {
630 atomic64_t kernel_dropped;
631 atomic64_t rx_large; 632 atomic64_t rx_large;
632 atomic64_t rx_short; 633 atomic64_t rx_short;
633 atomic64_t rx_nonoctet; 634 atomic64_t rx_nonoctet;
@@ -1180,8 +1181,7 @@ extern void stop_gfar(struct net_device *dev);
1180extern void gfar_halt(struct net_device *dev); 1181extern void gfar_halt(struct net_device *dev);
1181extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1182extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
1182 int enable, u32 regnum, u32 read); 1183 int enable, u32 regnum, u32 read);
1183extern void gfar_configure_coalescing(struct gfar_private *priv, 1184extern void gfar_configure_coalescing_all(struct gfar_private *priv);
1184 unsigned long tx_mask, unsigned long rx_mask);
1185void gfar_init_sysfs(struct net_device *dev); 1185void gfar_init_sysfs(struct net_device *dev);
1186int gfar_set_features(struct net_device *dev, netdev_features_t features); 1186int gfar_set_features(struct net_device *dev, netdev_features_t features);
1187extern void gfar_check_rx_parser_mode(struct gfar_private *priv); 1187extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 75e89acf4912..21cd88124ca9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -66,7 +66,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
66 struct ethtool_drvinfo *drvinfo); 66 struct ethtool_drvinfo *drvinfo);
67 67
68static const char stat_gstrings[][ETH_GSTRING_LEN] = { 68static const char stat_gstrings[][ETH_GSTRING_LEN] = {
69 "rx-dropped-by-kernel",
70 "rx-large-frame-errors", 69 "rx-large-frame-errors",
71 "rx-short-frame-errors", 70 "rx-short-frame-errors",
72 "rx-non-octet-errors", 71 "rx-non-octet-errors",
@@ -390,14 +389,14 @@ static int gfar_scoalesce(struct net_device *dev,
390 389
391 /* Check the bounds of the values */ 390 /* Check the bounds of the values */
392 if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 391 if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
393 pr_info("Coalescing is limited to %d microseconds\n", 392 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
394 GFAR_MAX_COAL_USECS); 393 GFAR_MAX_COAL_USECS);
395 return -EINVAL; 394 return -EINVAL;
396 } 395 }
397 396
398 if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { 397 if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
399 pr_info("Coalescing is limited to %d frames\n", 398 netdev_info(dev, "Coalescing is limited to %d frames\n",
400 GFAR_MAX_COAL_FRAMES); 399 GFAR_MAX_COAL_FRAMES);
401 return -EINVAL; 400 return -EINVAL;
402 } 401 }
403 402
@@ -419,14 +418,14 @@ static int gfar_scoalesce(struct net_device *dev,
419 418
420 /* Check the bounds of the values */ 419 /* Check the bounds of the values */
421 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 420 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
422 pr_info("Coalescing is limited to %d microseconds\n", 421 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
423 GFAR_MAX_COAL_USECS); 422 GFAR_MAX_COAL_USECS);
424 return -EINVAL; 423 return -EINVAL;
425 } 424 }
426 425
427 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { 426 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
428 pr_info("Coalescing is limited to %d frames\n", 427 netdev_info(dev, "Coalescing is limited to %d frames\n",
429 GFAR_MAX_COAL_FRAMES); 428 GFAR_MAX_COAL_FRAMES);
430 return -EINVAL; 429 return -EINVAL;
431 } 430 }
432 431
@@ -436,7 +435,7 @@ static int gfar_scoalesce(struct net_device *dev,
436 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 435 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
437 } 436 }
438 437
439 gfar_configure_coalescing(priv, 0xFF, 0xFF); 438 gfar_configure_coalescing_all(priv);
440 439
441 return 0; 440 return 0;
442} 441}
@@ -543,7 +542,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
543 int err = 0, i = 0; 542 int err = 0, i = 0;
544 netdev_features_t changed = dev->features ^ features; 543 netdev_features_t changed = dev->features ^ features;
545 544
546 if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) 545 if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
547 gfar_vlan_mode(dev, features); 546 gfar_vlan_mode(dev, features);
548 547
549 if (!(changed & NETIF_F_RXCSUM)) 548 if (!(changed & NETIF_F_RXCSUM))
@@ -736,7 +735,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
736 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; 735 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
737 break; 736 break;
738 default: 737 default:
739 pr_err("Right now this class is not supported\n"); 738 netdev_err(priv->ndev,
739 "Right now this class is not supported\n");
740 ret = 0; 740 ret = 0;
741 goto err; 741 goto err;
742 } 742 }
@@ -752,7 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
752 } 752 }
753 753
754 if (i == MAX_FILER_IDX + 1) { 754 if (i == MAX_FILER_IDX + 1) {
755 pr_err("No parse rule found, can't create hash rules\n"); 755 netdev_err(priv->ndev,
756 "No parse rule found, can't create hash rules\n");
756 ret = 0; 757 ret = 0;
757 goto err; 758 goto err;
758 } 759 }
@@ -1569,7 +1570,7 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1569 gfar_cluster_filer(tab); 1570 gfar_cluster_filer(tab);
1570 gfar_optimize_filer_masks(tab); 1571 gfar_optimize_filer_masks(tab);
1571 1572
1572 pr_debug("\n\tSummary:\n" 1573 pr_debug("\tSummary:\n"
1573 "\tData on hardware: %d\n" 1574 "\tData on hardware: %d\n"
1574 "\tCompression rate: %d%%\n", 1575 "\tCompression rate: %d%%\n",
1575 tab->index, 100 - (100 * tab->index) / i); 1576 tab->index, 100 - (100 * tab->index) / i);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 2e5daee0438a..576e4b858fce 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -17,6 +17,9 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
20#include <linux/device.h> 23#include <linux/device.h>
21#include <linux/hrtimer.h> 24#include <linux/hrtimer.h>
22#include <linux/init.h> 25#include <linux/init.h>
@@ -127,7 +130,6 @@ struct gianfar_ptp_registers {
127 130
128#define DRIVER "gianfar_ptp" 131#define DRIVER "gianfar_ptp"
129#define DEFAULT_CKSEL 1 132#define DEFAULT_CKSEL 1
130#define N_ALARM 1 /* first alarm is used internally to reset fipers */
131#define N_EXT_TS 2 133#define N_EXT_TS 2
132#define REG_SIZE sizeof(struct gianfar_ptp_registers) 134#define REG_SIZE sizeof(struct gianfar_ptp_registers)
133 135
@@ -410,7 +412,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
410 .owner = THIS_MODULE, 412 .owner = THIS_MODULE,
411 .name = "gianfar clock", 413 .name = "gianfar clock",
412 .max_adj = 512000, 414 .max_adj = 512000,
413 .n_alarm = N_ALARM, 415 .n_alarm = 0,
414 .n_ext_ts = N_EXT_TS, 416 .n_ext_ts = N_EXT_TS,
415 .n_per_out = 0, 417 .n_per_out = 0,
416 .pps = 1, 418 .pps = 1,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index cd14a4d449c2..acb55af7e3f3 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -337,5 +337,5 @@ void gfar_init_sysfs(struct net_device *dev)
337 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve); 337 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
338 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off); 338 rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
339 if (rc) 339 if (rc)
340 dev_err(&dev->dev, "Error creating gianfar sysfs files.\n"); 340 dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
341} 341}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0a70bb55d1b0..e04c59818f60 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -12,6 +12,9 @@
12 * Free Software Foundation; either version 2 of the License, or (at your 12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
15#include <linux/kernel.h> 18#include <linux/kernel.h>
16#include <linux/init.h> 19#include <linux/init.h>
17#include <linux/errno.h> 20#include <linux/errno.h>
@@ -50,12 +53,6 @@
50 53
51#define ugeth_dbg(format, arg...) \ 54#define ugeth_dbg(format, arg...) \
52 ugeth_printk(KERN_DEBUG , format , ## arg) 55 ugeth_printk(KERN_DEBUG , format , ## arg)
53#define ugeth_err(format, arg...) \
54 ugeth_printk(KERN_ERR , format , ## arg)
55#define ugeth_info(format, arg...) \
56 ugeth_printk(KERN_INFO , format , ## arg)
57#define ugeth_warn(format, arg...) \
58 ugeth_printk(KERN_WARNING , format , ## arg)
59 56
60#ifdef UGETH_VERBOSE_DEBUG 57#ifdef UGETH_VERBOSE_DEBUG
61#define ugeth_vdbg ugeth_dbg 58#define ugeth_vdbg ugeth_dbg
@@ -281,7 +278,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
281 for (i = 0; i < num_entries; i++) { 278 for (i = 0; i < num_entries; i++) {
282 if ((snum = qe_get_snum()) < 0) { 279 if ((snum = qe_get_snum()) < 0) {
283 if (netif_msg_ifup(ugeth)) 280 if (netif_msg_ifup(ugeth))
284 ugeth_err("fill_init_enet_entries: Can not get SNUM."); 281 pr_err("Can not get SNUM\n");
285 return snum; 282 return snum;
286 } 283 }
287 if ((i == 0) && skip_page_for_first_entry) 284 if ((i == 0) && skip_page_for_first_entry)
@@ -292,7 +289,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
292 qe_muram_alloc(thread_size, thread_alignment); 289 qe_muram_alloc(thread_size, thread_alignment);
293 if (IS_ERR_VALUE(init_enet_offset)) { 290 if (IS_ERR_VALUE(init_enet_offset)) {
294 if (netif_msg_ifup(ugeth)) 291 if (netif_msg_ifup(ugeth))
295 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); 292 pr_err("Can not allocate DPRAM memory\n");
296 qe_put_snum((u8) snum); 293 qe_put_snum((u8) snum);
297 return -ENOMEM; 294 return -ENOMEM;
298 } 295 }
@@ -365,10 +362,9 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
365 init_enet_offset = 362 init_enet_offset =
366 (in_be32(p_start) & 363 (in_be32(p_start) &
367 ENET_INIT_PARAM_PTR_MASK); 364 ENET_INIT_PARAM_PTR_MASK);
368 ugeth_info("Init enet entry %d:", i); 365 pr_info("Init enet entry %d:\n", i);
369 ugeth_info("Base address: 0x%08x", 366 pr_info("Base address: 0x%08x\n",
370 (u32) 367 (u32)qe_muram_addr(init_enet_offset));
371 qe_muram_addr(init_enet_offset));
372 mem_disp(qe_muram_addr(init_enet_offset), 368 mem_disp(qe_muram_addr(init_enet_offset),
373 thread_size); 369 thread_size);
374 } 370 }
@@ -396,8 +392,8 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
396{ 392{
397 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 393 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
398 394
399 if (!(paddr_num < NUM_OF_PADDRS)) { 395 if (paddr_num >= NUM_OF_PADDRS) {
400 ugeth_warn("%s: Illagel paddr_num.", __func__); 396 pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
401 return -EINVAL; 397 return -EINVAL;
402 } 398 }
403 399
@@ -573,7 +569,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
573 length = 569 length =
574 (ugeth->ug_info->bdRingLenTx[i] * 570 (ugeth->ug_info->bdRingLenTx[i] *
575 sizeof(struct qe_bd)); 571 sizeof(struct qe_bd));
576 ugeth_info("TX BDs[%d]", i); 572 pr_info("TX BDs[%d]\n", i);
577 mem_disp(ugeth->p_tx_bd_ring[i], length); 573 mem_disp(ugeth->p_tx_bd_ring[i], length);
578 } 574 }
579 } 575 }
@@ -582,7 +578,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
582 length = 578 length =
583 (ugeth->ug_info->bdRingLenRx[i] * 579 (ugeth->ug_info->bdRingLenRx[i] *
584 sizeof(struct qe_bd)); 580 sizeof(struct qe_bd));
585 ugeth_info("RX BDs[%d]", i); 581 pr_info("RX BDs[%d]\n", i);
586 mem_disp(ugeth->p_rx_bd_ring[i], length); 582 mem_disp(ugeth->p_rx_bd_ring[i], length);
587 } 583 }
588 } 584 }
@@ -592,93 +588,93 @@ static void dump_regs(struct ucc_geth_private *ugeth)
592{ 588{
593 int i; 589 int i;
594 590
595 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); 591 pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
596 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); 592 pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
597 593
598 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", 594 pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n",
599 (u32) & ugeth->ug_regs->maccfg1, 595 (u32)&ugeth->ug_regs->maccfg1,
600 in_be32(&ugeth->ug_regs->maccfg1)); 596 in_be32(&ugeth->ug_regs->maccfg1));
601 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", 597 pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n",
602 (u32) & ugeth->ug_regs->maccfg2, 598 (u32)&ugeth->ug_regs->maccfg2,
603 in_be32(&ugeth->ug_regs->maccfg2)); 599 in_be32(&ugeth->ug_regs->maccfg2));
604 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", 600 pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n",
605 (u32) & ugeth->ug_regs->ipgifg, 601 (u32)&ugeth->ug_regs->ipgifg,
606 in_be32(&ugeth->ug_regs->ipgifg)); 602 in_be32(&ugeth->ug_regs->ipgifg));
607 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", 603 pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n",
608 (u32) & ugeth->ug_regs->hafdup, 604 (u32)&ugeth->ug_regs->hafdup,
609 in_be32(&ugeth->ug_regs->hafdup)); 605 in_be32(&ugeth->ug_regs->hafdup));
610 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", 606 pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n",
611 (u32) & ugeth->ug_regs->ifctl, 607 (u32)&ugeth->ug_regs->ifctl,
612 in_be32(&ugeth->ug_regs->ifctl)); 608 in_be32(&ugeth->ug_regs->ifctl));
613 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", 609 pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n",
614 (u32) & ugeth->ug_regs->ifstat, 610 (u32)&ugeth->ug_regs->ifstat,
615 in_be32(&ugeth->ug_regs->ifstat)); 611 in_be32(&ugeth->ug_regs->ifstat));
616 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", 612 pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
617 (u32) & ugeth->ug_regs->macstnaddr1, 613 (u32)&ugeth->ug_regs->macstnaddr1,
618 in_be32(&ugeth->ug_regs->macstnaddr1)); 614 in_be32(&ugeth->ug_regs->macstnaddr1));
619 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", 615 pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
620 (u32) & ugeth->ug_regs->macstnaddr2, 616 (u32)&ugeth->ug_regs->macstnaddr2,
621 in_be32(&ugeth->ug_regs->macstnaddr2)); 617 in_be32(&ugeth->ug_regs->macstnaddr2));
622 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", 618 pr_info("uempr : addr - 0x%08x, val - 0x%08x\n",
623 (u32) & ugeth->ug_regs->uempr, 619 (u32)&ugeth->ug_regs->uempr,
624 in_be32(&ugeth->ug_regs->uempr)); 620 in_be32(&ugeth->ug_regs->uempr));
625 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", 621 pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n",
626 (u32) & ugeth->ug_regs->utbipar, 622 (u32)&ugeth->ug_regs->utbipar,
627 in_be32(&ugeth->ug_regs->utbipar)); 623 in_be32(&ugeth->ug_regs->utbipar));
628 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", 624 pr_info("uescr : addr - 0x%08x, val - 0x%04x\n",
629 (u32) & ugeth->ug_regs->uescr, 625 (u32)&ugeth->ug_regs->uescr,
630 in_be16(&ugeth->ug_regs->uescr)); 626 in_be16(&ugeth->ug_regs->uescr));
631 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", 627 pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n",
632 (u32) & ugeth->ug_regs->tx64, 628 (u32)&ugeth->ug_regs->tx64,
633 in_be32(&ugeth->ug_regs->tx64)); 629 in_be32(&ugeth->ug_regs->tx64));
634 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", 630 pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n",
635 (u32) & ugeth->ug_regs->tx127, 631 (u32)&ugeth->ug_regs->tx127,
636 in_be32(&ugeth->ug_regs->tx127)); 632 in_be32(&ugeth->ug_regs->tx127));
637 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", 633 pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n",
638 (u32) & ugeth->ug_regs->tx255, 634 (u32)&ugeth->ug_regs->tx255,
639 in_be32(&ugeth->ug_regs->tx255)); 635 in_be32(&ugeth->ug_regs->tx255));
640 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", 636 pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n",
641 (u32) & ugeth->ug_regs->rx64, 637 (u32)&ugeth->ug_regs->rx64,
642 in_be32(&ugeth->ug_regs->rx64)); 638 in_be32(&ugeth->ug_regs->rx64));
643 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", 639 pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n",
644 (u32) & ugeth->ug_regs->rx127, 640 (u32)&ugeth->ug_regs->rx127,
645 in_be32(&ugeth->ug_regs->rx127)); 641 in_be32(&ugeth->ug_regs->rx127));
646 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", 642 pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n",
647 (u32) & ugeth->ug_regs->rx255, 643 (u32)&ugeth->ug_regs->rx255,
648 in_be32(&ugeth->ug_regs->rx255)); 644 in_be32(&ugeth->ug_regs->rx255));
649 ugeth_info("txok : addr - 0x%08x, val - 0x%08x", 645 pr_info("txok : addr - 0x%08x, val - 0x%08x\n",
650 (u32) & ugeth->ug_regs->txok, 646 (u32)&ugeth->ug_regs->txok,
651 in_be32(&ugeth->ug_regs->txok)); 647 in_be32(&ugeth->ug_regs->txok));
652 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", 648 pr_info("txcf : addr - 0x%08x, val - 0x%04x\n",
653 (u32) & ugeth->ug_regs->txcf, 649 (u32)&ugeth->ug_regs->txcf,
654 in_be16(&ugeth->ug_regs->txcf)); 650 in_be16(&ugeth->ug_regs->txcf));
655 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", 651 pr_info("tmca : addr - 0x%08x, val - 0x%08x\n",
656 (u32) & ugeth->ug_regs->tmca, 652 (u32)&ugeth->ug_regs->tmca,
657 in_be32(&ugeth->ug_regs->tmca)); 653 in_be32(&ugeth->ug_regs->tmca));
658 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", 654 pr_info("tbca : addr - 0x%08x, val - 0x%08x\n",
659 (u32) & ugeth->ug_regs->tbca, 655 (u32)&ugeth->ug_regs->tbca,
660 in_be32(&ugeth->ug_regs->tbca)); 656 in_be32(&ugeth->ug_regs->tbca));
661 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", 657 pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n",
662 (u32) & ugeth->ug_regs->rxfok, 658 (u32)&ugeth->ug_regs->rxfok,
663 in_be32(&ugeth->ug_regs->rxfok)); 659 in_be32(&ugeth->ug_regs->rxfok));
664 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", 660 pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n",
665 (u32) & ugeth->ug_regs->rxbok, 661 (u32)&ugeth->ug_regs->rxbok,
666 in_be32(&ugeth->ug_regs->rxbok)); 662 in_be32(&ugeth->ug_regs->rxbok));
667 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", 663 pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n",
668 (u32) & ugeth->ug_regs->rbyt, 664 (u32)&ugeth->ug_regs->rbyt,
669 in_be32(&ugeth->ug_regs->rbyt)); 665 in_be32(&ugeth->ug_regs->rbyt));
670 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", 666 pr_info("rmca : addr - 0x%08x, val - 0x%08x\n",
671 (u32) & ugeth->ug_regs->rmca, 667 (u32)&ugeth->ug_regs->rmca,
672 in_be32(&ugeth->ug_regs->rmca)); 668 in_be32(&ugeth->ug_regs->rmca));
673 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", 669 pr_info("rbca : addr - 0x%08x, val - 0x%08x\n",
674 (u32) & ugeth->ug_regs->rbca, 670 (u32)&ugeth->ug_regs->rbca,
675 in_be32(&ugeth->ug_regs->rbca)); 671 in_be32(&ugeth->ug_regs->rbca));
676 ugeth_info("scar : addr - 0x%08x, val - 0x%08x", 672 pr_info("scar : addr - 0x%08x, val - 0x%08x\n",
677 (u32) & ugeth->ug_regs->scar, 673 (u32)&ugeth->ug_regs->scar,
678 in_be32(&ugeth->ug_regs->scar)); 674 in_be32(&ugeth->ug_regs->scar));
679 ugeth_info("scam : addr - 0x%08x, val - 0x%08x", 675 pr_info("scam : addr - 0x%08x, val - 0x%08x\n",
680 (u32) & ugeth->ug_regs->scam, 676 (u32)&ugeth->ug_regs->scam,
681 in_be32(&ugeth->ug_regs->scam)); 677 in_be32(&ugeth->ug_regs->scam));
682 678
683 if (ugeth->p_thread_data_tx) { 679 if (ugeth->p_thread_data_tx) {
684 int numThreadsTxNumerical; 680 int numThreadsTxNumerical;
@@ -703,13 +699,13 @@ static void dump_regs(struct ucc_geth_private *ugeth)
703 break; 699 break;
704 } 700 }
705 701
706 ugeth_info("Thread data TXs:"); 702 pr_info("Thread data TXs:\n");
707 ugeth_info("Base address: 0x%08x", 703 pr_info("Base address: 0x%08x\n",
708 (u32) ugeth->p_thread_data_tx); 704 (u32)ugeth->p_thread_data_tx);
709 for (i = 0; i < numThreadsTxNumerical; i++) { 705 for (i = 0; i < numThreadsTxNumerical; i++) {
710 ugeth_info("Thread data TX[%d]:", i); 706 pr_info("Thread data TX[%d]:\n", i);
711 ugeth_info("Base address: 0x%08x", 707 pr_info("Base address: 0x%08x\n",
712 (u32) & ugeth->p_thread_data_tx[i]); 708 (u32)&ugeth->p_thread_data_tx[i]);
713 mem_disp((u8 *) & ugeth->p_thread_data_tx[i], 709 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
714 sizeof(struct ucc_geth_thread_data_tx)); 710 sizeof(struct ucc_geth_thread_data_tx));
715 } 711 }
@@ -737,270 +733,260 @@ static void dump_regs(struct ucc_geth_private *ugeth)
737 break; 733 break;
738 } 734 }
739 735
740 ugeth_info("Thread data RX:"); 736 pr_info("Thread data RX:\n");
741 ugeth_info("Base address: 0x%08x", 737 pr_info("Base address: 0x%08x\n",
742 (u32) ugeth->p_thread_data_rx); 738 (u32)ugeth->p_thread_data_rx);
743 for (i = 0; i < numThreadsRxNumerical; i++) { 739 for (i = 0; i < numThreadsRxNumerical; i++) {
744 ugeth_info("Thread data RX[%d]:", i); 740 pr_info("Thread data RX[%d]:\n", i);
745 ugeth_info("Base address: 0x%08x", 741 pr_info("Base address: 0x%08x\n",
746 (u32) & ugeth->p_thread_data_rx[i]); 742 (u32)&ugeth->p_thread_data_rx[i]);
747 mem_disp((u8 *) & ugeth->p_thread_data_rx[i], 743 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
748 sizeof(struct ucc_geth_thread_data_rx)); 744 sizeof(struct ucc_geth_thread_data_rx));
749 } 745 }
750 } 746 }
751 if (ugeth->p_exf_glbl_param) { 747 if (ugeth->p_exf_glbl_param) {
752 ugeth_info("EXF global param:"); 748 pr_info("EXF global param:\n");
753 ugeth_info("Base address: 0x%08x", 749 pr_info("Base address: 0x%08x\n",
754 (u32) ugeth->p_exf_glbl_param); 750 (u32)ugeth->p_exf_glbl_param);
755 mem_disp((u8 *) ugeth->p_exf_glbl_param, 751 mem_disp((u8 *) ugeth->p_exf_glbl_param,
756 sizeof(*ugeth->p_exf_glbl_param)); 752 sizeof(*ugeth->p_exf_glbl_param));
757 } 753 }
758 if (ugeth->p_tx_glbl_pram) { 754 if (ugeth->p_tx_glbl_pram) {
759 ugeth_info("TX global param:"); 755 pr_info("TX global param:\n");
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); 756 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
761 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", 757 pr_info("temoder : addr - 0x%08x, val - 0x%04x\n",
762 (u32) & ugeth->p_tx_glbl_pram->temoder, 758 (u32)&ugeth->p_tx_glbl_pram->temoder,
763 in_be16(&ugeth->p_tx_glbl_pram->temoder)); 759 in_be16(&ugeth->p_tx_glbl_pram->temoder));
764 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", 760 pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n",
765 (u32) & ugeth->p_tx_glbl_pram->sqptr, 761 (u32)&ugeth->p_tx_glbl_pram->sqptr,
766 in_be32(&ugeth->p_tx_glbl_pram->sqptr)); 762 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
767 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", 763 pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
768 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, 764 (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
769 in_be32(&ugeth->p_tx_glbl_pram-> 765 in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
770 schedulerbasepointer)); 766 pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
771 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", 767 (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
772 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, 768 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
773 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); 769 pr_info("tstate : addr - 0x%08x, val - 0x%08x\n",
774 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", 770 (u32)&ugeth->p_tx_glbl_pram->tstate,
775 (u32) & ugeth->p_tx_glbl_pram->tstate, 771 in_be32(&ugeth->p_tx_glbl_pram->tstate));
776 in_be32(&ugeth->p_tx_glbl_pram->tstate)); 772 pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
777 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", 773 (u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
778 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], 774 ugeth->p_tx_glbl_pram->iphoffset[0]);
779 ugeth->p_tx_glbl_pram->iphoffset[0]); 775 pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
780 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", 776 (u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
781 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], 777 ugeth->p_tx_glbl_pram->iphoffset[1]);
782 ugeth->p_tx_glbl_pram->iphoffset[1]); 778 pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
783 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", 779 (u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
784 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], 780 ugeth->p_tx_glbl_pram->iphoffset[2]);
785 ugeth->p_tx_glbl_pram->iphoffset[2]); 781 pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
786 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", 782 (u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
787 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], 783 ugeth->p_tx_glbl_pram->iphoffset[3]);
788 ugeth->p_tx_glbl_pram->iphoffset[3]); 784 pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
789 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", 785 (u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
790 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], 786 ugeth->p_tx_glbl_pram->iphoffset[4]);
791 ugeth->p_tx_glbl_pram->iphoffset[4]); 787 pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
792 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", 788 (u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
793 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], 789 ugeth->p_tx_glbl_pram->iphoffset[5]);
794 ugeth->p_tx_glbl_pram->iphoffset[5]); 790 pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
795 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", 791 (u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
796 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], 792 ugeth->p_tx_glbl_pram->iphoffset[6]);
797 ugeth->p_tx_glbl_pram->iphoffset[6]); 793 pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
798 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", 794 (u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
799 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], 795 ugeth->p_tx_glbl_pram->iphoffset[7]);
800 ugeth->p_tx_glbl_pram->iphoffset[7]); 796 pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
801 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", 797 (u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
802 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], 798 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
803 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); 799 pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
804 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", 800 (u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
805 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], 801 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
806 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); 802 pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
807 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", 803 (u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
808 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], 804 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
809 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); 805 pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
810 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", 806 (u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
811 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], 807 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
812 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); 808 pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
813 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", 809 (u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
814 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], 810 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
815 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); 811 pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
816 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", 812 (u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
817 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], 813 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
818 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); 814 pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
819 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", 815 (u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
820 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], 816 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
821 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); 817 pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
822 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", 818 (u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
823 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], 819 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
824 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); 820 pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n",
825 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", 821 (u32)&ugeth->p_tx_glbl_pram->tqptr,
826 (u32) & ugeth->p_tx_glbl_pram->tqptr, 822 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
827 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
828 } 823 }
829 if (ugeth->p_rx_glbl_pram) { 824 if (ugeth->p_rx_glbl_pram) {
830 ugeth_info("RX global param:"); 825 pr_info("RX global param:\n");
831 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); 826 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
832 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", 827 pr_info("remoder : addr - 0x%08x, val - 0x%08x\n",
833 (u32) & ugeth->p_rx_glbl_pram->remoder, 828 (u32)&ugeth->p_rx_glbl_pram->remoder,
834 in_be32(&ugeth->p_rx_glbl_pram->remoder)); 829 in_be32(&ugeth->p_rx_glbl_pram->remoder));
835 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", 830 pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n",
836 (u32) & ugeth->p_rx_glbl_pram->rqptr, 831 (u32)&ugeth->p_rx_glbl_pram->rqptr,
837 in_be32(&ugeth->p_rx_glbl_pram->rqptr)); 832 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
838 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", 833 pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n",
839 (u32) & ugeth->p_rx_glbl_pram->typeorlen, 834 (u32)&ugeth->p_rx_glbl_pram->typeorlen,
840 in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); 835 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
841 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", 836 pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n",
842 (u32) & ugeth->p_rx_glbl_pram->rxgstpack, 837 (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
843 ugeth->p_rx_glbl_pram->rxgstpack); 838 ugeth->p_rx_glbl_pram->rxgstpack);
844 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", 839 pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n",
845 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, 840 (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
846 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); 841 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
847 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", 842 pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
848 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, 843 (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
849 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); 844 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
850 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", 845 pr_info("rstate : addr - 0x%08x, val - 0x%02x\n",
851 (u32) & ugeth->p_rx_glbl_pram->rstate, 846 (u32)&ugeth->p_rx_glbl_pram->rstate,
852 ugeth->p_rx_glbl_pram->rstate); 847 ugeth->p_rx_glbl_pram->rstate);
853 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", 848 pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n",
854 (u32) & ugeth->p_rx_glbl_pram->mrblr, 849 (u32)&ugeth->p_rx_glbl_pram->mrblr,
855 in_be16(&ugeth->p_rx_glbl_pram->mrblr)); 850 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
856 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", 851 pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n",
857 (u32) & ugeth->p_rx_glbl_pram->rbdqptr, 852 (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
858 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); 853 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
859 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", 854 pr_info("mflr : addr - 0x%08x, val - 0x%04x\n",
860 (u32) & ugeth->p_rx_glbl_pram->mflr, 855 (u32)&ugeth->p_rx_glbl_pram->mflr,
861 in_be16(&ugeth->p_rx_glbl_pram->mflr)); 856 in_be16(&ugeth->p_rx_glbl_pram->mflr));
862 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", 857 pr_info("minflr : addr - 0x%08x, val - 0x%04x\n",
863 (u32) & ugeth->p_rx_glbl_pram->minflr, 858 (u32)&ugeth->p_rx_glbl_pram->minflr,
864 in_be16(&ugeth->p_rx_glbl_pram->minflr)); 859 in_be16(&ugeth->p_rx_glbl_pram->minflr));
865 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", 860 pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n",
866 (u32) & ugeth->p_rx_glbl_pram->maxd1, 861 (u32)&ugeth->p_rx_glbl_pram->maxd1,
867 in_be16(&ugeth->p_rx_glbl_pram->maxd1)); 862 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
868 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", 863 pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n",
869 (u32) & ugeth->p_rx_glbl_pram->maxd2, 864 (u32)&ugeth->p_rx_glbl_pram->maxd2,
870 in_be16(&ugeth->p_rx_glbl_pram->maxd2)); 865 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
871 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", 866 pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n",
872 (u32) & ugeth->p_rx_glbl_pram->ecamptr, 867 (u32)&ugeth->p_rx_glbl_pram->ecamptr,
873 in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); 868 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
874 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", 869 pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n",
875 (u32) & ugeth->p_rx_glbl_pram->l2qt, 870 (u32)&ugeth->p_rx_glbl_pram->l2qt,
876 in_be32(&ugeth->p_rx_glbl_pram->l2qt)); 871 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
877 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", 872 pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n",
878 (u32) & ugeth->p_rx_glbl_pram->l3qt[0], 873 (u32)&ugeth->p_rx_glbl_pram->l3qt[0],
879 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); 874 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
880 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", 875 pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n",
881 (u32) & ugeth->p_rx_glbl_pram->l3qt[1], 876 (u32)&ugeth->p_rx_glbl_pram->l3qt[1],
882 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); 877 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
883 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", 878 pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n",
884 (u32) & ugeth->p_rx_glbl_pram->l3qt[2], 879 (u32)&ugeth->p_rx_glbl_pram->l3qt[2],
885 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); 880 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
886 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", 881 pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n",
887 (u32) & ugeth->p_rx_glbl_pram->l3qt[3], 882 (u32)&ugeth->p_rx_glbl_pram->l3qt[3],
888 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); 883 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
889 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", 884 pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n",
890 (u32) & ugeth->p_rx_glbl_pram->l3qt[4], 885 (u32)&ugeth->p_rx_glbl_pram->l3qt[4],
891 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); 886 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
892 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", 887 pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n",
893 (u32) & ugeth->p_rx_glbl_pram->l3qt[5], 888 (u32)&ugeth->p_rx_glbl_pram->l3qt[5],
894 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); 889 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
895 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", 890 pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n",
896 (u32) & ugeth->p_rx_glbl_pram->l3qt[6], 891 (u32)&ugeth->p_rx_glbl_pram->l3qt[6],
897 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); 892 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
898 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", 893 pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n",
899 (u32) & ugeth->p_rx_glbl_pram->l3qt[7], 894 (u32)&ugeth->p_rx_glbl_pram->l3qt[7],
900 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); 895 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
901 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", 896 pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n",
902 (u32) & ugeth->p_rx_glbl_pram->vlantype, 897 (u32)&ugeth->p_rx_glbl_pram->vlantype,
903 in_be16(&ugeth->p_rx_glbl_pram->vlantype)); 898 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
904 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", 899 pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n",
905 (u32) & ugeth->p_rx_glbl_pram->vlantci, 900 (u32)&ugeth->p_rx_glbl_pram->vlantci,
906 in_be16(&ugeth->p_rx_glbl_pram->vlantci)); 901 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
907 for (i = 0; i < 64; i++) 902 for (i = 0; i < 64; i++)
908 ugeth_info 903 pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
909 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", 904 i,
910 i, 905 (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
911 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], 906 ugeth->p_rx_glbl_pram->addressfiltering[i]);
912 ugeth->p_rx_glbl_pram->addressfiltering[i]); 907 pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n",
913 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", 908 (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
914 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, 909 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
915 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
916 } 910 }
917 if (ugeth->p_send_q_mem_reg) { 911 if (ugeth->p_send_q_mem_reg) {
918 ugeth_info("Send Q memory registers:"); 912 pr_info("Send Q memory registers:\n");
919 ugeth_info("Base address: 0x%08x", 913 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
920 (u32) ugeth->p_send_q_mem_reg);
921 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { 914 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
922 ugeth_info("SQQD[%d]:", i); 915 pr_info("SQQD[%d]:\n", i);
923 ugeth_info("Base address: 0x%08x", 916 pr_info("Base address: 0x%08x\n",
924 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); 917 (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
925 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], 918 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
926 sizeof(struct ucc_geth_send_queue_qd)); 919 sizeof(struct ucc_geth_send_queue_qd));
927 } 920 }
928 } 921 }
929 if (ugeth->p_scheduler) { 922 if (ugeth->p_scheduler) {
930 ugeth_info("Scheduler:"); 923 pr_info("Scheduler:\n");
931 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); 924 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
932 mem_disp((u8 *) ugeth->p_scheduler, 925 mem_disp((u8 *) ugeth->p_scheduler,
933 sizeof(*ugeth->p_scheduler)); 926 sizeof(*ugeth->p_scheduler));
934 } 927 }
935 if (ugeth->p_tx_fw_statistics_pram) { 928 if (ugeth->p_tx_fw_statistics_pram) {
936 ugeth_info("TX FW statistics pram:"); 929 pr_info("TX FW statistics pram:\n");
937 ugeth_info("Base address: 0x%08x", 930 pr_info("Base address: 0x%08x\n",
938 (u32) ugeth->p_tx_fw_statistics_pram); 931 (u32)ugeth->p_tx_fw_statistics_pram);
939 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, 932 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
940 sizeof(*ugeth->p_tx_fw_statistics_pram)); 933 sizeof(*ugeth->p_tx_fw_statistics_pram));
941 } 934 }
942 if (ugeth->p_rx_fw_statistics_pram) { 935 if (ugeth->p_rx_fw_statistics_pram) {
943 ugeth_info("RX FW statistics pram:"); 936 pr_info("RX FW statistics pram:\n");
944 ugeth_info("Base address: 0x%08x", 937 pr_info("Base address: 0x%08x\n",
945 (u32) ugeth->p_rx_fw_statistics_pram); 938 (u32)ugeth->p_rx_fw_statistics_pram);
946 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, 939 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
947 sizeof(*ugeth->p_rx_fw_statistics_pram)); 940 sizeof(*ugeth->p_rx_fw_statistics_pram));
948 } 941 }
949 if (ugeth->p_rx_irq_coalescing_tbl) { 942 if (ugeth->p_rx_irq_coalescing_tbl) {
950 ugeth_info("RX IRQ coalescing tables:"); 943 pr_info("RX IRQ coalescing tables:\n");
951 ugeth_info("Base address: 0x%08x", 944 pr_info("Base address: 0x%08x\n",
952 (u32) ugeth->p_rx_irq_coalescing_tbl); 945 (u32)ugeth->p_rx_irq_coalescing_tbl);
953 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 946 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
954 ugeth_info("RX IRQ coalescing table entry[%d]:", i); 947 pr_info("RX IRQ coalescing table entry[%d]:\n", i);
955 ugeth_info("Base address: 0x%08x", 948 pr_info("Base address: 0x%08x\n",
956 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 949 (u32)&ugeth->p_rx_irq_coalescing_tbl->
957 coalescingentry[i]); 950 coalescingentry[i]);
958 ugeth_info 951 pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
959 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", 952 (u32)&ugeth->p_rx_irq_coalescing_tbl->
960 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 953 coalescingentry[i].interruptcoalescingmaxvalue,
961 coalescingentry[i].interruptcoalescingmaxvalue, 954 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
962 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 955 coalescingentry[i].
963 coalescingentry[i]. 956 interruptcoalescingmaxvalue));
964 interruptcoalescingmaxvalue)); 957 pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
965 ugeth_info 958 (u32)&ugeth->p_rx_irq_coalescing_tbl->
966 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", 959 coalescingentry[i].interruptcoalescingcounter,
967 (u32) & ugeth->p_rx_irq_coalescing_tbl-> 960 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
968 coalescingentry[i].interruptcoalescingcounter, 961 coalescingentry[i].
969 in_be32(&ugeth->p_rx_irq_coalescing_tbl-> 962 interruptcoalescingcounter));
970 coalescingentry[i].
971 interruptcoalescingcounter));
972 } 963 }
973 } 964 }
974 if (ugeth->p_rx_bd_qs_tbl) { 965 if (ugeth->p_rx_bd_qs_tbl) {
975 ugeth_info("RX BD QS tables:"); 966 pr_info("RX BD QS tables:\n");
976 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); 967 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
977 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { 968 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
978 ugeth_info("RX BD QS table[%d]:", i); 969 pr_info("RX BD QS table[%d]:\n", i);
979 ugeth_info("Base address: 0x%08x", 970 pr_info("Base address: 0x%08x\n",
980 (u32) & ugeth->p_rx_bd_qs_tbl[i]); 971 (u32)&ugeth->p_rx_bd_qs_tbl[i]);
981 ugeth_info 972 pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n",
982 ("bdbaseptr : addr - 0x%08x, val - 0x%08x", 973 (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
983 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, 974 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
984 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); 975 pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n",
985 ugeth_info 976 (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
986 ("bdptr : addr - 0x%08x, val - 0x%08x", 977 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
987 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, 978 pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
988 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); 979 (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
989 ugeth_info 980 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
990 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", 981 externalbdbaseptr));
991 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, 982 pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n",
992 in_be32(&ugeth->p_rx_bd_qs_tbl[i]. 983 (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
993 externalbdbaseptr)); 984 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
994 ugeth_info 985 pr_info("ucode RX Prefetched BDs:\n");
995 ("externalbdptr : addr - 0x%08x, val - 0x%08x", 986 pr_info("Base address: 0x%08x\n",
996 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, 987 (u32)qe_muram_addr(in_be32
997 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); 988 (&ugeth->p_rx_bd_qs_tbl[i].
998 ugeth_info("ucode RX Prefetched BDs:"); 989 bdbaseptr)));
999 ugeth_info("Base address: 0x%08x",
1000 (u32)
1001 qe_muram_addr(in_be32
1002 (&ugeth->p_rx_bd_qs_tbl[i].
1003 bdbaseptr)));
1004 mem_disp((u8 *) 990 mem_disp((u8 *)
1005 qe_muram_addr(in_be32 991 qe_muram_addr(in_be32
1006 (&ugeth->p_rx_bd_qs_tbl[i]. 992 (&ugeth->p_rx_bd_qs_tbl[i].
@@ -1010,9 +996,9 @@ static void dump_regs(struct ucc_geth_private *ugeth)
1010 } 996 }
1011 if (ugeth->p_init_enet_param_shadow) { 997 if (ugeth->p_init_enet_param_shadow) {
1012 int size; 998 int size;
1013 ugeth_info("Init enet param shadow:"); 999 pr_info("Init enet param shadow:\n");
1014 ugeth_info("Base address: 0x%08x", 1000 pr_info("Base address: 0x%08x\n",
1015 (u32) ugeth->p_init_enet_param_shadow); 1001 (u32) ugeth->p_init_enet_param_shadow);
1016 mem_disp((u8 *) ugeth->p_init_enet_param_shadow, 1002 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1017 sizeof(*ugeth->p_init_enet_param_shadow)); 1003 sizeof(*ugeth->p_init_enet_param_shadow));
1018 1004
@@ -1392,12 +1378,11 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1392 struct phy_device *tbiphy; 1378 struct phy_device *tbiphy;
1393 1379
1394 if (!ug_info->tbi_node) 1380 if (!ug_info->tbi_node)
1395 ugeth_warn("TBI mode requires that the device " 1381 pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
1396 "tree specify a tbi-handle\n");
1397 1382
1398 tbiphy = of_phy_find_device(ug_info->tbi_node); 1383 tbiphy = of_phy_find_device(ug_info->tbi_node);
1399 if (!tbiphy) 1384 if (!tbiphy)
1400 ugeth_warn("Could not get TBI device\n"); 1385 pr_warn("Could not get TBI device\n");
1401 1386
1402 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1387 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1403 value &= ~0x1000; /* Turn off autonegotiation */ 1388 value &= ~0x1000; /* Turn off autonegotiation */
@@ -1409,8 +1394,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1409 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); 1394 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1410 if (ret_val != 0) { 1395 if (ret_val != 0) {
1411 if (netif_msg_probe(ugeth)) 1396 if (netif_msg_probe(ugeth))
1412 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1397 pr_err("Preamble length must be between 3 and 7 inclusive\n");
1413 __func__);
1414 return ret_val; 1398 return ret_val;
1415 } 1399 }
1416 1400
@@ -1520,7 +1504,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1520 /* check if the UCC number is in range. */ 1504 /* check if the UCC number is in range. */
1521 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1505 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1522 if (netif_msg_probe(ugeth)) 1506 if (netif_msg_probe(ugeth))
1523 ugeth_err("%s: ucc_num out of range.", __func__); 1507 pr_err("ucc_num out of range\n");
1524 return -EINVAL; 1508 return -EINVAL;
1525 } 1509 }
1526 1510
@@ -1549,7 +1533,7 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1549 /* check if the UCC number is in range. */ 1533 /* check if the UCC number is in range. */
1550 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1534 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1551 if (netif_msg_probe(ugeth)) 1535 if (netif_msg_probe(ugeth))
1552 ugeth_err("%s: ucc_num out of range.", __func__); 1536 pr_err("ucc_num out of range\n");
1553 return -EINVAL; 1537 return -EINVAL;
1554 } 1538 }
1555 1539
@@ -1648,7 +1632,7 @@ static void adjust_link(struct net_device *dev)
1648 break; 1632 break;
1649 default: 1633 default:
1650 if (netif_msg_link(ugeth)) 1634 if (netif_msg_link(ugeth))
1651 ugeth_warn( 1635 pr_warn(
1652 "%s: Ack! Speed (%d) is not 10/100/1000!", 1636 "%s: Ack! Speed (%d) is not 10/100/1000!",
1653 dev->name, phydev->speed); 1637 dev->name, phydev->speed);
1654 break; 1638 break;
@@ -2103,8 +2087,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2103 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2087 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2104 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2088 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2105 if (netif_msg_probe(ugeth)) 2089 if (netif_msg_probe(ugeth))
2106 ugeth_err("%s: Bad memory partition value.", 2090 pr_err("Bad memory partition value\n");
2107 __func__);
2108 return -EINVAL; 2091 return -EINVAL;
2109 } 2092 }
2110 2093
@@ -2114,9 +2097,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2114 (ug_info->bdRingLenRx[i] % 2097 (ug_info->bdRingLenRx[i] %
2115 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { 2098 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2116 if (netif_msg_probe(ugeth)) 2099 if (netif_msg_probe(ugeth))
2117 ugeth_err 2100 pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
2118 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2119 __func__);
2120 return -EINVAL; 2101 return -EINVAL;
2121 } 2102 }
2122 } 2103 }
@@ -2125,9 +2106,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2125 for (i = 0; i < ug_info->numQueuesTx; i++) { 2106 for (i = 0; i < ug_info->numQueuesTx; i++) {
2126 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { 2107 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2127 if (netif_msg_probe(ugeth)) 2108 if (netif_msg_probe(ugeth))
2128 ugeth_err 2109 pr_err("Tx BD ring length must be no smaller than 2\n");
2129 ("%s: Tx BD ring length must be no smaller than 2.",
2130 __func__);
2131 return -EINVAL; 2110 return -EINVAL;
2132 } 2111 }
2133 } 2112 }
@@ -2136,23 +2115,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2136 if ((uf_info->max_rx_buf_length == 0) || 2115 if ((uf_info->max_rx_buf_length == 0) ||
2137 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { 2116 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2138 if (netif_msg_probe(ugeth)) 2117 if (netif_msg_probe(ugeth))
2139 ugeth_err 2118 pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
2140 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2141 __func__);
2142 return -EINVAL; 2119 return -EINVAL;
2143 } 2120 }
2144 2121
2145 /* num Tx queues */ 2122 /* num Tx queues */
2146 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2123 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2147 if (netif_msg_probe(ugeth)) 2124 if (netif_msg_probe(ugeth))
2148 ugeth_err("%s: number of tx queues too large.", __func__); 2125 pr_err("number of tx queues too large\n");
2149 return -EINVAL; 2126 return -EINVAL;
2150 } 2127 }
2151 2128
2152 /* num Rx queues */ 2129 /* num Rx queues */
2153 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2130 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2154 if (netif_msg_probe(ugeth)) 2131 if (netif_msg_probe(ugeth))
2155 ugeth_err("%s: number of rx queues too large.", __func__); 2132 pr_err("number of rx queues too large\n");
2156 return -EINVAL; 2133 return -EINVAL;
2157 } 2134 }
2158 2135
@@ -2160,10 +2137,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2160 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { 2137 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2161 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { 2138 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2162 if (netif_msg_probe(ugeth)) 2139 if (netif_msg_probe(ugeth))
2163 ugeth_err 2140 pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
2164 ("%s: VLAN priority table entry must not be"
2165 " larger than number of Rx queues.",
2166 __func__);
2167 return -EINVAL; 2141 return -EINVAL;
2168 } 2142 }
2169 } 2143 }
@@ -2172,18 +2146,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2172 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { 2146 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2173 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { 2147 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2174 if (netif_msg_probe(ugeth)) 2148 if (netif_msg_probe(ugeth))
2175 ugeth_err 2149 pr_err("IP priority table entry must not be larger than number of Rx queues\n");
2176 ("%s: IP priority table entry must not be"
2177 " larger than number of Rx queues.",
2178 __func__);
2179 return -EINVAL; 2150 return -EINVAL;
2180 } 2151 }
2181 } 2152 }
2182 2153
2183 if (ug_info->cam && !ug_info->ecamptr) { 2154 if (ug_info->cam && !ug_info->ecamptr) {
2184 if (netif_msg_probe(ugeth)) 2155 if (netif_msg_probe(ugeth))
2185 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2156 pr_err("If cam mode is chosen, must supply cam ptr\n");
2186 __func__);
2187 return -EINVAL; 2157 return -EINVAL;
2188 } 2158 }
2189 2159
@@ -2191,9 +2161,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2191 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && 2161 UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
2192 ug_info->rxExtendedFiltering) { 2162 ug_info->rxExtendedFiltering) {
2193 if (netif_msg_probe(ugeth)) 2163 if (netif_msg_probe(ugeth))
2194 ugeth_err("%s: Number of station addresses greater than 1 " 2164 pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
2195 "not allowed in extended parsing mode.",
2196 __func__);
2197 return -EINVAL; 2165 return -EINVAL;
2198 } 2166 }
2199 2167
@@ -2207,7 +2175,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2207 /* Initialize the general fast UCC block. */ 2175 /* Initialize the general fast UCC block. */
2208 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2176 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2209 if (netif_msg_probe(ugeth)) 2177 if (netif_msg_probe(ugeth))
2210 ugeth_err("%s: Failed to init uccf.", __func__); 2178 pr_err("Failed to init uccf\n");
2211 return -ENOMEM; 2179 return -ENOMEM;
2212 } 2180 }
2213 2181
@@ -2222,7 +2190,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2222 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); 2190 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2223 if (!ugeth->ug_regs) { 2191 if (!ugeth->ug_regs) {
2224 if (netif_msg_probe(ugeth)) 2192 if (netif_msg_probe(ugeth))
2225 ugeth_err("%s: Failed to ioremap regs.", __func__); 2193 pr_err("Failed to ioremap regs\n");
2226 return -ENOMEM; 2194 return -ENOMEM;
2227 } 2195 }
2228 2196
@@ -2273,9 +2241,7 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
2273 } 2241 }
2274 if (!ugeth->p_tx_bd_ring[j]) { 2242 if (!ugeth->p_tx_bd_ring[j]) {
2275 if (netif_msg_ifup(ugeth)) 2243 if (netif_msg_ifup(ugeth))
2276 ugeth_err 2244 pr_err("Can not allocate memory for Tx bd rings\n");
2277 ("%s: Can not allocate memory for Tx bd rings.",
2278 __func__);
2279 return -ENOMEM; 2245 return -ENOMEM;
2280 } 2246 }
2281 /* Zero unused end of bd ring, according to spec */ 2247 /* Zero unused end of bd ring, according to spec */
@@ -2293,8 +2259,7 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
2293 2259
2294 if (ugeth->tx_skbuff[j] == NULL) { 2260 if (ugeth->tx_skbuff[j] == NULL) {
2295 if (netif_msg_ifup(ugeth)) 2261 if (netif_msg_ifup(ugeth))
2296 ugeth_err("%s: Could not allocate tx_skbuff", 2262 pr_err("Could not allocate tx_skbuff\n");
2297 __func__);
2298 return -ENOMEM; 2263 return -ENOMEM;
2299 } 2264 }
2300 2265
@@ -2353,9 +2318,7 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
2353 } 2318 }
2354 if (!ugeth->p_rx_bd_ring[j]) { 2319 if (!ugeth->p_rx_bd_ring[j]) {
2355 if (netif_msg_ifup(ugeth)) 2320 if (netif_msg_ifup(ugeth))
2356 ugeth_err 2321 pr_err("Can not allocate memory for Rx bd rings\n");
2357 ("%s: Can not allocate memory for Rx bd rings.",
2358 __func__);
2359 return -ENOMEM; 2322 return -ENOMEM;
2360 } 2323 }
2361 } 2324 }
@@ -2369,8 +2332,7 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
2369 2332
2370 if (ugeth->rx_skbuff[j] == NULL) { 2333 if (ugeth->rx_skbuff[j] == NULL) {
2371 if (netif_msg_ifup(ugeth)) 2334 if (netif_msg_ifup(ugeth))
2372 ugeth_err("%s: Could not allocate rx_skbuff", 2335 pr_err("Could not allocate rx_skbuff\n");
2373 __func__);
2374 return -ENOMEM; 2336 return -ENOMEM;
2375 } 2337 }
2376 2338
@@ -2438,8 +2400,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2438 break; 2400 break;
2439 default: 2401 default:
2440 if (netif_msg_ifup(ugeth)) 2402 if (netif_msg_ifup(ugeth))
2441 ugeth_err("%s: Bad number of Rx threads value.", 2403 pr_err("Bad number of Rx threads value\n");
2442 __func__);
2443 return -EINVAL; 2404 return -EINVAL;
2444 break; 2405 break;
2445 } 2406 }
@@ -2462,8 +2423,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2462 break; 2423 break;
2463 default: 2424 default:
2464 if (netif_msg_ifup(ugeth)) 2425 if (netif_msg_ifup(ugeth))
2465 ugeth_err("%s: Bad number of Tx threads value.", 2426 pr_err("Bad number of Tx threads value\n");
2466 __func__);
2467 return -EINVAL; 2427 return -EINVAL;
2468 break; 2428 break;
2469 } 2429 }
@@ -2512,8 +2472,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2512 &ug_regs->ipgifg); 2472 &ug_regs->ipgifg);
2513 if (ret_val != 0) { 2473 if (ret_val != 0) {
2514 if (netif_msg_ifup(ugeth)) 2474 if (netif_msg_ifup(ugeth))
2515 ugeth_err("%s: IPGIFG initialization parameter too large.", 2475 pr_err("IPGIFG initialization parameter too large\n");
2516 __func__);
2517 return ret_val; 2476 return ret_val;
2518 } 2477 }
2519 2478
@@ -2529,8 +2488,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2529 &ug_regs->hafdup); 2488 &ug_regs->hafdup);
2530 if (ret_val != 0) { 2489 if (ret_val != 0) {
2531 if (netif_msg_ifup(ugeth)) 2490 if (netif_msg_ifup(ugeth))
2532 ugeth_err("%s: Half Duplex initialization parameter too large.", 2491 pr_err("Half Duplex initialization parameter too large\n");
2533 __func__);
2534 return ret_val; 2492 return ret_val;
2535 } 2493 }
2536 2494
@@ -2567,9 +2525,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2567 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2525 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2568 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2526 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2569 if (netif_msg_ifup(ugeth)) 2527 if (netif_msg_ifup(ugeth))
2570 ugeth_err 2528 pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
2571 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2572 __func__);
2573 return -ENOMEM; 2529 return -ENOMEM;
2574 } 2530 }
2575 ugeth->p_tx_glbl_pram = 2531 ugeth->p_tx_glbl_pram =
@@ -2589,9 +2545,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2589 UCC_GETH_THREAD_DATA_ALIGNMENT); 2545 UCC_GETH_THREAD_DATA_ALIGNMENT);
2590 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2546 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2591 if (netif_msg_ifup(ugeth)) 2547 if (netif_msg_ifup(ugeth))
2592 ugeth_err 2548 pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
2593 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2594 __func__);
2595 return -ENOMEM; 2549 return -ENOMEM;
2596 } 2550 }
2597 2551
@@ -2618,9 +2572,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2618 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2572 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2619 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 2573 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2620 if (netif_msg_ifup(ugeth)) 2574 if (netif_msg_ifup(ugeth))
2621 ugeth_err 2575 pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
2622 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2623 __func__);
2624 return -ENOMEM; 2576 return -ENOMEM;
2625 } 2577 }
2626 2578
@@ -2661,9 +2613,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2661 UCC_GETH_SCHEDULER_ALIGNMENT); 2613 UCC_GETH_SCHEDULER_ALIGNMENT);
2662 if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2614 if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2663 if (netif_msg_ifup(ugeth)) 2615 if (netif_msg_ifup(ugeth))
2664 ugeth_err 2616 pr_err("Can not allocate DPRAM memory for p_scheduler\n");
2665 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2666 __func__);
2667 return -ENOMEM; 2617 return -ENOMEM;
2668 } 2618 }
2669 2619
@@ -2710,10 +2660,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2710 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2660 UCC_GETH_TX_STATISTICS_ALIGNMENT);
2711 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2661 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2712 if (netif_msg_ifup(ugeth)) 2662 if (netif_msg_ifup(ugeth))
2713 ugeth_err 2663 pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
2714 ("%s: Can not allocate DPRAM memory for"
2715 " p_tx_fw_statistics_pram.",
2716 __func__);
2717 return -ENOMEM; 2664 return -ENOMEM;
2718 } 2665 }
2719 ugeth->p_tx_fw_statistics_pram = 2666 ugeth->p_tx_fw_statistics_pram =
@@ -2750,9 +2697,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2750 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2697 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2751 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2698 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2752 if (netif_msg_ifup(ugeth)) 2699 if (netif_msg_ifup(ugeth))
2753 ugeth_err 2700 pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
2754 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2755 __func__);
2756 return -ENOMEM; 2701 return -ENOMEM;
2757 } 2702 }
2758 ugeth->p_rx_glbl_pram = 2703 ugeth->p_rx_glbl_pram =
@@ -2771,9 +2716,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2771 UCC_GETH_THREAD_DATA_ALIGNMENT); 2716 UCC_GETH_THREAD_DATA_ALIGNMENT);
2772 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2717 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2773 if (netif_msg_ifup(ugeth)) 2718 if (netif_msg_ifup(ugeth))
2774 ugeth_err 2719 pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
2775 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2776 __func__);
2777 return -ENOMEM; 2720 return -ENOMEM;
2778 } 2721 }
2779 2722
@@ -2794,9 +2737,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2794 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2737 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2795 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2738 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2796 if (netif_msg_ifup(ugeth)) 2739 if (netif_msg_ifup(ugeth))
2797 ugeth_err 2740 pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
2798 ("%s: Can not allocate DPRAM memory for"
2799 " p_rx_fw_statistics_pram.", __func__);
2800 return -ENOMEM; 2741 return -ENOMEM;
2801 } 2742 }
2802 ugeth->p_rx_fw_statistics_pram = 2743 ugeth->p_rx_fw_statistics_pram =
@@ -2816,9 +2757,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2816 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2757 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2817 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2758 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2818 if (netif_msg_ifup(ugeth)) 2759 if (netif_msg_ifup(ugeth))
2819 ugeth_err 2760 pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
2820 ("%s: Can not allocate DPRAM memory for"
2821 " p_rx_irq_coalescing_tbl.", __func__);
2822 return -ENOMEM; 2761 return -ENOMEM;
2823 } 2762 }
2824 2763
@@ -2884,9 +2823,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2884 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 2823 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
2885 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 2824 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
2886 if (netif_msg_ifup(ugeth)) 2825 if (netif_msg_ifup(ugeth))
2887 ugeth_err 2826 pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
2888 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
2889 __func__);
2890 return -ENOMEM; 2827 return -ENOMEM;
2891 } 2828 }
2892 2829
@@ -2961,8 +2898,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2961 if (ug_info->rxExtendedFiltering) { 2898 if (ug_info->rxExtendedFiltering) {
2962 if (!ug_info->extendedFilteringChainPointer) { 2899 if (!ug_info->extendedFilteringChainPointer) {
2963 if (netif_msg_ifup(ugeth)) 2900 if (netif_msg_ifup(ugeth))
2964 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 2901 pr_err("Null Extended Filtering Chain Pointer\n");
2965 __func__);
2966 return -EINVAL; 2902 return -EINVAL;
2967 } 2903 }
2968 2904
@@ -2973,9 +2909,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2973 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 2909 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
2974 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 2910 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
2975 if (netif_msg_ifup(ugeth)) 2911 if (netif_msg_ifup(ugeth))
2976 ugeth_err 2912 pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
2977 ("%s: Can not allocate DPRAM memory for"
2978 " p_exf_glbl_param.", __func__);
2979 return -ENOMEM; 2913 return -ENOMEM;
2980 } 2914 }
2981 2915
@@ -3020,9 +2954,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3020 if (!(ugeth->p_init_enet_param_shadow = 2954 if (!(ugeth->p_init_enet_param_shadow =
3021 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 2955 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3022 if (netif_msg_ifup(ugeth)) 2956 if (netif_msg_ifup(ugeth))
3023 ugeth_err 2957 pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
3024 ("%s: Can not allocate memory for"
3025 " p_UccInitEnetParamShadows.", __func__);
3026 return -ENOMEM; 2958 return -ENOMEM;
3027 } 2959 }
3028 /* Zero out *p_init_enet_param_shadow */ 2960 /* Zero out *p_init_enet_param_shadow */
@@ -3055,8 +2987,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3055 (ug_info->largestexternallookupkeysize != 2987 (ug_info->largestexternallookupkeysize !=
3056 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 2988 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3057 if (netif_msg_ifup(ugeth)) 2989 if (netif_msg_ifup(ugeth))
3058 ugeth_err("%s: Invalid largest External Lookup Key Size.", 2990 pr_err("Invalid largest External Lookup Key Size\n");
3059 __func__);
3060 return -EINVAL; 2991 return -EINVAL;
3061 } 2992 }
3062 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 2993 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
@@ -3081,8 +3012,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3081 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3012 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3082 ug_info->riscRx, 1)) != 0) { 3013 ug_info->riscRx, 1)) != 0) {
3083 if (netif_msg_ifup(ugeth)) 3014 if (netif_msg_ifup(ugeth))
3084 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3015 pr_err("Can not fill p_init_enet_param_shadow\n");
3085 __func__);
3086 return ret_val; 3016 return ret_val;
3087 } 3017 }
3088 3018
@@ -3096,8 +3026,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3096 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3026 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3097 ug_info->riscTx, 0)) != 0) { 3027 ug_info->riscTx, 0)) != 0) {
3098 if (netif_msg_ifup(ugeth)) 3028 if (netif_msg_ifup(ugeth))
3099 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3029 pr_err("Can not fill p_init_enet_param_shadow\n");
3100 __func__);
3101 return ret_val; 3030 return ret_val;
3102 } 3031 }
3103 3032
@@ -3105,8 +3034,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3105 for (i = 0; i < ug_info->numQueuesRx; i++) { 3034 for (i = 0; i < ug_info->numQueuesRx; i++) {
3106 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3035 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3107 if (netif_msg_ifup(ugeth)) 3036 if (netif_msg_ifup(ugeth))
3108 ugeth_err("%s: Can not fill Rx bds with buffers.", 3037 pr_err("Can not fill Rx bds with buffers\n");
3109 __func__);
3110 return ret_val; 3038 return ret_val;
3111 } 3039 }
3112 } 3040 }
@@ -3115,9 +3043,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3115 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3043 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3116 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3044 if (IS_ERR_VALUE(init_enet_pram_offset)) {
3117 if (netif_msg_ifup(ugeth)) 3045 if (netif_msg_ifup(ugeth))
3118 ugeth_err 3046 pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
3119 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3120 __func__);
3121 return -ENOMEM; 3047 return -ENOMEM;
3122 } 3048 }
3123 p_init_enet_pram = 3049 p_init_enet_pram =
@@ -3266,8 +3192,8 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3266 (!(bd_status & (R_F | R_L))) || 3192 (!(bd_status & (R_F | R_L))) ||
3267 (bd_status & R_ERRORS_FATAL)) { 3193 (bd_status & R_ERRORS_FATAL)) {
3268 if (netif_msg_rx_err(ugeth)) 3194 if (netif_msg_rx_err(ugeth))
3269 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3195 pr_err("%d: ERROR!!! skb - 0x%08x\n",
3270 __func__, __LINE__, (u32) skb); 3196 __LINE__, (u32)skb);
3271 dev_kfree_skb(skb); 3197 dev_kfree_skb(skb);
3272 3198
3273 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3199 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
@@ -3290,7 +3216,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3290 skb = get_new_skb(ugeth, bd); 3216 skb = get_new_skb(ugeth, bd);
3291 if (!skb) { 3217 if (!skb) {
3292 if (netif_msg_rx_err(ugeth)) 3218 if (netif_msg_rx_err(ugeth))
3293 ugeth_warn("%s: No Rx Data Buffer", __func__); 3219 pr_warn("No Rx Data Buffer\n");
3294 dev->stats.rx_dropped++; 3220 dev->stats.rx_dropped++;
3295 break; 3221 break;
3296 } 3222 }
@@ -3481,25 +3407,19 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
3481 3407
3482 err = ucc_struct_init(ugeth); 3408 err = ucc_struct_init(ugeth);
3483 if (err) { 3409 if (err) {
3484 if (netif_msg_ifup(ugeth)) 3410 netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
3485 ugeth_err("%s: Cannot configure internal struct, "
3486 "aborting.", dev->name);
3487 goto err; 3411 goto err;
3488 } 3412 }
3489 3413
3490 err = ucc_geth_startup(ugeth); 3414 err = ucc_geth_startup(ugeth);
3491 if (err) { 3415 if (err) {
3492 if (netif_msg_ifup(ugeth)) 3416 netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
3493 ugeth_err("%s: Cannot configure net device, aborting.",
3494 dev->name);
3495 goto err; 3417 goto err;
3496 } 3418 }
3497 3419
3498 err = adjust_enet_interface(ugeth); 3420 err = adjust_enet_interface(ugeth);
3499 if (err) { 3421 if (err) {
3500 if (netif_msg_ifup(ugeth)) 3422 netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
3501 ugeth_err("%s: Cannot configure net device, aborting.",
3502 dev->name);
3503 goto err; 3423 goto err;
3504 } 3424 }
3505 3425
@@ -3516,8 +3436,7 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
3516 3436
3517 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3437 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3518 if (err) { 3438 if (err) {
3519 if (netif_msg_ifup(ugeth)) 3439 netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
3520 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3521 goto err; 3440 goto err;
3522 } 3441 }
3523 3442
@@ -3538,35 +3457,27 @@ static int ucc_geth_open(struct net_device *dev)
3538 3457
3539 /* Test station address */ 3458 /* Test station address */
3540 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3459 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3541 if (netif_msg_ifup(ugeth)) 3460 netif_err(ugeth, ifup, dev,
3542 ugeth_err("%s: Multicast address used for station " 3461 "Multicast address used for station address - is this what you wanted?\n");
3543 "address - is this what you wanted?",
3544 __func__);
3545 return -EINVAL; 3462 return -EINVAL;
3546 } 3463 }
3547 3464
3548 err = init_phy(dev); 3465 err = init_phy(dev);
3549 if (err) { 3466 if (err) {
3550 if (netif_msg_ifup(ugeth)) 3467 netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
3551 ugeth_err("%s: Cannot initialize PHY, aborting.",
3552 dev->name);
3553 return err; 3468 return err;
3554 } 3469 }
3555 3470
3556 err = ucc_geth_init_mac(ugeth); 3471 err = ucc_geth_init_mac(ugeth);
3557 if (err) { 3472 if (err) {
3558 if (netif_msg_ifup(ugeth)) 3473 netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
3559 ugeth_err("%s: Cannot initialize MAC, aborting.",
3560 dev->name);
3561 goto err; 3474 goto err;
3562 } 3475 }
3563 3476
3564 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 3477 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
3565 0, "UCC Geth", dev); 3478 0, "UCC Geth", dev);
3566 if (err) { 3479 if (err) {
3567 if (netif_msg_ifup(ugeth)) 3480 netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
3568 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3569 dev->name);
3570 goto err; 3481 goto err;
3571 } 3482 }
3572 3483
@@ -3704,8 +3615,7 @@ static int ucc_geth_resume(struct platform_device *ofdev)
3704 3615
3705 err = ucc_geth_init_mac(ugeth); 3616 err = ucc_geth_init_mac(ugeth);
3706 if (err) { 3617 if (err) {
3707 ugeth_err("%s: Cannot initialize MAC, aborting.", 3618 netdev_err(ndev, "Cannot initialize MAC, aborting\n");
3708 ndev->name);
3709 return err; 3619 return err;
3710 } 3620 }
3711 } 3621 }
@@ -3825,8 +3735,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3825 ug_info = &ugeth_info[ucc_num]; 3735 ug_info = &ugeth_info[ucc_num];
3826 if (ug_info == NULL) { 3736 if (ug_info == NULL) {
3827 if (netif_msg_probe(&debug)) 3737 if (netif_msg_probe(&debug))
3828 ugeth_err("%s: [%d] Missing additional data!", 3738 pr_err("[%d] Missing additional data!\n", ucc_num);
3829 __func__, ucc_num);
3830 return -ENODEV; 3739 return -ENODEV;
3831 } 3740 }
3832 3741
@@ -3837,8 +3746,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3837 ug_info->uf_info.rx_clock = qe_clock_source(sprop); 3746 ug_info->uf_info.rx_clock = qe_clock_source(sprop);
3838 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || 3747 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
3839 (ug_info->uf_info.rx_clock > QE_CLK24)) { 3748 (ug_info->uf_info.rx_clock > QE_CLK24)) {
3840 printk(KERN_ERR 3749 pr_err("invalid rx-clock-name property\n");
3841 "ucc_geth: invalid rx-clock-name property\n");
3842 return -EINVAL; 3750 return -EINVAL;
3843 } 3751 }
3844 } else { 3752 } else {
@@ -3846,13 +3754,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3846 if (!prop) { 3754 if (!prop) {
3847 /* If both rx-clock-name and rx-clock are missing, 3755 /* If both rx-clock-name and rx-clock are missing,
3848 we want to tell people to use rx-clock-name. */ 3756 we want to tell people to use rx-clock-name. */
3849 printk(KERN_ERR 3757 pr_err("missing rx-clock-name property\n");
3850 "ucc_geth: missing rx-clock-name property\n");
3851 return -EINVAL; 3758 return -EINVAL;
3852 } 3759 }
3853 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3760 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3854 printk(KERN_ERR 3761 pr_err("invalid rx-clock propperty\n");
3855 "ucc_geth: invalid rx-clock propperty\n");
3856 return -EINVAL; 3762 return -EINVAL;
3857 } 3763 }
3858 ug_info->uf_info.rx_clock = *prop; 3764 ug_info->uf_info.rx_clock = *prop;
@@ -3863,20 +3769,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3863 ug_info->uf_info.tx_clock = qe_clock_source(sprop); 3769 ug_info->uf_info.tx_clock = qe_clock_source(sprop);
3864 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || 3770 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
3865 (ug_info->uf_info.tx_clock > QE_CLK24)) { 3771 (ug_info->uf_info.tx_clock > QE_CLK24)) {
3866 printk(KERN_ERR 3772 pr_err("invalid tx-clock-name property\n");
3867 "ucc_geth: invalid tx-clock-name property\n");
3868 return -EINVAL; 3773 return -EINVAL;
3869 } 3774 }
3870 } else { 3775 } else {
3871 prop = of_get_property(np, "tx-clock", NULL); 3776 prop = of_get_property(np, "tx-clock", NULL);
3872 if (!prop) { 3777 if (!prop) {
3873 printk(KERN_ERR 3778 pr_err("missing tx-clock-name property\n");
3874 "ucc_geth: missing tx-clock-name property\n");
3875 return -EINVAL; 3779 return -EINVAL;
3876 } 3780 }
3877 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { 3781 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3878 printk(KERN_ERR 3782 pr_err("invalid tx-clock property\n");
3879 "ucc_geth: invalid tx-clock property\n");
3880 return -EINVAL; 3783 return -EINVAL;
3881 } 3784 }
3882 ug_info->uf_info.tx_clock = *prop; 3785 ug_info->uf_info.tx_clock = *prop;
@@ -3949,7 +3852,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3949 } 3852 }
3950 3853
3951 if (netif_msg_probe(&debug)) 3854 if (netif_msg_probe(&debug))
3952 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n", 3855 pr_info("UCC%1d at 0x%8x (irq = %d)\n",
3953 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3856 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3954 ug_info->uf_info.irq); 3857 ug_info->uf_info.irq);
3955 3858
@@ -3988,8 +3891,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3988 err = register_netdev(dev); 3891 err = register_netdev(dev);
3989 if (err) { 3892 if (err) {
3990 if (netif_msg_probe(ugeth)) 3893 if (netif_msg_probe(ugeth))
3991 ugeth_err("%s: Cannot register net device, aborting.", 3894 pr_err("%s: Cannot register net device, aborting\n",
3992 dev->name); 3895 dev->name);
3993 free_netdev(dev); 3896 free_netdev(dev);
3994 return err; 3897 return err;
3995 } 3898 }
@@ -4047,7 +3950,7 @@ static int __init ucc_geth_init(void)
4047 int i, ret; 3950 int i, ret;
4048 3951
4049 if (netif_msg_drv(&debug)) 3952 if (netif_msg_drv(&debug))
4050 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 3953 pr_info(DRV_DESC "\n");
4051 for (i = 0; i < 8; i++) 3954 for (i = 0; i < 8; i++)
4052 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 3955 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4053 sizeof(ugeth_primary_info)); 3956 sizeof(ugeth_primary_info));
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 1ebf7128ec04..e79aaf9ae52a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -38,7 +38,7 @@
38 38
39#include "ucc_geth.h" 39#include "ucc_geth.h"
40 40
41static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { 41static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
42 "tx-64-frames", 42 "tx-64-frames",
43 "tx-65-127-frames", 43 "tx-65-127-frames",
44 "tx-128-255-frames", 44 "tx-128-255-frames",
@@ -59,7 +59,7 @@ static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
59 "rx-dropped-frames", 59 "rx-dropped-frames",
60}; 60};
61 61
62static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { 62static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
63 "tx-single-collision", 63 "tx-single-collision",
64 "tx-multiple-collision", 64 "tx-multiple-collision",
65 "tx-late-collsion", 65 "tx-late-collsion",
@@ -74,7 +74,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
74 "tx-jumbo-frames", 74 "tx-jumbo-frames",
75}; 75};
76 76
77static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { 77static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
78 "rx-crc-errors", 78 "rx-crc-errors",
79 "rx-alignment-errors", 79 "rx-alignment-errors",
80 "rx-in-range-length-errors", 80 "rx-in-range-length-errors",
@@ -160,8 +160,7 @@ uec_set_pauseparam(struct net_device *netdev,
160 if (ugeth->phydev->autoneg) { 160 if (ugeth->phydev->autoneg) {
161 if (netif_running(netdev)) { 161 if (netif_running(netdev)) {
162 /* FIXME: automatically restart */ 162 /* FIXME: automatically restart */
163 printk(KERN_INFO 163 netdev_info(netdev, "Please re-open the interface\n");
164 "Please re-open the interface.\n");
165 } 164 }
166 } else { 165 } else {
167 struct ucc_geth_info *ug_info = ugeth->ug_info; 166 struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -240,18 +239,18 @@ uec_set_ringparam(struct net_device *netdev,
240 int queue = 0, ret = 0; 239 int queue = 0, ret = 0;
241 240
242 if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { 241 if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
243 printk("%s: RxBD ring size must be no smaller than %d.\n", 242 netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
244 netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN); 243 UCC_GETH_RX_BD_RING_SIZE_MIN);
245 return -EINVAL; 244 return -EINVAL;
246 } 245 }
247 if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { 246 if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
248 printk("%s: RxBD ring size must be multiple of %d.\n", 247 netdev_info(netdev, "RxBD ring size must be multiple of %d\n",
249 netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); 248 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
250 return -EINVAL; 249 return -EINVAL;
251 } 250 }
252 if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) { 251 if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
253 printk("%s: TxBD ring size must be no smaller than %d.\n", 252 netdev_info(netdev, "TxBD ring size must be no smaller than %d\n",
254 netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN); 253 UCC_GETH_TX_BD_RING_SIZE_MIN);
255 return -EINVAL; 254 return -EINVAL;
256 } 255 }
257 256
@@ -260,8 +259,7 @@ uec_set_ringparam(struct net_device *netdev,
260 259
261 if (netif_running(netdev)) { 260 if (netif_running(netdev)) {
262 /* FIXME: restart automatically */ 261 /* FIXME: restart automatically */
263 printk(KERN_INFO 262 netdev_info(netdev, "Please re-open the interface\n");
264 "Please re-open the interface.\n");
265 } 263 }
266 264
267 return ret; 265 return ret;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ab98b77df309..ef46b58cb4e9 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -991,8 +991,6 @@ static void fjn_rx(struct net_device *dev)
991 } 991 }
992 skb = netdev_alloc_skb(dev, pkt_len + 2); 992 skb = netdev_alloc_skb(dev, pkt_len + 2);
993 if (skb == NULL) { 993 if (skb == NULL) {
994 netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
995 pkt_len);
996 outb(F_SKP_PKT, ioaddr + RX_SKIP); 994 outb(F_SKP_PKT, ioaddr + RX_SKIP);
997 dev->stats.rx_dropped++; 995 dev->stats.rx_dropped++;
998 break; 996 break;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 1c54e229e3cc..e38816145395 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev)
798#ifdef __mc68000__ 798#ifdef __mc68000__
799 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); 799 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
800#endif 800#endif
801 } 801 } else {
802 else
803 skb = netdev_alloc_skb(dev, pkt_len + 2); 802 skb = netdev_alloc_skb(dev, pkt_len + 2);
803 }
804memory_squeeze: 804memory_squeeze:
805 if (skb == NULL) { 805 if (skb == NULL) {
806 /* XXX tulip.c can defer packets here!! */ 806 /* XXX tulip.c can defer packets here!! */
807 printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
808 dev->stats.rx_dropped++; 807 dev->stats.rx_dropped++;
809 } 808 } else {
810 else {
811 if (!rx_in_place) { 809 if (!rx_in_place) {
812 /* 16 byte align the data fields */ 810 /* 16 byte align the data fields */
813 skb_reserve(skb, 2); 811 skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f045ea4dc514..d653bac4cfc4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev)
715 rbd->v_data = newskb->data; 715 rbd->v_data = newskb->data;
716 rbd->b_data = SWAP32(dma_addr); 716 rbd->b_data = SWAP32(dma_addr);
717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); 717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
718 } else 718 } else {
719 skb = netdev_alloc_skb_ip_align(dev, pkt_len); 719 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
720 }
720memory_squeeze: 721memory_squeeze:
721 if (skb == NULL) { 722 if (skb == NULL) {
722 /* XXX tulip.c can defer packets here!! */ 723 /* XXX tulip.c can defer packets here!! */
723 printk(KERN_ERR
724 "%s: i596_rx Memory squeeze, dropping packet.\n",
725 dev->name);
726 dev->stats.rx_dropped++; 724 dev->stats.rx_dropped++;
727 } else { 725 } else {
728 if (!rx_in_place) { 726 if (!rx_in_place) {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 328f47c92e26..90ea0b1673ca 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
402 skb_arr_rq1[index] = netdev_alloc_skb(dev, 402 skb_arr_rq1[index] = netdev_alloc_skb(dev,
403 EHEA_L_PKT_SIZE); 403 EHEA_L_PKT_SIZE);
404 if (!skb_arr_rq1[index]) { 404 if (!skb_arr_rq1[index]) {
405 netdev_info(dev, "Unable to allocate enough skb in the array\n");
406 pr->rq1_skba.os_skbs = fill_wqes - i; 405 pr->rq1_skba.os_skbs = fill_wqes - i;
407 break; 406 break;
408 } 407 }
@@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
432 431
433 for (i = 0; i < nr_rq1a; i++) { 432 for (i = 0; i < nr_rq1a; i++) {
434 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 433 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
435 if (!skb_arr_rq1[i]) { 434 if (!skb_arr_rq1[i])
436 netdev_info(dev, "Not enough memory to allocate skb array\n");
437 break; 435 break;
438 }
439 } 436 }
440 /* Ring doorbell */ 437 /* Ring doorbell */
441 ehea_update_rq1a(pr->qp, i - 1); 438 ehea_update_rq1a(pr->qp, i - 1);
@@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
695 692
696 skb = netdev_alloc_skb(dev, 693 skb = netdev_alloc_skb(dev,
697 EHEA_L_PKT_SIZE); 694 EHEA_L_PKT_SIZE);
698 if (!skb) { 695 if (!skb)
699 netdev_err(dev, "Not enough memory to allocate skb\n");
700 break; 696 break;
701 }
702 } 697 }
703 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 698 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
704 cqe->num_bytes_transfered - 4); 699 cqe->num_bytes_transfered - 4);
@@ -730,7 +725,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
730 processed_bytes += skb->len; 725 processed_bytes += skb->len;
731 726
732 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) 727 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
733 __vlan_hwaccel_put_tag(skb, cqe->vlan_tag); 728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
729 cqe->vlan_tag);
734 730
735 napi_gro_receive(&pr->napi, skb); 731 napi_gro_receive(&pr->napi, skb);
736 } else { 732 } else {
@@ -2115,7 +2111,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2115 return NETDEV_TX_OK; 2111 return NETDEV_TX_OK;
2116} 2112}
2117 2113
2118static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 2114static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2119{ 2115{
2120 struct ehea_port *port = netdev_priv(dev); 2116 struct ehea_port *port = netdev_priv(dev);
2121 struct ehea_adapter *adapter = port->adapter; 2117 struct ehea_adapter *adapter = port->adapter;
@@ -2153,7 +2149,7 @@ out:
2153 return err; 2149 return err;
2154} 2150}
2155 2151
2156static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 2152static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2157{ 2153{
2158 struct ehea_port *port = netdev_priv(dev); 2154 struct ehea_port *port = netdev_priv(dev);
2159 struct ehea_adapter *adapter = port->adapter; 2155 struct ehea_adapter *adapter = port->adapter;
@@ -3025,12 +3021,12 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3025 dev->netdev_ops = &ehea_netdev_ops; 3021 dev->netdev_ops = &ehea_netdev_ops;
3026 ehea_set_ethtool_ops(dev); 3022 ehea_set_ethtool_ops(dev);
3027 3023
3028 dev->hw_features = NETIF_F_SG | NETIF_F_TSO 3024 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
3029 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX; 3025 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3030 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3026 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
3031 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3027 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3032 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3028 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3033 | NETIF_F_RXCSUM; 3029 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
3034 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | 3030 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3035 NETIF_F_IP_CSUM; 3031 NETIF_F_IP_CSUM;
3036 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3032 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 1f7ecf57181e..610ed223d1db 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev)
637 bd_size = sizeof(struct mal_descriptor) * 637 bd_size = sizeof(struct mal_descriptor) *
638 (NUM_TX_BUFF * mal->num_tx_chans + 638 (NUM_TX_BUFF * mal->num_tx_chans +
639 NUM_RX_BUFF * mal->num_rx_chans); 639 NUM_RX_BUFF * mal->num_rx_chans);
640 mal->bd_virt = 640 mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
641 dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 641 GFP_KERNEL | __GFP_ZERO);
642 GFP_KERNEL);
643 if (mal->bd_virt == NULL) { 642 if (mal->bd_virt == NULL) {
644 printk(KERN_ERR
645 "mal%d: out of memory allocating RX/TX descriptors!\n",
646 index);
647 err = -ENOMEM; 643 err = -ENOMEM;
648 goto fail_unmap; 644 goto fail_unmap;
649 } 645 }
650 memset(mal->bd_virt, 0, bd_size);
651 646
652 for (i = 0; i < mal->num_tx_chans; ++i) 647 for (i = 0; i < mal->num_tx_chans; ++i)
653 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + 648 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a9902..302d59401065 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
556 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * 556 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
557 rxq_entries; 557 rxq_entries;
558 adapter->rx_queue.queue_addr = 558 adapter->rx_queue.queue_addr =
559 dma_alloc_coherent(dev, adapter->rx_queue.queue_len, 559 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
560 &adapter->rx_queue.queue_dma, GFP_KERNEL); 560 &adapter->rx_queue.queue_dma, GFP_KERNEL);
561
562 if (!adapter->rx_queue.queue_addr) { 561 if (!adapter->rx_queue.queue_addr) {
563 netdev_err(netdev, "unable to allocate rx queue pages\n");
564 rc = -ENOMEM; 562 rc = -ENOMEM;
565 goto err_out; 563 goto err_out;
566 } 564 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ffd287196bf8..82a967c95598 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1020 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1020 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1021 txdr->size = ALIGN(txdr->size, 4096); 1021 txdr->size = ALIGN(txdr->size, 4096);
1022 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 1022 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1023 GFP_KERNEL); 1023 GFP_KERNEL | __GFP_ZERO);
1024 if (!txdr->desc) { 1024 if (!txdr->desc) {
1025 ret_val = 2; 1025 ret_val = 2;
1026 goto err_nomem; 1026 goto err_nomem;
1027 } 1027 }
1028 memset(txdr->desc, 0, txdr->size);
1029 txdr->next_to_use = txdr->next_to_clean = 0; 1028 txdr->next_to_use = txdr->next_to_clean = 0;
1030 1029
1031 ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); 1030 ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
@@ -1079,12 +1078,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1079 1078
1080 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1079 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1081 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1080 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1082 GFP_KERNEL); 1081 GFP_KERNEL | __GFP_ZERO);
1083 if (!rxdr->desc) { 1082 if (!rxdr->desc) {
1084 ret_val = 6; 1083 ret_val = 6;
1085 goto err_nomem; 1084 goto err_nomem;
1086 } 1085 }
1087 memset(rxdr->desc, 0, rxdr->size);
1088 rxdr->next_to_use = rxdr->next_to_clean = 0; 1086 rxdr->next_to_use = rxdr->next_to_clean = 0;
1089 1087
1090 rctl = er32(RCTL); 1088 rctl = er32(RCTL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbef..59ad007dd5aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev,
166 netdev_features_t features); 166 netdev_features_t features);
167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 bool filter_on); 168 bool filter_on);
169static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 169static int e1000_vlan_rx_add_vid(struct net_device *netdev,
170static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 170 __be16 proto, u16 vid);
171static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
172 __be16 proto, u16 vid);
171static void e1000_restore_vlan(struct e1000_adapter *adapter); 173static void e1000_restore_vlan(struct e1000_adapter *adapter);
172 174
173#ifdef CONFIG_PM 175#ifdef CONFIG_PM
@@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
333 if (!test_bit(vid, adapter->active_vlans)) { 335 if (!test_bit(vid, adapter->active_vlans)) {
334 if (hw->mng_cookie.status & 336 if (hw->mng_cookie.status &
335 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 337 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
336 e1000_vlan_rx_add_vid(netdev, vid); 338 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
337 adapter->mng_vlan_id = vid; 339 adapter->mng_vlan_id = vid;
338 } else { 340 } else {
339 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 341 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
341 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && 343 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
342 (vid != old_vid) && 344 (vid != old_vid) &&
343 !test_bit(old_vid, adapter->active_vlans)) 345 !test_bit(old_vid, adapter->active_vlans))
344 e1000_vlan_rx_kill_vid(netdev, old_vid); 346 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
347 old_vid);
345 } else { 348 } else {
346 adapter->mng_vlan_id = vid; 349 adapter->mng_vlan_id = vid;
347 } 350 }
@@ -809,10 +812,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 /* Since there is no support for separate Rx/Tx vlan accel 812 /* Since there is no support for separate Rx/Tx vlan accel
810 * enable/disable make sure Tx flag is always in same state as Rx. 813 * enable/disable make sure Tx flag is always in same state as Rx.
811 */ 814 */
812 if (features & NETIF_F_HW_VLAN_RX) 815 if (features & NETIF_F_HW_VLAN_CTAG_RX)
813 features |= NETIF_F_HW_VLAN_TX; 816 features |= NETIF_F_HW_VLAN_CTAG_TX;
814 else 817 else
815 features &= ~NETIF_F_HW_VLAN_TX; 818 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
816 819
817 return features; 820 return features;
818} 821}
@@ -823,7 +826,7 @@ static int e1000_set_features(struct net_device *netdev,
823 struct e1000_adapter *adapter = netdev_priv(netdev); 826 struct e1000_adapter *adapter = netdev_priv(netdev);
824 netdev_features_t changed = features ^ netdev->features; 827 netdev_features_t changed = features ^ netdev->features;
825 828
826 if (changed & NETIF_F_HW_VLAN_RX) 829 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
827 e1000_vlan_mode(netdev, features); 830 e1000_vlan_mode(netdev, features);
828 831
829 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) 832 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
@@ -1058,9 +1061,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1058 if (hw->mac_type >= e1000_82543) { 1061 if (hw->mac_type >= e1000_82543) {
1059 netdev->hw_features = NETIF_F_SG | 1062 netdev->hw_features = NETIF_F_SG |
1060 NETIF_F_HW_CSUM | 1063 NETIF_F_HW_CSUM |
1061 NETIF_F_HW_VLAN_RX; 1064 NETIF_F_HW_VLAN_CTAG_RX;
1062 netdev->features = NETIF_F_HW_VLAN_TX | 1065 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1063 NETIF_F_HW_VLAN_FILTER; 1066 NETIF_F_HW_VLAN_CTAG_FILTER;
1064 } 1067 }
1065 1068
1066 if ((hw->mac_type >= e1000_82544) && 1069 if ((hw->mac_type >= e1000_82544) &&
@@ -1457,7 +1460,8 @@ static int e1000_close(struct net_device *netdev)
1457 if ((hw->mng_cookie.status & 1460 if ((hw->mng_cookie.status &
1458 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1461 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1459 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { 1462 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1460 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1463 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1464 adapter->mng_vlan_id);
1461 } 1465 }
1462 1466
1463 return 0; 1467 return 0;
@@ -1516,8 +1520,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1516 if (!txdr->desc) { 1520 if (!txdr->desc) {
1517setup_tx_desc_die: 1521setup_tx_desc_die:
1518 vfree(txdr->buffer_info); 1522 vfree(txdr->buffer_info);
1519 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1520 "ring\n");
1521 return -ENOMEM; 1523 return -ENOMEM;
1522 } 1524 }
1523 1525
@@ -1707,10 +1709,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1707 1709
1708 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1710 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1709 GFP_KERNEL); 1711 GFP_KERNEL);
1710
1711 if (!rxdr->desc) { 1712 if (!rxdr->desc) {
1712 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1713 "ring\n");
1714setup_rx_desc_die: 1713setup_rx_desc_die:
1715 vfree(rxdr->buffer_info); 1714 vfree(rxdr->buffer_info);
1716 return -ENOMEM; 1715 return -ENOMEM;
@@ -1729,8 +1728,6 @@ setup_rx_desc_die:
1729 if (!rxdr->desc) { 1728 if (!rxdr->desc) {
1730 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1729 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731 olddma); 1730 olddma);
1732 e_err(probe, "Unable to allocate memory for the Rx "
1733 "descriptor ring\n");
1734 goto setup_rx_desc_die; 1731 goto setup_rx_desc_die;
1735 } 1732 }
1736 1733
@@ -4006,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4006 if (status & E1000_RXD_STAT_VP) { 4003 if (status & E1000_RXD_STAT_VP) {
4007 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 4004 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4008 4005
4009 __vlan_hwaccel_put_tag(skb, vid); 4006 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4010 } 4007 }
4011 napi_gro_receive(&adapter->napi, skb); 4008 napi_gro_receive(&adapter->napi, skb);
4012} 4009}
@@ -4792,7 +4789,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4792 u32 ctrl; 4789 u32 ctrl;
4793 4790
4794 ctrl = er32(CTRL); 4791 ctrl = er32(CTRL);
4795 if (features & NETIF_F_HW_VLAN_RX) { 4792 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4796 /* enable VLAN tag insert/strip */ 4793 /* enable VLAN tag insert/strip */
4797 ctrl |= E1000_CTRL_VME; 4794 ctrl |= E1000_CTRL_VME;
4798 } else { 4795 } else {
@@ -4844,7 +4841,8 @@ static void e1000_vlan_mode(struct net_device *netdev,
4844 e1000_irq_enable(adapter); 4841 e1000_irq_enable(adapter);
4845} 4842}
4846 4843
4847static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 4844static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4845 __be16 proto, u16 vid)
4848{ 4846{
4849 struct e1000_adapter *adapter = netdev_priv(netdev); 4847 struct e1000_adapter *adapter = netdev_priv(netdev);
4850 struct e1000_hw *hw = &adapter->hw; 4848 struct e1000_hw *hw = &adapter->hw;
@@ -4869,7 +4867,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4869 return 0; 4867 return 0;
4870} 4868}
4871 4869
4872static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 4870static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4871 __be16 proto, u16 vid)
4873{ 4872{
4874 struct e1000_adapter *adapter = netdev_priv(netdev); 4873 struct e1000_adapter *adapter = netdev_priv(netdev);
4875 struct e1000_hw *hw = &adapter->hw; 4874 struct e1000_hw *hw = &adapter->hw;
@@ -4903,7 +4902,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
4903 4902
4904 e1000_vlan_filter_on_off(adapter, true); 4903 e1000_vlan_filter_on_off(adapter, true);
4905 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4904 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4906 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4905 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4907} 4906}
4908 4907
4909int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) 4908int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e0991388664c..b71c8502a2b3 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -37,7 +37,9 @@
37 * "index + 5". 37 * "index + 5".
38 */ 38 */
39static const u16 e1000_gg82563_cable_length_table[] = { 39static const u16 e1000_gg82563_cable_length_table[] = {
40 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; 40 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
41};
42
41#define GG82563_CABLE_LENGTH_TABLE_SIZE \ 43#define GG82563_CABLE_LENGTH_TABLE_SIZE \
42 ARRAY_SIZE(e1000_gg82563_cable_length_table) 44 ARRAY_SIZE(e1000_gg82563_cable_length_table)
43 45
@@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
116 nvm->type = e1000_nvm_eeprom_spi; 118 nvm->type = e1000_nvm_eeprom_spi;
117 119
118 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 120 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
119 E1000_EECD_SIZE_EX_SHIFT); 121 E1000_EECD_SIZE_EX_SHIFT);
120 122
121 /* Added to a constant, "size" becomes the left-shift value 123 /* Added to a constant, "size" becomes the left-shift value
122 * for setting word_size. 124 * for setting word_size.
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
393 * before the device has completed the "Page Select" MDI 395 * before the device has completed the "Page Select" MDI
394 * transaction. So we wait 200us after each MDI command... 396 * transaction. So we wait 200us after each MDI command...
395 */ 397 */
396 udelay(200); 398 usleep_range(200, 400);
397 399
398 /* ...and verify the command was successful. */ 400 /* ...and verify the command was successful. */
399 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); 401 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
403 return -E1000_ERR_PHY; 405 return -E1000_ERR_PHY;
404 } 406 }
405 407
406 udelay(200); 408 usleep_range(200, 400);
407 409
408 ret_val = e1000e_read_phy_reg_mdic(hw, 410 ret_val = e1000e_read_phy_reg_mdic(hw,
409 MAX_PHY_REG_ADDRESS & offset, 411 MAX_PHY_REG_ADDRESS & offset,
410 data); 412 data);
411 413
412 udelay(200); 414 usleep_range(200, 400);
413 } else { 415 } else {
414 ret_val = e1000e_read_phy_reg_mdic(hw, 416 ret_val = e1000e_read_phy_reg_mdic(hw,
415 MAX_PHY_REG_ADDRESS & offset, 417 MAX_PHY_REG_ADDRESS & offset,
416 data); 418 data);
417 } 419 }
418 420
419 e1000_release_phy_80003es2lan(hw); 421 e1000_release_phy_80003es2lan(hw);
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
462 * before the device has completed the "Page Select" MDI 464 * before the device has completed the "Page Select" MDI
463 * transaction. So we wait 200us after each MDI command... 465 * transaction. So we wait 200us after each MDI command...
464 */ 466 */
465 udelay(200); 467 usleep_range(200, 400);
466 468
467 /* ...and verify the command was successful. */ 469 /* ...and verify the command was successful. */
468 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); 470 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
472 return -E1000_ERR_PHY; 474 return -E1000_ERR_PHY;
473 } 475 }
474 476
475 udelay(200); 477 usleep_range(200, 400);
476 478
477 ret_val = e1000e_write_phy_reg_mdic(hw, 479 ret_val = e1000e_write_phy_reg_mdic(hw,
478 MAX_PHY_REG_ADDRESS & offset, 480 MAX_PHY_REG_ADDRESS &
479 data); 481 offset, data);
480 482
481 udelay(200); 483 usleep_range(200, 400);
482 } else { 484 } else {
483 ret_val = e1000e_write_phy_reg_mdic(hw, 485 ret_val = e1000e_write_phy_reg_mdic(hw,
484 MAX_PHY_REG_ADDRESS & offset, 486 MAX_PHY_REG_ADDRESS &
485 data); 487 offset, data);
486 } 488 }
487 489
488 e1000_release_phy_80003es2lan(hw); 490 e1000_release_phy_80003es2lan(hw);
@@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
580 e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); 582 e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
581 583
582 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 584 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
583 100000, &link); 585 100000, &link);
584 if (ret_val) 586 if (ret_val)
585 return ret_val; 587 return ret_val;
586 588
@@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
595 597
596 /* Try once more */ 598 /* Try once more */
597 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 599 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
598 100000, &link); 600 100000, &link);
599 if (ret_val) 601 if (ret_val)
600 return ret_val; 602 return ret_val;
601 } 603 }
@@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
666 s32 ret_val; 668 s32 ret_val;
667 669
668 if (hw->phy.media_type == e1000_media_type_copper) { 670 if (hw->phy.media_type == e1000_media_type_copper) {
669 ret_val = e1000e_get_speed_and_duplex_copper(hw, 671 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
670 speed,
671 duplex);
672 hw->phy.ops.cfg_on_link_up(hw); 672 hw->phy.ops.cfg_on_link_up(hw);
673 } else { 673 } else {
674 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, 674 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
675 speed, 675 speed,
676 duplex); 676 duplex);
677 } 677 }
678 678
679 return ret_val; 679 return ret_val;
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
754 754
755 /* Initialize identification LED */ 755 /* Initialize identification LED */
756 ret_val = mac->ops.id_led_init(hw); 756 ret_val = mac->ops.id_led_init(hw);
757 /* An error is not fatal and we should not stop init due to this */
757 if (ret_val) 758 if (ret_val)
758 e_dbg("Error initializing identification LED\n"); 759 e_dbg("Error initializing identification LED\n");
759 /* This is not fatal and we should not stop init due to this */
760 760
761 /* Disabling VLAN filtering */ 761 /* Disabling VLAN filtering */
762 e_dbg("Initializing the IEEE VLAN\n"); 762 e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
784 784
785 /* Set the transmit descriptor write-back policy */ 785 /* Set the transmit descriptor write-back policy */
786 reg_data = er32(TXDCTL(0)); 786 reg_data = er32(TXDCTL(0));
787 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 787 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
788 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 788 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
789 ew32(TXDCTL(0), reg_data); 789 ew32(TXDCTL(0), reg_data);
790 790
791 /* ...for both queues. */ 791 /* ...for both queues. */
792 reg_data = er32(TXDCTL(1)); 792 reg_data = er32(TXDCTL(1));
793 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 793 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
794 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 794 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
795 ew32(TXDCTL(1), reg_data); 795 ew32(TXDCTL(1), reg_data);
796 796
797 /* Enable retransmit on late collisions */ 797 /* Enable retransmit on late collisions */
@@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
818 /* default to true to enable the MDIC W/A */ 818 /* default to true to enable the MDIC W/A */
819 hw->dev_spec.e80003es2lan.mdic_wa_enable = true; 819 hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
820 820
821 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 821 ret_val =
822 E1000_KMRNCTRLSTA_OFFSET >> 822 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
823 E1000_KMRNCTRLSTA_OFFSET_SHIFT, 823 E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
824 &i);
825 if (!ret_val) { 824 if (!ret_val) {
826 if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == 825 if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
827 E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) 826 E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
828 hw->dev_spec.e80003es2lan.mdic_wa_enable = false; 827 hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
829 } 828 }
830 829
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
891{ 890{
892 struct e1000_phy_info *phy = &hw->phy; 891 struct e1000_phy_info *phy = &hw->phy;
893 s32 ret_val; 892 s32 ret_val;
894 u32 ctrl_ext; 893 u32 reg;
895 u16 data; 894 u16 data;
896 895
897 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); 896 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
954 } 953 }
955 954
956 /* Bypass Rx and Tx FIFO's */ 955 /* Bypass Rx and Tx FIFO's */
957 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 956 reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
958 E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 957 data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
959 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | 958 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
960 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); 959 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
961 if (ret_val) 960 if (ret_val)
962 return ret_val; 961 return ret_val;
963 962
964 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 963 reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
965 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, 964 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
966 &data);
967 if (ret_val) 965 if (ret_val)
968 return ret_val; 966 return ret_val;
969 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; 967 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
970 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 968 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
971 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
972 data);
973 if (ret_val) 969 if (ret_val)
974 return ret_val; 970 return ret_val;
975 971
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
982 if (ret_val) 978 if (ret_val)
983 return ret_val; 979 return ret_val;
984 980
985 ctrl_ext = er32(CTRL_EXT); 981 reg = er32(CTRL_EXT);
986 ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); 982 reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
987 ew32(CTRL_EXT, ctrl_ext); 983 ew32(CTRL_EXT, reg);
988 984
989 ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); 985 ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
990 if (ret_val) 986 if (ret_val)
@@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1049 * polling the phy; this fixes erroneous timeouts at 10Mbps. 1045 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1050 */ 1046 */
1051 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), 1047 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
1052 0xFFFF); 1048 0xFFFF);
1053 if (ret_val) 1049 if (ret_val)
1054 return ret_val; 1050 return ret_val;
1055 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), 1051 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1056 &reg_data); 1052 &reg_data);
1057 if (ret_val) 1053 if (ret_val)
1058 return ret_val; 1054 return ret_val;
1059 reg_data |= 0x3F; 1055 reg_data |= 0x3F;
1060 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), 1056 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1061 reg_data); 1057 reg_data);
1062 if (ret_val) 1058 if (ret_val)
1063 return ret_val; 1059 return ret_val;
1064 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 1060 ret_val =
1065 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1061 e1000_read_kmrn_reg_80003es2lan(hw,
1066 &reg_data); 1062 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1063 &reg_data);
1067 if (ret_val) 1064 if (ret_val)
1068 return ret_val; 1065 return ret_val;
1069 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; 1066 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1070 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1067 ret_val =
1071 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1068 e1000_write_kmrn_reg_80003es2lan(hw,
1072 reg_data); 1069 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1070 reg_data);
1073 if (ret_val) 1071 if (ret_val)
1074 return ret_val; 1072 return ret_val;
1075 1073
@@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
1096 1094
1097 if (hw->phy.media_type == e1000_media_type_copper) { 1095 if (hw->phy.media_type == e1000_media_type_copper) {
1098 ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, 1096 ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
1099 &duplex); 1097 &duplex);
1100 if (ret_val) 1098 if (ret_val)
1101 return ret_val; 1099 return ret_val;
1102 1100
@@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1125 u16 reg_data, reg_data2; 1123 u16 reg_data, reg_data2;
1126 1124
1127 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1125 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1128 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1126 ret_val =
1129 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1127 e1000_write_kmrn_reg_80003es2lan(hw,
1130 reg_data); 1128 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1129 reg_data);
1131 if (ret_val) 1130 if (ret_val)
1132 return ret_val; 1131 return ret_val;
1133 1132
@@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1171 u32 i = 0; 1170 u32 i = 0;
1172 1171
1173 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1172 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1174 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1173 ret_val =
1175 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1174 e1000_write_kmrn_reg_80003es2lan(hw,
1176 reg_data); 1175 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1176 reg_data);
1177 if (ret_val) 1177 if (ret_val)
1178 return ret_val; 1178 return ret_val;
1179 1179
@@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1220 return ret_val; 1220 return ret_val;
1221 1221
1222 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1222 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1223 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 1223 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
1224 ew32(KMRNCTRLSTA, kmrnctrlsta); 1224 ew32(KMRNCTRLSTA, kmrnctrlsta);
1225 e1e_flush(); 1225 e1e_flush();
1226 1226
@@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1255 return ret_val; 1255 return ret_val;
1256 1256
1257 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1257 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1258 E1000_KMRNCTRLSTA_OFFSET) | data; 1258 E1000_KMRNCTRLSTA_OFFSET) | data;
1259 ew32(KMRNCTRLSTA, kmrnctrlsta); 1259 ew32(KMRNCTRLSTA, kmrnctrlsta);
1260 e1e_flush(); 1260 e1e_flush();
1261 1261
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
1419 .phy_ops = &es2_phy_ops, 1419 .phy_ops = &es2_phy_ops,
1420 .nvm_ops = &es2_nvm_ops, 1420 .nvm_ops = &es2_nvm_ops,
1421}; 1421};
1422
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2faffbde179e..7380442a3829 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
184 default: 184 default:
185 nvm->type = e1000_nvm_eeprom_spi; 185 nvm->type = e1000_nvm_eeprom_spi;
186 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 186 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
187 E1000_EECD_SIZE_EX_SHIFT); 187 E1000_EECD_SIZE_EX_SHIFT);
188 /* Added to a constant, "size" becomes the left-shift value 188 /* Added to a constant, "size" becomes the left-shift value
189 * for setting word_size. 189 * for setting word_size.
190 */ 190 */
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
437 return ret_val; 437 return ret_val;
438 438
439 phy->id = (u32)(phy_id << 16); 439 phy->id = (u32)(phy_id << 16);
440 udelay(20); 440 usleep_range(20, 40);
441 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); 441 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
442 if (ret_val) 442 if (ret_val)
443 return ret_val; 443 return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
482 if (!(swsm & E1000_SWSM_SMBI)) 482 if (!(swsm & E1000_SWSM_SMBI))
483 break; 483 break;
484 484
485 udelay(50); 485 usleep_range(50, 100);
486 i++; 486 i++;
487 } 487 }
488 488
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
499 if (er32(SWSM) & E1000_SWSM_SWESMBI) 499 if (er32(SWSM) & E1000_SWSM_SWESMBI)
500 break; 500 break;
501 501
502 udelay(50); 502 usleep_range(50, 100);
503 } 503 }
504 504
505 if (i == fw_timeout) { 505 if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
526 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 526 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
527 ew32(SWSM, swsm); 527 ew32(SWSM, swsm);
528} 528}
529
529/** 530/**
530 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore 531 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
531 * @hw: pointer to the HW structure 532 * @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
846 } 847 }
847 848
848 for (i = 0; i < words; i++) { 849 for (i = 0; i < words; i++) {
849 eewr = (data[i] << E1000_NVM_RW_REG_DATA) | 850 eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
850 ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | 851 ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
851 E1000_NVM_RW_REG_START; 852 E1000_NVM_RW_REG_START);
852 853
853 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); 854 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
854 if (ret_val) 855 if (ret_val)
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
875 s32 timeout = PHY_CFG_TIMEOUT; 876 s32 timeout = PHY_CFG_TIMEOUT;
876 877
877 while (timeout) { 878 while (timeout) {
878 if (er32(EEMNGCTL) & 879 if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
879 E1000_NVM_CFG_DONE_PORT_0)
880 break; 880 break;
881 usleep_range(1000, 2000); 881 usleep_range(1000, 2000);
882 timeout--; 882 timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1022 } 1022 }
1023 1023
1024 if (hw->nvm.type == e1000_nvm_flash_hw) { 1024 if (hw->nvm.type == e1000_nvm_flash_hw) {
1025 udelay(10); 1025 usleep_range(10, 20);
1026 ctrl_ext = er32(CTRL_EXT); 1026 ctrl_ext = er32(CTRL_EXT);
1027 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 1027 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
1028 ew32(CTRL_EXT, ctrl_ext); 1028 ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1095 1095
1096 /* Initialize identification LED */ 1096 /* Initialize identification LED */
1097 ret_val = mac->ops.id_led_init(hw); 1097 ret_val = mac->ops.id_led_init(hw);
1098 /* An error is not fatal and we should not stop init due to this */
1098 if (ret_val) 1099 if (ret_val)
1099 e_dbg("Error initializing identification LED\n"); 1100 e_dbg("Error initializing identification LED\n");
1100 /* This is not fatal and we should not stop init due to this */
1101 1101
1102 /* Disabling VLAN filtering */ 1102 /* Disabling VLAN filtering */
1103 e_dbg("Initializing the IEEE VLAN\n"); 1103 e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1122 1122
1123 /* Set the transmit descriptor write-back policy */ 1123 /* Set the transmit descriptor write-back policy */
1124 reg_data = er32(TXDCTL(0)); 1124 reg_data = er32(TXDCTL(0));
1125 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 1125 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
1126 E1000_TXDCTL_FULL_TX_DESC_WB | 1126 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
1127 E1000_TXDCTL_COUNT_DESC;
1128 ew32(TXDCTL(0), reg_data); 1127 ew32(TXDCTL(0), reg_data);
1129 1128
1130 /* ...for both queues. */ 1129 /* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1140 break; 1139 break;
1141 default: 1140 default:
1142 reg_data = er32(TXDCTL(1)); 1141 reg_data = er32(TXDCTL(1));
1143 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 1142 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
1144 E1000_TXDCTL_FULL_TX_DESC_WB | 1143 E1000_TXDCTL_FULL_TX_DESC_WB |
1145 E1000_TXDCTL_COUNT_DESC; 1144 E1000_TXDCTL_COUNT_DESC);
1146 ew32(TXDCTL(1), reg_data); 1145 ew32(TXDCTL(1), reg_data);
1147 break; 1146 break;
1148 } 1147 }
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1530 status = er32(STATUS); 1529 status = er32(STATUS);
1531 er32(RXCW); 1530 er32(RXCW);
1532 /* SYNCH bit and IV bit are sticky */ 1531 /* SYNCH bit and IV bit are sticky */
1533 udelay(10); 1532 usleep_range(10, 20);
1534 rxcw = er32(RXCW); 1533 rxcw = er32(RXCW);
1535 1534
1536 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { 1535 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1633 * the IV bit and restart Autoneg 1632 * the IV bit and restart Autoneg
1634 */ 1633 */
1635 for (i = 0; i < AN_RETRY_COUNT; i++) { 1634 for (i = 0; i < AN_RETRY_COUNT; i++) {
1636 udelay(10); 1635 usleep_range(10, 20);
1637 rxcw = er32(RXCW); 1636 rxcw = er32(RXCW);
1638 if ((rxcw & E1000_RXCW_SYNCH) && 1637 if ((rxcw & E1000_RXCW_SYNCH) &&
1639 (rxcw & E1000_RXCW_C)) 1638 (rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
2066 .phy_ops = &e82_phy_ops_bm, 2065 .phy_ops = &e82_phy_ops_bm,
2067 .nvm_ops = &e82571_nvm_ops, 2066 .nvm_ops = &e82571_nvm_ops,
2068}; 2067};
2069
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 85cb1a3b7cd4..08e24dc3dc0e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -44,6 +44,8 @@
44#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ 44#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
45#define E1000_EIAC_MASK_82574 0x01F00000 45#define E1000_EIAC_MASK_82574 0x01F00000
46 46
47#define E1000_IVAR_INT_ALLOC_VALID 0x8
48
47/* Manageability Operation Mode mask */ 49/* Manageability Operation Mode mask */
48#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 50#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
49 51
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fc3a4fe1ac71..351c94a0cf74 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -66,7 +66,7 @@
66#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 66#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
67#define E1000_CTRL_EXT_EIAME 0x01000000 67#define E1000_CTRL_EXT_EIAME 0x01000000
68#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 68#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
69#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 69#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
70#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 70#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
71#define E1000_CTRL_EXT_LSECCK 0x00001000 71#define E1000_CTRL_EXT_LSECCK 0x00001000
72#define E1000_CTRL_EXT_PHYPDEN 0x00100000 72#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
216#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ 216#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
219#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
220#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
219#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 221#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
220#define E1000_CTRL_RST 0x04000000 /* Global reset */ 222#define E1000_CTRL_RST 0x04000000 /* Global reset */
221#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 223#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -234,17 +236,17 @@
234#define E1000_STATUS_FUNC_SHIFT 2 236#define E1000_STATUS_FUNC_SHIFT 2
235#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 237#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
236#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 238#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
239#define E1000_STATUS_SPEED_MASK 0x000000C0
237#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 240#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
238#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 241#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
239#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 242#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
240#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ 243#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
241#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ 244#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
242#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 245#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
243 246
244#define HALF_DUPLEX 1 247#define HALF_DUPLEX 1
245#define FULL_DUPLEX 2 248#define FULL_DUPLEX 2
246 249
247
248#define ADVERTISE_10_HALF 0x0001 250#define ADVERTISE_10_HALF 0x0001
249#define ADVERTISE_10_FULL 0x0002 251#define ADVERTISE_10_FULL 0x0002
250#define ADVERTISE_100_HALF 0x0004 252#define ADVERTISE_100_HALF 0x0004
@@ -311,6 +313,7 @@
311 313
312/* SerDes Control */ 314/* SerDes Control */
313#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 315#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
316#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
314 317
315/* Receive Checksum Control */ 318/* Receive Checksum Control */
316#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 319#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +403,8 @@
400#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 403#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
401#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 404#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
402#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ 405#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
403#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 406/* If this bit asserted, the driver should claim the interrupt */
407#define E1000_ICR_INT_ASSERTED 0x80000000
404#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ 408#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
405#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ 409#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
406#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ 410#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,13 +587,13 @@
583#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 587#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
584#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) 588#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
585 589
586#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ 590#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
587#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 591#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
588#define E1000_NVM_RW_REG_START 1 /* Start operation */ 592#define E1000_NVM_RW_REG_START 1 /* Start operation */
589#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 593#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
590#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ 594#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
591#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ 595#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
592#define E1000_FLASH_UPDATES 2000 596#define E1000_FLASH_UPDATES 2000
593 597
594/* NVM Word Offsets */ 598/* NVM Word Offsets */
595#define NVM_COMPAT 0x0003 599#define NVM_COMPAT 0x0003
@@ -785,6 +789,7 @@
785 GG82563_REG(194, 18) /* Inband Control */ 789 GG82563_REG(194, 18) /* Inband Control */
786 790
787/* MDI Control */ 791/* MDI Control */
792#define E1000_MDIC_REG_MASK 0x001F0000
788#define E1000_MDIC_REG_SHIFT 16 793#define E1000_MDIC_REG_SHIFT 16
789#define E1000_MDIC_PHY_SHIFT 21 794#define E1000_MDIC_PHY_SHIFT 21
790#define E1000_MDIC_OP_WRITE 0x04000000 795#define E1000_MDIC_OP_WRITE 0x04000000
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fcc758138b8a..82f1c84282db 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -46,6 +46,7 @@
46#include <linux/ptp_clock_kernel.h> 46#include <linux/ptp_clock_kernel.h>
47#include <linux/ptp_classify.h> 47#include <linux/ptp_classify.h>
48#include <linux/mii.h> 48#include <linux/mii.h>
49#include <linux/mdio.h>
49#include "hw.h" 50#include "hw.h"
50 51
51struct e1000_info; 52struct e1000_info;
@@ -61,7 +62,6 @@ struct e1000_info;
61#define e_notice(format, arg...) \ 62#define e_notice(format, arg...) \
62 netdev_notice(adapter->netdev, format, ## arg) 63 netdev_notice(adapter->netdev, format, ## arg)
63 64
64
65/* Interrupt modes, as used by the IntMode parameter */ 65/* Interrupt modes, as used by the IntMode parameter */
66#define E1000E_INT_MODE_LEGACY 0 66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1 67#define E1000E_INT_MODE_MSI 1
@@ -239,9 +239,8 @@ struct e1000_adapter {
239 u16 tx_itr; 239 u16 tx_itr;
240 u16 rx_itr; 240 u16 rx_itr;
241 241
242 /* Tx */ 242 /* Tx - one ring per active queue */
243 struct e1000_ring *tx_ring /* One per active queue */ 243 struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
244 ____cacheline_aligned_in_smp;
245 u32 tx_fifo_limit; 244 u32 tx_fifo_limit;
246 245
247 struct napi_struct napi; 246 struct napi_struct napi;
@@ -352,6 +351,8 @@ struct e1000_adapter {
352 struct timecounter tc; 351 struct timecounter tc;
353 struct ptp_clock *ptp_clock; 352 struct ptp_clock *ptp_clock;
354 struct ptp_clock_info ptp_clock_info; 353 struct ptp_clock_info ptp_clock_info;
354
355 u16 eee_advert;
355}; 356};
356 357
357struct e1000_info { 358struct e1000_info {
@@ -487,8 +488,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
487extern void e1000e_free_rx_resources(struct e1000_ring *ring); 488extern void e1000e_free_rx_resources(struct e1000_ring *ring);
488extern void e1000e_free_tx_resources(struct e1000_ring *ring); 489extern void e1000e_free_tx_resources(struct e1000_ring *ring);
489extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 490extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
490 struct rtnl_link_stats64 491 struct rtnl_link_stats64
491 *stats); 492 *stats);
492extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 493extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
493extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 494extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
494extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 495extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
@@ -558,12 +559,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
558 return hw->nvm.ops.update(hw); 559 return hw->nvm.ops.update(hw);
559} 560}
560 561
561static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 562static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
563 u16 *data)
562{ 564{
563 return hw->nvm.ops.read(hw, offset, words, data); 565 return hw->nvm.ops.read(hw, offset, words, data);
564} 566}
565 567
566static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 568static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
569 u16 *data)
567{ 570{
568 return hw->nvm.ops.write(hw, offset, words, data); 571 return hw->nvm.ops.write(hw, offset, words, data);
569} 572}
@@ -597,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
597 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; 600 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
598 601
599 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) 602 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
600 udelay(50); 603 usleep_range(50, 100);
601 604
602 return i; 605 return i;
603} 606}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f91a8f3f9d48..7c8ca658d553 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -35,12 +35,11 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/mdio.h>
39#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
40 39
41#include "e1000.h" 40#include "e1000.h"
42 41
43enum {NETDEV_STATS, E1000_STATS}; 42enum { NETDEV_STATS, E1000_STATS };
44 43
45struct e1000_stats { 44struct e1000_stats {
46 char stat_string[ETH_GSTRING_LEN]; 45 char stat_string[ETH_GSTRING_LEN];
@@ -121,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
121 "Interrupt test (offline)", "Loopback test (offline)", 120 "Interrupt test (offline)", "Loopback test (offline)",
122 "Link test (on/offline)" 121 "Link test (on/offline)"
123}; 122};
123
124#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 124#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
125 125
126static int e1000_get_settings(struct net_device *netdev, 126static int e1000_get_settings(struct net_device *netdev,
@@ -197,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev,
197 /* MDI-X => 2; MDI =>1; Invalid =>0 */ 197 /* MDI-X => 2; MDI =>1; Invalid =>0 */
198 if ((hw->phy.media_type == e1000_media_type_copper) && 198 if ((hw->phy.media_type == e1000_media_type_copper) &&
199 netif_carrier_ok(netdev)) 199 netif_carrier_ok(netdev))
200 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : 200 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
201 ETH_TP_MDI;
202 else 201 else
203 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 202 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
204 203
@@ -224,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
224 223
225 /* Fiber NICs only allow 1000 gbps Full duplex */ 224 /* Fiber NICs only allow 1000 gbps Full duplex */
226 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 225 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
227 spd != SPEED_1000 && 226 (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
228 dplx != DUPLEX_FULL) {
229 goto err_inval; 227 goto err_inval;
230 } 228 }
231 229
@@ -298,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
298 hw->mac.autoneg = 1; 296 hw->mac.autoneg = 1;
299 if (hw->phy.media_type == e1000_media_type_fiber) 297 if (hw->phy.media_type == e1000_media_type_fiber)
300 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | 298 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
301 ADVERTISED_FIBRE | 299 ADVERTISED_FIBRE | ADVERTISED_Autoneg;
302 ADVERTISED_Autoneg;
303 else 300 else
304 hw->phy.autoneg_advertised = ecmd->advertising | 301 hw->phy.autoneg_advertised = ecmd->advertising |
305 ADVERTISED_TP | 302 ADVERTISED_TP | ADVERTISED_Autoneg;
306 ADVERTISED_Autoneg;
307 ecmd->advertising = hw->phy.autoneg_advertised; 303 ecmd->advertising = hw->phy.autoneg_advertised;
308 if (adapter->fc_autoneg) 304 if (adapter->fc_autoneg)
309 hw->fc.requested_mode = e1000_fc_default; 305 hw->fc.requested_mode = e1000_fc_default;
@@ -346,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
346 struct e1000_hw *hw = &adapter->hw; 342 struct e1000_hw *hw = &adapter->hw;
347 343
348 pause->autoneg = 344 pause->autoneg =
349 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 345 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
350 346
351 if (hw->fc.current_mode == e1000_fc_rx_pause) { 347 if (hw->fc.current_mode == e1000_fc_rx_pause) {
352 pause->rx_pause = 1; 348 pause->rx_pause = 1;
@@ -435,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
435 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 431 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
436 432
437 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
438 adapter->pdev->device; 434 adapter->pdev->device;
439 435
440 regs_buff[0] = er32(CTRL); 436 regs_buff[0] = er32(CTRL);
441 regs_buff[1] = er32(STATUS); 437 regs_buff[1] = er32(STATUS);
@@ -503,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
503 first_word = eeprom->offset >> 1; 499 first_word = eeprom->offset >> 1;
504 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 500 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
505 501
506 eeprom_buff = kmalloc(sizeof(u16) * 502 eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
507 (last_word - first_word + 1), GFP_KERNEL); 503 GFP_KERNEL);
508 if (!eeprom_buff) 504 if (!eeprom_buff)
509 return -ENOMEM; 505 return -ENOMEM;
510 506
@@ -515,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
515 } else { 511 } else {
516 for (i = 0; i < last_word - first_word + 1; i++) { 512 for (i = 0; i < last_word - first_word + 1; i++) {
517 ret_val = e1000_read_nvm(hw, first_word + i, 1, 513 ret_val = e1000_read_nvm(hw, first_word + i, 1,
518 &eeprom_buff[i]); 514 &eeprom_buff[i]);
519 if (ret_val) 515 if (ret_val)
520 break; 516 break;
521 } 517 }
@@ -553,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
553 if (eeprom->len == 0) 549 if (eeprom->len == 0)
554 return -EOPNOTSUPP; 550 return -EOPNOTSUPP;
555 551
556 if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) 552 if (eeprom->magic !=
553 (adapter->pdev->vendor | (adapter->pdev->device << 16)))
557 return -EFAULT; 554 return -EFAULT;
558 555
559 if (adapter->flags & FLAG_READ_ONLY_NVM) 556 if (adapter->flags & FLAG_READ_ONLY_NVM)
@@ -579,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
579 /* need read/modify/write of last changed EEPROM word */ 576 /* need read/modify/write of last changed EEPROM word */
580 /* only the first byte of the word is being modified */ 577 /* only the first byte of the word is being modified */
581 ret_val = e1000_read_nvm(hw, last_word, 1, 578 ret_val = e1000_read_nvm(hw, last_word, 1,
582 &eeprom_buff[last_word - first_word]); 579 &eeprom_buff[last_word - first_word]);
583 580
584 if (ret_val) 581 if (ret_val)
585 goto out; 582 goto out;
@@ -618,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
618{ 615{
619 struct e1000_adapter *adapter = netdev_priv(netdev); 616 struct e1000_adapter *adapter = netdev_priv(netdev);
620 617
621 strlcpy(drvinfo->driver, e1000e_driver_name, 618 strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
622 sizeof(drvinfo->driver));
623 strlcpy(drvinfo->version, e1000e_driver_version, 619 strlcpy(drvinfo->version, e1000e_driver_version,
624 sizeof(drvinfo->version)); 620 sizeof(drvinfo->version));
625 621
@@ -627,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
627 * PCI-E controllers 623 * PCI-E controllers
628 */ 624 */
629 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 625 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
630 "%d.%d-%d", 626 "%d.%d-%d",
631 (adapter->eeprom_vers & 0xF000) >> 12, 627 (adapter->eeprom_vers & 0xF000) >> 12,
632 (adapter->eeprom_vers & 0x0FF0) >> 4, 628 (adapter->eeprom_vers & 0x0FF0) >> 4,
633 (adapter->eeprom_vers & 0x000F)); 629 (adapter->eeprom_vers & 0x000F));
634 630
635 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 631 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
636 sizeof(drvinfo->bus_info)); 632 sizeof(drvinfo->bus_info));
@@ -756,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
756{ 752{
757 u32 pat, val; 753 u32 pat, val;
758 static const u32 test[] = { 754 static const u32 test[] = {
759 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 755 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
756 };
760 for (pat = 0; pat < ARRAY_SIZE(test); pat++) { 757 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
761 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 758 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
762 (test[pat] & write)); 759 (test[pat] & write));
@@ -786,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
786 } 783 }
787 return 0; 784 return 0;
788} 785}
786
789#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ 787#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
790 do { \ 788 do { \
791 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ 789 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
@@ -813,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 u32 wlock_mac = 0; 811 u32 wlock_mac = 0;
814 812
815 /* The status register is Read Only, so a write should fail. 813 /* The status register is Read Only, so a write should fail.
816 * Some bits that get toggled are ignored. 814 * Some bits that get toggled are ignored. There are several bits
815 * on newer hardware that are r/w.
817 */ 816 */
818 switch (mac->type) { 817 switch (mac->type) {
819 /* there are several bits on newer hardware that are r/w */
820 case e1000_82571: 818 case e1000_82571:
821 case e1000_82572: 819 case e1000_82572:
822 case e1000_80003es2lan: 820 case e1000_80003es2lan:
823 toggle = 0x7FFFF3FF; 821 toggle = 0x7FFFF3FF;
824 break; 822 break;
825 default: 823 default:
826 toggle = 0x7FFFF033; 824 toggle = 0x7FFFF033;
827 break; 825 break;
828 } 826 }
@@ -928,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
928 } 926 }
929 927
930 /* If Checksum is not Correct return error else test passed */ 928 /* If Checksum is not Correct return error else test passed */
931 if ((checksum != (u16) NVM_SUM) && !(*data)) 929 if ((checksum != (u16)NVM_SUM) && !(*data))
932 *data = 2; 930 *data = 2;
933 931
934 return *data; 932 return *data;
@@ -936,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
936 934
937static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) 935static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
938{ 936{
939 struct net_device *netdev = (struct net_device *) data; 937 struct net_device *netdev = (struct net_device *)data;
940 struct e1000_adapter *adapter = netdev_priv(netdev); 938 struct e1000_adapter *adapter = netdev_priv(netdev);
941 struct e1000_hw *hw = &adapter->hw; 939 struct e1000_hw *hw = &adapter->hw;
942 940
@@ -969,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
969 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 967 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
970 netdev)) { 968 netdev)) {
971 shared_int = 0; 969 shared_int = 0;
972 } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, 970 } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
973 netdev->name, netdev)) { 971 netdev)) {
974 *data = 1; 972 *data = 1;
975 ret_val = -1; 973 ret_val = -1;
976 goto out; 974 goto out;
@@ -1080,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1080 struct e1000_ring *tx_ring = &adapter->test_tx_ring; 1078 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1081 struct e1000_ring *rx_ring = &adapter->test_rx_ring; 1079 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1082 struct pci_dev *pdev = adapter->pdev; 1080 struct pci_dev *pdev = adapter->pdev;
1081 struct e1000_buffer *buffer_info;
1083 int i; 1082 int i;
1084 1083
1085 if (tx_ring->desc && tx_ring->buffer_info) { 1084 if (tx_ring->desc && tx_ring->buffer_info) {
1086 for (i = 0; i < tx_ring->count; i++) { 1085 for (i = 0; i < tx_ring->count; i++) {
1087 if (tx_ring->buffer_info[i].dma) 1086 buffer_info = &tx_ring->buffer_info[i];
1087
1088 if (buffer_info->dma)
1088 dma_unmap_single(&pdev->dev, 1089 dma_unmap_single(&pdev->dev,
1089 tx_ring->buffer_info[i].dma, 1090 buffer_info->dma,
1090 tx_ring->buffer_info[i].length, 1091 buffer_info->length,
1091 DMA_TO_DEVICE); 1092 DMA_TO_DEVICE);
1092 if (tx_ring->buffer_info[i].skb) 1093 if (buffer_info->skb)
1093 dev_kfree_skb(tx_ring->buffer_info[i].skb); 1094 dev_kfree_skb(buffer_info->skb);
1094 } 1095 }
1095 } 1096 }
1096 1097
1097 if (rx_ring->desc && rx_ring->buffer_info) { 1098 if (rx_ring->desc && rx_ring->buffer_info) {
1098 for (i = 0; i < rx_ring->count; i++) { 1099 for (i = 0; i < rx_ring->count; i++) {
1099 if (rx_ring->buffer_info[i].dma) 1100 buffer_info = &rx_ring->buffer_info[i];
1101
1102 if (buffer_info->dma)
1100 dma_unmap_single(&pdev->dev, 1103 dma_unmap_single(&pdev->dev,
1101 rx_ring->buffer_info[i].dma, 1104 buffer_info->dma,
1102 2048, DMA_FROM_DEVICE); 1105 2048, DMA_FROM_DEVICE);
1103 if (rx_ring->buffer_info[i].skb) 1106 if (buffer_info->skb)
1104 dev_kfree_skb(rx_ring->buffer_info[i].skb); 1107 dev_kfree_skb(buffer_info->skb);
1105 } 1108 }
1106 } 1109 }
1107 1110
@@ -1138,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1138 tx_ring->count = E1000_DEFAULT_TXD; 1141 tx_ring->count = E1000_DEFAULT_TXD;
1139 1142
1140 tx_ring->buffer_info = kcalloc(tx_ring->count, 1143 tx_ring->buffer_info = kcalloc(tx_ring->count,
1141 sizeof(struct e1000_buffer), 1144 sizeof(struct e1000_buffer), GFP_KERNEL);
1142 GFP_KERNEL);
1143 if (!tx_ring->buffer_info) { 1145 if (!tx_ring->buffer_info) {
1144 ret_val = 1; 1146 ret_val = 1;
1145 goto err_nomem; 1147 goto err_nomem;
@@ -1156,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1156 tx_ring->next_to_use = 0; 1158 tx_ring->next_to_use = 0;
1157 tx_ring->next_to_clean = 0; 1159 tx_ring->next_to_clean = 0;
1158 1160
1159 ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1161 ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
1160 ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); 1162 ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
1161 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); 1163 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1162 ew32(TDH(0), 0); 1164 ew32(TDH(0), 0);
1163 ew32(TDT(0), 0); 1165 ew32(TDT(0), 0);
@@ -1179,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1179 tx_ring->buffer_info[i].skb = skb; 1181 tx_ring->buffer_info[i].skb = skb;
1180 tx_ring->buffer_info[i].length = skb->len; 1182 tx_ring->buffer_info[i].length = skb->len;
1181 tx_ring->buffer_info[i].dma = 1183 tx_ring->buffer_info[i].dma =
1182 dma_map_single(&pdev->dev, skb->data, skb->len, 1184 dma_map_single(&pdev->dev, skb->data, skb->len,
1183 DMA_TO_DEVICE); 1185 DMA_TO_DEVICE);
1184 if (dma_mapping_error(&pdev->dev, 1186 if (dma_mapping_error(&pdev->dev,
1185 tx_ring->buffer_info[i].dma)) { 1187 tx_ring->buffer_info[i].dma)) {
1186 ret_val = 4; 1188 ret_val = 4;
@@ -1200,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1200 rx_ring->count = E1000_DEFAULT_RXD; 1202 rx_ring->count = E1000_DEFAULT_RXD;
1201 1203
1202 rx_ring->buffer_info = kcalloc(rx_ring->count, 1204 rx_ring->buffer_info = kcalloc(rx_ring->count,
1203 sizeof(struct e1000_buffer), 1205 sizeof(struct e1000_buffer), GFP_KERNEL);
1204 GFP_KERNEL);
1205 if (!rx_ring->buffer_info) { 1206 if (!rx_ring->buffer_info) {
1206 ret_val = 5; 1207 ret_val = 5;
1207 goto err_nomem; 1208 goto err_nomem;
@@ -1220,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1220 rctl = er32(RCTL); 1221 rctl = er32(RCTL);
1221 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 1222 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1222 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1223 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1223 ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF)); 1224 ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
1224 ew32(RDBAH(0), ((u64) rx_ring->dma >> 32)); 1225 ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
1225 ew32(RDLEN(0), rx_ring->size); 1226 ew32(RDLEN(0), rx_ring->size);
1226 ew32(RDH(0), 0); 1227 ew32(RDH(0), 0);
1227 ew32(RDT(0), 0); 1228 ew32(RDT(0), 0);
1228 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1229 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1229 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | 1230 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1230 E1000_RCTL_SBP | E1000_RCTL_SECRC | 1231 E1000_RCTL_SBP | E1000_RCTL_SECRC |
1231 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1232 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1232 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1233 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1233 ew32(RCTL, rctl); 1234 ew32(RCTL, rctl);
1234 1235
1235 for (i = 0; i < rx_ring->count; i++) { 1236 for (i = 0; i < rx_ring->count; i++) {
@@ -1244,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1244 skb_reserve(skb, NET_IP_ALIGN); 1245 skb_reserve(skb, NET_IP_ALIGN);
1245 rx_ring->buffer_info[i].skb = skb; 1246 rx_ring->buffer_info[i].skb = skb;
1246 rx_ring->buffer_info[i].dma = 1247 rx_ring->buffer_info[i].dma =
1247 dma_map_single(&pdev->dev, skb->data, 2048, 1248 dma_map_single(&pdev->dev, skb->data, 2048,
1248 DMA_FROM_DEVICE); 1249 DMA_FROM_DEVICE);
1249 if (dma_mapping_error(&pdev->dev, 1250 if (dma_mapping_error(&pdev->dev,
1250 rx_ring->buffer_info[i].dma)) { 1251 rx_ring->buffer_info[i].dma)) {
1251 ret_val = 8; 1252 ret_val = 8;
@@ -1296,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1296 1297
1297 ew32(CTRL, ctrl_reg); 1298 ew32(CTRL, ctrl_reg);
1298 e1e_flush(); 1299 e1e_flush();
1299 udelay(500); 1300 usleep_range(500, 1000);
1300 1301
1301 return 0; 1302 return 0;
1302 } 1303 }
@@ -1322,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1322 e1e_wphy(hw, PHY_REG(2, 21), phy_reg); 1323 e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
1323 /* Assert SW reset for above settings to take effect */ 1324 /* Assert SW reset for above settings to take effect */
1324 hw->phy.ops.commit(hw); 1325 hw->phy.ops.commit(hw);
1325 mdelay(1); 1326 usleep_range(1000, 2000);
1326 /* Force Full Duplex */ 1327 /* Force Full Duplex */
1327 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); 1328 e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
1328 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); 1329 e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
@@ -1363,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1363 1364
1364 /* force 1000, set loopback */ 1365 /* force 1000, set loopback */
1365 e1e_wphy(hw, MII_BMCR, 0x4140); 1366 e1e_wphy(hw, MII_BMCR, 0x4140);
1366 mdelay(250); 1367 msleep(250);
1367 1368
1368 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1369 /* Now set up the MAC to the same speed/duplex as the PHY. */
1369 ctrl_reg = er32(CTRL); 1370 ctrl_reg = er32(CTRL);
@@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1395 if (hw->phy.type == e1000_phy_m88) 1396 if (hw->phy.type == e1000_phy_m88)
1396 e1000_phy_disable_receiver(adapter); 1397 e1000_phy_disable_receiver(adapter);
1397 1398
1398 udelay(500); 1399 usleep_range(500, 1000);
1399 1400
1400 return 0; 1401 return 0;
1401} 1402}
@@ -1431,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1431 /* special write to serdes control register to enable SerDes analog 1432 /* special write to serdes control register to enable SerDes analog
1432 * loopback 1433 * loopback
1433 */ 1434 */
1434#define E1000_SERDES_LB_ON 0x410 1435 ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
1435 ew32(SCTL, E1000_SERDES_LB_ON);
1436 e1e_flush(); 1436 e1e_flush();
1437 usleep_range(10000, 20000); 1437 usleep_range(10000, 20000);
1438 1438
@@ -1526,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1526 case e1000_82572: 1526 case e1000_82572:
1527 if (hw->phy.media_type == e1000_media_type_fiber || 1527 if (hw->phy.media_type == e1000_media_type_fiber ||
1528 hw->phy.media_type == e1000_media_type_internal_serdes) { 1528 hw->phy.media_type == e1000_media_type_internal_serdes) {
1529#define E1000_SERDES_LB_OFF 0x400 1529 ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1530 ew32(SCTL, E1000_SERDES_LB_OFF);
1531 e1e_flush(); 1530 e1e_flush();
1532 usleep_range(10000, 20000); 1531 usleep_range(10000, 20000);
1533 break; 1532 break;
@@ -1564,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
1564 frame_size &= ~1; 1563 frame_size &= ~1;
1565 if (*(skb->data + 3) == 0xFF) 1564 if (*(skb->data + 3) == 0xFF)
1566 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1565 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1567 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1566 (*(skb->data + frame_size / 2 + 12) == 0xAF))
1568 return 0; 1567 return 0;
1569 return 13; 1568 return 13;
1570} 1569}
@@ -1575,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1575 struct e1000_ring *rx_ring = &adapter->test_rx_ring; 1574 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1576 struct pci_dev *pdev = adapter->pdev; 1575 struct pci_dev *pdev = adapter->pdev;
1577 struct e1000_hw *hw = &adapter->hw; 1576 struct e1000_hw *hw = &adapter->hw;
1577 struct e1000_buffer *buffer_info;
1578 int i, j, k, l; 1578 int i, j, k, l;
1579 int lc; 1579 int lc;
1580 int good_cnt; 1580 int good_cnt;
@@ -1595,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1595 1595
1596 k = 0; 1596 k = 0;
1597 l = 0; 1597 l = 0;
1598 for (j = 0; j <= lc; j++) { /* loop count loop */ 1598 /* loop count loop */
1599 for (i = 0; i < 64; i++) { /* send the packets */ 1599 for (j = 0; j <= lc; j++) {
1600 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1600 /* send the packets */
1601 1024); 1601 for (i = 0; i < 64; i++) {
1602 buffer_info = &tx_ring->buffer_info[k];
1603
1604 e1000_create_lbtest_frame(buffer_info->skb, 1024);
1602 dma_sync_single_for_device(&pdev->dev, 1605 dma_sync_single_for_device(&pdev->dev,
1603 tx_ring->buffer_info[k].dma, 1606 buffer_info->dma,
1604 tx_ring->buffer_info[k].length, 1607 buffer_info->length,
1605 DMA_TO_DEVICE); 1608 DMA_TO_DEVICE);
1606 k++; 1609 k++;
1607 if (k == tx_ring->count) 1610 if (k == tx_ring->count)
1608 k = 0; 1611 k = 0;
@@ -1612,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1612 msleep(200); 1615 msleep(200);
1613 time = jiffies; /* set the start time for the receive */ 1616 time = jiffies; /* set the start time for the receive */
1614 good_cnt = 0; 1617 good_cnt = 0;
1615 do { /* receive the sent packets */ 1618 /* receive the sent packets */
1619 do {
1620 buffer_info = &rx_ring->buffer_info[l];
1621
1616 dma_sync_single_for_cpu(&pdev->dev, 1622 dma_sync_single_for_cpu(&pdev->dev,
1617 rx_ring->buffer_info[l].dma, 2048, 1623 buffer_info->dma, 2048,
1618 DMA_FROM_DEVICE); 1624 DMA_FROM_DEVICE);
1619 1625
1620 ret_val = e1000_check_lbtest_frame( 1626 ret_val = e1000_check_lbtest_frame(buffer_info->skb,
1621 rx_ring->buffer_info[l].skb, 1024); 1627 1024);
1622 if (!ret_val) 1628 if (!ret_val)
1623 good_cnt++; 1629 good_cnt++;
1624 l++; 1630 l++;
@@ -1637,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1637 ret_val = 14; /* error code for time out error */ 1643 ret_val = 14; /* error code for time out error */
1638 break; 1644 break;
1639 } 1645 }
1640 } /* end loop count loop */ 1646 }
1641 return ret_val; 1647 return ret_val;
1642} 1648}
1643 1649
@@ -1696,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1696 /* On some Phy/switch combinations, link establishment 1702 /* On some Phy/switch combinations, link establishment
1697 * can take a few seconds more than expected. 1703 * can take a few seconds more than expected.
1698 */ 1704 */
1699 msleep(5000); 1705 msleep_interruptible(5000);
1700 1706
1701 if (!(er32(STATUS) & E1000_STATUS_LU)) 1707 if (!(er32(STATUS) & E1000_STATUS_LU))
1702 *data = 1; 1708 *data = 1;
@@ -1980,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1980 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1986 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1981 switch (e1000_gstrings_stats[i].type) { 1987 switch (e1000_gstrings_stats[i].type) {
1982 case NETDEV_STATS: 1988 case NETDEV_STATS:
1983 p = (char *) &net_stats + 1989 p = (char *)&net_stats +
1984 e1000_gstrings_stats[i].stat_offset; 1990 e1000_gstrings_stats[i].stat_offset;
1985 break; 1991 break;
1986 case E1000_STATS: 1992 case E1000_STATS:
1987 p = (char *) adapter + 1993 p = (char *)adapter +
1988 e1000_gstrings_stats[i].stat_offset; 1994 e1000_gstrings_stats[i].stat_offset;
1989 break; 1995 break;
1990 default: 1996 default:
1991 data[i] = 0; 1997 data[i] = 0;
@@ -1993,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1993 } 1999 }
1994 2000
1995 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 2001 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1996 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2002 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1997 } 2003 }
1998} 2004}
1999 2005
@@ -2069,23 +2075,20 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2069{ 2075{
2070 struct e1000_adapter *adapter = netdev_priv(netdev); 2076 struct e1000_adapter *adapter = netdev_priv(netdev);
2071 struct e1000_hw *hw = &adapter->hw; 2077 struct e1000_hw *hw = &adapter->hw;
2072 u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl; 2078 u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
2073 u32 status, ret_val; 2079 u32 ret_val;
2074 2080
2075 if (!(adapter->flags & FLAG_IS_ICH) || 2081 if (!(adapter->flags2 & FLAG2_HAS_EEE))
2076 !(adapter->flags2 & FLAG2_HAS_EEE))
2077 return -EOPNOTSUPP; 2082 return -EOPNOTSUPP;
2078 2083
2079 switch (hw->phy.type) { 2084 switch (hw->phy.type) {
2080 case e1000_phy_82579: 2085 case e1000_phy_82579:
2081 cap_addr = I82579_EEE_CAPABILITY; 2086 cap_addr = I82579_EEE_CAPABILITY;
2082 adv_addr = I82579_EEE_ADVERTISEMENT;
2083 lpa_addr = I82579_EEE_LP_ABILITY; 2087 lpa_addr = I82579_EEE_LP_ABILITY;
2084 pcs_stat_addr = I82579_EEE_PCS_STATUS; 2088 pcs_stat_addr = I82579_EEE_PCS_STATUS;
2085 break; 2089 break;
2086 case e1000_phy_i217: 2090 case e1000_phy_i217:
2087 cap_addr = I217_EEE_CAPABILITY; 2091 cap_addr = I217_EEE_CAPABILITY;
2088 adv_addr = I217_EEE_ADVERTISEMENT;
2089 lpa_addr = I217_EEE_LP_ABILITY; 2092 lpa_addr = I217_EEE_LP_ABILITY;
2090 pcs_stat_addr = I217_EEE_PCS_STATUS; 2093 pcs_stat_addr = I217_EEE_PCS_STATUS;
2091 break; 2094 break;
@@ -2104,10 +2107,7 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2104 edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); 2107 edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
2105 2108
2106 /* EEE Advertised */ 2109 /* EEE Advertised */
2107 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data); 2110 edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
2108 if (ret_val)
2109 goto release;
2110 edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2111 2111
2112 /* EEE Link Partner Advertised */ 2112 /* EEE Link Partner Advertised */
2113 ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); 2113 ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
@@ -2125,25 +2125,11 @@ release:
2125 if (ret_val) 2125 if (ret_val)
2126 return -ENODATA; 2126 return -ENODATA;
2127 2127
2128 e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
2129 status = er32(STATUS);
2130
2131 /* Result of the EEE auto negotiation - there is no register that 2128 /* Result of the EEE auto negotiation - there is no register that
2132 * has the status of the EEE negotiation so do a best-guess based 2129 * has the status of the EEE negotiation so do a best-guess based
2133 * on whether both Tx and Rx LPI indications have been received or 2130 * on whether Tx or Rx LPI indications have been received.
2134 * base it on the link speed, the EEE advertised speeds on both ends
2135 * and the speeds on which EEE is enabled locally.
2136 */ 2131 */
2137 if (((phy_data & E1000_EEE_TX_LPI_RCVD) && 2132 if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
2138 (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
2139 ((status & E1000_STATUS_SPEED_100) &&
2140 (edata->advertised & ADVERTISED_100baseT_Full) &&
2141 (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
2142 (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
2143 ((status & E1000_STATUS_SPEED_1000) &&
2144 (edata->advertised & ADVERTISED_1000baseT_Full) &&
2145 (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
2146 (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
2147 edata->eee_active = true; 2133 edata->eee_active = true;
2148 2134
2149 edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; 2135 edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
@@ -2160,19 +2146,10 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
2160 struct ethtool_eee eee_curr; 2146 struct ethtool_eee eee_curr;
2161 s32 ret_val; 2147 s32 ret_val;
2162 2148
2163 if (!(adapter->flags & FLAG_IS_ICH) ||
2164 !(adapter->flags2 & FLAG2_HAS_EEE))
2165 return -EOPNOTSUPP;
2166
2167 ret_val = e1000e_get_eee(netdev, &eee_curr); 2149 ret_val = e1000e_get_eee(netdev, &eee_curr);
2168 if (ret_val) 2150 if (ret_val)
2169 return ret_val; 2151 return ret_val;
2170 2152
2171 if (eee_curr.advertised != edata->advertised) {
2172 e_err("Setting EEE advertisement is not supported\n");
2173 return -EINVAL;
2174 }
2175
2176 if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { 2153 if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
2177 e_err("Setting EEE tx-lpi is not supported\n"); 2154 e_err("Setting EEE tx-lpi is not supported\n");
2178 return -EINVAL; 2155 return -EINVAL;
@@ -2183,16 +2160,21 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
2183 return -EINVAL; 2160 return -EINVAL;
2184 } 2161 }
2185 2162
2186 if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) { 2163 if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
2187 hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; 2164 e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
2188 2165 return -EINVAL;
2189 /* reset the link */
2190 if (netif_running(netdev))
2191 e1000e_reinit_locked(adapter);
2192 else
2193 e1000e_reset(adapter);
2194 } 2166 }
2195 2167
2168 adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
2169
2170 hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
2171
2172 /* reset the link */
2173 if (netif_running(netdev))
2174 e1000e_reinit_locked(adapter);
2175 else
2176 e1000e_reset(adapter);
2177
2196 return 0; 2178 return 0;
2197} 2179}
2198 2180
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1e6b889aee87..84850f7a23e4 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
167 e1000_1000t_rx_status_undefined = 0xFF 167 e1000_1000t_rx_status_undefined = 0xFF
168}; 168};
169 169
170enum e1000_rev_polarity{ 170enum e1000_rev_polarity {
171 e1000_rev_polarity_normal = 0, 171 e1000_rev_polarity_normal = 0,
172 e1000_rev_polarity_reversed, 172 e1000_rev_polarity_reversed,
173 e1000_rev_polarity_undefined = 0xFF 173 e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
545 u16 mta_reg_count; 545 u16 mta_reg_count;
546 546
547 /* Maximum size of the MTA register table in all supported adapters */ 547 /* Maximum size of the MTA register table in all supported adapters */
548 #define MAX_MTA_REG 128 548#define MAX_MTA_REG 128
549 u32 mta_shadow[MAX_MTA_REG]; 549 u32 mta_shadow[MAX_MTA_REG];
550 u16 rar_entry_count; 550 u16 rar_entry_count;
551 551
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 121a865c7fbd..ad9d8f2dd868 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -61,15 +61,15 @@
61/* Offset 04h HSFSTS */ 61/* Offset 04h HSFSTS */
62union ich8_hws_flash_status { 62union ich8_hws_flash_status {
63 struct ich8_hsfsts { 63 struct ich8_hsfsts {
64 u16 flcdone :1; /* bit 0 Flash Cycle Done */ 64 u16 flcdone:1; /* bit 0 Flash Cycle Done */
65 u16 flcerr :1; /* bit 1 Flash Cycle Error */ 65 u16 flcerr:1; /* bit 1 Flash Cycle Error */
66 u16 dael :1; /* bit 2 Direct Access error Log */ 66 u16 dael:1; /* bit 2 Direct Access error Log */
67 u16 berasesz :2; /* bit 4:3 Sector Erase Size */ 67 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
68 u16 flcinprog :1; /* bit 5 flash cycle in Progress */ 68 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
69 u16 reserved1 :2; /* bit 13:6 Reserved */ 69 u16 reserved1:2; /* bit 13:6 Reserved */
70 u16 reserved2 :6; /* bit 13:6 Reserved */ 70 u16 reserved2:6; /* bit 13:6 Reserved */
71 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ 71 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
72 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ 72 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
73 } hsf_status; 73 } hsf_status;
74 u16 regval; 74 u16 regval;
75}; 75};
@@ -78,11 +78,11 @@ union ich8_hws_flash_status {
78/* Offset 06h FLCTL */ 78/* Offset 06h FLCTL */
79union ich8_hws_flash_ctrl { 79union ich8_hws_flash_ctrl {
80 struct ich8_hsflctl { 80 struct ich8_hsflctl {
81 u16 flcgo :1; /* 0 Flash Cycle Go */ 81 u16 flcgo:1; /* 0 Flash Cycle Go */
82 u16 flcycle :2; /* 2:1 Flash Cycle */ 82 u16 flcycle:2; /* 2:1 Flash Cycle */
83 u16 reserved :5; /* 7:3 Reserved */ 83 u16 reserved:5; /* 7:3 Reserved */
84 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ 84 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
85 u16 flockdn :6; /* 15:10 Reserved */ 85 u16 flockdn:6; /* 15:10 Reserved */
86 } hsf_ctrl; 86 } hsf_ctrl;
87 u16 regval; 87 u16 regval;
88}; 88};
@@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
90/* ICH Flash Region Access Permissions */ 90/* ICH Flash Region Access Permissions */
91union ich8_hws_flash_regacc { 91union ich8_hws_flash_regacc {
92 struct ich8_flracc { 92 struct ich8_flracc {
93 u32 grra :8; /* 0:7 GbE region Read Access */ 93 u32 grra:8; /* 0:7 GbE region Read Access */
94 u32 grwa :8; /* 8:15 GbE region Write Access */ 94 u32 grwa:8; /* 8:15 GbE region Write Access */
95 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ 95 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
96 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ 96 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
97 } hsf_flregacc; 97 } hsf_flregacc;
98 u16 regval; 98 u16 regval;
99}; 99};
@@ -142,6 +142,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
142static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); 142static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
143static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 143static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
144static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 144static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
145 146
146static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 147static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
147{ 148{
@@ -312,7 +313,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
312 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; 313 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
313 ew32(CTRL, mac_reg); 314 ew32(CTRL, mac_reg);
314 e1e_flush(); 315 e1e_flush();
315 udelay(10); 316 usleep_range(10, 20);
316 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 317 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
317 ew32(CTRL, mac_reg); 318 ew32(CTRL, mac_reg);
318 e1e_flush(); 319 e1e_flush();
@@ -548,8 +549,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
548 /* find total size of the NVM, then cut in half since the total 549 /* find total size of the NVM, then cut in half since the total
549 * size represents two separate NVM banks. 550 * size represents two separate NVM banks.
550 */ 551 */
551 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 552 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
552 << FLASH_SECTOR_ADDR_SHIFT; 553 << FLASH_SECTOR_ADDR_SHIFT);
553 nvm->flash_bank_size /= 2; 554 nvm->flash_bank_size /= 2;
554 /* Adjust to word count */ 555 /* Adjust to word count */
555 nvm->flash_bank_size /= sizeof(u16); 556 nvm->flash_bank_size /= sizeof(u16);
@@ -636,6 +637,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
636 if (mac->type == e1000_pch_lpt) { 637 if (mac->type == e1000_pch_lpt) {
637 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; 638 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
638 mac->ops.rar_set = e1000_rar_set_pch_lpt; 639 mac->ops.rar_set = e1000_rar_set_pch_lpt;
640 mac->ops.setup_physical_interface =
641 e1000_setup_copper_link_pch_lpt;
639 } 642 }
640 643
641 /* Enable PCS Lock-loss workaround for ICH8 */ 644 /* Enable PCS Lock-loss workaround for ICH8 */
@@ -692,7 +695,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
692 * 695 *
693 * Assumes the SW/FW/HW Semaphore is already acquired. 696 * Assumes the SW/FW/HW Semaphore is already acquired.
694 **/ 697 **/
695static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) 698s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
696{ 699{
697 return __e1000_access_emi_reg_locked(hw, addr, &data, false); 700 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
698} 701}
@@ -709,11 +712,22 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
709{ 712{
710 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 713 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
711 s32 ret_val; 714 s32 ret_val;
712 u16 lpi_ctrl; 715 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
713 716
714 if ((hw->phy.type != e1000_phy_82579) && 717 switch (hw->phy.type) {
715 (hw->phy.type != e1000_phy_i217)) 718 case e1000_phy_82579:
719 lpa = I82579_EEE_LP_ABILITY;
720 pcs_status = I82579_EEE_PCS_STATUS;
721 adv_addr = I82579_EEE_ADVERTISEMENT;
722 break;
723 case e1000_phy_i217:
724 lpa = I217_EEE_LP_ABILITY;
725 pcs_status = I217_EEE_PCS_STATUS;
726 adv_addr = I217_EEE_ADVERTISEMENT;
727 break;
728 default:
716 return 0; 729 return 0;
730 }
717 731
718 ret_val = hw->phy.ops.acquire(hw); 732 ret_val = hw->phy.ops.acquire(hw);
719 if (ret_val) 733 if (ret_val)
@@ -728,34 +742,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
728 742
729 /* Enable EEE if not disabled by user */ 743 /* Enable EEE if not disabled by user */
730 if (!dev_spec->eee_disable) { 744 if (!dev_spec->eee_disable) {
731 u16 lpa, pcs_status, data;
732
733 /* Save off link partner's EEE ability */ 745 /* Save off link partner's EEE ability */
734 switch (hw->phy.type) {
735 case e1000_phy_82579:
736 lpa = I82579_EEE_LP_ABILITY;
737 pcs_status = I82579_EEE_PCS_STATUS;
738 break;
739 case e1000_phy_i217:
740 lpa = I217_EEE_LP_ABILITY;
741 pcs_status = I217_EEE_PCS_STATUS;
742 break;
743 default:
744 ret_val = -E1000_ERR_PHY;
745 goto release;
746 }
747 ret_val = e1000_read_emi_reg_locked(hw, lpa, 746 ret_val = e1000_read_emi_reg_locked(hw, lpa,
748 &dev_spec->eee_lp_ability); 747 &dev_spec->eee_lp_ability);
749 if (ret_val) 748 if (ret_val)
750 goto release; 749 goto release;
751 750
751 /* Read EEE advertisement */
752 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
753 if (ret_val)
754 goto release;
755
752 /* Enable EEE only for speeds in which the link partner is 756 /* Enable EEE only for speeds in which the link partner is
753 * EEE capable. 757 * EEE capable and for which we advertise EEE.
754 */ 758 */
755 if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) 759 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
756 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; 760 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
757 761
758 if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { 762 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
759 e1e_rphy_locked(hw, MII_LPA, &data); 763 e1e_rphy_locked(hw, MII_LPA, &data);
760 if (data & LPA_100FULL) 764 if (data & LPA_100FULL)
761 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; 765 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
@@ -767,13 +771,13 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
767 dev_spec->eee_lp_ability &= 771 dev_spec->eee_lp_ability &=
768 ~I82579_EEE_100_SUPPORTED; 772 ~I82579_EEE_100_SUPPORTED;
769 } 773 }
770
771 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
772 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
773 if (ret_val)
774 goto release;
775 } 774 }
776 775
776 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
777 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
778 if (ret_val)
779 goto release;
780
777 ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); 781 ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
778release: 782release:
779 hw->phy.ops.release(hw); 783 hw->phy.ops.release(hw);
@@ -835,6 +839,94 @@ release:
835} 839}
836 840
837/** 841/**
842 * e1000_platform_pm_pch_lpt - Set platform power management values
843 * @hw: pointer to the HW structure
844 * @link: bool indicating link status
845 *
846 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
847 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
848 * when link is up (which must not exceed the maximum latency supported
849 * by the platform), otherwise specify there is no LTR requirement.
850 * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
851 * latencies in the LTR Extended Capability Structure in the PCIe Extended
852 * Capability register set, on this device LTR is set by writing the
853 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
854 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
855 * message to the PMC.
856 **/
857static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
858{
859 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
860 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
861 u16 lat_enc = 0; /* latency encoded */
862
863 if (link) {
864 u16 speed, duplex, scale = 0;
865 u16 max_snoop, max_nosnoop;
866 u16 max_ltr_enc; /* max LTR latency encoded */
867 s64 lat_ns; /* latency (ns) */
868 s64 value;
869 u32 rxa;
870
871 if (!hw->adapter->max_frame_size) {
872 e_dbg("max_frame_size not set.\n");
873 return -E1000_ERR_CONFIG;
874 }
875
876 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
877 if (!speed) {
878 e_dbg("Speed not set.\n");
879 return -E1000_ERR_CONFIG;
880 }
881
882 /* Rx Packet Buffer Allocation size (KB) */
883 rxa = er32(PBA) & E1000_PBA_RXA_MASK;
884
885 /* Determine the maximum latency tolerated by the device.
886 *
887 * Per the PCIe spec, the tolerated latencies are encoded as
888 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
889 * a 10-bit value (0-1023) to provide a range from 1 ns to
890 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
891 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
892 */
893 lat_ns = ((s64)rxa * 1024 -
894 (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
895 if (lat_ns < 0)
896 lat_ns = 0;
897 else
898 do_div(lat_ns, speed);
899
900 value = lat_ns;
901 while (value > PCI_LTR_VALUE_MASK) {
902 scale++;
903 value = DIV_ROUND_UP(value, (1 << 5));
904 }
905 if (scale > E1000_LTRV_SCALE_MAX) {
906 e_dbg("Invalid LTR latency scale %d\n", scale);
907 return -E1000_ERR_CONFIG;
908 }
909 lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
910
911 /* Determine the maximum latency tolerated by the platform */
912 pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
913 &max_snoop);
914 pci_read_config_word(hw->adapter->pdev,
915 E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
916 max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
917
918 if (lat_enc > max_ltr_enc)
919 lat_enc = max_ltr_enc;
920 }
921
922 /* Set Snoop and No-Snoop latencies the same */
923 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
924 ew32(LTRV, reg);
925
926 return 0;
927}
928
929/**
838 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 930 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
839 * @hw: pointer to the HW structure 931 * @hw: pointer to the HW structure
840 * 932 *
@@ -871,6 +963,34 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
871 return ret_val; 963 return ret_val;
872 } 964 }
873 965
966 /* When connected at 10Mbps half-duplex, 82579 parts are excessively
967 * aggressive resulting in many collisions. To avoid this, increase
968 * the IPG and reduce Rx latency in the PHY.
969 */
970 if ((hw->mac.type == e1000_pch2lan) && link) {
971 u32 reg;
972 reg = er32(STATUS);
973 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
974 reg = er32(TIPG);
975 reg &= ~E1000_TIPG_IPGT_MASK;
976 reg |= 0xFF;
977 ew32(TIPG, reg);
978
979 /* Reduce Rx latency in analog PHY */
980 ret_val = hw->phy.ops.acquire(hw);
981 if (ret_val)
982 return ret_val;
983
984 ret_val =
985 e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
986
987 hw->phy.ops.release(hw);
988
989 if (ret_val)
990 return ret_val;
991 }
992 }
993
874 /* Work-around I218 hang issue */ 994 /* Work-around I218 hang issue */
875 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 995 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
876 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { 996 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
@@ -879,6 +999,15 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
879 return ret_val; 999 return ret_val;
880 } 1000 }
881 1001
1002 if (hw->mac.type == e1000_pch_lpt) {
1003 /* Set platform power management values for
1004 * Latency Tolerance Reporting (LTR)
1005 */
1006 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1007 if (ret_val)
1008 return ret_val;
1009 }
1010
882 /* Clear link partner's EEE ability */ 1011 /* Clear link partner's EEE ability */
883 hw->dev_spec.ich8lan.eee_lp_ability = 0; 1012 hw->dev_spec.ich8lan.eee_lp_ability = 0;
884 1013
@@ -1002,10 +1131,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1002 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 1131 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1003 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; 1132 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1004 1133
1005 /* Disable EEE by default until IEEE802.3az spec is finalized */
1006 if (adapter->flags2 & FLAG2_HAS_EEE)
1007 adapter->hw.dev_spec.ich8lan.eee_disable = true;
1008
1009 return 0; 1134 return 0;
1010} 1135}
1011 1136
@@ -1134,9 +1259,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1134 u32 fwsm; 1259 u32 fwsm;
1135 1260
1136 fwsm = er32(FWSM); 1261 fwsm = er32(FWSM);
1137 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 1262 return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
1138 ((fwsm & E1000_FWSM_MODE_MASK) == 1263 ((fwsm & E1000_FWSM_MODE_MASK) ==
1139 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 1264 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1140} 1265}
1141 1266
1142/** 1267/**
@@ -1153,7 +1278,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1153 1278
1154 fwsm = er32(FWSM); 1279 fwsm = er32(FWSM);
1155 return (fwsm & E1000_ICH_FWSM_FW_VALID) && 1280 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1156 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); 1281 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1157} 1282}
1158 1283
1159/** 1284/**
@@ -1440,8 +1565,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1440 word_addr = (u16)(cnf_base_addr << 1); 1565 word_addr = (u16)(cnf_base_addr << 1);
1441 1566
1442 for (i = 0; i < cnf_size; i++) { 1567 for (i = 0; i < cnf_size; i++) {
1443 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, 1568 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
1444 &reg_data);
1445 if (ret_val) 1569 if (ret_val)
1446 goto release; 1570 goto release;
1447 1571
@@ -1501,13 +1625,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1501 if (ret_val) 1625 if (ret_val)
1502 goto release; 1626 goto release;
1503 1627
1504 status_reg &= BM_CS_STATUS_LINK_UP | 1628 status_reg &= (BM_CS_STATUS_LINK_UP |
1505 BM_CS_STATUS_RESOLVED | 1629 BM_CS_STATUS_RESOLVED |
1506 BM_CS_STATUS_SPEED_MASK; 1630 BM_CS_STATUS_SPEED_MASK);
1507 1631
1508 if (status_reg == (BM_CS_STATUS_LINK_UP | 1632 if (status_reg == (BM_CS_STATUS_LINK_UP |
1509 BM_CS_STATUS_RESOLVED | 1633 BM_CS_STATUS_RESOLVED |
1510 BM_CS_STATUS_SPEED_1000)) 1634 BM_CS_STATUS_SPEED_1000))
1511 k1_enable = false; 1635 k1_enable = false;
1512 } 1636 }
1513 1637
@@ -1516,13 +1640,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1516 if (ret_val) 1640 if (ret_val)
1517 goto release; 1641 goto release;
1518 1642
1519 status_reg &= HV_M_STATUS_LINK_UP | 1643 status_reg &= (HV_M_STATUS_LINK_UP |
1520 HV_M_STATUS_AUTONEG_COMPLETE | 1644 HV_M_STATUS_AUTONEG_COMPLETE |
1521 HV_M_STATUS_SPEED_MASK; 1645 HV_M_STATUS_SPEED_MASK);
1522 1646
1523 if (status_reg == (HV_M_STATUS_LINK_UP | 1647 if (status_reg == (HV_M_STATUS_LINK_UP |
1524 HV_M_STATUS_AUTONEG_COMPLETE | 1648 HV_M_STATUS_AUTONEG_COMPLETE |
1525 HV_M_STATUS_SPEED_1000)) 1649 HV_M_STATUS_SPEED_1000))
1526 k1_enable = false; 1650 k1_enable = false;
1527 } 1651 }
1528 1652
@@ -1579,7 +1703,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1579 if (ret_val) 1703 if (ret_val)
1580 return ret_val; 1704 return ret_val;
1581 1705
1582 udelay(20); 1706 usleep_range(20, 40);
1583 ctrl_ext = er32(CTRL_EXT); 1707 ctrl_ext = er32(CTRL_EXT);
1584 ctrl_reg = er32(CTRL); 1708 ctrl_reg = er32(CTRL);
1585 1709
@@ -1589,11 +1713,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1589 1713
1590 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); 1714 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1591 e1e_flush(); 1715 e1e_flush();
1592 udelay(20); 1716 usleep_range(20, 40);
1593 ew32(CTRL, ctrl_reg); 1717 ew32(CTRL, ctrl_reg);
1594 ew32(CTRL_EXT, ctrl_ext); 1718 ew32(CTRL_EXT, ctrl_ext);
1595 e1e_flush(); 1719 e1e_flush();
1596 udelay(20); 1720 usleep_range(20, 40);
1597 1721
1598 return 0; 1722 return 0;
1599} 1723}
@@ -1667,7 +1791,6 @@ release:
1667 return ret_val; 1791 return ret_val;
1668} 1792}
1669 1793
1670
1671/** 1794/**
1672 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode 1795 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1673 * @hw: pointer to the HW structure 1796 * @hw: pointer to the HW structure
@@ -1834,7 +1957,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1834 * SHRAL/H) and initial CRC values to the MAC 1957 * SHRAL/H) and initial CRC values to the MAC
1835 */ 1958 */
1836 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 1959 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1837 u8 mac_addr[ETH_ALEN] = {0}; 1960 u8 mac_addr[ETH_ALEN] = { 0 };
1838 u32 addr_high, addr_low; 1961 u32 addr_high, addr_low;
1839 1962
1840 addr_high = er32(RAH(i)); 1963 addr_high = er32(RAH(i));
@@ -1865,8 +1988,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1865 ew32(RCTL, mac_reg); 1988 ew32(RCTL, mac_reg);
1866 1989
1867 ret_val = e1000e_read_kmrn_reg(hw, 1990 ret_val = e1000e_read_kmrn_reg(hw,
1868 E1000_KMRNCTRLSTA_CTRL_OFFSET, 1991 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1869 &data); 1992 &data);
1870 if (ret_val) 1993 if (ret_val)
1871 return ret_val; 1994 return ret_val;
1872 ret_val = e1000e_write_kmrn_reg(hw, 1995 ret_val = e1000e_write_kmrn_reg(hw,
@@ -1875,8 +1998,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1875 if (ret_val) 1998 if (ret_val)
1876 return ret_val; 1999 return ret_val;
1877 ret_val = e1000e_read_kmrn_reg(hw, 2000 ret_val = e1000e_read_kmrn_reg(hw,
1878 E1000_KMRNCTRLSTA_HD_CTRL, 2001 E1000_KMRNCTRLSTA_HD_CTRL,
1879 &data); 2002 &data);
1880 if (ret_val) 2003 if (ret_val)
1881 return ret_val; 2004 return ret_val;
1882 data &= ~(0xF << 8); 2005 data &= ~(0xF << 8);
@@ -1923,8 +2046,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1923 ew32(RCTL, mac_reg); 2046 ew32(RCTL, mac_reg);
1924 2047
1925 ret_val = e1000e_read_kmrn_reg(hw, 2048 ret_val = e1000e_read_kmrn_reg(hw,
1926 E1000_KMRNCTRLSTA_CTRL_OFFSET, 2049 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1927 &data); 2050 &data);
1928 if (ret_val) 2051 if (ret_val)
1929 return ret_val; 2052 return ret_val;
1930 ret_val = e1000e_write_kmrn_reg(hw, 2053 ret_val = e1000e_write_kmrn_reg(hw,
@@ -1933,8 +2056,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1933 if (ret_val) 2056 if (ret_val)
1934 return ret_val; 2057 return ret_val;
1935 ret_val = e1000e_read_kmrn_reg(hw, 2058 ret_val = e1000e_read_kmrn_reg(hw,
1936 E1000_KMRNCTRLSTA_HD_CTRL, 2059 E1000_KMRNCTRLSTA_HD_CTRL,
1937 &data); 2060 &data);
1938 if (ret_val) 2061 if (ret_val)
1939 return ret_val; 2062 return ret_val;
1940 data &= ~(0xF << 8); 2063 data &= ~(0xF << 8);
@@ -2100,7 +2223,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2100 do { 2223 do {
2101 data = er32(STATUS); 2224 data = er32(STATUS);
2102 data &= E1000_STATUS_LAN_INIT_DONE; 2225 data &= E1000_STATUS_LAN_INIT_DONE;
2103 udelay(100); 2226 usleep_range(100, 200);
2104 } while ((!data) && --loop); 2227 } while ((!data) && --loop);
2105 2228
2106 /* If basic configuration is incomplete before the above loop 2229 /* If basic configuration is incomplete before the above loop
@@ -2445,7 +2568,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2445 2568
2446 /* Check bank 0 */ 2569 /* Check bank 0 */
2447 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, 2570 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2448 &sig_byte); 2571 &sig_byte);
2449 if (ret_val) 2572 if (ret_val)
2450 return ret_val; 2573 return ret_val;
2451 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 2574 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2456,8 +2579,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2456 2579
2457 /* Check bank 1 */ 2580 /* Check bank 1 */
2458 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + 2581 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2459 bank1_offset, 2582 bank1_offset,
2460 &sig_byte); 2583 &sig_byte);
2461 if (ret_val) 2584 if (ret_val)
2462 return ret_val; 2585 return ret_val;
2463 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == 2586 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2510,8 +2633,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2510 2633
2511 ret_val = 0; 2634 ret_val = 0;
2512 for (i = 0; i < words; i++) { 2635 for (i = 0; i < words; i++) {
2513 if (dev_spec->shadow_ram[offset+i].modified) { 2636 if (dev_spec->shadow_ram[offset + i].modified) {
2514 data[i] = dev_spec->shadow_ram[offset+i].value; 2637 data[i] = dev_spec->shadow_ram[offset + i].value;
2515 } else { 2638 } else {
2516 ret_val = e1000_read_flash_word_ich8lan(hw, 2639 ret_val = e1000_read_flash_word_ich8lan(hw,
2517 act_offset + i, 2640 act_offset + i,
@@ -2696,8 +2819,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2696 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 2819 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2697 return -E1000_ERR_NVM; 2820 return -E1000_ERR_NVM;
2698 2821
2699 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 2822 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2700 hw->nvm.flash_base_addr; 2823 hw->nvm.flash_base_addr);
2701 2824
2702 do { 2825 do {
2703 udelay(1); 2826 udelay(1);
@@ -2714,8 +2837,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2714 2837
2715 ew32flash(ICH_FLASH_FADDR, flash_linear_addr); 2838 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2716 2839
2717 ret_val = e1000_flash_cycle_ich8lan(hw, 2840 ret_val =
2718 ICH_FLASH_READ_COMMAND_TIMEOUT); 2841 e1000_flash_cycle_ich8lan(hw,
2842 ICH_FLASH_READ_COMMAND_TIMEOUT);
2719 2843
2720 /* Check if FCERR is set to 1, if set to 1, clear it 2844 /* Check if FCERR is set to 1, if set to 1, clear it
2721 * and try the whole sequence a few more times, else 2845 * and try the whole sequence a few more times, else
@@ -2774,8 +2898,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2774 nvm->ops.acquire(hw); 2898 nvm->ops.acquire(hw);
2775 2899
2776 for (i = 0; i < words; i++) { 2900 for (i = 0; i < words; i++) {
2777 dev_spec->shadow_ram[offset+i].modified = true; 2901 dev_spec->shadow_ram[offset + i].modified = true;
2778 dev_spec->shadow_ram[offset+i].value = data[i]; 2902 dev_spec->shadow_ram[offset + i].value = data[i];
2779 } 2903 }
2780 2904
2781 nvm->ops.release(hw); 2905 nvm->ops.release(hw);
@@ -2844,8 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2844 data = dev_spec->shadow_ram[i].value; 2968 data = dev_spec->shadow_ram[i].value;
2845 } else { 2969 } else {
2846 ret_val = e1000_read_flash_word_ich8lan(hw, i + 2970 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2847 old_bank_offset, 2971 old_bank_offset,
2848 &data); 2972 &data);
2849 if (ret_val) 2973 if (ret_val)
2850 break; 2974 break;
2851 } 2975 }
@@ -2863,7 +2987,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2863 /* Convert offset to bytes. */ 2987 /* Convert offset to bytes. */
2864 act_offset = (i + new_bank_offset) << 1; 2988 act_offset = (i + new_bank_offset) << 1;
2865 2989
2866 udelay(100); 2990 usleep_range(100, 200);
2867 /* Write the bytes to the new bank. */ 2991 /* Write the bytes to the new bank. */
2868 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2992 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2869 act_offset, 2993 act_offset,
@@ -2871,10 +2995,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2871 if (ret_val) 2995 if (ret_val)
2872 break; 2996 break;
2873 2997
2874 udelay(100); 2998 usleep_range(100, 200);
2875 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2999 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2876 act_offset + 1, 3000 act_offset + 1,
2877 (u8)(data >> 8)); 3001 (u8)(data >> 8));
2878 if (ret_val) 3002 if (ret_val)
2879 break; 3003 break;
2880 } 3004 }
@@ -3050,8 +3174,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3050 offset > ICH_FLASH_LINEAR_ADDR_MASK) 3174 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3051 return -E1000_ERR_NVM; 3175 return -E1000_ERR_NVM;
3052 3176
3053 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 3177 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3054 hw->nvm.flash_base_addr; 3178 hw->nvm.flash_base_addr);
3055 3179
3056 do { 3180 do {
3057 udelay(1); 3181 udelay(1);
@@ -3062,7 +3186,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3062 3186
3063 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 3187 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3064 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 3188 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3065 hsflctl.hsf_ctrl.fldbcount = size -1; 3189 hsflctl.hsf_ctrl.fldbcount = size - 1;
3066 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; 3190 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3067 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 3191 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3068 3192
@@ -3078,8 +3202,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3078 /* check if FCERR is set to 1 , if set to 1, clear it 3202 /* check if FCERR is set to 1 , if set to 1, clear it
3079 * and try the whole sequence a few more times else done 3203 * and try the whole sequence a few more times else done
3080 */ 3204 */
3081 ret_val = e1000_flash_cycle_ich8lan(hw, 3205 ret_val =
3082 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 3206 e1000_flash_cycle_ich8lan(hw,
3207 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3083 if (!ret_val) 3208 if (!ret_val)
3084 break; 3209 break;
3085 3210
@@ -3138,7 +3263,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3138 3263
3139 for (program_retries = 0; program_retries < 100; program_retries++) { 3264 for (program_retries = 0; program_retries < 100; program_retries++) {
3140 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); 3265 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
3141 udelay(100); 3266 usleep_range(100, 200);
3142 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 3267 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3143 if (!ret_val) 3268 if (!ret_val)
3144 break; 3269 break;
@@ -3209,8 +3334,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3209 flash_linear_addr = hw->nvm.flash_base_addr; 3334 flash_linear_addr = hw->nvm.flash_base_addr;
3210 flash_linear_addr += (bank) ? flash_bank_size : 0; 3335 flash_linear_addr += (bank) ? flash_bank_size : 0;
3211 3336
3212 for (j = 0; j < iteration ; j++) { 3337 for (j = 0; j < iteration; j++) {
3213 do { 3338 do {
3339 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3340
3214 /* Steps */ 3341 /* Steps */
3215 ret_val = e1000_flash_cycle_init_ich8lan(hw); 3342 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3216 if (ret_val) 3343 if (ret_val)
@@ -3230,8 +3357,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3230 flash_linear_addr += (j * sector_size); 3357 flash_linear_addr += (j * sector_size);
3231 ew32flash(ICH_FLASH_FADDR, flash_linear_addr); 3358 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3232 3359
3233 ret_val = e1000_flash_cycle_ich8lan(hw, 3360 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3234 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3235 if (!ret_val) 3361 if (!ret_val)
3236 break; 3362 break;
3237 3363
@@ -3270,8 +3396,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3270 return ret_val; 3396 return ret_val;
3271 } 3397 }
3272 3398
3273 if (*data == ID_LED_RESERVED_0000 || 3399 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3274 *data == ID_LED_RESERVED_FFFF)
3275 *data = ID_LED_DEFAULT_ICH8LAN; 3400 *data = ID_LED_DEFAULT_ICH8LAN;
3276 3401
3277 return 0; 3402 return 0;
@@ -3511,9 +3636,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3511 3636
3512 /* Initialize identification LED */ 3637 /* Initialize identification LED */
3513 ret_val = mac->ops.id_led_init(hw); 3638 ret_val = mac->ops.id_led_init(hw);
3639 /* An error is not fatal and we should not stop init due to this */
3514 if (ret_val) 3640 if (ret_val)
3515 e_dbg("Error initializing identification LED\n"); 3641 e_dbg("Error initializing identification LED\n");
3516 /* This is not fatal and we should not stop init due to this */
3517 3642
3518 /* Setup the receive address. */ 3643 /* Setup the receive address. */
3519 e1000e_init_rx_addrs(hw, mac->rar_entry_count); 3644 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
@@ -3541,16 +3666,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3541 3666
3542 /* Set the transmit descriptor write-back policy for both queues */ 3667 /* Set the transmit descriptor write-back policy for both queues */
3543 txdctl = er32(TXDCTL(0)); 3668 txdctl = er32(TXDCTL(0));
3544 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 3669 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
3545 E1000_TXDCTL_FULL_TX_DESC_WB; 3670 E1000_TXDCTL_FULL_TX_DESC_WB);
3546 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 3671 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
3547 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 3672 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
3548 ew32(TXDCTL(0), txdctl); 3673 ew32(TXDCTL(0), txdctl);
3549 txdctl = er32(TXDCTL(1)); 3674 txdctl = er32(TXDCTL(1));
3550 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 3675 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
3551 E1000_TXDCTL_FULL_TX_DESC_WB; 3676 E1000_TXDCTL_FULL_TX_DESC_WB);
3552 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 3677 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
3553 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 3678 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
3554 ew32(TXDCTL(1), txdctl); 3679 ew32(TXDCTL(1), txdctl);
3555 3680
3556 /* ICH8 has opposite polarity of no_snoop bits. 3681 /* ICH8 has opposite polarity of no_snoop bits.
@@ -3559,7 +3684,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3559 if (mac->type == e1000_ich8lan) 3684 if (mac->type == e1000_ich8lan)
3560 snoop = PCIE_ICH8_SNOOP_ALL; 3685 snoop = PCIE_ICH8_SNOOP_ALL;
3561 else 3686 else
3562 snoop = (u32) ~(PCIE_NO_SNOOP_ALL); 3687 snoop = (u32)~(PCIE_NO_SNOOP_ALL);
3563 e1000e_set_pcie_no_snoop(hw, snoop); 3688 e1000e_set_pcie_no_snoop(hw, snoop);
3564 3689
3565 ctrl_ext = er32(CTRL_EXT); 3690 ctrl_ext = er32(CTRL_EXT);
@@ -3575,6 +3700,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3575 3700
3576 return ret_val; 3701 return ret_val;
3577} 3702}
3703
3578/** 3704/**
3579 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits 3705 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3580 * @hw: pointer to the HW structure 3706 * @hw: pointer to the HW structure
@@ -3686,8 +3812,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3686 */ 3812 */
3687 hw->fc.current_mode = hw->fc.requested_mode; 3813 hw->fc.current_mode = hw->fc.requested_mode;
3688 3814
3689 e_dbg("After fix-ups FlowControl is now = %x\n", 3815 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
3690 hw->fc.current_mode);
3691 3816
3692 /* Continue to configure the copper link. */ 3817 /* Continue to configure the copper link. */
3693 ret_val = hw->mac.ops.setup_physical_interface(hw); 3818 ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -3737,12 +3862,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3737 if (ret_val) 3862 if (ret_val)
3738 return ret_val; 3863 return ret_val;
3739 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 3864 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3740 &reg_data); 3865 &reg_data);
3741 if (ret_val) 3866 if (ret_val)
3742 return ret_val; 3867 return ret_val;
3743 reg_data |= 0x3F; 3868 reg_data |= 0x3F;
3744 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 3869 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3745 reg_data); 3870 reg_data);
3746 if (ret_val) 3871 if (ret_val)
3747 return ret_val; 3872 return ret_val;
3748 3873
@@ -3760,7 +3885,6 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3760 break; 3885 break;
3761 case e1000_phy_82577: 3886 case e1000_phy_82577:
3762 case e1000_phy_82579: 3887 case e1000_phy_82579:
3763 case e1000_phy_i217:
3764 ret_val = e1000_copper_link_setup_82577(hw); 3888 ret_val = e1000_copper_link_setup_82577(hw);
3765 if (ret_val) 3889 if (ret_val)
3766 return ret_val; 3890 return ret_val;
@@ -3796,6 +3920,31 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3796} 3920}
3797 3921
3798/** 3922/**
3923 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
3924 * @hw: pointer to the HW structure
3925 *
3926 * Calls the PHY specific link setup function and then calls the
3927 * generic setup_copper_link to finish configuring the link for
3928 * Lynxpoint PCH devices
3929 **/
3930static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
3931{
3932 u32 ctrl;
3933 s32 ret_val;
3934
3935 ctrl = er32(CTRL);
3936 ctrl |= E1000_CTRL_SLU;
3937 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3938 ew32(CTRL, ctrl);
3939
3940 ret_val = e1000_copper_link_setup_82577(hw);
3941 if (ret_val)
3942 return ret_val;
3943
3944 return e1000e_setup_copper_link(hw);
3945}
3946
3947/**
3799 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex 3948 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3800 * @hw: pointer to the HW structure 3949 * @hw: pointer to the HW structure
3801 * @speed: pointer to store current link speed 3950 * @speed: pointer to store current link speed
@@ -3815,8 +3964,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3815 return ret_val; 3964 return ret_val;
3816 3965
3817 if ((hw->mac.type == e1000_ich8lan) && 3966 if ((hw->mac.type == e1000_ich8lan) &&
3818 (hw->phy.type == e1000_phy_igp_3) && 3967 (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
3819 (*speed == SPEED_1000)) {
3820 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 3968 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3821 } 3969 }
3822 3970
@@ -3899,7 +4047,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3899 * /disabled - false). 4047 * /disabled - false).
3900 **/ 4048 **/
3901void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 4049void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3902 bool state) 4050 bool state)
3903{ 4051{
3904 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 4052 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3905 4053
@@ -3981,12 +4129,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3981 return; 4129 return;
3982 4130
3983 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 4131 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3984 &reg_data); 4132 &reg_data);
3985 if (ret_val) 4133 if (ret_val)
3986 return; 4134 return;
3987 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; 4135 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3988 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 4136 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3989 reg_data); 4137 reg_data);
3990 if (ret_val) 4138 if (ret_val)
3991 return; 4139 return;
3992 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; 4140 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8bf4655c2e17..80034a2b297c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -211,7 +211,8 @@
211#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ 211#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
212#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ 212#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
213#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 213#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
214#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */ 214#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
215#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
215#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ 216#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
216#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ 217#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
217#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ 218#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
@@ -249,13 +250,6 @@
249/* Proprietary Latency Tolerance Reporting PCI Capability */ 250/* Proprietary Latency Tolerance Reporting PCI Capability */
250#define E1000_PCI_LTR_CAP_LPT 0xA8 251#define E1000_PCI_LTR_CAP_LPT 0xA8
251 252
252/* OBFF Control & Threshold Defines */
253#define E1000_SVCR_OFF_EN 0x00000001
254#define E1000_SVCR_OFF_MASKINT 0x00001000
255#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
256#define E1000_SVCR_OFF_TIMER_SHIFT 16
257#define E1000_SVT_OFF_HWM_MASK 0x0000001F
258
259void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); 253void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
260void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 254void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
261 bool state); 255 bool state);
@@ -267,4 +261,5 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
267void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); 261void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
268s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); 262s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
269s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); 263s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
264s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
270#endif /* _E1000E_ICH8LAN_H_ */ 265#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b78e02174601..2480c1091873 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
596 * serdes media type. 596 * serdes media type.
597 */ 597 */
598 /* SYNCH bit and IV bit are sticky. */ 598 /* SYNCH bit and IV bit are sticky. */
599 udelay(10); 599 usleep_range(10, 20);
600 rxcw = er32(RXCW); 600 rxcw = er32(RXCW);
601 if (rxcw & E1000_RXCW_SYNCH) { 601 if (rxcw & E1000_RXCW_SYNCH) {
602 if (!(rxcw & E1000_RXCW_IV)) { 602 if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
613 status = er32(STATUS); 613 status = er32(STATUS);
614 if (status & E1000_STATUS_LU) { 614 if (status & E1000_STATUS_LU) {
615 /* SYNCH bit and IV bit are sticky, so reread rxcw. */ 615 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
616 udelay(10); 616 usleep_range(10, 20);
617 rxcw = er32(RXCW); 617 rxcw = er32(RXCW);
618 if (rxcw & E1000_RXCW_SYNCH) { 618 if (rxcw & E1000_RXCW_SYNCH) {
619 if (!(rxcw & E1000_RXCW_IV)) { 619 if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1382 if (!(swsm & E1000_SWSM_SMBI)) 1382 if (!(swsm & E1000_SWSM_SMBI))
1383 break; 1383 break;
1384 1384
1385 udelay(50); 1385 usleep_range(50, 100);
1386 i++; 1386 i++;
1387 } 1387 }
1388 1388
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1400 if (er32(SWSM) & E1000_SWSM_SWESMBI) 1400 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1401 break; 1401 break;
1402 1402
1403 udelay(50); 1403 usleep_range(50, 100);
1404 } 1404 }
1405 1405
1406 if (i == timeout) { 1406 if (i == timeout) {
@@ -1600,15 +1600,28 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1600 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1600 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1601 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1601 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1602 } else { 1602 } else {
1603 /* set the blink bit for each LED that's "on" (0x0E) 1603 /* Set the blink bit for each LED that's "on" (0x0E)
1604 * in ledctl_mode2 1604 * (or "off" if inverted) in ledctl_mode2. The blink
1605 * logic in hardware only works when mode is set to "on"
1606 * so it must be changed accordingly when the mode is
1607 * "off" and inverted.
1605 */ 1608 */
1606 ledctl_blink = hw->mac.ledctl_mode2; 1609 ledctl_blink = hw->mac.ledctl_mode2;
1607 for (i = 0; i < 4; i++) 1610 for (i = 0; i < 32; i += 8) {
1608 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1611 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1609 E1000_LEDCTL_MODE_LED_ON) 1612 E1000_LEDCTL_LED0_MODE_MASK;
1610 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << 1613 u32 led_default = hw->mac.ledctl_default >> i;
1611 (i * 8)); 1614
1615 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1616 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1617 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1618 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1619 ledctl_blink &=
1620 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1621 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1622 E1000_LEDCTL_MODE_LED_ON) << i;
1623 }
1624 }
1612 } 1625 }
1613 1626
1614 ew32(LEDCTL, ledctl_blink); 1627 ew32(LEDCTL, ledctl_blink);
@@ -1712,7 +1725,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1712 while (timeout) { 1725 while (timeout) {
1713 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 1726 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
1714 break; 1727 break;
1715 udelay(100); 1728 usleep_range(100, 200);
1716 timeout--; 1729 timeout--;
1717 } 1730 }
1718 1731
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7e615e2bf7e6..a27e3bcc3249 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -55,7 +55,7 @@
55 55
56#define DRV_EXTRAVERSION "-k" 56#define DRV_EXTRAVERSION "-k"
57 57
58#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION 58#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
59char e1000e_driver_name[] = "e1000e"; 59char e1000e_driver_name[] = "e1000e";
60const char e1000e_driver_version[] = DRV_VERSION; 60const char e1000e_driver_version[] = DRV_VERSION;
61 61
@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
219 if (netdev) { 219 if (netdev) {
220 dev_info(&adapter->pdev->dev, "Net device Info\n"); 220 dev_info(&adapter->pdev->dev, "Net device Info\n");
221 pr_info("Device Name state trans_start last_rx\n"); 221 pr_info("Device Name state trans_start last_rx\n");
222 pr_info("%-15s %016lX %016lX %016lX\n", 222 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
223 netdev->name, netdev->state, netdev->trans_start, 223 netdev->state, netdev->trans_start, netdev->last_rx);
224 netdev->last_rx);
225 } 224 }
226 225
227 /* Print Registers */ 226 /* Print Registers */
@@ -555,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
555 skb->protocol = eth_type_trans(skb, netdev); 554 skb->protocol = eth_type_trans(skb, netdev);
556 555
557 if (staterr & E1000_RXD_STAT_VP) 556 if (staterr & E1000_RXD_STAT_VP)
558 __vlan_hwaccel_put_tag(skb, tag); 557 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
559 558
560 napi_gro_receive(&adapter->napi, skb); 559 napi_gro_receive(&adapter->napi, skb);
561} 560}
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
755 cpu_to_le64(ps_page->dma); 754 cpu_to_le64(ps_page->dma);
756 } 755 }
757 756
758 skb = __netdev_alloc_skb_ip_align(netdev, 757 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
759 adapter->rx_ps_bsize0,
760 gfp); 758 gfp);
761 759
762 if (!skb) { 760 if (!skb) {
@@ -850,8 +848,8 @@ check_page:
850 848
851 if (!buffer_info->dma) { 849 if (!buffer_info->dma) {
852 buffer_info->dma = dma_map_page(&pdev->dev, 850 buffer_info->dma = dma_map_page(&pdev->dev,
853 buffer_info->page, 0, 851 buffer_info->page, 0,
854 PAGE_SIZE, 852 PAGE_SIZE,
855 DMA_FROM_DEVICE); 853 DMA_FROM_DEVICE);
856 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 854 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
857 adapter->alloc_rx_buff_failed++; 855 adapter->alloc_rx_buff_failed++;
@@ -942,10 +940,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
942 940
943 cleaned = true; 941 cleaned = true;
944 cleaned_count++; 942 cleaned_count++;
945 dma_unmap_single(&pdev->dev, 943 dma_unmap_single(&pdev->dev, buffer_info->dma,
946 buffer_info->dma, 944 adapter->rx_buffer_len, DMA_FROM_DEVICE);
947 adapter->rx_buffer_len,
948 DMA_FROM_DEVICE);
949 buffer_info->dma = 0; 945 buffer_info->dma = 0;
950 946
951 length = le16_to_cpu(rx_desc->wb.upper.length); 947 length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1073,8 +1069,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1073static void e1000_print_hw_hang(struct work_struct *work) 1069static void e1000_print_hw_hang(struct work_struct *work)
1074{ 1070{
1075 struct e1000_adapter *adapter = container_of(work, 1071 struct e1000_adapter *adapter = container_of(work,
1076 struct e1000_adapter, 1072 struct e1000_adapter,
1077 print_hang_task); 1073 print_hang_task);
1078 struct net_device *netdev = adapter->netdev; 1074 struct net_device *netdev = adapter->netdev;
1079 struct e1000_ring *tx_ring = adapter->tx_ring; 1075 struct e1000_ring *tx_ring = adapter->tx_ring;
1080 unsigned int i = tx_ring->next_to_clean; 1076 unsigned int i = tx_ring->next_to_clean;
@@ -1087,8 +1083,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
1087 if (test_bit(__E1000_DOWN, &adapter->state)) 1083 if (test_bit(__E1000_DOWN, &adapter->state))
1088 return; 1084 return;
1089 1085
1090 if (!adapter->tx_hang_recheck && 1086 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1091 (adapter->flags2 & FLAG2_DMA_BURST)) {
1092 /* May be block on write-back, flush and detect again 1087 /* May be block on write-back, flush and detect again
1093 * flush pending descriptor writebacks to memory 1088 * flush pending descriptor writebacks to memory
1094 */ 1089 */
@@ -1130,19 +1125,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
1130 "PHY 1000BASE-T Status <%x>\n" 1125 "PHY 1000BASE-T Status <%x>\n"
1131 "PHY Extended Status <%x>\n" 1126 "PHY Extended Status <%x>\n"
1132 "PCI Status <%x>\n", 1127 "PCI Status <%x>\n",
1133 readl(tx_ring->head), 1128 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1134 readl(tx_ring->tail), 1129 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1135 tx_ring->next_to_use, 1130 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1136 tx_ring->next_to_clean, 1131 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1137 tx_ring->buffer_info[eop].time_stamp,
1138 eop,
1139 jiffies,
1140 eop_desc->upper.fields.status,
1141 er32(STATUS),
1142 phy_status,
1143 phy_1000t_status,
1144 phy_ext_status,
1145 pci_status);
1146 1132
1147 /* Suggest workaround for known h/w issue */ 1133 /* Suggest workaround for known h/w issue */
1148 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) 1134 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1435,7 +1421,7 @@ copydone:
1435 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1421 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1436 1422
1437 if (rx_desc->wb.upper.header_status & 1423 if (rx_desc->wb.upper.header_status &
1438 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) 1424 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1439 adapter->rx_hdr_split++; 1425 adapter->rx_hdr_split++;
1440 1426
1441 e1000_receive_skb(adapter, netdev, skb, staterr, 1427 e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1473,7 +1459,7 @@ next_desc:
1473 * e1000_consume_page - helper function 1459 * e1000_consume_page - helper function
1474 **/ 1460 **/
1475static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 1461static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1476 u16 length) 1462 u16 length)
1477{ 1463{
1478 bi->page = NULL; 1464 bi->page = NULL;
1479 skb->len += length; 1465 skb->len += length;
@@ -1500,7 +1486,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1500 unsigned int i; 1486 unsigned int i;
1501 int cleaned_count = 0; 1487 int cleaned_count = 0;
1502 bool cleaned = false; 1488 bool cleaned = false;
1503 unsigned int total_rx_bytes=0, total_rx_packets=0; 1489 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1490 struct skb_shared_info *shinfo;
1504 1491
1505 i = rx_ring->next_to_clean; 1492 i = rx_ring->next_to_clean;
1506 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 1493 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1546,7 +1533,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1546 rx_ring->rx_skb_top = NULL; 1533 rx_ring->rx_skb_top = NULL;
1547 goto next_desc; 1534 goto next_desc;
1548 } 1535 }
1549
1550#define rxtop (rx_ring->rx_skb_top) 1536#define rxtop (rx_ring->rx_skb_top)
1551 if (!(staterr & E1000_RXD_STAT_EOP)) { 1537 if (!(staterr & E1000_RXD_STAT_EOP)) {
1552 /* this descriptor is only the beginning (or middle) */ 1538 /* this descriptor is only the beginning (or middle) */
@@ -1554,12 +1540,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1554 /* this is the beginning of a chain */ 1540 /* this is the beginning of a chain */
1555 rxtop = skb; 1541 rxtop = skb;
1556 skb_fill_page_desc(rxtop, 0, buffer_info->page, 1542 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1557 0, length); 1543 0, length);
1558 } else { 1544 } else {
1559 /* this is the middle of a chain */ 1545 /* this is the middle of a chain */
1560 skb_fill_page_desc(rxtop, 1546 shinfo = skb_shinfo(rxtop);
1561 skb_shinfo(rxtop)->nr_frags, 1547 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1562 buffer_info->page, 0, length); 1548 buffer_info->page, 0,
1549 length);
1563 /* re-use the skb, only consumed the page */ 1550 /* re-use the skb, only consumed the page */
1564 buffer_info->skb = skb; 1551 buffer_info->skb = skb;
1565 } 1552 }
@@ -1568,9 +1555,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1568 } else { 1555 } else {
1569 if (rxtop) { 1556 if (rxtop) {
1570 /* end of the chain */ 1557 /* end of the chain */
1571 skb_fill_page_desc(rxtop, 1558 shinfo = skb_shinfo(rxtop);
1572 skb_shinfo(rxtop)->nr_frags, 1559 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1573 buffer_info->page, 0, length); 1560 buffer_info->page, 0,
1561 length);
1574 /* re-use the current skb, we only consumed the 1562 /* re-use the current skb, we only consumed the
1575 * page 1563 * page
1576 */ 1564 */
@@ -1595,10 +1583,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1595 skb_put(skb, length); 1583 skb_put(skb, length);
1596 } else { 1584 } else {
1597 skb_fill_page_desc(skb, 0, 1585 skb_fill_page_desc(skb, 0,
1598 buffer_info->page, 0, 1586 buffer_info->page, 0,
1599 length); 1587 length);
1600 e1000_consume_page(buffer_info, skb, 1588 e1000_consume_page(buffer_info, skb,
1601 length); 1589 length);
1602 } 1590 }
1603 } 1591 }
1604 } 1592 }
@@ -1671,8 +1659,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1671 DMA_FROM_DEVICE); 1659 DMA_FROM_DEVICE);
1672 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1660 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1673 dma_unmap_page(&pdev->dev, buffer_info->dma, 1661 dma_unmap_page(&pdev->dev, buffer_info->dma,
1674 PAGE_SIZE, 1662 PAGE_SIZE, DMA_FROM_DEVICE);
1675 DMA_FROM_DEVICE);
1676 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1663 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1677 dma_unmap_single(&pdev->dev, buffer_info->dma, 1664 dma_unmap_single(&pdev->dev, buffer_info->dma,
1678 adapter->rx_ps_bsize0, 1665 adapter->rx_ps_bsize0,
@@ -1725,7 +1712,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1725static void e1000e_downshift_workaround(struct work_struct *work) 1712static void e1000e_downshift_workaround(struct work_struct *work)
1726{ 1713{
1727 struct e1000_adapter *adapter = container_of(work, 1714 struct e1000_adapter *adapter = container_of(work,
1728 struct e1000_adapter, downshift_task); 1715 struct e1000_adapter,
1716 downshift_task);
1729 1717
1730 if (test_bit(__E1000_DOWN, &adapter->state)) 1718 if (test_bit(__E1000_DOWN, &adapter->state))
1731 return; 1719 return;
@@ -1918,7 +1906,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1918 struct e1000_hw *hw = &adapter->hw; 1906 struct e1000_hw *hw = &adapter->hw;
1919 struct e1000_ring *tx_ring = adapter->tx_ring; 1907 struct e1000_ring *tx_ring = adapter->tx_ring;
1920 1908
1921
1922 adapter->total_tx_bytes = 0; 1909 adapter->total_tx_bytes = 0;
1923 adapter->total_tx_packets = 0; 1910 adapter->total_tx_packets = 0;
1924 1911
@@ -1975,7 +1962,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1975 ew32(RFCTL, rfctl); 1962 ew32(RFCTL, rfctl);
1976 } 1963 }
1977 1964
1978#define E1000_IVAR_INT_ALLOC_VALID 0x8
1979 /* Configure Rx vector */ 1965 /* Configure Rx vector */
1980 rx_ring->ims_val = E1000_IMS_RXQ0; 1966 rx_ring->ims_val = E1000_IMS_RXQ0;
1981 adapter->eiac_mask |= rx_ring->ims_val; 1967 adapter->eiac_mask |= rx_ring->ims_val;
@@ -2050,8 +2036,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2050 if (adapter->flags & FLAG_HAS_MSIX) { 2036 if (adapter->flags & FLAG_HAS_MSIX) {
2051 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 2037 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2052 adapter->msix_entries = kcalloc(adapter->num_vectors, 2038 adapter->msix_entries = kcalloc(adapter->num_vectors,
2053 sizeof(struct msix_entry), 2039 sizeof(struct
2054 GFP_KERNEL); 2040 msix_entry),
2041 GFP_KERNEL);
2055 if (adapter->msix_entries) { 2042 if (adapter->msix_entries) {
2056 for (i = 0; i < adapter->num_vectors; i++) 2043 for (i = 0; i < adapter->num_vectors; i++)
2057 adapter->msix_entries[i].entry = i; 2044 adapter->msix_entries[i].entry = i;
@@ -2495,7 +2482,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2495 switch (itr_setting) { 2482 switch (itr_setting) {
2496 case lowest_latency: 2483 case lowest_latency:
2497 /* handle TSO and jumbo frames */ 2484 /* handle TSO and jumbo frames */
2498 if (bytes/packets > 8000) 2485 if (bytes / packets > 8000)
2499 retval = bulk_latency; 2486 retval = bulk_latency;
2500 else if ((packets < 5) && (bytes > 512)) 2487 else if ((packets < 5) && (bytes > 512))
2501 retval = low_latency; 2488 retval = low_latency;
@@ -2503,13 +2490,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2503 case low_latency: /* 50 usec aka 20000 ints/s */ 2490 case low_latency: /* 50 usec aka 20000 ints/s */
2504 if (bytes > 10000) { 2491 if (bytes > 10000) {
2505 /* this if handles the TSO accounting */ 2492 /* this if handles the TSO accounting */
2506 if (bytes/packets > 8000) 2493 if (bytes / packets > 8000)
2507 retval = bulk_latency; 2494 retval = bulk_latency;
2508 else if ((packets < 10) || ((bytes/packets) > 1200)) 2495 else if ((packets < 10) || ((bytes / packets) > 1200))
2509 retval = bulk_latency; 2496 retval = bulk_latency;
2510 else if ((packets > 35)) 2497 else if ((packets > 35))
2511 retval = lowest_latency; 2498 retval = lowest_latency;
2512 } else if (bytes/packets > 2000) { 2499 } else if (bytes / packets > 2000) {
2513 retval = bulk_latency; 2500 retval = bulk_latency;
2514 } else if (packets <= 2 && bytes < 512) { 2501 } else if (packets <= 2 && bytes < 512) {
2515 retval = lowest_latency; 2502 retval = lowest_latency;
@@ -2561,8 +2548,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2561 2548
2562 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2549 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2563 2550
2564 switch (current_itr) {
2565 /* counts and packets in update_itr are dependent on these numbers */ 2551 /* counts and packets in update_itr are dependent on these numbers */
2552 switch (current_itr) {
2566 case lowest_latency: 2553 case lowest_latency:
2567 new_itr = 70000; 2554 new_itr = 70000;
2568 break; 2555 break;
@@ -2583,8 +2570,7 @@ set_itr_now:
2583 * increasing 2570 * increasing
2584 */ 2571 */
2585 new_itr = new_itr > adapter->itr ? 2572 new_itr = new_itr > adapter->itr ?
2586 min(adapter->itr + (new_itr >> 2), new_itr) : 2573 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2587 new_itr;
2588 adapter->itr = new_itr; 2574 adapter->itr = new_itr;
2589 adapter->rx_ring->itr_val = new_itr; 2575 adapter->rx_ring->itr_val = new_itr;
2590 if (adapter->msix_entries) 2576 if (adapter->msix_entries)
@@ -2686,7 +2672,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2686 return work_done; 2672 return work_done;
2687} 2673}
2688 2674
2689static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2675static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2676 __be16 proto, u16 vid)
2690{ 2677{
2691 struct e1000_adapter *adapter = netdev_priv(netdev); 2678 struct e1000_adapter *adapter = netdev_priv(netdev);
2692 struct e1000_hw *hw = &adapter->hw; 2679 struct e1000_hw *hw = &adapter->hw;
@@ -2711,7 +2698,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2711 return 0; 2698 return 0;
2712} 2699}
2713 2700
2714static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2702 __be16 proto, u16 vid)
2715{ 2703{
2716 struct e1000_adapter *adapter = netdev_priv(netdev); 2704 struct e1000_adapter *adapter = netdev_priv(netdev);
2717 struct e1000_hw *hw = &adapter->hw; 2705 struct e1000_hw *hw = &adapter->hw;
@@ -2755,7 +2743,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2755 ew32(RCTL, rctl); 2743 ew32(RCTL, rctl);
2756 2744
2757 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { 2745 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2758 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2746 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2747 adapter->mng_vlan_id);
2759 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2748 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2760 } 2749 }
2761 } 2750 }
@@ -2815,24 +2804,23 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2815 u16 vid = adapter->hw.mng_cookie.vlan_id; 2804 u16 vid = adapter->hw.mng_cookie.vlan_id;
2816 u16 old_vid = adapter->mng_vlan_id; 2805 u16 old_vid = adapter->mng_vlan_id;
2817 2806
2818 if (adapter->hw.mng_cookie.status & 2807 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2819 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2808 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2820 e1000_vlan_rx_add_vid(netdev, vid);
2821 adapter->mng_vlan_id = vid; 2809 adapter->mng_vlan_id = vid;
2822 } 2810 }
2823 2811
2824 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) 2812 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2825 e1000_vlan_rx_kill_vid(netdev, old_vid); 2813 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2826} 2814}
2827 2815
2828static void e1000_restore_vlan(struct e1000_adapter *adapter) 2816static void e1000_restore_vlan(struct e1000_adapter *adapter)
2829{ 2817{
2830 u16 vid; 2818 u16 vid;
2831 2819
2832 e1000_vlan_rx_add_vid(adapter->netdev, 0); 2820 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2833 2821
2834 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2822 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2835 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2823 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2836} 2824}
2837 2825
2838static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2826static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@@ -3007,8 +2995,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3007 rctl = er32(RCTL); 2995 rctl = er32(RCTL);
3008 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2996 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3009 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 2997 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3010 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 2998 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3011 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2999 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3012 3000
3013 /* Do not Store bad packets */ 3001 /* Do not Store bad packets */
3014 rctl &= ~E1000_RCTL_SBP; 3002 rctl &= ~E1000_RCTL_SBP;
@@ -3094,19 +3082,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3094 /* Enable Packet split descriptors */ 3082 /* Enable Packet split descriptors */
3095 rctl |= E1000_RCTL_DTYP_PS; 3083 rctl |= E1000_RCTL_DTYP_PS;
3096 3084
3097 psrctl |= adapter->rx_ps_bsize0 >> 3085 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3098 E1000_PSRCTL_BSIZE0_SHIFT;
3099 3086
3100 switch (adapter->rx_ps_pages) { 3087 switch (adapter->rx_ps_pages) {
3101 case 3: 3088 case 3:
3102 psrctl |= PAGE_SIZE << 3089 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3103 E1000_PSRCTL_BSIZE3_SHIFT; 3090 /* fall-through */
3104 case 2: 3091 case 2:
3105 psrctl |= PAGE_SIZE << 3092 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3106 E1000_PSRCTL_BSIZE2_SHIFT; 3093 /* fall-through */
3107 case 1: 3094 case 1:
3108 psrctl |= PAGE_SIZE >> 3095 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3109 E1000_PSRCTL_BSIZE1_SHIFT;
3110 break; 3096 break;
3111 } 3097 }
3112 3098
@@ -3280,7 +3266,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
3280 /* update_mc_addr_list expects a packed array of only addresses. */ 3266 /* update_mc_addr_list expects a packed array of only addresses. */
3281 i = 0; 3267 i = 0;
3282 netdev_for_each_mc_addr(ha, netdev) 3268 netdev_for_each_mc_addr(ha, netdev)
3283 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3269 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3284 3270
3285 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3271 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3286 kfree(mta_list); 3272 kfree(mta_list);
@@ -3390,7 +3376,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3390 3376
3391 ew32(RCTL, rctl); 3377 ew32(RCTL, rctl);
3392 3378
3393 if (netdev->features & NETIF_F_HW_VLAN_RX) 3379 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3394 e1000e_vlan_strip_enable(adapter); 3380 e1000e_vlan_strip_enable(adapter);
3395 else 3381 else
3396 e1000e_vlan_strip_disable(adapter); 3382 e1000e_vlan_strip_disable(adapter);
@@ -3757,8 +3743,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3757 * but don't include ethernet FCS because hardware appends it 3743 * but don't include ethernet FCS because hardware appends it
3758 */ 3744 */
3759 min_tx_space = (adapter->max_frame_size + 3745 min_tx_space = (adapter->max_frame_size +
3760 sizeof(struct e1000_tx_desc) - 3746 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3761 ETH_FCS_LEN) * 2;
3762 min_tx_space = ALIGN(min_tx_space, 1024); 3747 min_tx_space = ALIGN(min_tx_space, 1024);
3763 min_tx_space >>= 10; 3748 min_tx_space >>= 10;
3764 /* software strips receive CRC, so leave room for it */ 3749 /* software strips receive CRC, so leave room for it */
@@ -3861,13 +3846,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
3861 if ((adapter->max_frame_size * 2) > (pba << 10)) { 3846 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3862 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3847 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3863 dev_info(&adapter->pdev->dev, 3848 dev_info(&adapter->pdev->dev,
3864 "Interrupt Throttle Rate turned off\n"); 3849 "Interrupt Throttle Rate off\n");
3865 adapter->flags2 |= FLAG2_DISABLE_AIM; 3850 adapter->flags2 |= FLAG2_DISABLE_AIM;
3866 e1000e_write_itr(adapter, 0); 3851 e1000e_write_itr(adapter, 0);
3867 } 3852 }
3868 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3853 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3869 dev_info(&adapter->pdev->dev, 3854 dev_info(&adapter->pdev->dev,
3870 "Interrupt Throttle Rate turned on\n"); 3855 "Interrupt Throttle Rate on\n");
3871 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3856 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3872 adapter->itr = 20000; 3857 adapter->itr = 20000;
3873 e1000e_write_itr(adapter, adapter->itr); 3858 e1000e_write_itr(adapter, adapter->itr);
@@ -3898,6 +3883,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
3898 /* initialize systim and reset the ns time counter */ 3883 /* initialize systim and reset the ns time counter */
3899 e1000e_config_hwtstamp(adapter); 3884 e1000e_config_hwtstamp(adapter);
3900 3885
3886 /* Set EEE advertisement as appropriate */
3887 if (adapter->flags2 & FLAG2_HAS_EEE) {
3888 s32 ret_val;
3889 u16 adv_addr;
3890
3891 switch (hw->phy.type) {
3892 case e1000_phy_82579:
3893 adv_addr = I82579_EEE_ADVERTISEMENT;
3894 break;
3895 case e1000_phy_i217:
3896 adv_addr = I217_EEE_ADVERTISEMENT;
3897 break;
3898 default:
3899 dev_err(&adapter->pdev->dev,
3900 "Invalid PHY type setting EEE advertisement\n");
3901 return;
3902 }
3903
3904 ret_val = hw->phy.ops.acquire(hw);
3905 if (ret_val) {
3906 dev_err(&adapter->pdev->dev,
3907 "EEE advertisement - unable to acquire PHY\n");
3908 return;
3909 }
3910
3911 e1000_write_emi_reg_locked(hw, adv_addr,
3912 hw->dev_spec.ich8lan.eee_disable ?
3913 0 : adapter->eee_advert);
3914
3915 hw->phy.ops.release(hw);
3916 }
3917
3901 if (!netif_running(adapter->netdev) && 3918 if (!netif_running(adapter->netdev) &&
3902 !test_bit(__E1000_TESTING, &adapter->state)) { 3919 !test_bit(__E1000_TESTING, &adapter->state)) {
3903 e1000_power_down_phy(adapter); 3920 e1000_power_down_phy(adapter);
@@ -3999,6 +4016,8 @@ void e1000e_down(struct e1000_adapter *adapter)
3999 4016
4000 e1000_irq_disable(adapter); 4017 e1000_irq_disable(adapter);
4001 4018
4019 napi_synchronize(&adapter->napi);
4020
4002 del_timer_sync(&adapter->watchdog_timer); 4021 del_timer_sync(&adapter->watchdog_timer);
4003 del_timer_sync(&adapter->phy_info_timer); 4022 del_timer_sync(&adapter->phy_info_timer);
4004 4023
@@ -4266,8 +4285,7 @@ static int e1000_open(struct net_device *netdev)
4266 e1000e_power_up_phy(adapter); 4285 e1000e_power_up_phy(adapter);
4267 4286
4268 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4287 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4269 if ((adapter->hw.mng_cookie.status & 4288 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4270 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4271 e1000_update_mng_vlan(adapter); 4289 e1000_update_mng_vlan(adapter);
4272 4290
4273 /* DMA latency requirement to workaround jumbo issue */ 4291 /* DMA latency requirement to workaround jumbo issue */
@@ -4356,12 +4374,13 @@ static int e1000_close(struct net_device *netdev)
4356 4374
4357 pm_runtime_get_sync(&pdev->dev); 4375 pm_runtime_get_sync(&pdev->dev);
4358 4376
4359 napi_disable(&adapter->napi);
4360
4361 if (!test_bit(__E1000_DOWN, &adapter->state)) { 4377 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4362 e1000e_down(adapter); 4378 e1000e_down(adapter);
4363 e1000_free_irq(adapter); 4379 e1000_free_irq(adapter);
4364 } 4380 }
4381
4382 napi_disable(&adapter->napi);
4383
4365 e1000_power_down_phy(adapter); 4384 e1000_power_down_phy(adapter);
4366 4385
4367 e1000e_free_tx_resources(adapter->tx_ring); 4386 e1000e_free_tx_resources(adapter->tx_ring);
@@ -4370,9 +4389,9 @@ static int e1000_close(struct net_device *netdev)
4370 /* kill manageability vlan ID if supported, but not if a vlan with 4389 /* kill manageability vlan ID if supported, but not if a vlan with
4371 * the same ID is registered on the host OS (let 8021q kill it) 4390 * the same ID is registered on the host OS (let 8021q kill it)
4372 */ 4391 */
4373 if (adapter->hw.mng_cookie.status & 4392 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4374 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 4393 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4375 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4394 adapter->mng_vlan_id);
4376 4395
4377 /* If AMT is enabled, let the firmware know that the network 4396 /* If AMT is enabled, let the firmware know that the network
4378 * interface is now closed 4397 * interface is now closed
@@ -4387,6 +4406,7 @@ static int e1000_close(struct net_device *netdev)
4387 4406
4388 return 0; 4407 return 0;
4389} 4408}
4409
4390/** 4410/**
4391 * e1000_set_mac - Change the Ethernet Address of the NIC 4411 * e1000_set_mac - Change the Ethernet Address of the NIC
4392 * @netdev: network interface device structure 4412 * @netdev: network interface device structure
@@ -4437,7 +4457,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4437static void e1000e_update_phy_task(struct work_struct *work) 4457static void e1000e_update_phy_task(struct work_struct *work)
4438{ 4458{
4439 struct e1000_adapter *adapter = container_of(work, 4459 struct e1000_adapter *adapter = container_of(work,
4440 struct e1000_adapter, update_phy_task); 4460 struct e1000_adapter,
4461 update_phy_task);
4441 4462
4442 if (test_bit(__E1000_DOWN, &adapter->state)) 4463 if (test_bit(__E1000_DOWN, &adapter->state))
4443 return; 4464 return;
@@ -4454,7 +4475,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
4454 **/ 4475 **/
4455static void e1000_update_phy_info(unsigned long data) 4476static void e1000_update_phy_info(unsigned long data)
4456{ 4477{
4457 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4478 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4458 4479
4459 if (test_bit(__E1000_DOWN, &adapter->state)) 4480 if (test_bit(__E1000_DOWN, &adapter->state))
4460 return; 4481 return;
@@ -4621,18 +4642,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4621 * our own version based on RUC and ROC 4642 * our own version based on RUC and ROC
4622 */ 4643 */
4623 netdev->stats.rx_errors = adapter->stats.rxerrc + 4644 netdev->stats.rx_errors = adapter->stats.rxerrc +
4624 adapter->stats.crcerrs + adapter->stats.algnerrc + 4645 adapter->stats.crcerrs + adapter->stats.algnerrc +
4625 adapter->stats.ruc + adapter->stats.roc + 4646 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4626 adapter->stats.cexterr;
4627 netdev->stats.rx_length_errors = adapter->stats.ruc + 4647 netdev->stats.rx_length_errors = adapter->stats.ruc +
4628 adapter->stats.roc; 4648 adapter->stats.roc;
4629 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4649 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4630 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4650 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4631 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4651 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4632 4652
4633 /* Tx Errors */ 4653 /* Tx Errors */
4634 netdev->stats.tx_errors = adapter->stats.ecol + 4654 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4635 adapter->stats.latecol;
4636 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4655 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4637 netdev->stats.tx_window_errors = adapter->stats.latecol; 4656 netdev->stats.tx_window_errors = adapter->stats.latecol;
4638 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4657 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4790,7 +4809,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4790 **/ 4809 **/
4791static void e1000_watchdog(unsigned long data) 4810static void e1000_watchdog(unsigned long data)
4792{ 4811{
4793 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4812 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4794 4813
4795 /* Do the rest outside of interrupt context */ 4814 /* Do the rest outside of interrupt context */
4796 schedule_work(&adapter->watchdog_task); 4815 schedule_work(&adapter->watchdog_task);
@@ -4801,7 +4820,8 @@ static void e1000_watchdog(unsigned long data)
4801static void e1000_watchdog_task(struct work_struct *work) 4820static void e1000_watchdog_task(struct work_struct *work)
4802{ 4821{
4803 struct e1000_adapter *adapter = container_of(work, 4822 struct e1000_adapter *adapter = container_of(work,
4804 struct e1000_adapter, watchdog_task); 4823 struct e1000_adapter,
4824 watchdog_task);
4805 struct net_device *netdev = adapter->netdev; 4825 struct net_device *netdev = adapter->netdev;
4806 struct e1000_mac_info *mac = &adapter->hw.mac; 4826 struct e1000_mac_info *mac = &adapter->hw.mac;
4807 struct e1000_phy_info *phy = &adapter->hw.phy; 4827 struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4835,8 +4855,8 @@ static void e1000_watchdog_task(struct work_struct *work)
4835 /* update snapshot of PHY registers on LSC */ 4855 /* update snapshot of PHY registers on LSC */
4836 e1000_phy_read_status(adapter); 4856 e1000_phy_read_status(adapter);
4837 mac->ops.get_link_up_info(&adapter->hw, 4857 mac->ops.get_link_up_info(&adapter->hw,
4838 &adapter->link_speed, 4858 &adapter->link_speed,
4839 &adapter->link_duplex); 4859 &adapter->link_duplex);
4840 e1000_print_link_info(adapter); 4860 e1000_print_link_info(adapter);
4841 4861
4842 /* check if SmartSpeed worked */ 4862 /* check if SmartSpeed worked */
@@ -4949,7 +4969,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4949 adapter->flags |= FLAG_RESTART_NOW; 4969 adapter->flags |= FLAG_RESTART_NOW;
4950 else 4970 else
4951 pm_schedule_suspend(netdev->dev.parent, 4971 pm_schedule_suspend(netdev->dev.parent,
4952 LINK_TIMEOUT); 4972 LINK_TIMEOUT);
4953 } 4973 }
4954 } 4974 }
4955 4975
@@ -4984,8 +5004,8 @@ link_up:
4984 */ 5004 */
4985 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 5005 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4986 u32 dif = (adapter->gotc > adapter->gorc ? 5006 u32 dif = (adapter->gotc > adapter->gorc ?
4987 adapter->gotc - adapter->gorc : 5007 adapter->gotc - adapter->gorc :
4988 adapter->gorc - adapter->gotc) / 10000; 5008 adapter->gorc - adapter->gotc) / 10000;
4989 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 5009 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4990 5010
4991 e1000e_write_itr(adapter, itr); 5011 e1000e_write_itr(adapter, itr);
@@ -5064,14 +5084,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5064 iph->tot_len = 0; 5084 iph->tot_len = 0;
5065 iph->check = 0; 5085 iph->check = 0;
5066 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 5086 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5067 0, IPPROTO_TCP, 0); 5087 0, IPPROTO_TCP, 0);
5068 cmd_length = E1000_TXD_CMD_IP; 5088 cmd_length = E1000_TXD_CMD_IP;
5069 ipcse = skb_transport_offset(skb) - 1; 5089 ipcse = skb_transport_offset(skb) - 1;
5070 } else if (skb_is_gso_v6(skb)) { 5090 } else if (skb_is_gso_v6(skb)) {
5071 ipv6_hdr(skb)->payload_len = 0; 5091 ipv6_hdr(skb)->payload_len = 0;
5072 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5092 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5073 &ipv6_hdr(skb)->daddr, 5093 &ipv6_hdr(skb)->daddr,
5074 0, IPPROTO_TCP, 0); 5094 0, IPPROTO_TCP, 0);
5075 ipcse = 0; 5095 ipcse = 0;
5076 } 5096 }
5077 ipcss = skb_network_offset(skb); 5097 ipcss = skb_network_offset(skb);
@@ -5080,7 +5100,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5080 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 5100 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5081 5101
5082 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 5102 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5083 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 5103 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5084 5104
5085 i = tx_ring->next_to_use; 5105 i = tx_ring->next_to_use;
5086 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@@ -5150,8 +5170,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5150 5170
5151 context_desc->lower_setup.ip_config = 0; 5171 context_desc->lower_setup.ip_config = 0;
5152 context_desc->upper_setup.tcp_fields.tucss = css; 5172 context_desc->upper_setup.tcp_fields.tucss = css;
5153 context_desc->upper_setup.tcp_fields.tucso = 5173 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5154 css + skb->csum_offset;
5155 context_desc->upper_setup.tcp_fields.tucse = 0; 5174 context_desc->upper_setup.tcp_fields.tucse = 0;
5156 context_desc->tcp_seg_setup.data = 0; 5175 context_desc->tcp_seg_setup.data = 0;
5157 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 5176 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5224,7 +5243,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5224 buffer_info->time_stamp = jiffies; 5243 buffer_info->time_stamp = jiffies;
5225 buffer_info->next_to_watch = i; 5244 buffer_info->next_to_watch = i;
5226 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 5245 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5227 offset, size, DMA_TO_DEVICE); 5246 offset, size,
5247 DMA_TO_DEVICE);
5228 buffer_info->mapped_as_page = true; 5248 buffer_info->mapped_as_page = true;
5229 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5249 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5230 goto dma_error; 5250 goto dma_error;
@@ -5273,7 +5293,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5273 5293
5274 if (tx_flags & E1000_TX_FLAGS_TSO) { 5294 if (tx_flags & E1000_TX_FLAGS_TSO) {
5275 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 5295 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5276 E1000_TXD_CMD_TSE; 5296 E1000_TXD_CMD_TSE;
5277 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5297 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5278 5298
5279 if (tx_flags & E1000_TX_FLAGS_IPV4) 5299 if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5304,8 +5324,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5304 buffer_info = &tx_ring->buffer_info[i]; 5324 buffer_info = &tx_ring->buffer_info[i];
5305 tx_desc = E1000_TX_DESC(*tx_ring, i); 5325 tx_desc = E1000_TX_DESC(*tx_ring, i);
5306 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 5326 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5307 tx_desc->lower.data = 5327 tx_desc->lower.data = cpu_to_le32(txd_lower |
5308 cpu_to_le32(txd_lower | buffer_info->length); 5328 buffer_info->length);
5309 tx_desc->upper.data = cpu_to_le32(txd_upper); 5329 tx_desc->upper.data = cpu_to_le32(txd_upper);
5310 5330
5311 i++; 5331 i++;
@@ -5355,11 +5375,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5355 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 5375 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5356 return 0; 5376 return 0;
5357 5377
5358 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 5378 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5359 return 0; 5379 return 0;
5360 5380
5361 { 5381 {
5362 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 5382 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5363 struct udphdr *udp; 5383 struct udphdr *udp;
5364 5384
5365 if (ip->protocol != IPPROTO_UDP) 5385 if (ip->protocol != IPPROTO_UDP)
@@ -5584,7 +5604,7 @@ static void e1000_reset_task(struct work_struct *work)
5584 * Returns the address of the device statistics structure. 5604 * Returns the address of the device statistics structure.
5585 **/ 5605 **/
5586struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5606struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5587 struct rtnl_link_stats64 *stats) 5607 struct rtnl_link_stats64 *stats)
5588{ 5608{
5589 struct e1000_adapter *adapter = netdev_priv(netdev); 5609 struct e1000_adapter *adapter = netdev_priv(netdev);
5590 5610
@@ -5605,18 +5625,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5605 * our own version based on RUC and ROC 5625 * our own version based on RUC and ROC
5606 */ 5626 */
5607 stats->rx_errors = adapter->stats.rxerrc + 5627 stats->rx_errors = adapter->stats.rxerrc +
5608 adapter->stats.crcerrs + adapter->stats.algnerrc + 5628 adapter->stats.crcerrs + adapter->stats.algnerrc +
5609 adapter->stats.ruc + adapter->stats.roc + 5629 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5610 adapter->stats.cexterr; 5630 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5611 stats->rx_length_errors = adapter->stats.ruc +
5612 adapter->stats.roc;
5613 stats->rx_crc_errors = adapter->stats.crcerrs; 5631 stats->rx_crc_errors = adapter->stats.crcerrs;
5614 stats->rx_frame_errors = adapter->stats.algnerrc; 5632 stats->rx_frame_errors = adapter->stats.algnerrc;
5615 stats->rx_missed_errors = adapter->stats.mpc; 5633 stats->rx_missed_errors = adapter->stats.mpc;
5616 5634
5617 /* Tx Errors */ 5635 /* Tx Errors */
5618 stats->tx_errors = adapter->stats.ecol + 5636 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5619 adapter->stats.latecol;
5620 stats->tx_aborted_errors = adapter->stats.ecol; 5637 stats->tx_aborted_errors = adapter->stats.ecol;
5621 stats->tx_window_errors = adapter->stats.latecol; 5638 stats->tx_window_errors = adapter->stats.latecol;
5622 stats->tx_carrier_errors = adapter->stats.tncrs; 5639 stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5685,9 +5702,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5685 5702
5686 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5703 /* adjust allocation if LPE protects us, and we aren't using SBP */
5687 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5704 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5688 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5705 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5689 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5706 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5690 + ETH_FCS_LEN; 5707 + ETH_FCS_LEN;
5691 5708
5692 if (netif_running(netdev)) 5709 if (netif_running(netdev))
5693 e1000e_up(adapter); 5710 e1000e_up(adapter);
@@ -5866,7 +5883,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5866 phy_reg &= ~(BM_RCTL_MO_MASK); 5883 phy_reg &= ~(BM_RCTL_MO_MASK);
5867 if (mac_reg & E1000_RCTL_MO_3) 5884 if (mac_reg & E1000_RCTL_MO_3)
5868 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5885 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5869 << BM_RCTL_MO_SHIFT); 5886 << BM_RCTL_MO_SHIFT);
5870 if (mac_reg & E1000_RCTL_BAM) 5887 if (mac_reg & E1000_RCTL_BAM)
5871 phy_reg |= BM_RCTL_BAM; 5888 phy_reg |= BM_RCTL_BAM;
5872 if (mac_reg & E1000_RCTL_PMCF) 5889 if (mac_reg & E1000_RCTL_PMCF)
@@ -5935,10 +5952,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5935 } 5952 }
5936 5953
5937 ctrl = er32(CTRL); 5954 ctrl = er32(CTRL);
5938 /* advertise wake from D3Cold */
5939 #define E1000_CTRL_ADVD3WUC 0x00100000
5940 /* phy power management enable */
5941 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5942 ctrl |= E1000_CTRL_ADVD3WUC; 5955 ctrl |= E1000_CTRL_ADVD3WUC;
5943 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5956 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5944 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5957 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -5982,8 +5995,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5982 */ 5995 */
5983 e1000e_release_hw_control(adapter); 5996 e1000e_release_hw_control(adapter);
5984 5997
5985 pci_clear_master(pdev);
5986
5987 /* The pci-e switch on some quad port adapters will report a 5998 /* The pci-e switch on some quad port adapters will report a
5988 * correctable error when the MAC transitions from D0 to D3. To 5999 * correctable error when the MAC transitions from D0 to D3. To
5989 * prevent this we need to mask off the correctable errors on the 6000 * prevent this we need to mask off the correctable errors on the
@@ -6082,24 +6093,24 @@ static int __e1000_resume(struct pci_dev *pdev)
6082 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 6093 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6083 if (phy_data) { 6094 if (phy_data) {
6084 e_info("PHY Wakeup cause - %s\n", 6095 e_info("PHY Wakeup cause - %s\n",
6085 phy_data & E1000_WUS_EX ? "Unicast Packet" : 6096 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6086 phy_data & E1000_WUS_MC ? "Multicast Packet" : 6097 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6087 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 6098 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6088 phy_data & E1000_WUS_MAG ? "Magic Packet" : 6099 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6089 phy_data & E1000_WUS_LNKC ? 6100 phy_data & E1000_WUS_LNKC ?
6090 "Link Status Change" : "other"); 6101 "Link Status Change" : "other");
6091 } 6102 }
6092 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6103 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6093 } else { 6104 } else {
6094 u32 wus = er32(WUS); 6105 u32 wus = er32(WUS);
6095 if (wus) { 6106 if (wus) {
6096 e_info("MAC Wakeup cause - %s\n", 6107 e_info("MAC Wakeup cause - %s\n",
6097 wus & E1000_WUS_EX ? "Unicast Packet" : 6108 wus & E1000_WUS_EX ? "Unicast Packet" :
6098 wus & E1000_WUS_MC ? "Multicast Packet" : 6109 wus & E1000_WUS_MC ? "Multicast Packet" :
6099 wus & E1000_WUS_BC ? "Broadcast Packet" : 6110 wus & E1000_WUS_BC ? "Broadcast Packet" :
6100 wus & E1000_WUS_MAG ? "Magic Packet" : 6111 wus & E1000_WUS_MAG ? "Magic Packet" :
6101 wus & E1000_WUS_LNKC ? "Link Status Change" : 6112 wus & E1000_WUS_LNKC ? "Link Status Change" :
6102 "other"); 6113 "other");
6103 } 6114 }
6104 ew32(WUS, ~0); 6115 ew32(WUS, ~0);
6105 } 6116 }
@@ -6374,7 +6385,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
6374 e_info("(PCI Express:2.5GT/s:%s) %pM\n", 6385 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
6375 /* bus width */ 6386 /* bus width */
6376 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 6387 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
6377 "Width x1"), 6388 "Width x1"),
6378 /* MAC address */ 6389 /* MAC address */
6379 netdev->dev_addr); 6390 netdev->dev_addr);
6380 e_info("Intel(R) PRO/%s Network Connection\n", 6391 e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6414,7 +6425,7 @@ static int e1000_set_features(struct net_device *netdev,
6414 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) 6425 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6415 adapter->flags |= FLAG_TSO_FORCE; 6426 adapter->flags |= FLAG_TSO_FORCE;
6416 6427
6417 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | 6428 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6418 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | 6429 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6419 NETIF_F_RXALL))) 6430 NETIF_F_RXALL)))
6420 return 0; 6431 return 0;
@@ -6484,7 +6495,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6484 resource_size_t flash_start, flash_len; 6495 resource_size_t flash_start, flash_len;
6485 static int cards_found; 6496 static int cards_found;
6486 u16 aspm_disable_flag = 0; 6497 u16 aspm_disable_flag = 0;
6487 int i, err, pci_using_dac; 6498 int bars, i, err, pci_using_dac;
6488 u16 eeprom_data = 0; 6499 u16 eeprom_data = 0;
6489 u16 eeprom_apme_mask = E1000_EEPROM_APME; 6500 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6490 6501
@@ -6511,15 +6522,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6511 err = dma_set_coherent_mask(&pdev->dev, 6522 err = dma_set_coherent_mask(&pdev->dev,
6512 DMA_BIT_MASK(32)); 6523 DMA_BIT_MASK(32));
6513 if (err) { 6524 if (err) {
6514 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 6525 dev_err(&pdev->dev,
6526 "No usable DMA configuration, aborting\n");
6515 goto err_dma; 6527 goto err_dma;
6516 } 6528 }
6517 } 6529 }
6518 } 6530 }
6519 6531
6520 err = pci_request_selected_regions_exclusive(pdev, 6532 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6521 pci_select_bars(pdev, IORESOURCE_MEM), 6533 err = pci_request_selected_regions_exclusive(pdev, bars,
6522 e1000e_driver_name); 6534 e1000e_driver_name);
6523 if (err) 6535 if (err)
6524 goto err_pci_reg; 6536 goto err_pci_reg;
6525 6537
@@ -6572,6 +6584,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6572 goto err_flashmap; 6584 goto err_flashmap;
6573 } 6585 }
6574 6586
6587 /* Set default EEE advertisement */
6588 if (adapter->flags2 & FLAG2_HAS_EEE)
6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6590
6575 /* construct the net_device struct */ 6591 /* construct the net_device struct */
6576 netdev->netdev_ops = &e1000e_netdev_ops; 6592 netdev->netdev_ops = &e1000e_netdev_ops;
6577 e1000e_set_ethtool_ops(netdev); 6593 e1000e_set_ethtool_ops(netdev);
@@ -6620,8 +6636,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6620 6636
6621 /* Set initial default active device features */ 6637 /* Set initial default active device features */
6622 netdev->features = (NETIF_F_SG | 6638 netdev->features = (NETIF_F_SG |
6623 NETIF_F_HW_VLAN_RX | 6639 NETIF_F_HW_VLAN_CTAG_RX |
6624 NETIF_F_HW_VLAN_TX | 6640 NETIF_F_HW_VLAN_CTAG_TX |
6625 NETIF_F_TSO | 6641 NETIF_F_TSO |
6626 NETIF_F_TSO6 | 6642 NETIF_F_TSO6 |
6627 NETIF_F_RXHASH | 6643 NETIF_F_RXHASH |
@@ -6635,7 +6651,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6635 netdev->hw_features |= NETIF_F_RXALL; 6651 netdev->hw_features |= NETIF_F_RXALL;
6636 6652
6637 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 6653 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6638 netdev->features |= NETIF_F_HW_VLAN_FILTER; 6654 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6639 6655
6640 netdev->vlan_features |= (NETIF_F_SG | 6656 netdev->vlan_features |= (NETIF_F_SG |
6641 NETIF_F_TSO | 6657 NETIF_F_TSO |
@@ -6688,11 +6704,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6688 6704
6689 init_timer(&adapter->watchdog_timer); 6705 init_timer(&adapter->watchdog_timer);
6690 adapter->watchdog_timer.function = e1000_watchdog; 6706 adapter->watchdog_timer.function = e1000_watchdog;
6691 adapter->watchdog_timer.data = (unsigned long) adapter; 6707 adapter->watchdog_timer.data = (unsigned long)adapter;
6692 6708
6693 init_timer(&adapter->phy_info_timer); 6709 init_timer(&adapter->phy_info_timer);
6694 adapter->phy_info_timer.function = e1000_update_phy_info; 6710 adapter->phy_info_timer.function = e1000_update_phy_info;
6695 adapter->phy_info_timer.data = (unsigned long) adapter; 6711 adapter->phy_info_timer.data = (unsigned long)adapter;
6696 6712
6697 INIT_WORK(&adapter->reset_task, e1000_reset_task); 6713 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6698 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 6714 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -6800,7 +6816,7 @@ err_ioremap:
6800 free_netdev(netdev); 6816 free_netdev(netdev);
6801err_alloc_etherdev: 6817err_alloc_etherdev:
6802 pci_release_selected_regions(pdev, 6818 pci_release_selected_regions(pdev,
6803 pci_select_bars(pdev, IORESOURCE_MEM)); 6819 pci_select_bars(pdev, IORESOURCE_MEM));
6804err_pci_reg: 6820err_pci_reg:
6805err_dma: 6821err_dma:
6806 pci_disable_device(pdev); 6822 pci_disable_device(pdev);
@@ -6870,7 +6886,7 @@ static void e1000_remove(struct pci_dev *pdev)
6870 if (adapter->hw.flash_address) 6886 if (adapter->hw.flash_address)
6871 iounmap(adapter->hw.flash_address); 6887 iounmap(adapter->hw.flash_address);
6872 pci_release_selected_regions(pdev, 6888 pci_release_selected_regions(pdev,
6873 pci_select_bars(pdev, IORESOURCE_MEM)); 6889 pci_select_bars(pdev, IORESOURCE_MEM));
6874 6890
6875 free_netdev(netdev); 6891 free_netdev(netdev);
6876 6892
@@ -6891,7 +6907,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6891 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 6907 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6892 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 6908 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6893 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 6909 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6894 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, 6910 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
6911 board_82571 },
6895 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, 6912 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6896 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, 6913 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6897 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 6914 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -6967,8 +6984,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6967#ifdef CONFIG_PM 6984#ifdef CONFIG_PM
6968static const struct dev_pm_ops e1000_pm_ops = { 6985static const struct dev_pm_ops e1000_pm_ops = {
6969 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 6986 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6970 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, 6987 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
6971 e1000_runtime_resume, e1000_idle) 6988 e1000_idle)
6972}; 6989};
6973#endif 6990#endif
6974 6991
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 84fecc268162..44ddc0a0ee0e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
630{ 630{
631 u32 ctrl_ext; 631 u32 ctrl_ext;
632 632
633 udelay(10); 633 usleep_range(10, 20);
634 ctrl_ext = er32(CTRL_EXT); 634 ctrl_ext = er32(CTRL_EXT);
635 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 635 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
636 ew32(CTRL_EXT, ctrl_ext); 636 ew32(CTRL_EXT, ctrl_ext);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 98da75dff936..c16bd75b6caa 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -45,7 +45,7 @@
45unsigned int copybreak = COPYBREAK_DEFAULT; 45unsigned int copybreak = COPYBREAK_DEFAULT;
46module_param(copybreak, uint, 0644); 46module_param(copybreak, uint, 0644);
47MODULE_PARM_DESC(copybreak, 47MODULE_PARM_DESC(copybreak,
48 "Maximum size of packet that is copied to a new buffer on receive"); 48 "Maximum size of packet that is copied to a new buffer on receive");
49 49
50/* All parameters are treated the same, as an integer array of values. 50/* All parameters are treated the same, as an integer array of values.
51 * This macro just reduces the need to repeat the same declaration code 51 * This macro just reduces the need to repeat the same declaration code
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
143 * 143 *
144 * Default Value: 1 (enabled) 144 * Default Value: 1 (enabled)
145 */ 145 */
146E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 146E1000_PARAM(WriteProtectNVM,
147 "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
147 148
148/* Enable CRC Stripping 149/* Enable CRC Stripping
149 * 150 *
@@ -160,13 +161,18 @@ struct e1000_option {
160 const char *err; 161 const char *err;
161 int def; 162 int def;
162 union { 163 union {
163 struct { /* range_option info */ 164 /* range_option info */
165 struct {
164 int min; 166 int min;
165 int max; 167 int max;
166 } r; 168 } r;
167 struct { /* list_option info */ 169 /* list_option info */
170 struct {
168 int nr; 171 int nr;
169 struct e1000_opt_list { int i; char *str; } *p; 172 struct e1000_opt_list {
173 int i;
174 char *str;
175 } *p;
170 } l; 176 } l;
171 } arg; 177 } arg;
172}; 178};
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
246 "Using defaults for all values\n"); 252 "Using defaults for all values\n");
247 } 253 }
248 254
249 { /* Transmit Interrupt Delay */ 255 /* Transmit Interrupt Delay */
256 {
250 static const struct e1000_option opt = { 257 static const struct e1000_option opt = {
251 .type = range_option, 258 .type = range_option,
252 .name = "Transmit Interrupt Delay", 259 .name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
265 adapter->tx_int_delay = opt.def; 272 adapter->tx_int_delay = opt.def;
266 } 273 }
267 } 274 }
268 { /* Transmit Absolute Interrupt Delay */ 275 /* Transmit Absolute Interrupt Delay */
276 {
269 static const struct e1000_option opt = { 277 static const struct e1000_option opt = {
270 .type = range_option, 278 .type = range_option,
271 .name = "Transmit Absolute Interrupt Delay", 279 .name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
284 adapter->tx_abs_int_delay = opt.def; 292 adapter->tx_abs_int_delay = opt.def;
285 } 293 }
286 } 294 }
287 { /* Receive Interrupt Delay */ 295 /* Receive Interrupt Delay */
296 {
288 static struct e1000_option opt = { 297 static struct e1000_option opt = {
289 .type = range_option, 298 .type = range_option,
290 .name = "Receive Interrupt Delay", 299 .name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
303 adapter->rx_int_delay = opt.def; 312 adapter->rx_int_delay = opt.def;
304 } 313 }
305 } 314 }
306 { /* Receive Absolute Interrupt Delay */ 315 /* Receive Absolute Interrupt Delay */
316 {
307 static const struct e1000_option opt = { 317 static const struct e1000_option opt = {
308 .type = range_option, 318 .type = range_option,
309 .name = "Receive Absolute Interrupt Delay", 319 .name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
322 adapter->rx_abs_int_delay = opt.def; 332 adapter->rx_abs_int_delay = opt.def;
323 } 333 }
324 } 334 }
325 { /* Interrupt Throttling Rate */ 335 /* Interrupt Throttling Rate */
336 {
326 static const struct e1000_option opt = { 337 static const struct e1000_option opt = {
327 .type = range_option, 338 .type = range_option,
328 .name = "Interrupt Throttling Rate (ints/sec)", 339 .name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
392 break; 403 break;
393 } 404 }
394 } 405 }
395 { /* Interrupt Mode */ 406 /* Interrupt Mode */
407 {
396 static struct e1000_option opt = { 408 static struct e1000_option opt = {
397 .type = range_option, 409 .type = range_option,
398 .name = "Interrupt Mode", 410 .name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
435 kfree(opt.err); 447 kfree(opt.err);
436#endif 448#endif
437 } 449 }
438 { /* Smart Power Down */ 450 /* Smart Power Down */
451 {
439 static const struct e1000_option opt = { 452 static const struct e1000_option opt = {
440 .type = enable_option, 453 .type = enable_option,
441 .name = "PHY Smart Power Down", 454 .name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
450 adapter->flags |= FLAG_SMART_POWER_DOWN; 463 adapter->flags |= FLAG_SMART_POWER_DOWN;
451 } 464 }
452 } 465 }
453 { /* CRC Stripping */ 466 /* CRC Stripping */
467 {
454 static const struct e1000_option opt = { 468 static const struct e1000_option opt = {
455 .type = enable_option, 469 .type = enable_option,
456 .name = "CRC Stripping", 470 .name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
470 adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; 484 adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
471 } 485 }
472 } 486 }
473 { /* Kumeran Lock Loss Workaround */ 487 /* Kumeran Lock Loss Workaround */
488 {
474 static const struct e1000_option opt = { 489 static const struct e1000_option opt = {
475 .type = enable_option, 490 .type = enable_option,
476 .name = "Kumeran Lock Loss Workaround", 491 .name = "Kumeran Lock Loss Workaround",
477 .err = "defaulting to Enabled", 492 .err = "defaulting to Enabled",
478 .def = OPTION_ENABLED 493 .def = OPTION_ENABLED
479 }; 494 };
495 bool enabled = opt.def;
480 496
481 if (num_KumeranLockLoss > bd) { 497 if (num_KumeranLockLoss > bd) {
482 unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; 498 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
483 e1000_validate_option(&kmrn_lock_loss, &opt, adapter); 499 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
484 if (hw->mac.type == e1000_ich8lan) 500 enabled = kmrn_lock_loss;
485 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
486 kmrn_lock_loss);
487 } else {
488 if (hw->mac.type == e1000_ich8lan)
489 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
490 opt.def);
491 } 501 }
502
503 if (hw->mac.type == e1000_ich8lan)
504 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
505 enabled);
492 } 506 }
493 { /* Write-protect NVM */ 507 /* Write-protect NVM */
508 {
494 static const struct e1000_option opt = { 509 static const struct e1000_option opt = {
495 .type = enable_option, 510 .type = enable_option,
496 .name = "Write-protect NVM", 511 .name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
500 515
501 if (adapter->flags & FLAG_IS_ICH) { 516 if (adapter->flags & FLAG_IS_ICH) {
502 if (num_WriteProtectNVM > bd) { 517 if (num_WriteProtectNVM > bd) {
503 unsigned int write_protect_nvm = WriteProtectNVM[bd]; 518 unsigned int write_protect_nvm =
519 WriteProtectNVM[bd];
504 e1000_validate_option(&write_protect_nvm, &opt, 520 e1000_validate_option(&write_protect_nvm, &opt,
505 adapter); 521 adapter);
506 if (write_protect_nvm) 522 if (write_protect_nvm)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0930c136aa31..59c76a6815a0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
37 37
38/* Cable length tables */ 38/* Cable length tables */
39static const u16 e1000_m88_cable_length_table[] = { 39static const u16 e1000_m88_cable_length_table[] = {
40 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 40 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
41};
42
41#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ 43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
42 ARRAY_SIZE(e1000_m88_cable_length_table) 44 ARRAY_SIZE(e1000_m88_cable_length_table)
43 45
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
49 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 51 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
50 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 52 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
51 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 53 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
52 124}; 54 124
55};
56
53#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 57#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
54 ARRAY_SIZE(e1000_igp_2_cable_length_table) 58 ARRAY_SIZE(e1000_igp_2_cable_length_table)
55 59
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
67 71
68 manc = er32(MANC); 72 manc = er32(MANC);
69 73
70 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
71 E1000_BLK_PHY_RESET : 0;
72} 75}
73 76
74/** 77/**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
94 return ret_val; 97 return ret_val;
95 98
96 phy->id = (u32)(phy_id << 16); 99 phy->id = (u32)(phy_id << 16);
97 udelay(20); 100 usleep_range(20, 40);
98 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); 101 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
99 if (ret_val) 102 if (ret_val)
100 return ret_val; 103 return ret_val;
@@ -175,7 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
175 e_dbg("MDI Error\n"); 178 e_dbg("MDI Error\n");
176 return -E1000_ERR_PHY; 179 return -E1000_ERR_PHY;
177 } 180 }
178 *data = (u16) mdic; 181 if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
182 e_dbg("MDI Read offset error - requested %d, returned %d\n",
183 offset,
184 (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
185 return -E1000_ERR_PHY;
186 }
187 *data = (u16)mdic;
179 188
180 /* Allow some time after each MDIC transaction to avoid 189 /* Allow some time after each MDIC transaction to avoid
181 * reading duplicate data in the next MDIC transaction. 190 * reading duplicate data in the next MDIC transaction.
@@ -233,6 +242,12 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
233 e_dbg("MDI Error\n"); 242 e_dbg("MDI Error\n");
234 return -E1000_ERR_PHY; 243 return -E1000_ERR_PHY;
235 } 244 }
245 if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
246 e_dbg("MDI Write offset error - requested %d, returned %d\n",
247 offset,
248 (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
249 return -E1000_ERR_PHY;
250 }
236 251
237 /* Allow some time after each MDIC transaction to avoid 252 /* Allow some time after each MDIC transaction to avoid
238 * reading duplicate data in the next MDIC transaction. 253 * reading duplicate data in the next MDIC transaction.
@@ -324,7 +339,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
324 * semaphores before exiting. 339 * semaphores before exiting.
325 **/ 340 **/
326static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, 341static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
327 bool locked) 342 bool locked)
328{ 343{
329 s32 ret_val = 0; 344 s32 ret_val = 0;
330 345
@@ -391,7 +406,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
391 * at the offset. Release any acquired semaphores before exiting. 406 * at the offset. Release any acquired semaphores before exiting.
392 **/ 407 **/
393static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, 408static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
394 bool locked) 409 bool locked)
395{ 410{
396 s32 ret_val = 0; 411 s32 ret_val = 0;
397 412
@@ -410,8 +425,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
410 (u16)offset); 425 (u16)offset);
411 if (!ret_val) 426 if (!ret_val)
412 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & 427 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
413 offset, 428 offset, data);
414 data);
415 if (!locked) 429 if (!locked)
416 hw->phy.ops.release(hw); 430 hw->phy.ops.release(hw);
417 431
@@ -458,7 +472,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
458 * Release any acquired semaphores before exiting. 472 * Release any acquired semaphores before exiting.
459 **/ 473 **/
460static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, 474static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
461 bool locked) 475 bool locked)
462{ 476{
463 u32 kmrnctrlsta; 477 u32 kmrnctrlsta;
464 478
@@ -531,7 +545,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
531 * before exiting. 545 * before exiting.
532 **/ 546 **/
533static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, 547static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
534 bool locked) 548 bool locked)
535{ 549{
536 u32 kmrnctrlsta; 550 u32 kmrnctrlsta;
537 551
@@ -772,8 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
772 786
773 phy_data |= M88E1000_EPSCR_TX_CLK_25; 787 phy_data |= M88E1000_EPSCR_TX_CLK_25;
774 788
775 if ((phy->revision == 2) && 789 if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
776 (phy->id == M88E1111_I_PHY_ID)) {
777 /* 82573L PHY - set the downshift counter to 5x. */ 790 /* 82573L PHY - set the downshift counter to 5x. */
778 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; 791 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
779 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; 792 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1296,7 +1309,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1296 e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); 1309 e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
1297 1310
1298 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1311 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
1299 100000, &link); 1312 100000, &link);
1300 if (ret_val) 1313 if (ret_val)
1301 return ret_val; 1314 return ret_val;
1302 1315
@@ -1319,7 +1332,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1319 1332
1320 /* Try once more */ 1333 /* Try once more */
1321 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1334 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
1322 100000, &link); 1335 100000, &link);
1323 if (ret_val) 1336 if (ret_val)
1324 return ret_val; 1337 return ret_val;
1325 } 1338 }
@@ -1609,9 +1622,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
1609 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); 1622 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
1610 1623
1611 if (!ret_val) 1624 if (!ret_val)
1612 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) 1625 phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
1613 ? e1000_rev_polarity_reversed 1626 ? e1000_rev_polarity_reversed
1614 : e1000_rev_polarity_normal; 1627 : e1000_rev_polarity_normal);
1615 1628
1616 return ret_val; 1629 return ret_val;
1617} 1630}
@@ -1653,9 +1666,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1653 ret_val = e1e_rphy(hw, offset, &data); 1666 ret_val = e1e_rphy(hw, offset, &data);
1654 1667
1655 if (!ret_val) 1668 if (!ret_val)
1656 phy->cable_polarity = (data & mask) 1669 phy->cable_polarity = ((data & mask)
1657 ? e1000_rev_polarity_reversed 1670 ? e1000_rev_polarity_reversed
1658 : e1000_rev_polarity_normal; 1671 : e1000_rev_polarity_normal);
1659 1672
1660 return ret_val; 1673 return ret_val;
1661} 1674}
@@ -1685,9 +1698,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
1685 ret_val = e1e_rphy(hw, offset, &phy_data); 1698 ret_val = e1e_rphy(hw, offset, &phy_data);
1686 1699
1687 if (!ret_val) 1700 if (!ret_val)
1688 phy->cable_polarity = (phy_data & mask) 1701 phy->cable_polarity = ((phy_data & mask)
1689 ? e1000_rev_polarity_reversed 1702 ? e1000_rev_polarity_reversed
1690 : e1000_rev_polarity_normal; 1703 : e1000_rev_polarity_normal);
1691 1704
1692 return ret_val; 1705 return ret_val;
1693} 1706}
@@ -1733,7 +1746,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
1733 * Polls the PHY status register for link, 'iterations' number of times. 1746 * Polls the PHY status register for link, 'iterations' number of times.
1734 **/ 1747 **/
1735s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, 1748s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1736 u32 usec_interval, bool *success) 1749 u32 usec_interval, bool *success)
1737{ 1750{
1738 s32 ret_val = 0; 1751 s32 ret_val = 0;
1739 u16 i, phy_status; 1752 u16 i, phy_status;
@@ -1756,7 +1769,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1756 if (phy_status & BMSR_LSTATUS) 1769 if (phy_status & BMSR_LSTATUS)
1757 break; 1770 break;
1758 if (usec_interval >= 1000) 1771 if (usec_interval >= 1000)
1759 mdelay(usec_interval/1000); 1772 mdelay(usec_interval / 1000);
1760 else 1773 else
1761 udelay(usec_interval); 1774 udelay(usec_interval);
1762 } 1775 }
@@ -1791,8 +1804,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
1791 if (ret_val) 1804 if (ret_val)
1792 return ret_val; 1805 return ret_val;
1793 1806
1794 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 1807 index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1795 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 1808 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
1796 1809
1797 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) 1810 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
1798 return -E1000_ERR_PHY; 1811 return -E1000_ERR_PHY;
@@ -1824,10 +1837,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1824 u16 cur_agc_index, max_agc_index = 0; 1837 u16 cur_agc_index, max_agc_index = 0;
1825 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1838 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1826 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { 1839 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1827 IGP02E1000_PHY_AGC_A, 1840 IGP02E1000_PHY_AGC_A,
1828 IGP02E1000_PHY_AGC_B, 1841 IGP02E1000_PHY_AGC_B,
1829 IGP02E1000_PHY_AGC_C, 1842 IGP02E1000_PHY_AGC_C,
1830 IGP02E1000_PHY_AGC_D 1843 IGP02E1000_PHY_AGC_D
1831 }; 1844 };
1832 1845
1833 /* Read the AGC registers for all channels */ 1846 /* Read the AGC registers for all channels */
@@ -1841,8 +1854,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1841 * that can be put into the lookup table to obtain the 1854 * that can be put into the lookup table to obtain the
1842 * approximate cable length. 1855 * approximate cable length.
1843 */ 1856 */
1844 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 1857 cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1845 IGP02E1000_AGC_LENGTH_MASK; 1858 IGP02E1000_AGC_LENGTH_MASK);
1846 1859
1847 /* Array index bound check. */ 1860 /* Array index bound check. */
1848 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || 1861 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1878,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1865 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); 1878 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
1866 1879
1867 /* Calculate cable length with the error range of +/- 10 meters. */ 1880 /* Calculate cable length with the error range of +/- 10 meters. */
1868 phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 1881 phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
1869 (agc_value - IGP02E1000_AGC_RANGE) : 0; 1882 (agc_value - IGP02E1000_AGC_RANGE) : 0);
1870 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; 1883 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
1871 1884
1872 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; 1885 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2053,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2040 return ret_val; 2053 return ret_val;
2041 } else { 2054 } else {
2042 /* Polarity is forced */ 2055 /* Polarity is forced */
2043 phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) 2056 phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
2044 ? e1000_rev_polarity_reversed 2057 ? e1000_rev_polarity_reversed
2045 : e1000_rev_polarity_normal; 2058 : e1000_rev_polarity_normal);
2046 } 2059 }
2047 2060
2048 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); 2061 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2132,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
2119 ew32(CTRL, ctrl); 2132 ew32(CTRL, ctrl);
2120 e1e_flush(); 2133 e1e_flush();
2121 2134
2122 udelay(150); 2135 usleep_range(150, 300);
2123 2136
2124 phy->ops.release(hw); 2137 phy->ops.release(hw);
2125 2138
@@ -2375,13 +2388,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2375 2388
2376 /* Page is shifted left, PHY expects (page x 32) */ 2389 /* Page is shifted left, PHY expects (page x 32) */
2377 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2390 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2378 (page << page_shift)); 2391 (page << page_shift));
2379 if (ret_val) 2392 if (ret_val)
2380 goto release; 2393 goto release;
2381 } 2394 }
2382 2395
2383 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2396 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2384 data); 2397 data);
2385 2398
2386release: 2399release:
2387 hw->phy.ops.release(hw); 2400 hw->phy.ops.release(hw);
@@ -2433,13 +2446,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2433 2446
2434 /* Page is shifted left, PHY expects (page x 32) */ 2447 /* Page is shifted left, PHY expects (page x 32) */
2435 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2448 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2436 (page << page_shift)); 2449 (page << page_shift));
2437 if (ret_val) 2450 if (ret_val)
2438 goto release; 2451 goto release;
2439 } 2452 }
2440 2453
2441 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2454 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2442 data); 2455 data);
2443release: 2456release:
2444 hw->phy.ops.release(hw); 2457 hw->phy.ops.release(hw);
2445 return ret_val; 2458 return ret_val;
@@ -2674,7 +2687,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2674 if (read) { 2687 if (read) {
2675 /* Read the Wakeup register page value using opcode 0x12 */ 2688 /* Read the Wakeup register page value using opcode 0x12 */
2676 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2689 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2677 data); 2690 data);
2678 } else { 2691 } else {
2679 /* Write the Wakeup register page value using opcode 0x12 */ 2692 /* Write the Wakeup register page value using opcode 0x12 */
2680 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2693 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@@ -2763,7 +2776,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2763 2776
2764 if (page > 0 && page < HV_INTC_FC_PAGE_START) { 2777 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2765 ret_val = e1000_access_phy_debug_regs_hv(hw, offset, 2778 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2766 data, true); 2779 data, true);
2767 goto out; 2780 goto out;
2768 } 2781 }
2769 2782
@@ -2786,8 +2799,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2786 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, 2799 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
2787 page << IGP_PAGE_SHIFT, reg); 2800 page << IGP_PAGE_SHIFT, reg);
2788 2801
2789 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2802 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
2790 data);
2791out: 2803out:
2792 if (!locked) 2804 if (!locked)
2793 hw->phy.ops.release(hw); 2805 hw->phy.ops.release(hw);
@@ -2871,7 +2883,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2871 2883
2872 if (page > 0 && page < HV_INTC_FC_PAGE_START) { 2884 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2873 ret_val = e1000_access_phy_debug_regs_hv(hw, offset, 2885 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2874 &data, false); 2886 &data, false);
2875 goto out; 2887 goto out;
2876 } 2888 }
2877 2889
@@ -2910,7 +2922,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2910 page << IGP_PAGE_SHIFT, reg); 2922 page << IGP_PAGE_SHIFT, reg);
2911 2923
2912 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2924 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2913 data); 2925 data);
2914 2926
2915out: 2927out:
2916 if (!locked) 2928 if (!locked)
@@ -2988,15 +3000,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2988 * These accesses done with PHY address 2 and without using pages. 3000 * These accesses done with PHY address 2 and without using pages.
2989 **/ 3001 **/
2990static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 3002static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2991 u16 *data, bool read) 3003 u16 *data, bool read)
2992{ 3004{
2993 s32 ret_val; 3005 s32 ret_val;
2994 u32 addr_reg; 3006 u32 addr_reg;
2995 u32 data_reg; 3007 u32 data_reg;
2996 3008
2997 /* This takes care of the difference with desktop vs mobile phy */ 3009 /* This takes care of the difference with desktop vs mobile phy */
2998 addr_reg = (hw->phy.type == e1000_phy_82578) ? 3010 addr_reg = ((hw->phy.type == e1000_phy_82578) ?
2999 I82578_ADDR_REG : I82577_ADDR_REG; 3011 I82578_ADDR_REG : I82577_ADDR_REG);
3000 data_reg = addr_reg + 1; 3012 data_reg = addr_reg + 1;
3001 3013
3002 /* All operations in this function are phy address 2 */ 3014 /* All operations in this function are phy address 2 */
@@ -3050,8 +3062,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3050 if (ret_val) 3062 if (ret_val)
3051 return ret_val; 3063 return ret_val;
3052 3064
3053 data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | 3065 data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
3054 BM_CS_STATUS_SPEED_MASK; 3066 BM_CS_STATUS_SPEED_MASK);
3055 3067
3056 if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | 3068 if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
3057 BM_CS_STATUS_SPEED_1000)) 3069 BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3098,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3086 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); 3098 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3087 3099
3088 if (!ret_val) 3100 if (!ret_val)
3089 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) 3101 phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
3090 ? e1000_rev_polarity_reversed 3102 ? e1000_rev_polarity_reversed
3091 : e1000_rev_polarity_normal; 3103 : e1000_rev_polarity_normal);
3092 3104
3093 return ret_val; 3105 return ret_val;
3094} 3106}
@@ -3215,8 +3227,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3215 if (ret_val) 3227 if (ret_val)
3216 return ret_val; 3228 return ret_val;
3217 3229
3218 length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> 3230 length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
3219 I82577_DSTATUS_CABLE_LENGTH_SHIFT; 3231 I82577_DSTATUS_CABLE_LENGTH_SHIFT);
3220 3232
3221 if (length == E1000_CABLE_LENGTH_UNDEFINED) 3233 if (length == E1000_CABLE_LENGTH_UNDEFINED)
3222 return -E1000_ERR_PHY; 3234 return -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index b477fa53ec94..065f8c80d4f2 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -145,8 +145,7 @@ static int e1000e_phc_settime(struct ptp_clock_info *ptp,
145 unsigned long flags; 145 unsigned long flags;
146 u64 ns; 146 u64 ns;
147 147
148 ns = ts->tv_sec * NSEC_PER_SEC; 148 ns = timespec_to_ns(ts);
149 ns += ts->tv_nsec;
150 149
151 /* reset the timecounter */ 150 /* reset the timecounter */
152 spin_lock_irqsave(&adapter->systim_lock, flags); 151 spin_lock_irqsave(&adapter->systim_lock, flags);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 12b1d8480808..ff6a17cb1362 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -100,6 +100,7 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
100 break; 100 break;
101 case e1000_82580: 101 case e1000_82580:
102 case e1000_i350: 102 case e1000_i350:
103 case e1000_i354:
103 case e1000_i210: 104 case e1000_i210:
104 case e1000_i211: 105 case e1000_i211:
105 reg = rd32(E1000_MDICNFG); 106 reg = rd32(E1000_MDICNFG);
@@ -149,6 +150,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
149 switch (hw->mac.type) { 150 switch (hw->mac.type) {
150 case e1000_82580: 151 case e1000_82580:
151 case e1000_i350: 152 case e1000_i350:
153 case e1000_i354:
152 phy->ops.read_reg = igb_read_phy_reg_82580; 154 phy->ops.read_reg = igb_read_phy_reg_82580;
153 phy->ops.write_reg = igb_write_phy_reg_82580; 155 phy->ops.write_reg = igb_write_phy_reg_82580;
154 break; 156 break;
@@ -174,13 +176,14 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
174 176
175 /* Verify phy id and set remaining function pointers */ 177 /* Verify phy id and set remaining function pointers */
176 switch (phy->id) { 178 switch (phy->id) {
179 case M88E1545_E_PHY_ID:
177 case I347AT4_E_PHY_ID: 180 case I347AT4_E_PHY_ID:
178 case M88E1112_E_PHY_ID: 181 case M88E1112_E_PHY_ID:
179 case M88E1111_I_PHY_ID: 182 case M88E1111_I_PHY_ID:
180 phy->type = e1000_phy_m88; 183 phy->type = e1000_phy_m88;
184 phy->ops.check_polarity = igb_check_polarity_m88;
181 phy->ops.get_phy_info = igb_get_phy_info_m88; 185 phy->ops.get_phy_info = igb_get_phy_info_m88;
182 if (phy->id == I347AT4_E_PHY_ID || 186 if (phy->id != M88E1111_I_PHY_ID)
183 phy->id == M88E1112_E_PHY_ID)
184 phy->ops.get_cable_length = 187 phy->ops.get_cable_length =
185 igb_get_cable_length_m88_gen2; 188 igb_get_cable_length_m88_gen2;
186 else 189 else
@@ -227,7 +230,7 @@ out:
227 * igb_init_nvm_params_82575 - Init NVM func ptrs. 230 * igb_init_nvm_params_82575 - Init NVM func ptrs.
228 * @hw: pointer to the HW structure 231 * @hw: pointer to the HW structure
229 **/ 232 **/
230s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 233static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
231{ 234{
232 struct e1000_nvm_info *nvm = &hw->nvm; 235 struct e1000_nvm_info *nvm = &hw->nvm;
233 u32 eecd = rd32(E1000_EECD); 236 u32 eecd = rd32(E1000_EECD);
@@ -287,6 +290,7 @@ s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
287 nvm->ops.read = igb_read_nvm_spi; 290 nvm->ops.read = igb_read_nvm_spi;
288 nvm->ops.write = igb_write_nvm_spi; 291 nvm->ops.write = igb_write_nvm_spi;
289 break; 292 break;
293 case e1000_i354:
290 case e1000_i350: 294 case e1000_i350:
291 nvm->ops.validate = igb_validate_nvm_checksum_i350; 295 nvm->ops.validate = igb_validate_nvm_checksum_i350;
292 nvm->ops.update = igb_update_nvm_checksum_i350; 296 nvm->ops.update = igb_update_nvm_checksum_i350;
@@ -352,6 +356,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
352 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 356 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
353 break; 357 break;
354 case e1000_i350: 358 case e1000_i350:
359 case e1000_i354:
355 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 360 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
356 break; 361 break;
357 default: 362 default:
@@ -384,6 +389,9 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
384 dev_spec->eee_disable = false; 389 dev_spec->eee_disable = false;
385 else 390 else
386 dev_spec->eee_disable = true; 391 dev_spec->eee_disable = true;
392 /* Allow a single clear of the SW semaphore on I210 and newer */
393 if (mac->type >= e1000_i210)
394 dev_spec->clear_semaphore_once = true;
387 /* physical interface link setup */ 395 /* physical interface link setup */
388 mac->ops.setup_physical_interface = 396 mac->ops.setup_physical_interface =
389 (hw->phy.media_type == e1000_media_type_copper) 397 (hw->phy.media_type == e1000_media_type_copper)
@@ -435,8 +443,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
435 mac->type = e1000_i350; 443 mac->type = e1000_i350;
436 break; 444 break;
437 case E1000_DEV_ID_I210_COPPER: 445 case E1000_DEV_ID_I210_COPPER:
438 case E1000_DEV_ID_I210_COPPER_OEM1:
439 case E1000_DEV_ID_I210_COPPER_IT:
440 case E1000_DEV_ID_I210_FIBER: 446 case E1000_DEV_ID_I210_FIBER:
441 case E1000_DEV_ID_I210_SERDES: 447 case E1000_DEV_ID_I210_SERDES:
442 case E1000_DEV_ID_I210_SGMII: 448 case E1000_DEV_ID_I210_SGMII:
@@ -445,14 +451,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
445 case E1000_DEV_ID_I211_COPPER: 451 case E1000_DEV_ID_I211_COPPER:
446 mac->type = e1000_i211; 452 mac->type = e1000_i211;
447 break; 453 break;
454 case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
455 case E1000_DEV_ID_I354_SGMII:
456 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
457 mac->type = e1000_i354;
458 break;
448 default: 459 default:
449 return -E1000_ERR_MAC_INIT; 460 return -E1000_ERR_MAC_INIT;
450 break; 461 break;
451 } 462 }
452 463
453 /* Set media type */ 464 /* Set media type */
454 /* 465 /* The 82575 uses bits 22:23 for link mode. The mode can be changed
455 * The 82575 uses bits 22:23 for link mode. The mode can be changed
456 * based on the EEPROM. We cannot rely upon device ID. There 466 * based on the EEPROM. We cannot rely upon device ID. There
457 * is no distinguishable difference between fiber and internal 467 * is no distinguishable difference between fiber and internal
458 * SerDes mode on the 82575. There can be an external PHY attached 468 * SerDes mode on the 82575. There can be an external PHY attached
@@ -621,8 +631,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
621 u32 ctrl_ext; 631 u32 ctrl_ext;
622 u32 mdic; 632 u32 mdic;
623 633
624 /* 634 /* For SGMII PHYs, we try the list of possible addresses until
625 * For SGMII PHYs, we try the list of possible addresses until
626 * we find one that works. For non-SGMII PHYs 635 * we find one that works. For non-SGMII PHYs
627 * (e.g. integrated copper PHYs), an address of 1 should 636 * (e.g. integrated copper PHYs), an address of 1 should
628 * work. The result of this function should mean phy->phy_addr 637 * work. The result of this function should mean phy->phy_addr
@@ -644,6 +653,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
644 break; 653 break;
645 case e1000_82580: 654 case e1000_82580:
646 case e1000_i350: 655 case e1000_i350:
656 case e1000_i354:
647 case e1000_i210: 657 case e1000_i210:
648 case e1000_i211: 658 case e1000_i211:
649 mdic = rd32(E1000_MDICNFG); 659 mdic = rd32(E1000_MDICNFG);
@@ -665,8 +675,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
665 wrfl(); 675 wrfl();
666 msleep(300); 676 msleep(300);
667 677
668 /* 678 /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
669 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
670 * Therefore, we need to test 1-7 679 * Therefore, we need to test 1-7
671 */ 680 */
672 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 681 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
@@ -674,8 +683,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
674 if (ret_val == 0) { 683 if (ret_val == 0) {
675 hw_dbg("Vendor ID 0x%08X read at address %u\n", 684 hw_dbg("Vendor ID 0x%08X read at address %u\n",
676 phy_id, phy->addr); 685 phy_id, phy->addr);
677 /* 686 /* At the time of this writing, The M88 part is
678 * At the time of this writing, The M88 part is
679 * the only supported SGMII PHY product. 687 * the only supported SGMII PHY product.
680 */ 688 */
681 if (phy_id == M88_VENDOR) 689 if (phy_id == M88_VENDOR)
@@ -711,15 +719,13 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
711{ 719{
712 s32 ret_val; 720 s32 ret_val;
713 721
714 /* 722 /* This isn't a true "hard" reset, but is the only reset
715 * This isn't a true "hard" reset, but is the only reset
716 * available to us at this time. 723 * available to us at this time.
717 */ 724 */
718 725
719 hw_dbg("Soft resetting SGMII attached PHY...\n"); 726 hw_dbg("Soft resetting SGMII attached PHY...\n");
720 727
721 /* 728 /* SFP documentation requires the following to configure the SPF module
722 * SFP documentation requires the following to configure the SPF module
723 * to work on SGMII. No further documentation is given. 729 * to work on SGMII. No further documentation is given.
724 */ 730 */
725 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 731 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
@@ -774,8 +780,7 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
774 data &= ~IGP02E1000_PM_D0_LPLU; 780 data &= ~IGP02E1000_PM_D0_LPLU;
775 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 781 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
776 data); 782 data);
777 /* 783 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
778 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
779 * during Dx states where the power conservation is most 784 * during Dx states where the power conservation is most
780 * important. During driver activity we should enable 785 * important. During driver activity we should enable
781 * SmartSpeed, so performance is maintained. 786 * SmartSpeed, so performance is maintained.
@@ -838,8 +843,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
838 } else { 843 } else {
839 data &= ~E1000_82580_PM_D0_LPLU; 844 data &= ~E1000_82580_PM_D0_LPLU;
840 845
841 /* 846 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
842 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
843 * during Dx states where the power conservation is most 847 * during Dx states where the power conservation is most
844 * important. During driver activity we should enable 848 * important. During driver activity we should enable
845 * SmartSpeed, so performance is maintained. 849 * SmartSpeed, so performance is maintained.
@@ -867,7 +871,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
867 * During driver activity, SmartSpeed should be enabled so performance is 871 * During driver activity, SmartSpeed should be enabled so performance is
868 * maintained. 872 * maintained.
869 **/ 873 **/
870s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 874static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
871{ 875{
872 struct e1000_phy_info *phy = &hw->phy; 876 struct e1000_phy_info *phy = &hw->phy;
873 s32 ret_val = 0; 877 s32 ret_val = 0;
@@ -877,8 +881,7 @@ s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
877 881
878 if (!active) { 882 if (!active) {
879 data &= ~E1000_82580_PM_D3_LPLU; 883 data &= ~E1000_82580_PM_D3_LPLU;
880 /* 884 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
881 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
882 * during Dx states where the power conservation is most 885 * during Dx states where the power conservation is most
883 * important. During driver activity we should enable 886 * important. During driver activity we should enable
884 * SmartSpeed, so performance is maintained. 887 * SmartSpeed, so performance is maintained.
@@ -964,8 +967,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
964 if (!(swfw_sync & (fwmask | swmask))) 967 if (!(swfw_sync & (fwmask | swmask)))
965 break; 968 break;
966 969
967 /* 970 /* Firmware currently using resource (fwmask)
968 * Firmware currently using resource (fwmask)
969 * or other software thread using resource (swmask) 971 * or other software thread using resource (swmask)
970 */ 972 */
971 igb_put_hw_semaphore(hw); 973 igb_put_hw_semaphore(hw);
@@ -1065,8 +1067,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1065 if (hw->phy.media_type != e1000_media_type_copper) { 1067 if (hw->phy.media_type != e1000_media_type_copper) {
1066 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1068 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1067 &duplex); 1069 &duplex);
1068 /* 1070 /* Use this flag to determine if link needs to be checked or
1069 * Use this flag to determine if link needs to be checked or
1070 * not. If we have link clear the flag so that we do not 1071 * not. If we have link clear the flag so that we do not
1071 * continue to check for link. 1072 * continue to check for link.
1072 */ 1073 */
@@ -1135,15 +1136,13 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1135 *speed = 0; 1136 *speed = 0;
1136 *duplex = 0; 1137 *duplex = 0;
1137 1138
1138 /* 1139 /* Read the PCS Status register for link state. For non-copper mode,
1139 * Read the PCS Status register for link state. For non-copper mode,
1140 * the status register is not accurate. The PCS status register is 1140 * the status register is not accurate. The PCS status register is
1141 * used instead. 1141 * used instead.
1142 */ 1142 */
1143 pcs = rd32(E1000_PCS_LSTAT); 1143 pcs = rd32(E1000_PCS_LSTAT);
1144 1144
1145 /* 1145 /* The link up bit determines when link is up on autoneg. The sync ok
1146 * The link up bit determines when link is up on autoneg. The sync ok
1147 * gets set once both sides sync up and agree upon link. Stable link 1146 * gets set once both sides sync up and agree upon link. Stable link
1148 * can be determined by checking for both link up and link sync ok 1147 * can be determined by checking for both link up and link sync ok
1149 */ 1148 */
@@ -1214,8 +1213,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1214 u32 ctrl, icr; 1213 u32 ctrl, icr;
1215 s32 ret_val; 1214 s32 ret_val;
1216 1215
1217 /* 1216 /* Prevent the PCI-E bus from sticking if there is no TLP connection
1218 * Prevent the PCI-E bus from sticking if there is no TLP connection
1219 * on the last TLP read/write transaction when MAC is reset. 1217 * on the last TLP read/write transaction when MAC is reset.
1220 */ 1218 */
1221 ret_val = igb_disable_pcie_master(hw); 1219 ret_val = igb_disable_pcie_master(hw);
@@ -1244,8 +1242,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1244 1242
1245 ret_val = igb_get_auto_rd_done(hw); 1243 ret_val = igb_get_auto_rd_done(hw);
1246 if (ret_val) { 1244 if (ret_val) {
1247 /* 1245 /* When auto config read does not complete, do not
1248 * When auto config read does not complete, do not
1249 * return with an error. This can happen in situations 1246 * return with an error. This can happen in situations
1250 * where there is no eeprom and prevents getting link. 1247 * where there is no eeprom and prevents getting link.
1251 */ 1248 */
@@ -1287,7 +1284,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1287 1284
1288 /* Disabling VLAN filtering */ 1285 /* Disabling VLAN filtering */
1289 hw_dbg("Initializing the IEEE VLAN\n"); 1286 hw_dbg("Initializing the IEEE VLAN\n");
1290 if (hw->mac.type == e1000_i350) 1287 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
1291 igb_clear_vfta_i350(hw); 1288 igb_clear_vfta_i350(hw);
1292 else 1289 else
1293 igb_clear_vfta(hw); 1290 igb_clear_vfta(hw);
@@ -1308,8 +1305,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1308 /* Setup link and flow control */ 1305 /* Setup link and flow control */
1309 ret_val = igb_setup_link(hw); 1306 ret_val = igb_setup_link(hw);
1310 1307
1311 /* 1308 /* Clear all of the statistics registers (clear on read). It is
1312 * Clear all of the statistics registers (clear on read). It is
1313 * important that we do this after we have tried to establish link 1309 * important that we do this after we have tried to establish link
1314 * because the symbol error count will increment wildly if there 1310 * because the symbol error count will increment wildly if there
1315 * is no link. 1311 * is no link.
@@ -1364,6 +1360,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1364 switch (hw->phy.id) { 1360 switch (hw->phy.id) {
1365 case I347AT4_E_PHY_ID: 1361 case I347AT4_E_PHY_ID:
1366 case M88E1112_E_PHY_ID: 1362 case M88E1112_E_PHY_ID:
1363 case M88E1545_E_PHY_ID:
1367 case I210_I_PHY_ID: 1364 case I210_I_PHY_ID:
1368 ret_val = igb_copper_link_setup_m88_gen2(hw); 1365 ret_val = igb_copper_link_setup_m88_gen2(hw);
1369 break; 1366 break;
@@ -1412,17 +1409,17 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1412 return ret_val; 1409 return ret_val;
1413 1410
1414 1411
1415 /* 1412 /* On the 82575, SerDes loopback mode persists until it is
1416 * On the 82575, SerDes loopback mode persists until it is
1417 * explicitly turned off or a power cycle is performed. A read to 1413 * explicitly turned off or a power cycle is performed. A read to
1418 * the register does not indicate its status. Therefore, we ensure 1414 * the register does not indicate its status. Therefore, we ensure
1419 * loopback mode is disabled during initialization. 1415 * loopback mode is disabled during initialization.
1420 */ 1416 */
1421 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1417 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1422 1418
1423 /* power on the sfp cage if present */ 1419 /* power on the sfp cage if present and turn on I2C */
1424 ctrl_ext = rd32(E1000_CTRL_EXT); 1420 ctrl_ext = rd32(E1000_CTRL_EXT);
1425 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1421 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1422 ctrl_ext |= E1000_CTRL_I2C_ENA;
1426 wr32(E1000_CTRL_EXT, ctrl_ext); 1423 wr32(E1000_CTRL_EXT, ctrl_ext);
1427 1424
1428 ctrl_reg = rd32(E1000_CTRL); 1425 ctrl_reg = rd32(E1000_CTRL);
@@ -1466,8 +1463,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1466 pcs_autoneg = false; 1463 pcs_autoneg = false;
1467 } 1464 }
1468 1465
1469 /* 1466 /* non-SGMII modes only supports a speed of 1000/Full for the
1470 * non-SGMII modes only supports a speed of 1000/Full for the
1471 * link so it is best to just force the MAC and let the pcs 1467 * link so it is best to just force the MAC and let the pcs
1472 * link either autoneg or be forced to 1000/Full 1468 * link either autoneg or be forced to 1000/Full
1473 */ 1469 */
@@ -1481,8 +1477,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1481 1477
1482 wr32(E1000_CTRL, ctrl_reg); 1478 wr32(E1000_CTRL, ctrl_reg);
1483 1479
1484 /* 1480 /* New SerDes mode allows for forcing speed or autonegotiating speed
1485 * New SerDes mode allows for forcing speed or autonegotiating speed
1486 * at 1gb. Autoneg should be default set by most drivers. This is the 1481 * at 1gb. Autoneg should be default set by most drivers. This is the
1487 * mode that will be compatible with older link partners and switches. 1482 * mode that will be compatible with older link partners and switches.
1488 * However, both are supported by the hardware and some drivers/tools. 1483 * However, both are supported by the hardware and some drivers/tools.
@@ -1592,8 +1587,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1592{ 1587{
1593 s32 ret_val = 0; 1588 s32 ret_val = 0;
1594 1589
1595 /* 1590 /* If there's an alternate MAC address place it in RAR0
1596 * If there's an alternate MAC address place it in RAR0
1597 * so that it will override the Si installed default perm 1591 * so that it will override the Si installed default perm
1598 * address. 1592 * address.
1599 */ 1593 */
@@ -1777,8 +1771,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1777 if (gcr & E1000_GCR_CMPL_TMOUT_MASK) 1771 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1778 goto out; 1772 goto out;
1779 1773
1780 /* 1774 /* if capabilities version is type 1 we can write the
1781 * if capababilities version is type 1 we can write the
1782 * timeout of 10ms to 200ms through the GCR register 1775 * timeout of 10ms to 200ms through the GCR register
1783 */ 1776 */
1784 if (!(gcr & E1000_GCR_CAP_VER2)) { 1777 if (!(gcr & E1000_GCR_CAP_VER2)) {
@@ -1786,8 +1779,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1786 goto out; 1779 goto out;
1787 } 1780 }
1788 1781
1789 /* 1782 /* for version 2 capabilities we need to write the config space
1790 * for version 2 capabilities we need to write the config space
1791 * directly in order to set the completion timeout value for 1783 * directly in order to set the completion timeout value for
1792 * 16ms to 55ms 1784 * 16ms to 55ms
1793 */ 1785 */
@@ -1825,6 +1817,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1825 reg_offset = E1000_DTXSWC; 1817 reg_offset = E1000_DTXSWC;
1826 break; 1818 break;
1827 case e1000_i350: 1819 case e1000_i350:
1820 case e1000_i354:
1828 reg_offset = E1000_TXSWC; 1821 reg_offset = E1000_TXSWC;
1829 break; 1822 break;
1830 default: 1823 default:
@@ -1866,6 +1859,7 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1866 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1859 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1867 wr32(E1000_DTXSWC, dtxswc); 1860 wr32(E1000_DTXSWC, dtxswc);
1868 break; 1861 break;
1862 case e1000_i354:
1869 case e1000_i350: 1863 case e1000_i350:
1870 dtxswc = rd32(E1000_TXSWC); 1864 dtxswc = rd32(E1000_TXSWC);
1871 if (enable) 1865 if (enable)
@@ -1879,7 +1873,6 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1879 break; 1873 break;
1880 } 1874 }
1881 1875
1882
1883} 1876}
1884 1877
1885/** 1878/**
@@ -1914,7 +1907,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1914{ 1907{
1915 s32 ret_val; 1908 s32 ret_val;
1916 1909
1917
1918 ret_val = hw->phy.ops.acquire(hw); 1910 ret_val = hw->phy.ops.acquire(hw);
1919 if (ret_val) 1911 if (ret_val)
1920 goto out; 1912 goto out;
@@ -2016,8 +2008,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2016 /* Get current control state. */ 2008 /* Get current control state. */
2017 ctrl = rd32(E1000_CTRL); 2009 ctrl = rd32(E1000_CTRL);
2018 2010
2019 /* 2011 /* Prevent the PCI-E bus from sticking if there is no TLP connection
2020 * Prevent the PCI-E bus from sticking if there is no TLP connection
2021 * on the last TLP read/write transaction when MAC is reset. 2012 * on the last TLP read/write transaction when MAC is reset.
2022 */ 2013 */
2023 ret_val = igb_disable_pcie_master(hw); 2014 ret_val = igb_disable_pcie_master(hw);
@@ -2052,18 +2043,13 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2052 2043
2053 ret_val = igb_get_auto_rd_done(hw); 2044 ret_val = igb_get_auto_rd_done(hw);
2054 if (ret_val) { 2045 if (ret_val) {
2055 /* 2046 /* When auto config read does not complete, do not
2056 * When auto config read does not complete, do not
2057 * return with an error. This can happen in situations 2047 * return with an error. This can happen in situations
2058 * where there is no eeprom and prevents getting link. 2048 * where there is no eeprom and prevents getting link.
2059 */ 2049 */
2060 hw_dbg("Auto Read Done did not complete\n"); 2050 hw_dbg("Auto Read Done did not complete\n");
2061 } 2051 }
2062 2052
2063 /* If EEPROM is not present, run manual init scripts */
2064 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
2065 igb_reset_init_script_82575(hw);
2066
2067 /* clear global device reset status bit */ 2053 /* clear global device reset status bit */
2068 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); 2054 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2069 2055
@@ -2197,7 +2183,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2197 2183
2198 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2184 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2199 /* if checksums compatibility bit is set validate checksums 2185 /* if checksums compatibility bit is set validate checksums
2200 * for all 4 ports. */ 2186 * for all 4 ports.
2187 */
2201 eeprom_regions_count = 4; 2188 eeprom_regions_count = 4;
2202 } 2189 }
2203 2190
@@ -2309,6 +2296,41 @@ out:
2309} 2296}
2310 2297
2311/** 2298/**
2299 * __igb_access_emi_reg - Read/write EMI register
2300 * @hw: pointer to the HW structure
2301 * @addr: EMI address to program
2302 * @data: pointer to value to read/write from/to the EMI address
2303 * @read: boolean flag to indicate read or write
2304 **/
2305static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2306 u16 *data, bool read)
2307{
2308 s32 ret_val = E1000_SUCCESS;
2309
2310 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2311 if (ret_val)
2312 return ret_val;
2313
2314 if (read)
2315 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2316 else
2317 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2318
2319 return ret_val;
2320}
2321
2322/**
2323 * igb_read_emi_reg - Read Extended Management Interface register
2324 * @hw: pointer to the HW structure
2325 * @addr: EMI address to program
2326 * @data: value to be read from the EMI address
2327 **/
2328s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2329{
2330 return __igb_access_emi_reg(hw, addr, data, true);
2331}
2332
2333/**
2312 * igb_set_eee_i350 - Enable/disable EEE support 2334 * igb_set_eee_i350 - Enable/disable EEE support
2313 * @hw: pointer to the HW structure 2335 * @hw: pointer to the HW structure
2314 * 2336 *
@@ -2338,7 +2360,6 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2338 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2360 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2339 hw_dbg("LPI Clock Stop Bit should not be set!\n"); 2361 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2340 2362
2341
2342 } else { 2363 } else {
2343 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2364 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2344 E1000_IPCNFG_EEE_100M_AN); 2365 E1000_IPCNFG_EEE_100M_AN);
@@ -2355,6 +2376,108 @@ out:
2355 return ret_val; 2376 return ret_val;
2356} 2377}
2357 2378
2379/**
2380 * igb_set_eee_i354 - Enable/disable EEE support
2381 * @hw: pointer to the HW structure
2382 *
2383 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
2384 *
2385 **/
2386s32 igb_set_eee_i354(struct e1000_hw *hw)
2387{
2388 struct e1000_phy_info *phy = &hw->phy;
2389 s32 ret_val = 0;
2390 u16 phy_data;
2391
2392 if ((hw->phy.media_type != e1000_media_type_copper) ||
2393 (phy->id != M88E1545_E_PHY_ID))
2394 goto out;
2395
2396 if (!hw->dev_spec._82575.eee_disable) {
2397 /* Switch to PHY page 18. */
2398 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
2399 if (ret_val)
2400 goto out;
2401
2402 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2403 &phy_data);
2404 if (ret_val)
2405 goto out;
2406
2407 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
2408 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2409 phy_data);
2410 if (ret_val)
2411 goto out;
2412
2413 /* Return the PHY to page 0. */
2414 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
2415 if (ret_val)
2416 goto out;
2417
2418 /* Turn on EEE advertisement. */
2419 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2420 E1000_EEE_ADV_DEV_I354,
2421 &phy_data);
2422 if (ret_val)
2423 goto out;
2424
2425 phy_data |= E1000_EEE_ADV_100_SUPPORTED |
2426 E1000_EEE_ADV_1000_SUPPORTED;
2427 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2428 E1000_EEE_ADV_DEV_I354,
2429 phy_data);
2430 } else {
2431 /* Turn off EEE advertisement. */
2432 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2433 E1000_EEE_ADV_DEV_I354,
2434 &phy_data);
2435 if (ret_val)
2436 goto out;
2437
2438 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2439 E1000_EEE_ADV_1000_SUPPORTED);
2440 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2441 E1000_EEE_ADV_DEV_I354,
2442 phy_data);
2443 }
2444
2445out:
2446 return ret_val;
2447}
2448
2449/**
2450 * igb_get_eee_status_i354 - Get EEE status
2451 * @hw: pointer to the HW structure
2452 * @status: EEE status
2453 *
2454 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
2455 * been received.
2456 **/
2457s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2458{
2459 struct e1000_phy_info *phy = &hw->phy;
2460 s32 ret_val = 0;
2461 u16 phy_data;
2462
2463 /* Check if EEE is supported on this device. */
2464 if ((hw->phy.media_type != e1000_media_type_copper) ||
2465 (phy->id != M88E1545_E_PHY_ID))
2466 goto out;
2467
2468 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2469 E1000_PCS_STATUS_DEV_I354,
2470 &phy_data);
2471 if (ret_val)
2472 goto out;
2473
2474 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2475 E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2476
2477out:
2478 return ret_val;
2479}
2480
2358static const u8 e1000_emc_temp_data[4] = { 2481static const u8 e1000_emc_temp_data[4] = {
2359 E1000_EMC_INTERNAL_DATA, 2482 E1000_EMC_INTERNAL_DATA,
2360 E1000_EMC_DIODE1_DATA, 2483 E1000_EMC_DIODE1_DATA,
@@ -2368,11 +2491,12 @@ static const u8 e1000_emc_therm_limit[4] = {
2368 E1000_EMC_DIODE3_THERM_LIMIT 2491 E1000_EMC_DIODE3_THERM_LIMIT
2369}; 2492};
2370 2493
2371/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2494/**
2495 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2372 * @hw: pointer to hardware structure 2496 * @hw: pointer to hardware structure
2373 * 2497 *
2374 * Updates the temperatures in mac.thermal_sensor_data 2498 * Updates the temperatures in mac.thermal_sensor_data
2375 */ 2499 **/
2376s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2500s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2377{ 2501{
2378 s32 status = E1000_SUCCESS; 2502 s32 status = E1000_SUCCESS;
@@ -2420,12 +2544,13 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2420 return status; 2544 return status;
2421} 2545}
2422 2546
2423/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds 2547/**
2548 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2424 * @hw: pointer to hardware structure 2549 * @hw: pointer to hardware structure
2425 * 2550 *
2426 * Sets the thermal sensor thresholds according to the NVM map 2551 * Sets the thermal sensor thresholds according to the NVM map
2427 * and save off the threshold and location values into mac.thermal_sensor_data 2552 * and save off the threshold and location values into mac.thermal_sensor_data
2428 */ 2553 **/
2429s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2554s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2430{ 2555{
2431 s32 status = E1000_SUCCESS; 2556 s32 status = E1000_SUCCESS;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 73ab41f0e032..74a1506b4235 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -263,7 +263,9 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
263void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 263void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
264void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 264void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
265u16 igb_rxpbs_adjust_82580(u32 data); 265u16 igb_rxpbs_adjust_82580(u32 data);
266s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
266s32 igb_set_eee_i350(struct e1000_hw *); 267s32 igb_set_eee_i350(struct e1000_hw *);
268s32 igb_set_eee_i354(struct e1000_hw *);
267s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *); 269s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
268s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw); 270s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
269 271
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7e13337d3b9d..31a0f82cc650 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -138,8 +138,7 @@
138#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 138#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
139#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 139#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
140 140
141/* 141/* Use byte values for the following shift parameters
142 * Use byte values for the following shift parameters
143 * Usage: 142 * Usage:
144 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 143 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
145 * E1000_PSRCTL_BSIZE0_MASK) | 144 * E1000_PSRCTL_BSIZE0_MASK) |
@@ -237,11 +236,14 @@
237#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 236#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
238/* BMC external code execution disabled */ 237/* BMC external code execution disabled */
239 238
239#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
240#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
240/* Constants used to intrepret the masked PCI-X bus speed. */ 241/* Constants used to intrepret the masked PCI-X bus speed. */
241 242
242#define SPEED_10 10 243#define SPEED_10 10
243#define SPEED_100 100 244#define SPEED_100 100
244#define SPEED_1000 1000 245#define SPEED_1000 1000
246#define SPEED_2500 2500
245#define HALF_DUPLEX 1 247#define HALF_DUPLEX 1
246#define FULL_DUPLEX 2 248#define FULL_DUPLEX 2
247 249
@@ -382,8 +384,7 @@
382#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ 384#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
383/* TCP Timer */ 385/* TCP Timer */
384 386
385/* 387/* This defines the bits that are set in the Interrupt Mask
386 * This defines the bits that are set in the Interrupt Mask
387 * Set/Read Register. Each bit is documented below: 388 * Set/Read Register. Each bit is documented below:
388 * o RXT0 = Receiver Timer Interrupt (ring 0) 389 * o RXT0 = Receiver Timer Interrupt (ring 0)
389 * o TXDW = Transmit Descriptor Written Back 390 * o TXDW = Transmit Descriptor Written Back
@@ -440,8 +441,7 @@
440#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 441#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
441 442
442/* Receive Address */ 443/* Receive Address */
443/* 444/* Number of high/low register pairs in the RAR. The RAR (Receive Address
444 * Number of high/low register pairs in the RAR. The RAR (Receive Address
445 * Registers) holds the directed and multicast addresses that we monitor. 445 * Registers) holds the directed and multicast addresses that we monitor.
446 * Technically, we have 16 spots. However, we reserve one of these spots 446 * Technically, we have 16 spots. However, we reserve one of these spots
447 * (RAR[15]) for our directed address used by controllers with 447 * (RAR[15]) for our directed address used by controllers with
@@ -760,8 +760,7 @@
760#define MAX_PHY_MULTI_PAGE_REG 0xF 760#define MAX_PHY_MULTI_PAGE_REG 0xF
761 761
762/* Bit definitions for valid PHY IDs. */ 762/* Bit definitions for valid PHY IDs. */
763/* 763/* I = Integrated
764 * I = Integrated
765 * E = External 764 * E = External
766 */ 765 */
767#define M88E1111_I_PHY_ID 0x01410CC0 766#define M88E1111_I_PHY_ID 0x01410CC0
@@ -772,6 +771,7 @@
772#define I350_I_PHY_ID 0x015403B0 771#define I350_I_PHY_ID 0x015403B0
773#define M88_VENDOR 0x0141 772#define M88_VENDOR 0x0141
774#define I210_I_PHY_ID 0x01410C00 773#define I210_I_PHY_ID 0x01410C00
774#define M88E1545_E_PHY_ID 0x01410EA0
775 775
776/* M88E1000 Specific Registers */ 776/* M88E1000 Specific Registers */
777#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 777#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -791,8 +791,7 @@
791#define M88E1000_PSCR_AUTO_X_1000T 0x0040 791#define M88E1000_PSCR_AUTO_X_1000T 0x0040
792/* Auto crossover enabled all speeds */ 792/* Auto crossover enabled all speeds */
793#define M88E1000_PSCR_AUTO_X_MODE 0x0060 793#define M88E1000_PSCR_AUTO_X_MODE 0x0060
794/* 794/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
795 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
796 * 0=Normal 10BASE-T Rx Threshold 795 * 0=Normal 10BASE-T Rx Threshold
797 */ 796 */
798/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ 797/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
@@ -802,8 +801,7 @@
802#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 801#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
803#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 802#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
804#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 803#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
805/* 804/* 0 = <50M
806 * 0 = <50M
807 * 1 = 50-80M 805 * 1 = 50-80M
808 * 2 = 80-110M 806 * 2 = 80-110M
809 * 3 = 110-140M 807 * 3 = 110-140M
@@ -816,20 +814,17 @@
816#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 814#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
817 815
818/* M88E1000 Extended PHY Specific Control Register */ 816/* M88E1000 Extended PHY Specific Control Register */
819/* 817/* 1 = Lost lock detect enabled.
820 * 1 = Lost lock detect enabled.
821 * Will assert lost lock and bring 818 * Will assert lost lock and bring
822 * link down if idle not seen 819 * link down if idle not seen
823 * within 1ms in 1000BASE-T 820 * within 1ms in 1000BASE-T
824 */ 821 */
825/* 822/* Number of times we will attempt to autonegotiate before downshifting if we
826 * Number of times we will attempt to autonegotiate before downshifting if we
827 * are the master 823 * are the master
828 */ 824 */
829#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 825#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
830#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 826#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
831/* 827/* Number of times we will attempt to autonegotiate before downshifting if we
832 * Number of times we will attempt to autonegotiate before downshifting if we
833 * are the slave 828 * are the slave
834 */ 829 */
835#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 830#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -844,8 +839,7 @@
844 839
845/* i347-AT4 Extended PHY Specific Control Register */ 840/* i347-AT4 Extended PHY Specific Control Register */
846 841
847/* 842/* Number of times we will attempt to autonegotiate before downshifting if we
848 * Number of times we will attempt to autonegotiate before downshifting if we
849 * are the master 843 * are the master
850 */ 844 */
851#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 845#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
@@ -895,6 +889,22 @@
895#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 889#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
896#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ 890#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
897#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ 891#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
892#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
893#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
894#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
895#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
896#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
897#define E1000_M88E1545_EEE_CTRL_1 0x0
898#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
899#define E1000_EEE_ADV_DEV_I354 7
900#define E1000_EEE_ADV_ADDR_I354 60
901#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
902#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
903#define E1000_PCS_STATUS_DEV_I354 3
904#define E1000_PCS_STATUS_ADDR_I354 1
905#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
906#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
907#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
898 908
899/* SerDes Control */ 909/* SerDes Control */
900#define E1000_GEN_CTL_READY 0x80000000 910#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 0d5cf9c63d0d..488abb24a54f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -38,38 +38,39 @@
38 38
39struct e1000_hw; 39struct e1000_hw;
40 40
41#define E1000_DEV_ID_82576 0x10C9 41#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6 42#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7 43#define E1000_DEV_ID_82576_SERDES 0x10E7
44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
45#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 45#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
46#define E1000_DEV_ID_82576_NS 0x150A 46#define E1000_DEV_ID_82576_NS 0x150A
47#define E1000_DEV_ID_82576_NS_SERDES 0x1518 47#define E1000_DEV_ID_82576_NS_SERDES 0x1518
48#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D 48#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
49#define E1000_DEV_ID_82575EB_COPPER 0x10A7 49#define E1000_DEV_ID_82575EB_COPPER 0x10A7
50#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 50#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
51#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 51#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
52#define E1000_DEV_ID_82580_COPPER 0x150E 52#define E1000_DEV_ID_82580_COPPER 0x150E
53#define E1000_DEV_ID_82580_FIBER 0x150F 53#define E1000_DEV_ID_82580_FIBER 0x150F
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A 59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C 60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
61#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 61#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
62#define E1000_DEV_ID_I350_COPPER 0x1521 62#define E1000_DEV_ID_I350_COPPER 0x1521
63#define E1000_DEV_ID_I350_FIBER 0x1522 63#define E1000_DEV_ID_I350_FIBER 0x1522
64#define E1000_DEV_ID_I350_SERDES 0x1523 64#define E1000_DEV_ID_I350_SERDES 0x1523
65#define E1000_DEV_ID_I350_SGMII 0x1524 65#define E1000_DEV_ID_I350_SGMII 0x1524
66#define E1000_DEV_ID_I210_COPPER 0x1533 66#define E1000_DEV_ID_I210_COPPER 0x1533
67#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
68#define E1000_DEV_ID_I210_COPPER_IT 0x1535
69#define E1000_DEV_ID_I210_FIBER 0x1536 67#define E1000_DEV_ID_I210_FIBER 0x1536
70#define E1000_DEV_ID_I210_SERDES 0x1537 68#define E1000_DEV_ID_I210_SERDES 0x1537
71#define E1000_DEV_ID_I210_SGMII 0x1538 69#define E1000_DEV_ID_I210_SGMII 0x1538
72#define E1000_DEV_ID_I211_COPPER 0x1539 70#define E1000_DEV_ID_I211_COPPER 0x1539
71#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
72#define E1000_DEV_ID_I354_SGMII 0x1F41
73#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
73 74
74#define E1000_REVISION_2 2 75#define E1000_REVISION_2 2
75#define E1000_REVISION_4 4 76#define E1000_REVISION_4 4
@@ -90,6 +91,7 @@ enum e1000_mac_type {
90 e1000_82576, 91 e1000_82576,
91 e1000_82580, 92 e1000_82580,
92 e1000_i350, 93 e1000_i350,
94 e1000_i354,
93 e1000_i210, 95 e1000_i210,
94 e1000_i211, 96 e1000_i211,
95 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 97 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
@@ -98,7 +100,8 @@ enum e1000_mac_type {
98enum e1000_media_type { 100enum e1000_media_type {
99 e1000_media_type_unknown = 0, 101 e1000_media_type_unknown = 0,
100 e1000_media_type_copper = 1, 102 e1000_media_type_copper = 1,
101 e1000_media_type_internal_serdes = 2, 103 e1000_media_type_fiber = 2,
104 e1000_media_type_internal_serdes = 3,
102 e1000_num_media_types 105 e1000_num_media_types
103}; 106};
104 107
@@ -524,6 +527,7 @@ struct e1000_dev_spec_82575 {
524 bool sgmii_active; 527 bool sgmii_active;
525 bool global_device_reset; 528 bool global_device_reset;
526 bool eee_disable; 529 bool eee_disable;
530 bool clear_semaphore_once;
527}; 531};
528 532
529struct e1000_hw { 533struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6a42344f24f1..ddb3cf51b9b9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -44,10 +44,42 @@
44static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) 44static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
45{ 45{
46 u32 swsm; 46 u32 swsm;
47 s32 ret_val = E1000_SUCCESS;
48 s32 timeout = hw->nvm.word_size + 1; 47 s32 timeout = hw->nvm.word_size + 1;
49 s32 i = 0; 48 s32 i = 0;
50 49
50 /* Get the SW semaphore */
51 while (i < timeout) {
52 swsm = rd32(E1000_SWSM);
53 if (!(swsm & E1000_SWSM_SMBI))
54 break;
55
56 udelay(50);
57 i++;
58 }
59
60 if (i == timeout) {
61 /* In rare circumstances, the SW semaphore may already be held
62 * unintentionally. Clear the semaphore once before giving up.
63 */
64 if (hw->dev_spec._82575.clear_semaphore_once) {
65 hw->dev_spec._82575.clear_semaphore_once = false;
66 igb_put_hw_semaphore(hw);
67 for (i = 0; i < timeout; i++) {
68 swsm = rd32(E1000_SWSM);
69 if (!(swsm & E1000_SWSM_SMBI))
70 break;
71
72 udelay(50);
73 }
74 }
75
76 /* If we do not have the semaphore here, we have to give up. */
77 if (i == timeout) {
78 hw_dbg("Driver can't access device - SMBI bit is set.\n");
79 return -E1000_ERR_NVM;
80 }
81 }
82
51 /* Get the FW semaphore. */ 83 /* Get the FW semaphore. */
52 for (i = 0; i < timeout; i++) { 84 for (i = 0; i < timeout; i++) {
53 swsm = rd32(E1000_SWSM); 85 swsm = rd32(E1000_SWSM);
@@ -64,12 +96,10 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
64 /* Release semaphores */ 96 /* Release semaphores */
65 igb_put_hw_semaphore(hw); 97 igb_put_hw_semaphore(hw);
66 hw_dbg("Driver can't access the NVM\n"); 98 hw_dbg("Driver can't access the NVM\n");
67 ret_val = -E1000_ERR_NVM; 99 return -E1000_ERR_NVM;
68 goto out;
69 } 100 }
70 101
71out: 102 return E1000_SUCCESS;
72 return ret_val;
73} 103}
74 104
75/** 105/**
@@ -99,23 +129,6 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
99} 129}
100 130
101/** 131/**
102 * igb_put_hw_semaphore_i210 - Release hardware semaphore
103 * @hw: pointer to the HW structure
104 *
105 * Release hardware semaphore used to access the PHY or NVM
106 */
107static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
108{
109 u32 swsm;
110
111 swsm = rd32(E1000_SWSM);
112
113 swsm &= ~E1000_SWSM_SWESMBI;
114
115 wr32(E1000_SWSM, swsm);
116}
117
118/**
119 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore 132 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
120 * @hw: pointer to the HW structure 133 * @hw: pointer to the HW structure
121 * @mask: specifies which semaphore to acquire 134 * @mask: specifies which semaphore to acquire
@@ -138,13 +151,11 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
138 } 151 }
139 152
140 swfw_sync = rd32(E1000_SW_FW_SYNC); 153 swfw_sync = rd32(E1000_SW_FW_SYNC);
141 if (!(swfw_sync & fwmask)) 154 if (!(swfw_sync & (fwmask | swmask)))
142 break; 155 break;
143 156
144 /* 157 /* Firmware currently using resource (fwmask) */
145 * Firmware currently using resource (fwmask) 158 igb_put_hw_semaphore(hw);
146 */
147 igb_put_hw_semaphore_i210(hw);
148 mdelay(5); 159 mdelay(5);
149 i++; 160 i++;
150 } 161 }
@@ -158,7 +169,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
158 swfw_sync |= swmask; 169 swfw_sync |= swmask;
159 wr32(E1000_SW_FW_SYNC, swfw_sync); 170 wr32(E1000_SW_FW_SYNC, swfw_sync);
160 171
161 igb_put_hw_semaphore_i210(hw); 172 igb_put_hw_semaphore(hw);
162out: 173out:
163 return ret_val; 174 return ret_val;
164} 175}
@@ -182,7 +193,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
182 swfw_sync &= ~mask; 193 swfw_sync &= ~mask;
183 wr32(E1000_SW_FW_SYNC, swfw_sync); 194 wr32(E1000_SW_FW_SYNC, swfw_sync);
184 195
185 igb_put_hw_semaphore_i210(hw); 196 igb_put_hw_semaphore(hw);
186} 197}
187 198
188/** 199/**
@@ -203,7 +214,8 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
203 214
204 /* We cannot hold synchronization semaphores for too long, 215 /* We cannot hold synchronization semaphores for too long,
205 * because of forceful takeover procedure. However it is more efficient 216 * because of forceful takeover procedure. However it is more efficient
206 * to read in bursts than synchronizing access for each word. */ 217 * to read in bursts than synchronizing access for each word.
218 */
207 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 219 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
208 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 220 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
209 E1000_EERD_EEWR_MAX_COUNT : (words - i); 221 E1000_EERD_EEWR_MAX_COUNT : (words - i);
@@ -242,8 +254,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
242 u32 attempts = 100000; 254 u32 attempts = 100000;
243 s32 ret_val = E1000_SUCCESS; 255 s32 ret_val = E1000_SUCCESS;
244 256
245 /* 257 /* A check for invalid values: offset too large, too many words,
246 * A check for invalid values: offset too large, too many words,
247 * too many words for the offset, and not enough words. 258 * too many words for the offset, and not enough words.
248 */ 259 */
249 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 260 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -294,7 +305,7 @@ out:
294 * 305 *
295 * If error code is returned, data and Shadow RAM may be inconsistent - buffer 306 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
296 * partially written. 307 * partially written.
297 */ 308 **/
298s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, 309s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
299 u16 *data) 310 u16 *data)
300{ 311{
@@ -326,7 +337,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
326/** 337/**
327 * igb_read_nvm_i211 - Read NVM wrapper function for I211 338 * igb_read_nvm_i211 - Read NVM wrapper function for I211
328 * @hw: pointer to the HW structure 339 * @hw: pointer to the HW structure
329 * @address: the word address (aka eeprom offset) to read 340 * @words: number of words to read
330 * @data: pointer to the data read 341 * @data: pointer to the data read
331 * 342 *
332 * Wrapper function to return data formerly found in the NVM. 343 * Wrapper function to return data formerly found in the NVM.
@@ -549,8 +560,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
549 560
550 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 561 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
551 562
552 /* 563 /* Replace the read function with semaphore grabbing with
553 * Replace the read function with semaphore grabbing with
554 * the one that skips this for a while. 564 * the one that skips this for a while.
555 * We have semaphore taken already here. 565 * We have semaphore taken already here.
556 */ 566 */
@@ -570,7 +580,6 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
570 return status; 580 return status;
571} 581}
572 582
573
574/** 583/**
575 * igb_update_nvm_checksum_i210 - Update EEPROM checksum 584 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
576 * @hw: pointer to the HW structure 585 * @hw: pointer to the HW structure
@@ -585,8 +594,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
585 u16 checksum = 0; 594 u16 checksum = 0;
586 u16 i, nvm_data; 595 u16 i, nvm_data;
587 596
588 /* 597 /* Read the first word from the EEPROM. If this times out or fails, do
589 * Read the first word from the EEPROM. If this times out or fails, do
590 * not continue or we could be in for a very long wait while every 598 * not continue or we could be in for a very long wait while every
591 * EEPROM read fails 599 * EEPROM read fails
592 */ 600 */
@@ -597,8 +605,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
597 } 605 }
598 606
599 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 607 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
600 /* 608 /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
601 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
602 * because we do not want to take the synchronization 609 * because we do not want to take the synchronization
603 * semaphores twice here. 610 * semaphores twice here.
604 */ 611 */
@@ -635,7 +642,7 @@ out:
635 * igb_pool_flash_update_done_i210 - Pool FLUDONE status. 642 * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
636 * @hw: pointer to the HW structure 643 * @hw: pointer to the HW structure
637 * 644 *
638 */ 645 **/
639static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) 646static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
640{ 647{
641 s32 ret_val = -E1000_ERR_NVM; 648 s32 ret_val = -E1000_ERR_NVM;
@@ -714,3 +721,68 @@ s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
714out: 721out:
715 return ret_val; 722 return ret_val;
716} 723}
724
725/**
726 * __igb_access_xmdio_reg - Read/write XMDIO register
727 * @hw: pointer to the HW structure
728 * @address: XMDIO address to program
729 * @dev_addr: device address to program
730 * @data: pointer to value to read/write from/to the XMDIO address
731 * @read: boolean flag to indicate read or write
732 **/
733static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
734 u8 dev_addr, u16 *data, bool read)
735{
736 s32 ret_val = E1000_SUCCESS;
737
738 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
739 if (ret_val)
740 return ret_val;
741
742 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
743 if (ret_val)
744 return ret_val;
745
746 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
747 dev_addr);
748 if (ret_val)
749 return ret_val;
750
751 if (read)
752 ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
753 else
754 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
755 if (ret_val)
756 return ret_val;
757
758 /* Recalibrate the device back to 0 */
759 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
760 if (ret_val)
761 return ret_val;
762
763 return ret_val;
764}
765
766/**
767 * igb_read_xmdio_reg - Read XMDIO register
768 * @hw: pointer to the HW structure
769 * @addr: XMDIO address to program
770 * @dev_addr: device address to program
771 * @data: value to be read from the EMI address
772 **/
773s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
774{
775 return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
776}
777
778/**
779 * igb_write_xmdio_reg - Write XMDIO register
780 * @hw: pointer to the HW structure
781 * @addr: XMDIO address to program
782 * @dev_addr: device address to program
783 * @data: value to be written to the XMDIO address
784 **/
785s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
786{
787 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
788}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index e4e1a73b7c75..bfc08e05c907 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -45,6 +45,10 @@ extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data); 45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw, 46extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver); 47 struct e1000_fw_version *invm_ver);
48extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
49 u16 *data);
50extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
51 u16 data);
48 52
49#define E1000_STM_OPCODE 0xDB00 53#define E1000_STM_OPCODE 0xDB00
50#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 54#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index a5c7200b9a71..2559d70a2321 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -214,7 +214,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
214 else 214 else
215 vfta &= ~mask; 215 vfta &= ~mask;
216 } 216 }
217 if (hw->mac.type == e1000_i350) 217 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
218 igb_write_vfta_i350(hw, index, vfta); 218 igb_write_vfta_i350(hw, index, vfta);
219 else 219 else
220 igb_write_vfta(hw, index, vfta); 220 igb_write_vfta(hw, index, vfta);
@@ -230,8 +230,8 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
230 * Checks the nvm for an alternate MAC address. An alternate MAC address 230 * Checks the nvm for an alternate MAC address. An alternate MAC address
231 * can be setup by pre-boot software and must be treated like a permanent 231 * can be setup by pre-boot software and must be treated like a permanent
232 * address and must override the actual permanent MAC address. If an 232 * address and must override the actual permanent MAC address. If an
233 * alternate MAC address is fopund it is saved in the hw struct and 233 * alternate MAC address is found it is saved in the hw struct and
234 * prgrammed into RAR0 and the cuntion returns success, otherwise the 234 * programmed into RAR0 and the function returns success, otherwise the
235 * function returns an error. 235 * function returns an error.
236 **/ 236 **/
237s32 igb_check_alt_mac_addr(struct e1000_hw *hw) 237s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
@@ -241,8 +241,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
241 u16 offset, nvm_alt_mac_addr_offset, nvm_data; 241 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
242 u8 alt_mac_addr[ETH_ALEN]; 242 u8 alt_mac_addr[ETH_ALEN];
243 243
244 /* 244 /* Alternate MAC address is handled by the option ROM for 82580
245 * Alternate MAC address is handled by the option ROM for 82580
246 * and newer. SW support not required. 245 * and newer. SW support not required.
247 */ 246 */
248 if (hw->mac.type >= e1000_82580) 247 if (hw->mac.type >= e1000_82580)
@@ -285,8 +284,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
285 goto out; 284 goto out;
286 } 285 }
287 286
288 /* 287 /* We have a valid alternate MAC address, and we want to treat it the
289 * We have a valid alternate MAC address, and we want to treat it the
290 * same as the normal permanent MAC address stored by the HW into the 288 * same as the normal permanent MAC address stored by the HW into the
291 * RAR. Do this by mapping this address into RAR0. 289 * RAR. Do this by mapping this address into RAR0.
292 */ 290 */
@@ -309,8 +307,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
309{ 307{
310 u32 rar_low, rar_high; 308 u32 rar_low, rar_high;
311 309
312 /* 310 /* HW expects these in little endian so we reverse the byte order
313 * HW expects these in little endian so we reverse the byte order
314 * from network order (big endian) to little endian 311 * from network order (big endian) to little endian
315 */ 312 */
316 rar_low = ((u32) addr[0] | 313 rar_low = ((u32) addr[0] |
@@ -323,8 +320,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
323 if (rar_low || rar_high) 320 if (rar_low || rar_high)
324 rar_high |= E1000_RAH_AV; 321 rar_high |= E1000_RAH_AV;
325 322
326 /* 323 /* Some bridges will combine consecutive 32-bit writes into
327 * Some bridges will combine consecutive 32-bit writes into
328 * a single burst write, which will malfunction on some parts. 324 * a single burst write, which will malfunction on some parts.
329 * The flushes avoid this. 325 * The flushes avoid this.
330 */ 326 */
@@ -348,8 +344,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
348{ 344{
349 u32 hash_bit, hash_reg, mta; 345 u32 hash_bit, hash_reg, mta;
350 346
351 /* 347 /* The MTA is a register array of 32-bit registers. It is
352 * The MTA is a register array of 32-bit registers. It is
353 * treated like an array of (32*mta_reg_count) bits. We want to 348 * treated like an array of (32*mta_reg_count) bits. We want to
354 * set bit BitArray[hash_value]. So we figure out what register 349 * set bit BitArray[hash_value]. So we figure out what register
355 * the bit is in, read it, OR in the new bit, then write 350 * the bit is in, read it, OR in the new bit, then write
@@ -386,15 +381,13 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
386 /* Register count multiplied by bits per register */ 381 /* Register count multiplied by bits per register */
387 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 382 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
388 383
389 /* 384 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
390 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
391 * where 0xFF would still fall within the hash mask. 385 * where 0xFF would still fall within the hash mask.
392 */ 386 */
393 while (hash_mask >> bit_shift != 0xFF) 387 while (hash_mask >> bit_shift != 0xFF)
394 bit_shift++; 388 bit_shift++;
395 389
396 /* 390 /* The portion of the address that is used for the hash table
397 * The portion of the address that is used for the hash table
398 * is determined by the mc_filter_type setting. 391 * is determined by the mc_filter_type setting.
399 * The algorithm is such that there is a total of 8 bits of shifting. 392 * The algorithm is such that there is a total of 8 bits of shifting.
400 * The bit_shift for a mc_filter_type of 0 represents the number of 393 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -536,8 +529,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
536 s32 ret_val; 529 s32 ret_val;
537 bool link; 530 bool link;
538 531
539 /* 532 /* We only want to go out to the PHY registers to see if Auto-Neg
540 * We only want to go out to the PHY registers to see if Auto-Neg
541 * has completed and/or if our link status has changed. The 533 * has completed and/or if our link status has changed. The
542 * get_link_status flag is set upon receiving a Link Status 534 * get_link_status flag is set upon receiving a Link Status
543 * Change or Rx Sequence Error interrupt. 535 * Change or Rx Sequence Error interrupt.
@@ -547,8 +539,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
547 goto out; 539 goto out;
548 } 540 }
549 541
550 /* 542 /* First we want to see if the MII Status Register reports
551 * First we want to see if the MII Status Register reports
552 * link. If so, then we want to get the current speed/duplex 543 * link. If so, then we want to get the current speed/duplex
553 * of the PHY. 544 * of the PHY.
554 */ 545 */
@@ -561,14 +552,12 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
561 552
562 mac->get_link_status = false; 553 mac->get_link_status = false;
563 554
564 /* 555 /* Check if there was DownShift, must be checked
565 * Check if there was DownShift, must be checked
566 * immediately after link-up 556 * immediately after link-up
567 */ 557 */
568 igb_check_downshift(hw); 558 igb_check_downshift(hw);
569 559
570 /* 560 /* If we are forcing speed/duplex, then we simply return since
571 * If we are forcing speed/duplex, then we simply return since
572 * we have already determined whether we have link or not. 561 * we have already determined whether we have link or not.
573 */ 562 */
574 if (!mac->autoneg) { 563 if (!mac->autoneg) {
@@ -576,15 +565,13 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
576 goto out; 565 goto out;
577 } 566 }
578 567
579 /* 568 /* Auto-Neg is enabled. Auto Speed Detection takes care
580 * Auto-Neg is enabled. Auto Speed Detection takes care
581 * of MAC speed/duplex configuration. So we only need to 569 * of MAC speed/duplex configuration. So we only need to
582 * configure Collision Distance in the MAC. 570 * configure Collision Distance in the MAC.
583 */ 571 */
584 igb_config_collision_dist(hw); 572 igb_config_collision_dist(hw);
585 573
586 /* 574 /* Configure Flow Control now that Auto-Neg has completed.
587 * Configure Flow Control now that Auto-Neg has completed.
588 * First, we need to restore the desired flow control 575 * First, we need to restore the desired flow control
589 * settings because we may have had to re-autoneg with a 576 * settings because we may have had to re-autoneg with a
590 * different link partner. 577 * different link partner.
@@ -611,15 +598,13 @@ s32 igb_setup_link(struct e1000_hw *hw)
611{ 598{
612 s32 ret_val = 0; 599 s32 ret_val = 0;
613 600
614 /* 601 /* In the case of the phy reset being blocked, we already have a link.
615 * In the case of the phy reset being blocked, we already have a link.
616 * We do not need to set it up again. 602 * We do not need to set it up again.
617 */ 603 */
618 if (igb_check_reset_block(hw)) 604 if (igb_check_reset_block(hw))
619 goto out; 605 goto out;
620 606
621 /* 607 /* If requested flow control is set to default, set flow control
622 * If requested flow control is set to default, set flow control
623 * based on the EEPROM flow control settings. 608 * based on the EEPROM flow control settings.
624 */ 609 */
625 if (hw->fc.requested_mode == e1000_fc_default) { 610 if (hw->fc.requested_mode == e1000_fc_default) {
@@ -628,8 +613,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
628 goto out; 613 goto out;
629 } 614 }
630 615
631 /* 616 /* We want to save off the original Flow Control configuration just
632 * We want to save off the original Flow Control configuration just
633 * in case we get disconnected and then reconnected into a different 617 * in case we get disconnected and then reconnected into a different
634 * hub or switch with different Flow Control capabilities. 618 * hub or switch with different Flow Control capabilities.
635 */ 619 */
@@ -642,8 +626,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
642 if (ret_val) 626 if (ret_val)
643 goto out; 627 goto out;
644 628
645 /* 629 /* Initialize the flow control address, type, and PAUSE timer
646 * Initialize the flow control address, type, and PAUSE timer
647 * registers to their default values. This is done even if flow 630 * registers to their default values. This is done even if flow
648 * control is disabled, because it does not hurt anything to 631 * control is disabled, because it does not hurt anything to
649 * initialize these registers. 632 * initialize these registers.
@@ -696,16 +679,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
696 s32 ret_val = 0; 679 s32 ret_val = 0;
697 u32 fcrtl = 0, fcrth = 0; 680 u32 fcrtl = 0, fcrth = 0;
698 681
699 /* 682 /* Set the flow control receive threshold registers. Normally,
700 * Set the flow control receive threshold registers. Normally,
701 * these registers will be set to a default threshold that may be 683 * these registers will be set to a default threshold that may be
702 * adjusted later by the driver's runtime code. However, if the 684 * adjusted later by the driver's runtime code. However, if the
703 * ability to transmit pause frames is not enabled, then these 685 * ability to transmit pause frames is not enabled, then these
704 * registers will be set to 0. 686 * registers will be set to 0.
705 */ 687 */
706 if (hw->fc.current_mode & e1000_fc_tx_pause) { 688 if (hw->fc.current_mode & e1000_fc_tx_pause) {
707 /* 689 /* We need to set up the Receive Threshold high and low water
708 * We need to set up the Receive Threshold high and low water
709 * marks as well as (optionally) enabling the transmission of 690 * marks as well as (optionally) enabling the transmission of
710 * XON frames. 691 * XON frames.
711 */ 692 */
@@ -733,8 +714,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
733 s32 ret_val = 0; 714 s32 ret_val = 0;
734 u16 nvm_data; 715 u16 nvm_data;
735 716
736 /* 717 /* Read and store word 0x0F of the EEPROM. This word contains bits
737 * Read and store word 0x0F of the EEPROM. This word contains bits
738 * that determine the hardware's default PAUSE (flow control) mode, 718 * that determine the hardware's default PAUSE (flow control) mode,
739 * a bit that determines whether the HW defaults to enabling or 719 * a bit that determines whether the HW defaults to enabling or
740 * disabling auto-negotiation, and the direction of the 720 * disabling auto-negotiation, and the direction of the
@@ -778,8 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
778 758
779 ctrl = rd32(E1000_CTRL); 759 ctrl = rd32(E1000_CTRL);
780 760
781 /* 761 /* Because we didn't get link via the internal auto-negotiation
782 * Because we didn't get link via the internal auto-negotiation
783 * mechanism (we either forced link or we got link via PHY 762 * mechanism (we either forced link or we got link via PHY
784 * auto-neg), we have to manually enable/disable transmit an 763 * auto-neg), we have to manually enable/disable transmit an
785 * receive flow control. 764 * receive flow control.
@@ -843,8 +822,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
843 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 822 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
844 u16 speed, duplex; 823 u16 speed, duplex;
845 824
846 /* 825 /* Check for the case where we have fiber media and auto-neg failed
847 * Check for the case where we have fiber media and auto-neg failed
848 * so we had to force link. In this case, we need to force the 826 * so we had to force link. In this case, we need to force the
849 * configuration of the MAC to match the "fc" parameter. 827 * configuration of the MAC to match the "fc" parameter.
850 */ 828 */
@@ -861,15 +839,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
861 goto out; 839 goto out;
862 } 840 }
863 841
864 /* 842 /* Check for the case where we have copper media and auto-neg is
865 * Check for the case where we have copper media and auto-neg is
866 * enabled. In this case, we need to check and see if Auto-Neg 843 * enabled. In this case, we need to check and see if Auto-Neg
867 * has completed, and if so, how the PHY and link partner has 844 * has completed, and if so, how the PHY and link partner has
868 * flow control configured. 845 * flow control configured.
869 */ 846 */
870 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { 847 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
871 /* 848 /* Read the MII Status Register and check to see if AutoNeg
872 * Read the MII Status Register and check to see if AutoNeg
873 * has completed. We read this twice because this reg has 849 * has completed. We read this twice because this reg has
874 * some "sticky" (latched) bits. 850 * some "sticky" (latched) bits.
875 */ 851 */
@@ -888,8 +864,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
888 goto out; 864 goto out;
889 } 865 }
890 866
891 /* 867 /* The AutoNeg process has completed, so we now need to
892 * The AutoNeg process has completed, so we now need to
893 * read both the Auto Negotiation Advertisement 868 * read both the Auto Negotiation Advertisement
894 * Register (Address 4) and the Auto_Negotiation Base 869 * Register (Address 4) and the Auto_Negotiation Base
895 * Page Ability Register (Address 5) to determine how 870 * Page Ability Register (Address 5) to determine how
@@ -904,8 +879,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
904 if (ret_val) 879 if (ret_val)
905 goto out; 880 goto out;
906 881
907 /* 882 /* Two bits in the Auto Negotiation Advertisement Register
908 * Two bits in the Auto Negotiation Advertisement Register
909 * (Address 4) and two bits in the Auto Negotiation Base 883 * (Address 4) and two bits in the Auto Negotiation Base
910 * Page Ability Register (Address 5) determine flow control 884 * Page Ability Register (Address 5) determine flow control
911 * for both the PHY and the link partner. The following 885 * for both the PHY and the link partner. The following
@@ -940,8 +914,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
940 */ 914 */
941 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 915 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
942 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 916 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
943 /* 917 /* Now we need to check if the user selected RX ONLY
944 * Now we need to check if the user selected RX ONLY
945 * of pause frames. In this case, we had to advertise 918 * of pause frames. In this case, we had to advertise
946 * FULL flow control because we could not advertise RX 919 * FULL flow control because we could not advertise RX
947 * ONLY. Hence, we must now check to see if we need to 920 * ONLY. Hence, we must now check to see if we need to
@@ -956,8 +929,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
956 "RX PAUSE frames only.\r\n"); 929 "RX PAUSE frames only.\r\n");
957 } 930 }
958 } 931 }
959 /* 932 /* For receiving PAUSE frames ONLY.
960 * For receiving PAUSE frames ONLY.
961 * 933 *
962 * LOCAL DEVICE | LINK PARTNER 934 * LOCAL DEVICE | LINK PARTNER
963 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 935 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -971,8 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
971 hw->fc.current_mode = e1000_fc_tx_pause; 943 hw->fc.current_mode = e1000_fc_tx_pause;
972 hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); 944 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
973 } 945 }
974 /* 946 /* For transmitting PAUSE frames ONLY.
975 * For transmitting PAUSE frames ONLY.
976 * 947 *
977 * LOCAL DEVICE | LINK PARTNER 948 * LOCAL DEVICE | LINK PARTNER
978 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 949 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -986,8 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
986 hw->fc.current_mode = e1000_fc_rx_pause; 957 hw->fc.current_mode = e1000_fc_rx_pause;
987 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 958 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
988 } 959 }
989 /* 960 /* Per the IEEE spec, at this point flow control should be
990 * Per the IEEE spec, at this point flow control should be
991 * disabled. However, we want to consider that we could 961 * disabled. However, we want to consider that we could
992 * be connected to a legacy switch that doesn't advertise 962 * be connected to a legacy switch that doesn't advertise
993 * desired flow control, but can be forced on the link 963 * desired flow control, but can be forced on the link
@@ -1007,9 +977,9 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1007 * be asked to delay transmission of packets than asking 977 * be asked to delay transmission of packets than asking
1008 * our link partner to pause transmission of frames. 978 * our link partner to pause transmission of frames.
1009 */ 979 */
1010 else if ((hw->fc.requested_mode == e1000_fc_none || 980 else if ((hw->fc.requested_mode == e1000_fc_none) ||
1011 hw->fc.requested_mode == e1000_fc_tx_pause) || 981 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
1012 hw->fc.strict_ieee) { 982 (hw->fc.strict_ieee)) {
1013 hw->fc.current_mode = e1000_fc_none; 983 hw->fc.current_mode = e1000_fc_none;
1014 hw_dbg("Flow Control = NONE.\r\n"); 984 hw_dbg("Flow Control = NONE.\r\n");
1015 } else { 985 } else {
@@ -1017,8 +987,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1017 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 987 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
1018 } 988 }
1019 989
1020 /* 990 /* Now we need to do one last check... If we auto-
1021 * Now we need to do one last check... If we auto-
1022 * negotiated to HALF DUPLEX, flow control should not be 991 * negotiated to HALF DUPLEX, flow control should not be
1023 * enabled per IEEE 802.3 spec. 992 * enabled per IEEE 802.3 spec.
1024 */ 993 */
@@ -1031,8 +1000,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1031 if (duplex == HALF_DUPLEX) 1000 if (duplex == HALF_DUPLEX)
1032 hw->fc.current_mode = e1000_fc_none; 1001 hw->fc.current_mode = e1000_fc_none;
1033 1002
1034 /* 1003 /* Now we call a subroutine to actually force the MAC
1035 * Now we call a subroutine to actually force the MAC
1036 * controller to use the correct flow control settings. 1004 * controller to use the correct flow control settings.
1037 */ 1005 */
1038 ret_val = igb_force_mac_fc(hw); 1006 ret_val = igb_force_mac_fc(hw);
@@ -1203,6 +1171,17 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1203 hw_dbg("Half Duplex\n"); 1171 hw_dbg("Half Duplex\n");
1204 } 1172 }
1205 1173
1174 /* Check if it is an I354 2.5Gb backplane connection. */
1175 if (hw->mac.type == e1000_i354) {
1176 if ((status & E1000_STATUS_2P5_SKU) &&
1177 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1178 *speed = SPEED_2500;
1179 *duplex = FULL_DUPLEX;
1180 hw_dbg("2500 Mbs, ");
1181 hw_dbg("Full Duplex\n");
1182 }
1183 }
1184
1206 return 0; 1185 return 0;
1207} 1186}
1208 1187
@@ -1427,8 +1406,7 @@ s32 igb_blink_led(struct e1000_hw *hw)
1427 u32 ledctl_blink = 0; 1406 u32 ledctl_blink = 0;
1428 u32 i; 1407 u32 i;
1429 1408
1430 /* 1409 /* set the blink bit for each LED that's "on" (0x0E)
1431 * set the blink bit for each LED that's "on" (0x0E)
1432 * in ledctl_mode2 1410 * in ledctl_mode2
1433 */ 1411 */
1434 ledctl_blink = hw->mac.ledctl_mode2; 1412 ledctl_blink = hw->mac.ledctl_mode2;
@@ -1467,7 +1445,7 @@ s32 igb_led_off(struct e1000_hw *hw)
1467 * @hw: pointer to the HW structure 1445 * @hw: pointer to the HW structure
1468 * 1446 *
1469 * Returns 0 (0) if successful, else returns -10 1447 * Returns 0 (0) if successful, else returns -10
1470 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued 1448 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1471 * the master requests to be disabled. 1449 * the master requests to be disabled.
1472 * 1450 *
1473 * Disables PCI-Express master access and verifies there are no pending 1451 * Disables PCI-Express master access and verifies there are no pending
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e6d6ce433261..5e13e83cc608 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -35,8 +35,7 @@
35#include "e1000_defines.h" 35#include "e1000_defines.h"
36#include "e1000_i210.h" 36#include "e1000_i210.h"
37 37
38/* 38/* Functions that should not be called directly from drivers but can be used
39 * Functions that should not be called directly from drivers but can be used
40 * by other files in this 'shared code' 39 * by other files in this 'shared code'
41 */ 40 */
42s32 igb_blink_led(struct e1000_hw *hw); 41s32 igb_blink_led(struct e1000_hw *hw);
@@ -49,15 +48,15 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw);
49s32 igb_get_bus_info_pcie(struct e1000_hw *hw); 48s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
50s32 igb_get_hw_semaphore(struct e1000_hw *hw); 49s32 igb_get_hw_semaphore(struct e1000_hw *hw);
51s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, 50s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
52 u16 *duplex); 51 u16 *duplex);
53s32 igb_id_led_init(struct e1000_hw *hw); 52s32 igb_id_led_init(struct e1000_hw *hw);
54s32 igb_led_off(struct e1000_hw *hw); 53s32 igb_led_off(struct e1000_hw *hw);
55void igb_update_mc_addr_list(struct e1000_hw *hw, 54void igb_update_mc_addr_list(struct e1000_hw *hw,
56 u8 *mc_addr_list, u32 mc_addr_count); 55 u8 *mc_addr_list, u32 mc_addr_count);
57s32 igb_setup_link(struct e1000_hw *hw); 56s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw); 57s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, 58s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
60 u32 offset, u8 data); 59 u32 offset, u8 data);
61 60
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 61void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw); 62void igb_clear_vfta(struct e1000_hw *hw);
@@ -80,12 +79,12 @@ enum e1000_mng_mode {
80 e1000_mng_mode_host_if_only 79 e1000_mng_mode_host_if_only
81}; 80};
82 81
83#define E1000_FACTPS_MNGCG 0x20000000 82#define E1000_FACTPS_MNGCG 0x20000000
84 83
85#define E1000_FWSM_MODE_MASK 0xE 84#define E1000_FWSM_MODE_MASK 0xE
86#define E1000_FWSM_MODE_SHIFT 1 85#define E1000_FWSM_MODE_SHIFT 1
87 86
88#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 87#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
89 88
90extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 89extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
91 90
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 38e0df350904..dac1447fabf7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -196,7 +196,8 @@ out:
196 * returns SUCCESS if it successfully received a message notification and 196 * returns SUCCESS if it successfully received a message notification and
197 * copied it into the receive buffer. 197 * copied it into the receive buffer.
198 **/ 198 **/
199static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 199static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
200 u16 mbx_id)
200{ 201{
201 struct e1000_mbx_info *mbx = &hw->mbx; 202 struct e1000_mbx_info *mbx = &hw->mbx;
202 s32 ret_val = -E1000_ERR_MBX; 203 s32 ret_val = -E1000_ERR_MBX;
@@ -222,7 +223,8 @@ out:
222 * returns SUCCESS if it successfully copied message into the buffer and 223 * returns SUCCESS if it successfully copied message into the buffer and
223 * received an ack to that message within delay * timeout period 224 * received an ack to that message within delay * timeout period
224 **/ 225 **/
225static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 226static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
227 u16 mbx_id)
226{ 228{
227 struct e1000_mbx_info *mbx = &hw->mbx; 229 struct e1000_mbx_info *mbx = &hw->mbx;
228 s32 ret_val = -E1000_ERR_MBX; 230 s32 ret_val = -E1000_ERR_MBX;
@@ -325,7 +327,6 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
325 s32 ret_val = -E1000_ERR_MBX; 327 s32 ret_val = -E1000_ERR_MBX;
326 u32 p2v_mailbox; 328 u32 p2v_mailbox;
327 329
328
329 /* Take ownership of the buffer */ 330 /* Take ownership of the buffer */
330 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 331 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
331 332
@@ -347,7 +348,7 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
347 * returns SUCCESS if it successfully copied message into the buffer 348 * returns SUCCESS if it successfully copied message into the buffer
348 **/ 349 **/
349static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 350static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
350 u16 vf_number) 351 u16 vf_number)
351{ 352{
352 s32 ret_val; 353 s32 ret_val;
353 u16 i; 354 u16 i;
@@ -388,7 +389,7 @@ out_no_write:
388 * a message due to a VF request so no polling for message is needed. 389 * a message due to a VF request so no polling for message is needed.
389 **/ 390 **/
390static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 391static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
391 u16 vf_number) 392 u16 vf_number)
392{ 393{
393 s32 ret_val; 394 s32 ret_val;
394 u16 i; 395 u16 i;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index c13b56d9edb2..de9bba41acf3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -30,42 +30,42 @@
30 30
31#include "e1000_hw.h" 31#include "e1000_hw.h"
32 32
33#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ 33#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
34#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ 34#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
35#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 35#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ 36#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ 37#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
38 38
39#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ 39#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
40#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ 40#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
41#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ 41#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
42#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ 42#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
43 43
44#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ 44#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
45 45
46/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the 46/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
47 * PF. The reverse is true if it is E1000_PF_*. 47 * PF. The reverse is true if it is E1000_PF_*.
48 * Message ACK's are the value or'd with 0xF0000000 48 * Message ACK's are the value or'd with 0xF0000000
49 */ 49 */
50#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 50/* Messages below or'd with this are the ACK */
51 * this are the ACK */ 51#define E1000_VT_MSGTYPE_ACK 0x80000000
52#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 52/* Messages below or'd with this are the NACK */
53 * this are the NACK */ 53#define E1000_VT_MSGTYPE_NACK 0x40000000
54#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 54/* Indicates that VF is still clear to send requests */
55 clear to send requests */ 55#define E1000_VT_MSGTYPE_CTS 0x20000000
56#define E1000_VT_MSGINFO_SHIFT 16 56#define E1000_VT_MSGINFO_SHIFT 16
57/* bits 23:16 are used for exra info for certain messages */ 57/* bits 23:16 are used for exra info for certain messages */
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59 59
60#define E1000_VF_RESET 0x01 /* VF requests reset */ 60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ 61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ 62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ 63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ 64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ 65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) 66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
67 67
68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
69 69
70s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); 70s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
71s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); 71s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 5b62adbe134d..7f9cd7cbd353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -289,15 +289,14 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
289 udelay(1); 289 udelay(1);
290 timeout = NVM_MAX_RETRY_SPI; 290 timeout = NVM_MAX_RETRY_SPI;
291 291
292 /* 292 /* Read "Status Register" repeatedly until the LSB is cleared.
293 * Read "Status Register" repeatedly until the LSB is cleared.
294 * The EEPROM will signal that the command has been completed 293 * The EEPROM will signal that the command has been completed
295 * by clearing bit 0 of the internal status register. If it's 294 * by clearing bit 0 of the internal status register. If it's
296 * not cleared within 'timeout', then error out. 295 * not cleared within 'timeout', then error out.
297 */ 296 */
298 while (timeout) { 297 while (timeout) {
299 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, 298 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
300 hw->nvm.opcode_bits); 299 hw->nvm.opcode_bits);
301 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); 300 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
302 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) 301 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
303 break; 302 break;
@@ -335,8 +334,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
335 u16 word_in; 334 u16 word_in;
336 u8 read_opcode = NVM_READ_OPCODE_SPI; 335 u8 read_opcode = NVM_READ_OPCODE_SPI;
337 336
338 /* 337 /* A check for invalid values: offset too large, too many words,
339 * A check for invalid values: offset too large, too many words,
340 * and not enough words. 338 * and not enough words.
341 */ 339 */
342 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 340 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -363,8 +361,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
363 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); 361 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
364 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); 362 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
365 363
366 /* 364 /* Read the data. SPI NVMs increment the address with each byte
367 * Read the data. SPI NVMs increment the address with each byte
368 * read and will roll over if reading beyond the end. This allows 365 * read and will roll over if reading beyond the end. This allows
369 * us to read the whole NVM from any offset 366 * us to read the whole NVM from any offset
370 */ 367 */
@@ -395,8 +392,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
395 u32 i, eerd = 0; 392 u32 i, eerd = 0;
396 s32 ret_val = 0; 393 s32 ret_val = 0;
397 394
398 /* 395 /* A check for invalid values: offset too large, too many words,
399 * A check for invalid values: offset too large, too many words,
400 * and not enough words. 396 * and not enough words.
401 */ 397 */
402 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 398 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -408,7 +404,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
408 404
409 for (i = 0; i < words; i++) { 405 for (i = 0; i < words; i++) {
410 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + 406 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
411 E1000_NVM_RW_REG_START; 407 E1000_NVM_RW_REG_START;
412 408
413 wr32(E1000_EERD, eerd); 409 wr32(E1000_EERD, eerd);
414 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); 410 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
@@ -441,8 +437,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
441 s32 ret_val = -E1000_ERR_NVM; 437 s32 ret_val = -E1000_ERR_NVM;
442 u16 widx = 0; 438 u16 widx = 0;
443 439
444 /* 440 /* A check for invalid values: offset too large, too many words,
445 * A check for invalid values: offset too large, too many words,
446 * and not enough words. 441 * and not enough words.
447 */ 442 */
448 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 443 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -472,8 +467,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
472 467
473 igb_standby_nvm(hw); 468 igb_standby_nvm(hw);
474 469
475 /* 470 /* Some SPI eeproms use the 8th address bit embedded in the
476 * Some SPI eeproms use the 8th address bit embedded in the
477 * opcode 471 * opcode
478 */ 472 */
479 if ((nvm->address_bits == 8) && (offset >= 128)) 473 if ((nvm->address_bits == 8) && (offset >= 128))
@@ -538,8 +532,7 @@ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
538 goto out; 532 goto out;
539 } 533 }
540 534
541 /* 535 /* if nvm_data is not ptr guard the PBA must be in legacy format which
542 * if nvm_data is not ptr guard the PBA must be in legacy format which
543 * means pointer is actually our second data word for the PBA number 536 * means pointer is actually our second data word for the PBA number
544 * and we can decode it into an ascii string 537 * and we can decode it into an ascii string
545 */ 538 */
@@ -728,6 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
728 case e1000_82575: 721 case e1000_82575:
729 case e1000_82576: 722 case e1000_82576:
730 case e1000_82580: 723 case e1000_82580:
724 case e1000_i354:
731 case e1000_i350: 725 case e1000_i350:
732 case e1000_i210: 726 case e1000_i210:
733 break; 727 break;
@@ -746,6 +740,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
746 740
747 switch (hw->mac.type) { 741 switch (hw->mac.type) {
748 case e1000_i210: 742 case e1000_i210:
743 case e1000_i354:
749 case e1000_i350: 744 case e1000_i350:
750 /* find combo image version */ 745 /* find combo image version */
751 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); 746 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2918c979b5bb..115b0da6e013 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -33,29 +33,29 @@
33 33
34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); 34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
36 u16 *phy_ctrl); 36 u16 *phy_ctrl);
37static s32 igb_wait_autoneg(struct e1000_hw *hw); 37static s32 igb_wait_autoneg(struct e1000_hw *hw);
38static s32 igb_set_master_slave_mode(struct e1000_hw *hw); 38static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
39 39
40/* Cable length tables */ 40/* Cable length tables */
41static const u16 e1000_m88_cable_length_table[] = 41static const u16 e1000_m88_cable_length_table[] = {
42 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 42 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ 43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
44 (sizeof(e1000_m88_cable_length_table) / \ 44 (sizeof(e1000_m88_cable_length_table) / \
45 sizeof(e1000_m88_cable_length_table[0])) 45 sizeof(e1000_m88_cable_length_table[0]))
46 46
47static const u16 e1000_igp_2_cable_length_table[] = 47static const u16 e1000_igp_2_cable_length_table[] = {
48 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 48 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
49 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 49 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
50 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 50 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
51 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 51 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
52 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 52 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
53 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 53 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
54 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 54 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
55 104, 109, 114, 118, 121, 124}; 55 104, 109, 114, 118, 121, 124};
56#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 56#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
57 (sizeof(e1000_igp_2_cable_length_table) / \ 57 (sizeof(e1000_igp_2_cable_length_table) / \
58 sizeof(e1000_igp_2_cable_length_table[0])) 58 sizeof(e1000_igp_2_cable_length_table[0]))
59 59
60/** 60/**
61 * igb_check_reset_block - Check if PHY reset is blocked 61 * igb_check_reset_block - Check if PHY reset is blocked
@@ -71,8 +71,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw)
71 71
72 manc = rd32(E1000_MANC); 72 manc = rd32(E1000_MANC);
73 73
74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
75 E1000_BLK_PHY_RESET : 0;
76} 75}
77 76
78/** 77/**
@@ -149,8 +148,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
149 goto out; 148 goto out;
150 } 149 }
151 150
152 /* 151 /* Set up Op-code, Phy Address, and register offset in the MDI
153 * Set up Op-code, Phy Address, and register offset in the MDI
154 * Control register. The MAC will take care of interfacing with the 152 * Control register. The MAC will take care of interfacing with the
155 * PHY to retrieve the desired data. 153 * PHY to retrieve the desired data.
156 */ 154 */
@@ -160,8 +158,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
160 158
161 wr32(E1000_MDIC, mdic); 159 wr32(E1000_MDIC, mdic);
162 160
163 /* 161 /* Poll the ready bit to see if the MDI read completed
164 * Poll the ready bit to see if the MDI read completed
165 * Increasing the time out as testing showed failures with 162 * Increasing the time out as testing showed failures with
166 * the lower time out 163 * the lower time out
167 */ 164 */
@@ -207,8 +204,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
207 goto out; 204 goto out;
208 } 205 }
209 206
210 /* 207 /* Set up Op-code, Phy Address, and register offset in the MDI
211 * Set up Op-code, Phy Address, and register offset in the MDI
212 * Control register. The MAC will take care of interfacing with the 208 * Control register. The MAC will take care of interfacing with the
213 * PHY to retrieve the desired data. 209 * PHY to retrieve the desired data.
214 */ 210 */
@@ -219,8 +215,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
219 215
220 wr32(E1000_MDIC, mdic); 216 wr32(E1000_MDIC, mdic);
221 217
222 /* 218 /* Poll the ready bit to see if the MDI read completed
223 * Poll the ready bit to see if the MDI read completed
224 * Increasing the time out as testing showed failures with 219 * Increasing the time out as testing showed failures with
225 * the lower time out 220 * the lower time out
226 */ 221 */
@@ -259,15 +254,13 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
259 struct e1000_phy_info *phy = &hw->phy; 254 struct e1000_phy_info *phy = &hw->phy;
260 u32 i, i2ccmd = 0; 255 u32 i, i2ccmd = 0;
261 256
262 257 /* Set up Op-code, Phy Address, and register address in the I2CCMD
263 /*
264 * Set up Op-code, Phy Address, and register address in the I2CCMD
265 * register. The MAC will take care of interfacing with the 258 * register. The MAC will take care of interfacing with the
266 * PHY to retrieve the desired data. 259 * PHY to retrieve the desired data.
267 */ 260 */
268 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | 261 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
269 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | 262 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
270 (E1000_I2CCMD_OPCODE_READ)); 263 (E1000_I2CCMD_OPCODE_READ));
271 264
272 wr32(E1000_I2CCMD, i2ccmd); 265 wr32(E1000_I2CCMD, i2ccmd);
273 266
@@ -317,15 +310,14 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
317 /* Swap the data bytes for the I2C interface */ 310 /* Swap the data bytes for the I2C interface */
318 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); 311 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
319 312
320 /* 313 /* Set up Op-code, Phy Address, and register address in the I2CCMD
321 * Set up Op-code, Phy Address, and register address in the I2CCMD
322 * register. The MAC will take care of interfacing with the 314 * register. The MAC will take care of interfacing with the
323 * PHY to retrieve the desired data. 315 * PHY to retrieve the desired data.
324 */ 316 */
325 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | 317 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
326 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | 318 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
327 E1000_I2CCMD_OPCODE_WRITE | 319 E1000_I2CCMD_OPCODE_WRITE |
328 phy_data_swapped); 320 phy_data_swapped);
329 321
330 wr32(E1000_I2CCMD, i2ccmd); 322 wr32(E1000_I2CCMD, i2ccmd);
331 323
@@ -371,8 +363,8 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
371 363
372 if (offset > MAX_PHY_MULTI_PAGE_REG) { 364 if (offset > MAX_PHY_MULTI_PAGE_REG) {
373 ret_val = igb_write_phy_reg_mdic(hw, 365 ret_val = igb_write_phy_reg_mdic(hw,
374 IGP01E1000_PHY_PAGE_SELECT, 366 IGP01E1000_PHY_PAGE_SELECT,
375 (u16)offset); 367 (u16)offset);
376 if (ret_val) { 368 if (ret_val) {
377 hw->phy.ops.release(hw); 369 hw->phy.ops.release(hw);
378 goto out; 370 goto out;
@@ -410,8 +402,8 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
410 402
411 if (offset > MAX_PHY_MULTI_PAGE_REG) { 403 if (offset > MAX_PHY_MULTI_PAGE_REG) {
412 ret_val = igb_write_phy_reg_mdic(hw, 404 ret_val = igb_write_phy_reg_mdic(hw,
413 IGP01E1000_PHY_PAGE_SELECT, 405 IGP01E1000_PHY_PAGE_SELECT,
414 (u16)offset); 406 (u16)offset);
415 if (ret_val) { 407 if (ret_val) {
416 hw->phy.ops.release(hw); 408 hw->phy.ops.release(hw);
417 goto out; 409 goto out;
@@ -419,7 +411,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
419 } 411 }
420 412
421 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 413 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
422 data); 414 data);
423 415
424 hw->phy.ops.release(hw); 416 hw->phy.ops.release(hw);
425 417
@@ -439,7 +431,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
439 s32 ret_val; 431 s32 ret_val;
440 u16 phy_data; 432 u16 phy_data;
441 433
442
443 if (phy->reset_disable) { 434 if (phy->reset_disable) {
444 ret_val = 0; 435 ret_val = 0;
445 goto out; 436 goto out;
@@ -472,8 +463,7 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
472 if (ret_val) 463 if (ret_val)
473 goto out; 464 goto out;
474 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; 465 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
475 /* 466 /* Options:
476 * Options:
477 * 0 - Auto (default) 467 * 0 - Auto (default)
478 * 1 - MDI mode 468 * 1 - MDI mode
479 * 2 - MDI-X mode 469 * 2 - MDI-X mode
@@ -520,8 +510,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
520 510
521 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 511 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
522 512
523 /* 513 /* Options:
524 * Options:
525 * MDI/MDI-X = 0 (default) 514 * MDI/MDI-X = 0 (default)
526 * 0 - Auto for all speeds 515 * 0 - Auto for all speeds
527 * 1 - MDI mode 516 * 1 - MDI mode
@@ -546,8 +535,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
546 break; 535 break;
547 } 536 }
548 537
549 /* 538 /* Options:
550 * Options:
551 * disable_polarity_correction = 0 (default) 539 * disable_polarity_correction = 0 (default)
552 * Automatic Correction for Reversed Cable Polarity 540 * Automatic Correction for Reversed Cable Polarity
553 * 0 - Disabled 541 * 0 - Disabled
@@ -562,12 +550,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
562 goto out; 550 goto out;
563 551
564 if (phy->revision < E1000_REVISION_4) { 552 if (phy->revision < E1000_REVISION_4) {
565 /* 553 /* Force TX_CLK in the Extended PHY Specific Control Register
566 * Force TX_CLK in the Extended PHY Specific Control Register
567 * to 25MHz clock. 554 * to 25MHz clock.
568 */ 555 */
569 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 556 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
570 &phy_data); 557 &phy_data);
571 if (ret_val) 558 if (ret_val)
572 goto out; 559 goto out;
573 560
@@ -630,8 +617,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
630 if (ret_val) 617 if (ret_val)
631 goto out; 618 goto out;
632 619
633 /* 620 /* Options:
634 * Options:
635 * MDI/MDI-X = 0 (default) 621 * MDI/MDI-X = 0 (default)
636 * 0 - Auto for all speeds 622 * 0 - Auto for all speeds
637 * 1 - MDI mode 623 * 1 - MDI mode
@@ -659,8 +645,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
659 break; 645 break;
660 } 646 }
661 647
662 /* 648 /* Options:
663 * Options:
664 * disable_polarity_correction = 0 (default) 649 * disable_polarity_correction = 0 (default)
665 * Automatic Correction for Reversed Cable Polarity 650 * Automatic Correction for Reversed Cable Polarity
666 * 0 - Disabled 651 * 0 - Disabled
@@ -714,14 +699,12 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
714 goto out; 699 goto out;
715 } 700 }
716 701
717 /* 702 /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
718 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
719 * timeout issues when LFS is enabled. 703 * timeout issues when LFS is enabled.
720 */ 704 */
721 msleep(100); 705 msleep(100);
722 706
723 /* 707 /* The NVM settings will configure LPLU in D3 for
724 * The NVM settings will configure LPLU in D3 for
725 * non-IGP1 PHYs. 708 * non-IGP1 PHYs.
726 */ 709 */
727 if (phy->type == e1000_phy_igp) { 710 if (phy->type == e1000_phy_igp) {
@@ -765,8 +748,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
765 748
766 /* set auto-master slave resolution settings */ 749 /* set auto-master slave resolution settings */
767 if (hw->mac.autoneg) { 750 if (hw->mac.autoneg) {
768 /* 751 /* when autonegotiation advertisement is only 1000Mbps then we
769 * when autonegotiation advertisement is only 1000Mbps then we
770 * should disable SmartSpeed and enable Auto MasterSlave 752 * should disable SmartSpeed and enable Auto MasterSlave
771 * resolution as hardware default. 753 * resolution as hardware default.
772 */ 754 */
@@ -844,14 +826,12 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
844 s32 ret_val; 826 s32 ret_val;
845 u16 phy_ctrl; 827 u16 phy_ctrl;
846 828
847 /* 829 /* Perform some bounds checking on the autoneg advertisement
848 * Perform some bounds checking on the autoneg advertisement
849 * parameter. 830 * parameter.
850 */ 831 */
851 phy->autoneg_advertised &= phy->autoneg_mask; 832 phy->autoneg_advertised &= phy->autoneg_mask;
852 833
853 /* 834 /* If autoneg_advertised is zero, we assume it was not defaulted
854 * If autoneg_advertised is zero, we assume it was not defaulted
855 * by the calling code so we set to advertise full capability. 835 * by the calling code so we set to advertise full capability.
856 */ 836 */
857 if (phy->autoneg_advertised == 0) 837 if (phy->autoneg_advertised == 0)
@@ -865,8 +845,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
865 } 845 }
866 hw_dbg("Restarting Auto-Neg\n"); 846 hw_dbg("Restarting Auto-Neg\n");
867 847
868 /* 848 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
869 * Restart auto-negotiation by setting the Auto Neg Enable bit and
870 * the Auto Neg Restart bit in the PHY control register. 849 * the Auto Neg Restart bit in the PHY control register.
871 */ 850 */
872 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); 851 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
@@ -878,8 +857,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
878 if (ret_val) 857 if (ret_val)
879 goto out; 858 goto out;
880 859
881 /* 860 /* Does the user want to wait for Auto-Neg to complete here, or
882 * Does the user want to wait for Auto-Neg to complete here, or
883 * check at a later time (for example, callback routine). 861 * check at a later time (for example, callback routine).
884 */ 862 */
885 if (phy->autoneg_wait_to_complete) { 863 if (phy->autoneg_wait_to_complete) {
@@ -928,16 +906,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
928 goto out; 906 goto out;
929 } 907 }
930 908
931 /* 909 /* Need to parse both autoneg_advertised and fc and set up
932 * Need to parse both autoneg_advertised and fc and set up
933 * the appropriate PHY registers. First we will parse for 910 * the appropriate PHY registers. First we will parse for
934 * autoneg_advertised software override. Since we can advertise 911 * autoneg_advertised software override. Since we can advertise
935 * a plethora of combinations, we need to check each bit 912 * a plethora of combinations, we need to check each bit
936 * individually. 913 * individually.
937 */ 914 */
938 915
939 /* 916 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
940 * First we clear all the 10/100 mb speed bits in the Auto-Neg
941 * Advertisement Register (Address 4) and the 1000 mb speed bits in 917 * Advertisement Register (Address 4) and the 1000 mb speed bits in
942 * the 1000Base-T Control Register (Address 9). 918 * the 1000Base-T Control Register (Address 9).
943 */ 919 */
@@ -983,8 +959,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
983 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 959 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
984 } 960 }
985 961
986 /* 962 /* Check for a software override of the flow control settings, and
987 * Check for a software override of the flow control settings, and
988 * setup the PHY advertisement registers accordingly. If 963 * setup the PHY advertisement registers accordingly. If
989 * auto-negotiation is enabled, then software will have to set the 964 * auto-negotiation is enabled, then software will have to set the
990 * "PAUSE" bits to the correct value in the Auto-Negotiation 965 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1003,15 +978,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
1003 */ 978 */
1004 switch (hw->fc.current_mode) { 979 switch (hw->fc.current_mode) {
1005 case e1000_fc_none: 980 case e1000_fc_none:
1006 /* 981 /* Flow control (RX & TX) is completely disabled by a
1007 * Flow control (RX & TX) is completely disabled by a
1008 * software over-ride. 982 * software over-ride.
1009 */ 983 */
1010 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 984 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1011 break; 985 break;
1012 case e1000_fc_rx_pause: 986 case e1000_fc_rx_pause:
1013 /* 987 /* RX Flow control is enabled, and TX Flow control is
1014 * RX Flow control is enabled, and TX Flow control is
1015 * disabled, by a software over-ride. 988 * disabled, by a software over-ride.
1016 * 989 *
1017 * Since there really isn't a way to advertise that we are 990 * Since there really isn't a way to advertise that we are
@@ -1023,16 +996,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
1023 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 996 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1024 break; 997 break;
1025 case e1000_fc_tx_pause: 998 case e1000_fc_tx_pause:
1026 /* 999 /* TX Flow control is enabled, and RX Flow control is
1027 * TX Flow control is enabled, and RX Flow control is
1028 * disabled, by a software over-ride. 1000 * disabled, by a software over-ride.
1029 */ 1001 */
1030 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 1002 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
1031 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 1003 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
1032 break; 1004 break;
1033 case e1000_fc_full: 1005 case e1000_fc_full:
1034 /* 1006 /* Flow control (both RX and TX) is enabled by a software
1035 * Flow control (both RX and TX) is enabled by a software
1036 * over-ride. 1007 * over-ride.
1037 */ 1008 */
1038 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1009 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1075,18 +1046,15 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
1075 s32 ret_val; 1046 s32 ret_val;
1076 bool link; 1047 bool link;
1077 1048
1078
1079 if (hw->mac.autoneg) { 1049 if (hw->mac.autoneg) {
1080 /* 1050 /* Setup autoneg and flow control advertisement and perform
1081 * Setup autoneg and flow control advertisement and perform
1082 * autonegotiation. 1051 * autonegotiation.
1083 */ 1052 */
1084 ret_val = igb_copper_link_autoneg(hw); 1053 ret_val = igb_copper_link_autoneg(hw);
1085 if (ret_val) 1054 if (ret_val)
1086 goto out; 1055 goto out;
1087 } else { 1056 } else {
1088 /* 1057 /* PHY will be set to 10H, 10F, 100H or 100F
1089 * PHY will be set to 10H, 10F, 100H or 100F
1090 * depending on user settings. 1058 * depending on user settings.
1091 */ 1059 */
1092 hw_dbg("Forcing Speed and Duplex\n"); 1060 hw_dbg("Forcing Speed and Duplex\n");
@@ -1097,14 +1065,10 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
1097 } 1065 }
1098 } 1066 }
1099 1067
1100 /* 1068 /* Check link status. Wait up to 100 microseconds for link to become
1101 * Check link status. Wait up to 100 microseconds for link to become
1102 * valid. 1069 * valid.
1103 */ 1070 */
1104 ret_val = igb_phy_has_link(hw, 1071 ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
1105 COPPER_LINK_UP_LIMIT,
1106 10,
1107 &link);
1108 if (ret_val) 1072 if (ret_val)
1109 goto out; 1073 goto out;
1110 1074
@@ -1145,8 +1109,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1145 if (ret_val) 1109 if (ret_val)
1146 goto out; 1110 goto out;
1147 1111
1148 /* 1112 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
1149 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
1150 * forced whenever speed and duplex are forced. 1113 * forced whenever speed and duplex are forced.
1151 */ 1114 */
1152 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1115 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1167,10 +1130,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1167 if (phy->autoneg_wait_to_complete) { 1130 if (phy->autoneg_wait_to_complete) {
1168 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); 1131 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
1169 1132
1170 ret_val = igb_phy_has_link(hw, 1133 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
1171 PHY_FORCE_LIMIT,
1172 100000,
1173 &link);
1174 if (ret_val) 1134 if (ret_val)
1175 goto out; 1135 goto out;
1176 1136
@@ -1178,10 +1138,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1178 hw_dbg("Link taking longer than expected.\n"); 1138 hw_dbg("Link taking longer than expected.\n");
1179 1139
1180 /* Try once more */ 1140 /* Try once more */
1181 ret_val = igb_phy_has_link(hw, 1141 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
1182 PHY_FORCE_LIMIT,
1183 100000,
1184 &link);
1185 if (ret_val) 1142 if (ret_val)
1186 goto out; 1143 goto out;
1187 } 1144 }
@@ -1209,8 +1166,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1209 1166
1210 /* I210 and I211 devices support Auto-Crossover in forced operation. */ 1167 /* I210 and I211 devices support Auto-Crossover in forced operation. */
1211 if (phy->type != e1000_phy_i210) { 1168 if (phy->type != e1000_phy_i210) {
1212 /* 1169 /* Clear Auto-Crossover to force MDI manually. M88E1000
1213 * Clear Auto-Crossover to force MDI manually. M88E1000
1214 * requires MDI forced whenever speed and duplex are forced. 1170 * requires MDI forced whenever speed and duplex are forced.
1215 */ 1171 */
1216 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, 1172 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
@@ -1266,13 +1222,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1266 if (!reset_dsp) 1222 if (!reset_dsp)
1267 hw_dbg("Link taking longer than expected.\n"); 1223 hw_dbg("Link taking longer than expected.\n");
1268 else { 1224 else {
1269 /* 1225 /* We didn't get link.
1270 * We didn't get link.
1271 * Reset the DSP and cross our fingers. 1226 * Reset the DSP and cross our fingers.
1272 */ 1227 */
1273 ret_val = phy->ops.write_reg(hw, 1228 ret_val = phy->ops.write_reg(hw,
1274 M88E1000_PHY_PAGE_SELECT, 1229 M88E1000_PHY_PAGE_SELECT,
1275 0x001d); 1230 0x001d);
1276 if (ret_val) 1231 if (ret_val)
1277 goto out; 1232 goto out;
1278 ret_val = igb_phy_reset_dsp(hw); 1233 ret_val = igb_phy_reset_dsp(hw);
@@ -1298,8 +1253,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1298 if (ret_val) 1253 if (ret_val)
1299 goto out; 1254 goto out;
1300 1255
1301 /* 1256 /* Resetting the phy means we need to re-force TX_CLK in the
1302 * Resetting the phy means we need to re-force TX_CLK in the
1303 * Extended PHY Specific Control Register to 25MHz clock from 1257 * Extended PHY Specific Control Register to 25MHz clock from
1304 * the reset value of 2.5MHz. 1258 * the reset value of 2.5MHz.
1305 */ 1259 */
@@ -1308,8 +1262,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1308 if (ret_val) 1262 if (ret_val)
1309 goto out; 1263 goto out;
1310 1264
1311 /* 1265 /* In addition, we must re-enable CRS on Tx for both half and full
1312 * In addition, we must re-enable CRS on Tx for both half and full
1313 * duplex. 1266 * duplex.
1314 */ 1267 */
1315 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1268 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1336,7 +1289,7 @@ out:
1336 * take affect. 1289 * take affect.
1337 **/ 1290 **/
1338static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 1291static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1339 u16 *phy_ctrl) 1292 u16 *phy_ctrl)
1340{ 1293{
1341 struct e1000_mac_info *mac = &hw->mac; 1294 struct e1000_mac_info *mac = &hw->mac;
1342 u32 ctrl; 1295 u32 ctrl;
@@ -1417,8 +1370,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1417 data); 1370 data);
1418 if (ret_val) 1371 if (ret_val)
1419 goto out; 1372 goto out;
1420 /* 1373 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1421 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1422 * during Dx states where the power conservation is most 1374 * during Dx states where the power conservation is most
1423 * important. During driver activity we should enable 1375 * important. During driver activity we should enable
1424 * SmartSpeed, so performance is maintained. 1376 * SmartSpeed, so performance is maintained.
@@ -1461,13 +1413,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1461 1413
1462 /* When LPLU is enabled, we should disable SmartSpeed */ 1414 /* When LPLU is enabled, we should disable SmartSpeed */
1463 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1415 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1464 &data); 1416 &data);
1465 if (ret_val) 1417 if (ret_val)
1466 goto out; 1418 goto out;
1467 1419
1468 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1420 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1469 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1421 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1470 data); 1422 data);
1471 } 1423 }
1472 1424
1473out: 1425out:
@@ -1556,8 +1508,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1556 s32 ret_val; 1508 s32 ret_val;
1557 u16 data, offset, mask; 1509 u16 data, offset, mask;
1558 1510
1559 /* 1511 /* Polarity is determined based on the speed of
1560 * Polarity is determined based on the speed of
1561 * our connection. 1512 * our connection.
1562 */ 1513 */
1563 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1514 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1569,8 +1520,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1569 offset = IGP01E1000_PHY_PCS_INIT_REG; 1520 offset = IGP01E1000_PHY_PCS_INIT_REG;
1570 mask = IGP01E1000_PHY_POLARITY_MASK; 1521 mask = IGP01E1000_PHY_POLARITY_MASK;
1571 } else { 1522 } else {
1572 /* 1523 /* This really only applies to 10Mbps since
1573 * This really only applies to 10Mbps since
1574 * there is no polarity for 100Mbps (always 0). 1524 * there is no polarity for 100Mbps (always 0).
1575 */ 1525 */
1576 offset = IGP01E1000_PHY_PORT_STATUS; 1526 offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1589,7 +1539,7 @@ out:
1589} 1539}
1590 1540
1591/** 1541/**
1592 * igb_wait_autoneg - Wait for auto-neg compeletion 1542 * igb_wait_autoneg - Wait for auto-neg completion
1593 * @hw: pointer to the HW structure 1543 * @hw: pointer to the HW structure
1594 * 1544 *
1595 * Waits for auto-negotiation to complete or for the auto-negotiation time 1545 * Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1613,8 +1563,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1613 msleep(100); 1563 msleep(100);
1614 } 1564 }
1615 1565
1616 /* 1566 /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1617 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1618 * has completed. 1567 * has completed.
1619 */ 1568 */
1620 return ret_val; 1569 return ret_val;
@@ -1630,21 +1579,19 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1630 * Polls the PHY status register for link, 'iterations' number of times. 1579 * Polls the PHY status register for link, 'iterations' number of times.
1631 **/ 1580 **/
1632s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, 1581s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1633 u32 usec_interval, bool *success) 1582 u32 usec_interval, bool *success)
1634{ 1583{
1635 s32 ret_val = 0; 1584 s32 ret_val = 0;
1636 u16 i, phy_status; 1585 u16 i, phy_status;
1637 1586
1638 for (i = 0; i < iterations; i++) { 1587 for (i = 0; i < iterations; i++) {
1639 /* 1588 /* Some PHYs require the PHY_STATUS register to be read
1640 * Some PHYs require the PHY_STATUS register to be read
1641 * twice due to the link bit being sticky. No harm doing 1589 * twice due to the link bit being sticky. No harm doing
1642 * it across the board. 1590 * it across the board.
1643 */ 1591 */
1644 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1592 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1645 if (ret_val) { 1593 if (ret_val && usec_interval > 0) {
1646 /* 1594 /* If the first read fails, another entity may have
1647 * If the first read fails, another entity may have
1648 * ownership of the resources, wait and try again to 1595 * ownership of the resources, wait and try again to
1649 * see if they have relinquished the resources yet. 1596 * see if they have relinquished the resources yet.
1650 */ 1597 */
@@ -1735,6 +1682,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1735 phy->max_cable_length = phy_data / (is_cm ? 100 : 1); 1682 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1736 phy->cable_length = phy_data / (is_cm ? 100 : 1); 1683 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1737 break; 1684 break;
1685 case M88E1545_E_PHY_ID:
1738 case I347AT4_E_PHY_ID: 1686 case I347AT4_E_PHY_ID:
1739 /* Remember the original page select and set it to 7 */ 1687 /* Remember the original page select and set it to 7 */
1740 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1688 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1834,10 +1782,10 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1834 u16 cur_agc_index, max_agc_index = 0; 1782 u16 cur_agc_index, max_agc_index = 0;
1835 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1783 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1836 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { 1784 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1837 IGP02E1000_PHY_AGC_A, 1785 IGP02E1000_PHY_AGC_A,
1838 IGP02E1000_PHY_AGC_B, 1786 IGP02E1000_PHY_AGC_B,
1839 IGP02E1000_PHY_AGC_C, 1787 IGP02E1000_PHY_AGC_C,
1840 IGP02E1000_PHY_AGC_D 1788 IGP02E1000_PHY_AGC_D
1841 }; 1789 };
1842 1790
1843 /* Read the AGC registers for all channels */ 1791 /* Read the AGC registers for all channels */
@@ -1846,8 +1794,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1846 if (ret_val) 1794 if (ret_val)
1847 goto out; 1795 goto out;
1848 1796
1849 /* 1797 /* Getting bits 15:9, which represent the combination of
1850 * Getting bits 15:9, which represent the combination of
1851 * coarse and fine gain values. The result is a number 1798 * coarse and fine gain values. The result is a number
1852 * that can be put into the lookup table to obtain the 1799 * that can be put into the lookup table to obtain the
1853 * approximate cable length. 1800 * approximate cable length.
@@ -2167,15 +2114,13 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
2167 hw->phy.ops.write_reg(hw, 0x1796, 0x0008); 2114 hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
2168 /* Change cg_icount + enable integbp for channels BCD */ 2115 /* Change cg_icount + enable integbp for channels BCD */
2169 hw->phy.ops.write_reg(hw, 0x1798, 0xD008); 2116 hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
2170 /* 2117 /* Change cg_icount + enable integbp + change prop_factor_master
2171 * Change cg_icount + enable integbp + change prop_factor_master
2172 * to 8 for channel A 2118 * to 8 for channel A
2173 */ 2119 */
2174 hw->phy.ops.write_reg(hw, 0x1898, 0xD918); 2120 hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
2175 /* Disable AHT in Slave mode on channel A */ 2121 /* Disable AHT in Slave mode on channel A */
2176 hw->phy.ops.write_reg(hw, 0x187A, 0x0800); 2122 hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
2177 /* 2123 /* Enable LPLU and disable AN to 1000 in non-D0a states,
2178 * Enable LPLU and disable AN to 1000 in non-D0a states,
2179 * Enable SPD+B2B 2124 * Enable SPD+B2B
2180 */ 2125 */
2181 hw->phy.ops.write_reg(hw, 0x0019, 0x008D); 2126 hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
@@ -2257,8 +2202,8 @@ static s32 igb_check_polarity_82580(struct e1000_hw *hw)
2257 2202
2258 if (!ret_val) 2203 if (!ret_val)
2259 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) 2204 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
2260 ? e1000_rev_polarity_reversed 2205 ? e1000_rev_polarity_reversed
2261 : e1000_rev_polarity_normal; 2206 : e1000_rev_polarity_normal;
2262 2207
2263 return ret_val; 2208 return ret_val;
2264} 2209}
@@ -2278,7 +2223,6 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2278 u16 phy_data; 2223 u16 phy_data;
2279 bool link; 2224 bool link;
2280 2225
2281
2282 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 2226 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
2283 if (ret_val) 2227 if (ret_val)
2284 goto out; 2228 goto out;
@@ -2289,8 +2233,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2289 if (ret_val) 2233 if (ret_val)
2290 goto out; 2234 goto out;
2291 2235
2292 /* 2236 /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
2293 * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
2294 * forced whenever speed and duplex are forced. 2237 * forced whenever speed and duplex are forced.
2295 */ 2238 */
2296 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); 2239 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
@@ -2310,10 +2253,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2310 if (phy->autoneg_wait_to_complete) { 2253 if (phy->autoneg_wait_to_complete) {
2311 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); 2254 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
2312 2255
2313 ret_val = igb_phy_has_link(hw, 2256 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
2314 PHY_FORCE_LIMIT,
2315 100000,
2316 &link);
2317 if (ret_val) 2257 if (ret_val)
2318 goto out; 2258 goto out;
2319 2259
@@ -2321,10 +2261,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2321 hw_dbg("Link taking longer than expected.\n"); 2261 hw_dbg("Link taking longer than expected.\n");
2322 2262
2323 /* Try once more */ 2263 /* Try once more */
2324 ret_val = igb_phy_has_link(hw, 2264 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
2325 PHY_FORCE_LIMIT,
2326 100000,
2327 &link);
2328 if (ret_val) 2265 if (ret_val)
2329 goto out; 2266 goto out;
2330 } 2267 }
@@ -2349,7 +2286,6 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2349 u16 data; 2286 u16 data;
2350 bool link; 2287 bool link;
2351 2288
2352
2353 ret_val = igb_phy_has_link(hw, 1, 0, &link); 2289 ret_val = igb_phy_has_link(hw, 1, 0, &link);
2354 if (ret_val) 2290 if (ret_val)
2355 goto out; 2291 goto out;
@@ -2383,12 +2319,12 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2383 goto out; 2319 goto out;
2384 2320
2385 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) 2321 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
2386 ? e1000_1000t_rx_status_ok 2322 ? e1000_1000t_rx_status_ok
2387 : e1000_1000t_rx_status_not_ok; 2323 : e1000_1000t_rx_status_not_ok;
2388 2324
2389 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) 2325 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
2390 ? e1000_1000t_rx_status_ok 2326 ? e1000_1000t_rx_status_ok
2391 : e1000_1000t_rx_status_not_ok; 2327 : e1000_1000t_rx_status_not_ok;
2392 } else { 2328 } else {
2393 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; 2329 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2394 phy->local_rx = e1000_1000t_rx_status_undefined; 2330 phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2412,13 +2348,12 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
2412 s32 ret_val; 2348 s32 ret_val;
2413 u16 phy_data, length; 2349 u16 phy_data, length;
2414 2350
2415
2416 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); 2351 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
2417 if (ret_val) 2352 if (ret_val)
2418 goto out; 2353 goto out;
2419 2354
2420 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> 2355 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
2421 I82580_DSTATUS_CABLE_LENGTH_SHIFT; 2356 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
2422 2357
2423 if (length == E1000_CABLE_LENGTH_UNDEFINED) 2358 if (length == E1000_CABLE_LENGTH_UNDEFINED)
2424 ret_val = -E1000_ERR_PHY; 2359 ret_val = -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 15343286082e..82632c6c53af 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -65,6 +65,7 @@
65#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 65#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
66#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 66#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
67#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 67#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
68#define E1000_LEDMUX 0x08130 /* LED MUX Control */
68#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 69#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
69#define E1000_PBS 0x01008 /* Packet Buffer Size */ 70#define E1000_PBS 0x01008 /* Packet Buffer Size */
70#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 71#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -83,6 +84,9 @@
83#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ 84#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
84#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ 85#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
85#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ 86#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
87#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
88#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
89#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
86 90
87/* IEEE 1588 TIMESYNCH */ 91/* IEEE 1588 TIMESYNCH */
88#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ 92#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -117,21 +121,21 @@
117#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 121#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
118 122
119/* DMA Coalescing registers */ 123/* DMA Coalescing registers */
120#define E1000_DMACR 0x02508 /* Control Register */ 124#define E1000_DMACR 0x02508 /* Control Register */
121#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ 125#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
122#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ 126#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
123#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ 127#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
124#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ 128#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
125#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ 129#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
126#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 130#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
127 131
128/* TX Rate Limit Registers */ 132/* TX Rate Limit Registers */
129#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ 133#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
130#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ 134#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
131#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ 135#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
132 136
133/* Split and Replication RX Control - RW */ 137/* Split and Replication RX Control - RW */
134#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ 138#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
135 139
136/* Thermal sensor configuration and status registers */ 140/* Thermal sensor configuration and status registers */
137#define E1000_THMJT 0x08100 /* Junction Temperature */ 141#define E1000_THMJT 0x08100 /* Junction Temperature */
@@ -140,8 +144,7 @@
140#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ 144#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
141#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 145#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
142 146
143/* 147/* Convenience macros
144 * Convenience macros
145 * 148 *
146 * Note: "_n" is the queue number of the register to be written to. 149 * Note: "_n" is the queue number of the register to be written to.
147 * 150 *
@@ -287,7 +290,7 @@
287#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 290#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
288#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 291#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
289#define E1000_RA 0x05400 /* Receive Address - RW Array */ 292#define E1000_RA 0x05400 /* Receive Address - RW Array */
290#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 293#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
291#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) 294#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
292#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 295#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
293 (0x054E0 + ((_i - 16) * 8))) 296 (0x054E0 + ((_i - 16) * 8)))
@@ -360,21 +363,25 @@
360 (readl(hw->hw_addr + reg + ((offset) << 2))) 363 (readl(hw->hw_addr + reg + ((offset) << 2)))
361 364
362/* DMA Coalescing registers */ 365/* DMA Coalescing registers */
363#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 366#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
364 367
365/* Energy Efficient Ethernet "EEE" register */ 368/* Energy Efficient Ethernet "EEE" register */
366#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ 369#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
367#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ 370#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
368#define E1000_EEE_SU 0X0E34 /* EEE Setup */ 371#define E1000_EEE_SU 0X0E34 /* EEE Setup */
372#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
373#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
374#define E1000_MMDAC 13 /* MMD Access Control */
375#define E1000_MMDAAD 14 /* MMD Access Address/Data */
369 376
370/* Thermal Sensor Register */ 377/* Thermal Sensor Register */
371#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 378#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
372 379
373/* OS2BMC Registers */ 380/* OS2BMC Registers */
374#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ 381#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
375#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ 382#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
376#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ 383#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
377#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ 384#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
378 385
379#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ 386#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
380#define E1000_I210_FLMNGCTL 0x12038 387#define E1000_I210_FLMNGCTL 0x12038
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ab577a763a20..9d6c075e232d 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -44,54 +44,54 @@
44 44
45struct igb_adapter; 45struct igb_adapter;
46 46
47#define E1000_PCS_CFG_IGN_SD 1 47#define E1000_PCS_CFG_IGN_SD 1
48 48
49/* Interrupt defines */ 49/* Interrupt defines */
50#define IGB_START_ITR 648 /* ~6000 ints/sec */ 50#define IGB_START_ITR 648 /* ~6000 ints/sec */
51#define IGB_4K_ITR 980 51#define IGB_4K_ITR 980
52#define IGB_20K_ITR 196 52#define IGB_20K_ITR 196
53#define IGB_70K_ITR 56 53#define IGB_70K_ITR 56
54 54
55/* TX/RX descriptor defines */ 55/* TX/RX descriptor defines */
56#define IGB_DEFAULT_TXD 256 56#define IGB_DEFAULT_TXD 256
57#define IGB_DEFAULT_TX_WORK 128 57#define IGB_DEFAULT_TX_WORK 128
58#define IGB_MIN_TXD 80 58#define IGB_MIN_TXD 80
59#define IGB_MAX_TXD 4096 59#define IGB_MAX_TXD 4096
60 60
61#define IGB_DEFAULT_RXD 256 61#define IGB_DEFAULT_RXD 256
62#define IGB_MIN_RXD 80 62#define IGB_MIN_RXD 80
63#define IGB_MAX_RXD 4096 63#define IGB_MAX_RXD 4096
64 64
65#define IGB_DEFAULT_ITR 3 /* dynamic */ 65#define IGB_DEFAULT_ITR 3 /* dynamic */
66#define IGB_MAX_ITR_USECS 10000 66#define IGB_MAX_ITR_USECS 10000
67#define IGB_MIN_ITR_USECS 10 67#define IGB_MIN_ITR_USECS 10
68#define NON_Q_VECTORS 1 68#define NON_Q_VECTORS 1
69#define MAX_Q_VECTORS 8 69#define MAX_Q_VECTORS 8
70 70
71/* Transmit and receive queues */ 71/* Transmit and receive queues */
72#define IGB_MAX_RX_QUEUES 8 72#define IGB_MAX_RX_QUEUES 8
73#define IGB_MAX_RX_QUEUES_82575 4 73#define IGB_MAX_RX_QUEUES_82575 4
74#define IGB_MAX_RX_QUEUES_I211 2 74#define IGB_MAX_RX_QUEUES_I211 2
75#define IGB_MAX_TX_QUEUES 8 75#define IGB_MAX_TX_QUEUES 8
76#define IGB_MAX_VF_MC_ENTRIES 30 76#define IGB_MAX_VF_MC_ENTRIES 30
77#define IGB_MAX_VF_FUNCTIONS 8 77#define IGB_MAX_VF_FUNCTIONS 8
78#define IGB_MAX_VFTA_ENTRIES 128 78#define IGB_MAX_VFTA_ENTRIES 128
79#define IGB_82576_VF_DEV_ID 0x10CA 79#define IGB_82576_VF_DEV_ID 0x10CA
80#define IGB_I350_VF_DEV_ID 0x1520 80#define IGB_I350_VF_DEV_ID 0x1520
81 81
82/* NVM version defines */ 82/* NVM version defines */
83#define IGB_MAJOR_MASK 0xF000 83#define IGB_MAJOR_MASK 0xF000
84#define IGB_MINOR_MASK 0x0FF0 84#define IGB_MINOR_MASK 0x0FF0
85#define IGB_BUILD_MASK 0x000F 85#define IGB_BUILD_MASK 0x000F
86#define IGB_COMB_VER_MASK 0x00FF 86#define IGB_COMB_VER_MASK 0x00FF
87#define IGB_MAJOR_SHIFT 12 87#define IGB_MAJOR_SHIFT 12
88#define IGB_MINOR_SHIFT 4 88#define IGB_MINOR_SHIFT 4
89#define IGB_COMB_VER_SHFT 8 89#define IGB_COMB_VER_SHFT 8
90#define IGB_NVM_VER_INVALID 0xFFFF 90#define IGB_NVM_VER_INVALID 0xFFFF
91#define IGB_ETRACK_SHIFT 16 91#define IGB_ETRACK_SHIFT 16
92#define NVM_ETRACK_WORD 0x0042 92#define NVM_ETRACK_WORD 0x0042
93#define NVM_COMB_VER_OFF 0x0083 93#define NVM_COMB_VER_OFF 0x0083
94#define NVM_COMB_VER_PTR 0x003d 94#define NVM_COMB_VER_PTR 0x003d
95 95
96struct vf_data_storage { 96struct vf_data_storage {
97 unsigned char vf_mac_addresses[ETH_ALEN]; 97 unsigned char vf_mac_addresses[ETH_ALEN];
@@ -103,6 +103,7 @@ struct vf_data_storage {
103 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 103 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
104 u16 pf_qos; 104 u16 pf_qos;
105 u16 tx_rate; 105 u16 tx_rate;
106 bool spoofchk_enabled;
106}; 107};
107 108
108#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 109#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -121,14 +122,14 @@ struct vf_data_storage {
121 * descriptors until either it has this many to write back, or the 122 * descriptors until either it has this many to write back, or the
122 * ITR timer expires. 123 * ITR timer expires.
123 */ 124 */
124#define IGB_RX_PTHRESH 8 125#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
125#define IGB_RX_HTHRESH 8 126#define IGB_RX_HTHRESH 8
126#define IGB_TX_PTHRESH 8 127#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
127#define IGB_TX_HTHRESH 1 128#define IGB_TX_HTHRESH 1
128#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ 129#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
129 adapter->msix_entries) ? 1 : 4) 130 adapter->msix_entries) ? 1 : 4)
130#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ 131#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
131 adapter->msix_entries) ? 1 : 16) 132 adapter->msix_entries) ? 1 : 16)
132 133
133/* this is the size past which hardware will drop packets when setting LPE=0 */ 134/* this is the size past which hardware will drop packets when setting LPE=0 */
134#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 135#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -140,17 +141,17 @@ struct vf_data_storage {
140#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 141#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
141 142
142/* How many Rx Buffers do we bundle into one write to the hardware ? */ 143/* How many Rx Buffers do we bundle into one write to the hardware ? */
143#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 144#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
144 145
145#define AUTO_ALL_MODES 0 146#define AUTO_ALL_MODES 0
146#define IGB_EEPROM_APME 0x0400 147#define IGB_EEPROM_APME 0x0400
147 148
148#ifndef IGB_MASTER_SLAVE 149#ifndef IGB_MASTER_SLAVE
149/* Switch to override PHY master/slave setting */ 150/* Switch to override PHY master/slave setting */
150#define IGB_MASTER_SLAVE e1000_ms_hw_default 151#define IGB_MASTER_SLAVE e1000_ms_hw_default
151#endif 152#endif
152 153
153#define IGB_MNG_VLAN_NONE -1 154#define IGB_MNG_VLAN_NONE -1
154 155
155enum igb_tx_flags { 156enum igb_tx_flags {
156 /* cmd_type flags */ 157 /* cmd_type flags */
@@ -164,11 +165,10 @@ enum igb_tx_flags {
164}; 165};
165 166
166/* VLAN info */ 167/* VLAN info */
167#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 168#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
168#define IGB_TX_FLAGS_VLAN_SHIFT 16 169#define IGB_TX_FLAGS_VLAN_SHIFT 16
169 170
170/* 171/* The largest size we can write to the descriptor is 65535. In order to
171 * The largest size we can write to the descriptor is 65535. In order to
172 * maintain a power of two alignment we have to limit ourselves to 32K. 172 * maintain a power of two alignment we have to limit ourselves to 32K.
173 */ 173 */
174#define IGB_MAX_TXD_PWR 15 174#define IGB_MAX_TXD_PWR 15
@@ -178,8 +178,17 @@ enum igb_tx_flags {
178#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) 178#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
179#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 179#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
180 180
181/* EEPROM byte offsets */
182#define IGB_SFF_8472_SWAP 0x5C
183#define IGB_SFF_8472_COMP 0x5E
184
185/* Bitmasks */
186#define IGB_SFF_ADDRESSING_MODE 0x4
187#define IGB_SFF_8472_UNSUP 0x00
188
181/* wrapper around a pointer to a socket buffer, 189/* wrapper around a pointer to a socket buffer,
182 * so a DMA handle can be stored along with the buffer */ 190 * so a DMA handle can be stored along with the buffer
191 */
183struct igb_tx_buffer { 192struct igb_tx_buffer {
184 union e1000_adv_tx_desc *next_to_watch; 193 union e1000_adv_tx_desc *next_to_watch;
185 unsigned long time_stamp; 194 unsigned long time_stamp;
@@ -290,11 +299,11 @@ enum e1000_ring_flags_t {
290 299
291#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) 300#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
292 301
293#define IGB_RX_DESC(R, i) \ 302#define IGB_RX_DESC(R, i) \
294 (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) 303 (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
295#define IGB_TX_DESC(R, i) \ 304#define IGB_TX_DESC(R, i) \
296 (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) 305 (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
297#define IGB_TX_CTXTDESC(R, i) \ 306#define IGB_TX_CTXTDESC(R, i) \
298 (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) 307 (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
299 308
300/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ 309/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
@@ -453,12 +462,12 @@ struct igb_adapter {
453#define IGB_FLAG_WOL_SUPPORTED (1 << 8) 462#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
454 463
455/* DMA Coalescing defines */ 464/* DMA Coalescing defines */
456#define IGB_MIN_TXPBSIZE 20408 465#define IGB_MIN_TXPBSIZE 20408
457#define IGB_TX_BUF_4096 4096 466#define IGB_TX_BUF_4096 4096
458#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 467#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
459 468
460#define IGB_82576_TSYNC_SHIFT 19 469#define IGB_82576_TSYNC_SHIFT 19
461#define IGB_TS_HDR_LEN 16 470#define IGB_TS_HDR_LEN 16
462enum e1000_state_t { 471enum e1000_state_t {
463 __IGB_TESTING, 472 __IGB_TESTING,
464 __IGB_RESETTING, 473 __IGB_RESETTING,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a3830a8ba4c1..7876240fa74e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/highmem.h> 40#include <linux/highmem.h>
41#include <linux/mdio.h>
41 42
42#include "igb.h" 43#include "igb.h"
43 44
@@ -178,44 +179,67 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
178 179
179 ecmd->port = PORT_TP; 180 ecmd->port = PORT_TP;
180 ecmd->phy_address = hw->phy.addr; 181 ecmd->phy_address = hw->phy.addr;
182 ecmd->transceiver = XCVR_INTERNAL;
181 } else { 183 } else {
182 ecmd->supported = (SUPPORTED_1000baseT_Full | 184 ecmd->supported = (SUPPORTED_1000baseT_Full |
183 SUPPORTED_FIBRE | 185 SUPPORTED_100baseT_Full |
184 SUPPORTED_Autoneg); 186 SUPPORTED_FIBRE |
187 SUPPORTED_Autoneg |
188 SUPPORTED_Pause);
189 if (hw->mac.type == e1000_i354)
190 ecmd->supported |= SUPPORTED_2500baseX_Full;
191
192 ecmd->advertising = ADVERTISED_FIBRE;
193
194 switch (adapter->link_speed) {
195 case SPEED_2500:
196 ecmd->advertising = ADVERTISED_2500baseX_Full;
197 break;
198 case SPEED_1000:
199 ecmd->advertising = ADVERTISED_1000baseT_Full;
200 break;
201 case SPEED_100:
202 ecmd->advertising = ADVERTISED_100baseT_Full;
203 break;
204 default:
205 break;
206 }
185 207
186 ecmd->advertising = (ADVERTISED_1000baseT_Full | 208 if (hw->mac.autoneg == 1)
187 ADVERTISED_FIBRE | 209 ecmd->advertising |= ADVERTISED_Autoneg;
188 ADVERTISED_Autoneg |
189 ADVERTISED_Pause);
190 210
191 ecmd->port = PORT_FIBRE; 211 ecmd->port = PORT_FIBRE;
212 ecmd->transceiver = XCVR_EXTERNAL;
192 } 213 }
193 214
194 ecmd->transceiver = XCVR_INTERNAL;
195
196 status = rd32(E1000_STATUS); 215 status = rd32(E1000_STATUS);
197 216
198 if (status & E1000_STATUS_LU) { 217 if (status & E1000_STATUS_LU) {
199 218 if ((hw->mac.type == e1000_i354) &&
200 if ((status & E1000_STATUS_SPEED_1000) || 219 (status & E1000_STATUS_2P5_SKU) &&
201 hw->phy.media_type != e1000_media_type_copper) 220 !(status & E1000_STATUS_2P5_SKU_OVER))
202 ethtool_cmd_speed_set(ecmd, SPEED_1000); 221 ecmd->speed = SPEED_2500;
222 else if (status & E1000_STATUS_SPEED_1000)
223 ecmd->speed = SPEED_1000;
203 else if (status & E1000_STATUS_SPEED_100) 224 else if (status & E1000_STATUS_SPEED_100)
204 ethtool_cmd_speed_set(ecmd, SPEED_100); 225 ecmd->speed = SPEED_100;
205 else 226 else
206 ethtool_cmd_speed_set(ecmd, SPEED_10); 227 ecmd->speed = SPEED_10;
207
208 if ((status & E1000_STATUS_FD) || 228 if ((status & E1000_STATUS_FD) ||
209 hw->phy.media_type != e1000_media_type_copper) 229 hw->phy.media_type != e1000_media_type_copper)
210 ecmd->duplex = DUPLEX_FULL; 230 ecmd->duplex = DUPLEX_FULL;
211 else 231 else
212 ecmd->duplex = DUPLEX_HALF; 232 ecmd->duplex = DUPLEX_HALF;
213 } else { 233 } else {
214 ethtool_cmd_speed_set(ecmd, -1); 234 ecmd->speed = -1;
215 ecmd->duplex = -1; 235 ecmd->duplex = -1;
216 } 236 }
217 237
218 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 238 if ((hw->phy.media_type == e1000_media_type_fiber) ||
239 hw->mac.autoneg)
240 ecmd->autoneg = AUTONEG_ENABLE;
241 else
242 ecmd->autoneg = AUTONEG_DISABLE;
219 243
220 /* MDI-X => 2; MDI =>1; Invalid =>0 */ 244 /* MDI-X => 2; MDI =>1; Invalid =>0 */
221 if (hw->phy.media_type == e1000_media_type_copper) 245 if (hw->phy.media_type == e1000_media_type_copper)
@@ -238,15 +262,15 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
238 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
239 263
240 /* When SoL/IDER sessions are active, autoneg/speed/duplex 264 /* When SoL/IDER sessions are active, autoneg/speed/duplex
241 * cannot be changed */ 265 * cannot be changed
266 */
242 if (igb_check_reset_block(hw)) { 267 if (igb_check_reset_block(hw)) {
243 dev_err(&adapter->pdev->dev, 268 dev_err(&adapter->pdev->dev,
244 "Cannot change link characteristics when SoL/IDER is active.\n"); 269 "Cannot change link characteristics when SoL/IDER is active.\n");
245 return -EINVAL; 270 return -EINVAL;
246 } 271 }
247 272
248 /* 273 /* MDI setting is only allowed when autoneg enabled because
249 * MDI setting is only allowed when autoneg enabled because
250 * some hardware doesn't allow MDI setting when speed or 274 * some hardware doesn't allow MDI setting when speed or
251 * duplex is forced. 275 * duplex is forced.
252 */ 276 */
@@ -266,9 +290,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
266 290
267 if (ecmd->autoneg == AUTONEG_ENABLE) { 291 if (ecmd->autoneg == AUTONEG_ENABLE) {
268 hw->mac.autoneg = 1; 292 hw->mac.autoneg = 1;
269 hw->phy.autoneg_advertised = ecmd->advertising | 293 if (hw->phy.media_type == e1000_media_type_fiber) {
270 ADVERTISED_TP | 294 hw->phy.autoneg_advertised = ecmd->advertising |
271 ADVERTISED_Autoneg; 295 ADVERTISED_FIBRE |
296 ADVERTISED_Autoneg;
297 switch (adapter->link_speed) {
298 case SPEED_2500:
299 hw->phy.autoneg_advertised =
300 ADVERTISED_2500baseX_Full;
301 break;
302 case SPEED_1000:
303 hw->phy.autoneg_advertised =
304 ADVERTISED_1000baseT_Full;
305 break;
306 case SPEED_100:
307 hw->phy.autoneg_advertised =
308 ADVERTISED_100baseT_Full;
309 break;
310 default:
311 break;
312 }
313 } else {
314 hw->phy.autoneg_advertised = ecmd->advertising |
315 ADVERTISED_TP |
316 ADVERTISED_Autoneg;
317 }
272 ecmd->advertising = hw->phy.autoneg_advertised; 318 ecmd->advertising = hw->phy.autoneg_advertised;
273 if (adapter->fc_autoneg) 319 if (adapter->fc_autoneg)
274 hw->fc.requested_mode = e1000_fc_default; 320 hw->fc.requested_mode = e1000_fc_default;
@@ -283,8 +329,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
283 329
284 /* MDI-X => 2; MDI => 1; Auto => 3 */ 330 /* MDI-X => 2; MDI => 1; Auto => 3 */
285 if (ecmd->eth_tp_mdix_ctrl) { 331 if (ecmd->eth_tp_mdix_ctrl) {
286 /* 332 /* fix up the value for auto (3 => 0) as zero is mapped
287 * fix up the value for auto (3 => 0) as zero is mapped
288 * internally to auto 333 * internally to auto
289 */ 334 */
290 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) 335 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -309,8 +354,7 @@ static u32 igb_get_link(struct net_device *netdev)
309 struct igb_adapter *adapter = netdev_priv(netdev); 354 struct igb_adapter *adapter = netdev_priv(netdev);
310 struct e1000_mac_info *mac = &adapter->hw.mac; 355 struct e1000_mac_info *mac = &adapter->hw.mac;
311 356
312 /* 357 /* If the link is not reported up to netdev, interrupts are disabled,
313 * If the link is not reported up to netdev, interrupts are disabled,
314 * and so the physical link state may have changed since we last 358 * and so the physical link state may have changed since we last
315 * looked. Set get_link_status to make sure that the true link 359 * looked. Set get_link_status to make sure that the true link
316 * state is interrogated, rather than pulling a cached and possibly 360 * state is interrogated, rather than pulling a cached and possibly
@@ -430,7 +474,8 @@ static void igb_get_regs(struct net_device *netdev,
430 474
431 /* Interrupt */ 475 /* Interrupt */
432 /* Reading EICS for EICR because they read the 476 /* Reading EICS for EICR because they read the
433 * same but EICS does not clear on read */ 477 * same but EICS does not clear on read
478 */
434 regs_buff[13] = rd32(E1000_EICS); 479 regs_buff[13] = rd32(E1000_EICS);
435 regs_buff[14] = rd32(E1000_EICS); 480 regs_buff[14] = rd32(E1000_EICS);
436 regs_buff[15] = rd32(E1000_EIMS); 481 regs_buff[15] = rd32(E1000_EIMS);
@@ -438,7 +483,8 @@ static void igb_get_regs(struct net_device *netdev,
438 regs_buff[17] = rd32(E1000_EIAC); 483 regs_buff[17] = rd32(E1000_EIAC);
439 regs_buff[18] = rd32(E1000_EIAM); 484 regs_buff[18] = rd32(E1000_EIAM);
440 /* Reading ICS for ICR because they read the 485 /* Reading ICS for ICR because they read the
441 * same but ICS does not clear on read */ 486 * same but ICS does not clear on read
487 */
442 regs_buff[19] = rd32(E1000_ICS); 488 regs_buff[19] = rd32(E1000_ICS);
443 regs_buff[20] = rd32(E1000_ICS); 489 regs_buff[20] = rd32(E1000_ICS);
444 regs_buff[21] = rd32(E1000_IMS); 490 regs_buff[21] = rd32(E1000_IMS);
@@ -688,12 +734,12 @@ static int igb_get_eeprom(struct net_device *netdev,
688 734
689 if (hw->nvm.type == e1000_nvm_eeprom_spi) 735 if (hw->nvm.type == e1000_nvm_eeprom_spi)
690 ret_val = hw->nvm.ops.read(hw, first_word, 736 ret_val = hw->nvm.ops.read(hw, first_word,
691 last_word - first_word + 1, 737 last_word - first_word + 1,
692 eeprom_buff); 738 eeprom_buff);
693 else { 739 else {
694 for (i = 0; i < last_word - first_word + 1; i++) { 740 for (i = 0; i < last_word - first_word + 1; i++) {
695 ret_val = hw->nvm.ops.read(hw, first_word + i, 1, 741 ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
696 &eeprom_buff[i]); 742 &eeprom_buff[i]);
697 if (ret_val) 743 if (ret_val)
698 break; 744 break;
699 } 745 }
@@ -740,15 +786,17 @@ static int igb_set_eeprom(struct net_device *netdev,
740 ptr = (void *)eeprom_buff; 786 ptr = (void *)eeprom_buff;
741 787
742 if (eeprom->offset & 1) { 788 if (eeprom->offset & 1) {
743 /* need read/modify/write of first changed EEPROM word */ 789 /* need read/modify/write of first changed EEPROM word
744 /* only the second byte of the word is being modified */ 790 * only the second byte of the word is being modified
791 */
745 ret_val = hw->nvm.ops.read(hw, first_word, 1, 792 ret_val = hw->nvm.ops.read(hw, first_word, 1,
746 &eeprom_buff[0]); 793 &eeprom_buff[0]);
747 ptr++; 794 ptr++;
748 } 795 }
749 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 796 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
750 /* need read/modify/write of last changed EEPROM word */ 797 /* need read/modify/write of last changed EEPROM word
751 /* only the first byte of the word is being modified */ 798 * only the first byte of the word is being modified
799 */
752 ret_val = hw->nvm.ops.read(hw, last_word, 1, 800 ret_val = hw->nvm.ops.read(hw, last_word, 1,
753 &eeprom_buff[last_word - first_word]); 801 &eeprom_buff[last_word - first_word]);
754 } 802 }
@@ -763,10 +811,11 @@ static int igb_set_eeprom(struct net_device *netdev,
763 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 811 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
764 812
765 ret_val = hw->nvm.ops.write(hw, first_word, 813 ret_val = hw->nvm.ops.write(hw, first_word,
766 last_word - first_word + 1, eeprom_buff); 814 last_word - first_word + 1, eeprom_buff);
767 815
768 /* Update the checksum over the first part of the EEPROM if needed 816 /* Update the checksum over the first part of the EEPROM if needed
769 * and flush shadow RAM for 82573 controllers */ 817 * and flush shadow RAM for 82573 controllers
818 */
770 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 819 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
771 hw->nvm.ops.update(hw); 820 hw->nvm.ops.update(hw);
772 821
@@ -783,8 +832,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
783 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); 832 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
784 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); 833 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
785 834
786 /* 835 /* EEPROM image version # is reported as firmware version # for
787 * EEPROM image version # is reported as firmware version # for
788 * 82575 controllers 836 * 82575 controllers
789 */ 837 */
790 strlcpy(drvinfo->fw_version, adapter->fw_version, 838 strlcpy(drvinfo->fw_version, adapter->fw_version,
@@ -847,9 +895,11 @@ static int igb_set_ringparam(struct net_device *netdev,
847 } 895 }
848 896
849 if (adapter->num_tx_queues > adapter->num_rx_queues) 897 if (adapter->num_tx_queues > adapter->num_rx_queues)
850 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 898 temp_ring = vmalloc(adapter->num_tx_queues *
899 sizeof(struct igb_ring));
851 else 900 else
852 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 901 temp_ring = vmalloc(adapter->num_rx_queues *
902 sizeof(struct igb_ring));
853 903
854 if (!temp_ring) { 904 if (!temp_ring) {
855 err = -ENOMEM; 905 err = -ENOMEM;
@@ -858,10 +908,9 @@ static int igb_set_ringparam(struct net_device *netdev,
858 908
859 igb_down(adapter); 909 igb_down(adapter);
860 910
861 /* 911 /* We can't just free everything and then setup again,
862 * We can't just free everything and then setup again,
863 * because the ISRs in MSI-X mode get passed pointers 912 * because the ISRs in MSI-X mode get passed pointers
864 * to the tx and rx ring structs. 913 * to the Tx and Rx ring structs.
865 */ 914 */
866 if (new_tx_count != adapter->tx_ring_count) { 915 if (new_tx_count != adapter->tx_ring_count) {
867 for (i = 0; i < adapter->num_tx_queues; i++) { 916 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -1199,6 +1248,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1199 1248
1200 switch (adapter->hw.mac.type) { 1249 switch (adapter->hw.mac.type) {
1201 case e1000_i350: 1250 case e1000_i350:
1251 case e1000_i354:
1202 test = reg_test_i350; 1252 test = reg_test_i350;
1203 toggle = 0x7FEFF3FF; 1253 toggle = 0x7FEFF3FF;
1204 break; 1254 break;
@@ -1361,6 +1411,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1361 ics_mask = 0x77DCFED5; 1411 ics_mask = 0x77DCFED5;
1362 break; 1412 break;
1363 case e1000_i350: 1413 case e1000_i350:
1414 case e1000_i354:
1364 case e1000_i210: 1415 case e1000_i210:
1365 case e1000_i211: 1416 case e1000_i211:
1366 ics_mask = 0x77DCFED5; 1417 ics_mask = 0x77DCFED5;
@@ -1627,17 +1678,12 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1627 wr32(E1000_CONNSW, reg); 1678 wr32(E1000_CONNSW, reg);
1628 1679
1629 /* Unset sigdetect for SERDES loopback on 1680 /* Unset sigdetect for SERDES loopback on
1630 * 82580 and i350 devices. 1681 * 82580 and newer devices.
1631 */ 1682 */
1632 switch (hw->mac.type) { 1683 if (hw->mac.type >= e1000_82580) {
1633 case e1000_82580:
1634 case e1000_i350:
1635 reg = rd32(E1000_PCS_CFG0); 1684 reg = rd32(E1000_PCS_CFG0);
1636 reg |= E1000_PCS_CFG_IGN_SD; 1685 reg |= E1000_PCS_CFG_IGN_SD;
1637 wr32(E1000_PCS_CFG0, reg); 1686 wr32(E1000_PCS_CFG0, reg);
1638 break;
1639 default:
1640 break;
1641 } 1687 }
1642 1688
1643 /* Set PCS register for forced speed */ 1689 /* Set PCS register for forced speed */
@@ -1723,8 +1769,8 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1723} 1769}
1724 1770
1725static int igb_clean_test_rings(struct igb_ring *rx_ring, 1771static int igb_clean_test_rings(struct igb_ring *rx_ring,
1726 struct igb_ring *tx_ring, 1772 struct igb_ring *tx_ring,
1727 unsigned int size) 1773 unsigned int size)
1728{ 1774{
1729 union e1000_adv_rx_desc *rx_desc; 1775 union e1000_adv_rx_desc *rx_desc;
1730 struct igb_rx_buffer *rx_buffer_info; 1776 struct igb_rx_buffer *rx_buffer_info;
@@ -1737,7 +1783,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1737 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1783 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1738 1784
1739 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 1785 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
1740 /* check rx buffer */ 1786 /* check Rx buffer */
1741 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1787 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1742 1788
1743 /* sync Rx buffer for CPU read */ 1789 /* sync Rx buffer for CPU read */
@@ -1756,11 +1802,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1756 IGB_RX_BUFSZ, 1802 IGB_RX_BUFSZ,
1757 DMA_FROM_DEVICE); 1803 DMA_FROM_DEVICE);
1758 1804
1759 /* unmap buffer on tx side */ 1805 /* unmap buffer on Tx side */
1760 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1806 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1761 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1807 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1762 1808
1763 /* increment rx/tx next to clean counters */ 1809 /* increment Rx/Tx next to clean counters */
1764 rx_ntc++; 1810 rx_ntc++;
1765 if (rx_ntc == rx_ring->count) 1811 if (rx_ntc == rx_ring->count)
1766 rx_ntc = 0; 1812 rx_ntc = 0;
@@ -1801,8 +1847,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1801 igb_create_lbtest_frame(skb, size); 1847 igb_create_lbtest_frame(skb, size);
1802 skb_put(skb, size); 1848 skb_put(skb, size);
1803 1849
1804 /* 1850 /* Calculate the loop count based on the largest descriptor ring
1805 * Calculate the loop count based on the largest descriptor ring
1806 * The idea is to wrap the largest ring a number of times using 64 1851 * The idea is to wrap the largest ring a number of times using 64
1807 * send/receive pairs during each loop 1852 * send/receive pairs during each loop
1808 */ 1853 */
@@ -1829,7 +1874,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1829 break; 1874 break;
1830 } 1875 }
1831 1876
1832 /* allow 200 milliseconds for packets to go from tx to rx */ 1877 /* allow 200 milliseconds for packets to go from Tx to Rx */
1833 msleep(200); 1878 msleep(200);
1834 1879
1835 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); 1880 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
@@ -1848,13 +1893,21 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1848static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) 1893static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1849{ 1894{
1850 /* PHY loopback cannot be performed if SoL/IDER 1895 /* PHY loopback cannot be performed if SoL/IDER
1851 * sessions are active */ 1896 * sessions are active
1897 */
1852 if (igb_check_reset_block(&adapter->hw)) { 1898 if (igb_check_reset_block(&adapter->hw)) {
1853 dev_err(&adapter->pdev->dev, 1899 dev_err(&adapter->pdev->dev,
1854 "Cannot do PHY loopback test when SoL/IDER is active.\n"); 1900 "Cannot do PHY loopback test when SoL/IDER is active.\n");
1855 *data = 0; 1901 *data = 0;
1856 goto out; 1902 goto out;
1857 } 1903 }
1904
1905 if (adapter->hw.mac.type == e1000_i354) {
1906 dev_info(&adapter->pdev->dev,
1907 "Loopback test not supported on i354.\n");
1908 *data = 0;
1909 goto out;
1910 }
1858 *data = igb_setup_desc_rings(adapter); 1911 *data = igb_setup_desc_rings(adapter);
1859 if (*data) 1912 if (*data)
1860 goto out; 1913 goto out;
@@ -1879,7 +1932,8 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1879 hw->mac.serdes_has_link = false; 1932 hw->mac.serdes_has_link = false;
1880 1933
1881 /* On some blade server designs, link establishment 1934 /* On some blade server designs, link establishment
1882 * could take as long as 2-3 minutes */ 1935 * could take as long as 2-3 minutes
1936 */
1883 do { 1937 do {
1884 hw->mac.ops.check_for_link(&adapter->hw); 1938 hw->mac.ops.check_for_link(&adapter->hw);
1885 if (hw->mac.serdes_has_link) 1939 if (hw->mac.serdes_has_link)
@@ -1922,7 +1976,8 @@ static void igb_diag_test(struct net_device *netdev,
1922 igb_power_up_link(adapter); 1976 igb_power_up_link(adapter);
1923 1977
1924 /* Link test performed before hardware reset so autoneg doesn't 1978 /* Link test performed before hardware reset so autoneg doesn't
1925 * interfere with test result */ 1979 * interfere with test result
1980 */
1926 if (igb_link_test(adapter, &data[4])) 1981 if (igb_link_test(adapter, &data[4]))
1927 eth_test->flags |= ETH_TEST_FL_FAILED; 1982 eth_test->flags |= ETH_TEST_FL_FAILED;
1928 1983
@@ -1987,8 +2042,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1987 struct igb_adapter *adapter = netdev_priv(netdev); 2042 struct igb_adapter *adapter = netdev_priv(netdev);
1988 2043
1989 wol->supported = WAKE_UCAST | WAKE_MCAST | 2044 wol->supported = WAKE_UCAST | WAKE_MCAST |
1990 WAKE_BCAST | WAKE_MAGIC | 2045 WAKE_BCAST | WAKE_MAGIC |
1991 WAKE_PHY; 2046 WAKE_PHY;
1992 wol->wolopts = 0; 2047 wol->wolopts = 0;
1993 2048
1994 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) 2049 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
@@ -2263,7 +2318,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2263 sprintf(p, "rx_queue_%u_alloc_failed", i); 2318 sprintf(p, "rx_queue_%u_alloc_failed", i);
2264 p += ETH_GSTRING_LEN; 2319 p += ETH_GSTRING_LEN;
2265 } 2320 }
2266/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2321 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2267 break; 2322 break;
2268 } 2323 }
2269} 2324}
@@ -2283,6 +2338,7 @@ static int igb_get_ts_info(struct net_device *dev,
2283 case e1000_82576: 2338 case e1000_82576:
2284 case e1000_82580: 2339 case e1000_82580:
2285 case e1000_i350: 2340 case e1000_i350:
2341 case e1000_i354:
2286 case e1000_i210: 2342 case e1000_i210:
2287 case e1000_i211: 2343 case e1000_i211:
2288 info->so_timestamping = 2344 info->so_timestamping =
@@ -2362,7 +2418,7 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2362} 2418}
2363 2419
2364static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2420static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2365 u32 *rule_locs) 2421 u32 *rule_locs)
2366{ 2422{
2367 struct igb_adapter *adapter = netdev_priv(dev); 2423 struct igb_adapter *adapter = netdev_priv(dev);
2368 int ret = -EOPNOTSUPP; 2424 int ret = -EOPNOTSUPP;
@@ -2506,7 +2562,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2506{ 2562{
2507 struct igb_adapter *adapter = netdev_priv(netdev); 2563 struct igb_adapter *adapter = netdev_priv(netdev);
2508 struct e1000_hw *hw = &adapter->hw; 2564 struct e1000_hw *hw = &adapter->hw;
2509 u32 ipcnfg, eeer; 2565 u32 ipcnfg, eeer, ret_val;
2566 u16 phy_data;
2510 2567
2511 if ((hw->mac.type < e1000_i350) || 2568 if ((hw->mac.type < e1000_i350) ||
2512 (hw->phy.media_type != e1000_media_type_copper)) 2569 (hw->phy.media_type != e1000_media_type_copper))
@@ -2525,6 +2582,32 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2525 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN) 2582 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
2526 edata->advertised |= ADVERTISED_100baseT_Full; 2583 edata->advertised |= ADVERTISED_100baseT_Full;
2527 2584
2585 /* EEE Link Partner Advertised */
2586 switch (hw->mac.type) {
2587 case e1000_i350:
2588 ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
2589 &phy_data);
2590 if (ret_val)
2591 return -ENODATA;
2592
2593 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2594
2595 break;
2596 case e1000_i210:
2597 case e1000_i211:
2598 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
2599 E1000_EEE_LP_ADV_DEV_I210,
2600 &phy_data);
2601 if (ret_val)
2602 return -ENODATA;
2603
2604 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2605
2606 break;
2607 default:
2608 break;
2609 }
2610
2528 if (eeer & E1000_EEER_EEE_NEG) 2611 if (eeer & E1000_EEER_EEE_NEG)
2529 edata->eee_active = true; 2612 edata->eee_active = true;
2530 2613
@@ -2600,6 +2683,85 @@ static int igb_set_eee(struct net_device *netdev,
2600 return 0; 2683 return 0;
2601} 2684}
2602 2685
2686static int igb_get_module_info(struct net_device *netdev,
2687 struct ethtool_modinfo *modinfo)
2688{
2689 struct igb_adapter *adapter = netdev_priv(netdev);
2690 struct e1000_hw *hw = &adapter->hw;
2691 u32 status = E1000_SUCCESS;
2692 u16 sff8472_rev, addr_mode;
2693 bool page_swap = false;
2694
2695 if ((hw->phy.media_type == e1000_media_type_copper) ||
2696 (hw->phy.media_type == e1000_media_type_unknown))
2697 return -EOPNOTSUPP;
2698
2699 /* Check whether we support SFF-8472 or not */
2700 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
2701 if (status != E1000_SUCCESS)
2702 return -EIO;
2703
2704 /* addressing mode is not supported */
2705 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
2706 if (status != E1000_SUCCESS)
2707 return -EIO;
2708
2709 /* addressing mode is not supported */
2710 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
2711 hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
2712 page_swap = true;
2713 }
2714
2715 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
2716 /* We have an SFP, but it does not support SFF-8472 */
2717 modinfo->type = ETH_MODULE_SFF_8079;
2718 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2719 } else {
2720 /* We have an SFP which supports a revision of SFF-8472 */
2721 modinfo->type = ETH_MODULE_SFF_8472;
2722 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2723 }
2724
2725 return 0;
2726}
2727
2728static int igb_get_module_eeprom(struct net_device *netdev,
2729 struct ethtool_eeprom *ee, u8 *data)
2730{
2731 struct igb_adapter *adapter = netdev_priv(netdev);
2732 struct e1000_hw *hw = &adapter->hw;
2733 u32 status = E1000_SUCCESS;
2734 u16 *dataword;
2735 u16 first_word, last_word;
2736 int i = 0;
2737
2738 if (ee->len == 0)
2739 return -EINVAL;
2740
2741 first_word = ee->offset >> 1;
2742 last_word = (ee->offset + ee->len - 1) >> 1;
2743
2744 dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
2745 GFP_KERNEL);
2746 if (!dataword)
2747 return -ENOMEM;
2748
2749 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2750 for (i = 0; i < last_word - first_word + 1; i++) {
2751 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
2752 if (status != E1000_SUCCESS)
2753 /* Error occurred while reading module */
2754 return -EIO;
2755
2756 be16_to_cpus(&dataword[i]);
2757 }
2758
2759 memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
2760 kfree(dataword);
2761
2762 return 0;
2763}
2764
2603static int igb_ethtool_begin(struct net_device *netdev) 2765static int igb_ethtool_begin(struct net_device *netdev)
2604{ 2766{
2605 struct igb_adapter *adapter = netdev_priv(netdev); 2767 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2614,36 +2776,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
2614} 2776}
2615 2777
2616static const struct ethtool_ops igb_ethtool_ops = { 2778static const struct ethtool_ops igb_ethtool_ops = {
2617 .get_settings = igb_get_settings, 2779 .get_settings = igb_get_settings,
2618 .set_settings = igb_set_settings, 2780 .set_settings = igb_set_settings,
2619 .get_drvinfo = igb_get_drvinfo, 2781 .get_drvinfo = igb_get_drvinfo,
2620 .get_regs_len = igb_get_regs_len, 2782 .get_regs_len = igb_get_regs_len,
2621 .get_regs = igb_get_regs, 2783 .get_regs = igb_get_regs,
2622 .get_wol = igb_get_wol, 2784 .get_wol = igb_get_wol,
2623 .set_wol = igb_set_wol, 2785 .set_wol = igb_set_wol,
2624 .get_msglevel = igb_get_msglevel, 2786 .get_msglevel = igb_get_msglevel,
2625 .set_msglevel = igb_set_msglevel, 2787 .set_msglevel = igb_set_msglevel,
2626 .nway_reset = igb_nway_reset, 2788 .nway_reset = igb_nway_reset,
2627 .get_link = igb_get_link, 2789 .get_link = igb_get_link,
2628 .get_eeprom_len = igb_get_eeprom_len, 2790 .get_eeprom_len = igb_get_eeprom_len,
2629 .get_eeprom = igb_get_eeprom, 2791 .get_eeprom = igb_get_eeprom,
2630 .set_eeprom = igb_set_eeprom, 2792 .set_eeprom = igb_set_eeprom,
2631 .get_ringparam = igb_get_ringparam, 2793 .get_ringparam = igb_get_ringparam,
2632 .set_ringparam = igb_set_ringparam, 2794 .set_ringparam = igb_set_ringparam,
2633 .get_pauseparam = igb_get_pauseparam, 2795 .get_pauseparam = igb_get_pauseparam,
2634 .set_pauseparam = igb_set_pauseparam, 2796 .set_pauseparam = igb_set_pauseparam,
2635 .self_test = igb_diag_test, 2797 .self_test = igb_diag_test,
2636 .get_strings = igb_get_strings, 2798 .get_strings = igb_get_strings,
2637 .set_phys_id = igb_set_phys_id, 2799 .set_phys_id = igb_set_phys_id,
2638 .get_sset_count = igb_get_sset_count, 2800 .get_sset_count = igb_get_sset_count,
2639 .get_ethtool_stats = igb_get_ethtool_stats, 2801 .get_ethtool_stats = igb_get_ethtool_stats,
2640 .get_coalesce = igb_get_coalesce, 2802 .get_coalesce = igb_get_coalesce,
2641 .set_coalesce = igb_set_coalesce, 2803 .set_coalesce = igb_set_coalesce,
2642 .get_ts_info = igb_get_ts_info, 2804 .get_ts_info = igb_get_ts_info,
2643 .get_rxnfc = igb_get_rxnfc, 2805 .get_rxnfc = igb_get_rxnfc,
2644 .set_rxnfc = igb_set_rxnfc, 2806 .set_rxnfc = igb_set_rxnfc,
2645 .get_eee = igb_get_eee, 2807 .get_eee = igb_get_eee,
2646 .set_eee = igb_set_eee, 2808 .set_eee = igb_set_eee,
2809 .get_module_info = igb_get_module_info,
2810 .get_module_eeprom = igb_get_module_eeprom,
2647 .begin = igb_ethtool_begin, 2811 .begin = igb_ethtool_begin,
2648 .complete = igb_ethtool_complete, 2812 .complete = igb_ethtool_complete,
2649}; 2813};
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0478a1abe541..58f1ce967aeb 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -45,21 +45,21 @@ static struct i2c_board_info i350_sensor_info = {
45 45
46/* hwmon callback functions */ 46/* hwmon callback functions */
47static ssize_t igb_hwmon_show_location(struct device *dev, 47static ssize_t igb_hwmon_show_location(struct device *dev,
48 struct device_attribute *attr, 48 struct device_attribute *attr,
49 char *buf) 49 char *buf)
50{ 50{
51 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 51 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
52 dev_attr); 52 dev_attr);
53 return sprintf(buf, "loc%u\n", 53 return sprintf(buf, "loc%u\n",
54 igb_attr->sensor->location); 54 igb_attr->sensor->location);
55} 55}
56 56
57static ssize_t igb_hwmon_show_temp(struct device *dev, 57static ssize_t igb_hwmon_show_temp(struct device *dev,
58 struct device_attribute *attr, 58 struct device_attribute *attr,
59 char *buf) 59 char *buf)
60{ 60{
61 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 61 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
62 dev_attr); 62 dev_attr);
63 unsigned int value; 63 unsigned int value;
64 64
65 /* reset the temp field */ 65 /* reset the temp field */
@@ -74,11 +74,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev,
74} 74}
75 75
76static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, 76static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
77 struct device_attribute *attr, 77 struct device_attribute *attr,
78 char *buf) 78 char *buf)
79{ 79{
80 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 80 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
81 dev_attr); 81 dev_attr);
82 unsigned int value = igb_attr->sensor->caution_thresh; 82 unsigned int value = igb_attr->sensor->caution_thresh;
83 83
84 /* display millidegree */ 84 /* display millidegree */
@@ -88,11 +88,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
88} 88}
89 89
90static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, 90static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
91 struct device_attribute *attr, 91 struct device_attribute *attr,
92 char *buf) 92 char *buf)
93{ 93{
94 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 94 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
95 dev_attr); 95 dev_attr);
96 unsigned int value = igb_attr->sensor->max_op_thresh; 96 unsigned int value = igb_attr->sensor->max_op_thresh;
97 97
98 /* display millidegree */ 98 /* display millidegree */
@@ -111,7 +111,8 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
111 * the data structures we need to get the data to display. 111 * the data structures we need to get the data to display.
112 */ 112 */
113static int igb_add_hwmon_attr(struct igb_adapter *adapter, 113static int igb_add_hwmon_attr(struct igb_adapter *adapter,
114 unsigned int offset, int type) { 114 unsigned int offset, int type)
115{
115 int rc; 116 int rc;
116 unsigned int n_attr; 117 unsigned int n_attr;
117 struct hwmon_attr *igb_attr; 118 struct hwmon_attr *igb_attr;
@@ -217,7 +218,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
217 */ 218 */
218 n_attrs = E1000_MAX_SENSORS * 4; 219 n_attrs = E1000_MAX_SENSORS * 4;
219 igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), 220 igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
220 GFP_KERNEL); 221 GFP_KERNEL);
221 if (!igb_hwmon->hwmon_list) { 222 if (!igb_hwmon->hwmon_list) {
222 rc = -ENOMEM; 223 rc = -ENOMEM;
223 goto err; 224 goto err;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 64f75291e3a5..64cbe0dfe043 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,9 +60,9 @@
60#include <linux/i2c.h> 60#include <linux/i2c.h>
61#include "igb.h" 61#include "igb.h"
62 62
63#define MAJ 4 63#define MAJ 5
64#define MIN 1 64#define MIN 0
65#define BUILD 2 65#define BUILD 3
66#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 66#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
67__stringify(BUILD) "-k" 67__stringify(BUILD) "-k"
68char igb_driver_name[] = "igb"; 68char igb_driver_name[] = "igb";
@@ -77,6 +77,9 @@ static const struct e1000_info *igb_info_tbl[] = {
77}; 77};
78 78
79static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 79static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, 83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, 84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, 85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
@@ -156,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
156static void igb_tx_timeout(struct net_device *); 159static void igb_tx_timeout(struct net_device *);
157static void igb_reset_task(struct work_struct *); 160static void igb_reset_task(struct work_struct *);
158static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); 161static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
159static int igb_vlan_rx_add_vid(struct net_device *, u16); 162static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
160static int igb_vlan_rx_kill_vid(struct net_device *, u16); 163static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
161static void igb_restore_vlan(struct igb_adapter *); 164static void igb_restore_vlan(struct igb_adapter *);
162static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); 165static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
163static void igb_ping_all_vfs(struct igb_adapter *); 166static void igb_ping_all_vfs(struct igb_adapter *);
@@ -169,13 +172,14 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
169static int igb_ndo_set_vf_vlan(struct net_device *netdev, 172static int igb_ndo_set_vf_vlan(struct net_device *netdev,
170 int vf, u16 vlan, u8 qos); 173 int vf, u16 vlan, u8 qos);
171static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 174static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
175static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
176 bool setting);
172static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 177static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
173 struct ifla_vf_info *ivi); 178 struct ifla_vf_info *ivi);
174static void igb_check_vf_rate_limit(struct igb_adapter *); 179static void igb_check_vf_rate_limit(struct igb_adapter *);
175 180
176#ifdef CONFIG_PCI_IOV 181#ifdef CONFIG_PCI_IOV
177static int igb_vf_configure(struct igb_adapter *adapter, int vf); 182static int igb_vf_configure(struct igb_adapter *adapter, int vf);
178static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
179#endif 183#endif
180 184
181#ifdef CONFIG_PM 185#ifdef CONFIG_PM
@@ -292,9 +296,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = {
292 {} 296 {}
293}; 297};
294 298
295/* 299/* igb_regdump - register printout routine */
296 * igb_regdump - register printout routine
297 */
298static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) 300static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
299{ 301{
300 int n = 0; 302 int n = 0;
@@ -360,9 +362,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
360 regs[2], regs[3]); 362 regs[2], regs[3]);
361} 363}
362 364
363/* 365/* igb_dump - Print registers, Tx-rings and Rx-rings */
364 * igb_dump - Print registers, tx-rings and rx-rings
365 */
366static void igb_dump(struct igb_adapter *adapter) 366static void igb_dump(struct igb_adapter *adapter)
367{ 367{
368 struct net_device *netdev = adapter->netdev; 368 struct net_device *netdev = adapter->netdev;
@@ -569,12 +569,13 @@ exit:
569 return; 569 return;
570} 570}
571 571
572/* igb_get_i2c_data - Reads the I2C SDA data bit 572/**
573 * igb_get_i2c_data - Reads the I2C SDA data bit
573 * @hw: pointer to hardware structure 574 * @hw: pointer to hardware structure
574 * @i2cctl: Current value of I2CCTL register 575 * @i2cctl: Current value of I2CCTL register
575 * 576 *
576 * Returns the I2C data bit value 577 * Returns the I2C data bit value
577 */ 578 **/
578static int igb_get_i2c_data(void *data) 579static int igb_get_i2c_data(void *data)
579{ 580{
580 struct igb_adapter *adapter = (struct igb_adapter *)data; 581 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -584,12 +585,13 @@ static int igb_get_i2c_data(void *data)
584 return ((i2cctl & E1000_I2C_DATA_IN) != 0); 585 return ((i2cctl & E1000_I2C_DATA_IN) != 0);
585} 586}
586 587
587/* igb_set_i2c_data - Sets the I2C data bit 588/**
589 * igb_set_i2c_data - Sets the I2C data bit
588 * @data: pointer to hardware structure 590 * @data: pointer to hardware structure
589 * @state: I2C data value (0 or 1) to set 591 * @state: I2C data value (0 or 1) to set
590 * 592 *
591 * Sets the I2C data bit 593 * Sets the I2C data bit
592 */ 594 **/
593static void igb_set_i2c_data(void *data, int state) 595static void igb_set_i2c_data(void *data, int state)
594{ 596{
595 struct igb_adapter *adapter = (struct igb_adapter *)data; 597 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -608,12 +610,13 @@ static void igb_set_i2c_data(void *data, int state)
608 610
609} 611}
610 612
611/* igb_set_i2c_clk - Sets the I2C SCL clock 613/**
614 * igb_set_i2c_clk - Sets the I2C SCL clock
612 * @data: pointer to hardware structure 615 * @data: pointer to hardware structure
613 * @state: state to set clock 616 * @state: state to set clock
614 * 617 *
615 * Sets the I2C clock line to state 618 * Sets the I2C clock line to state
616 */ 619 **/
617static void igb_set_i2c_clk(void *data, int state) 620static void igb_set_i2c_clk(void *data, int state)
618{ 621{
619 struct igb_adapter *adapter = (struct igb_adapter *)data; 622 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -631,11 +634,12 @@ static void igb_set_i2c_clk(void *data, int state)
631 wrfl(); 634 wrfl();
632} 635}
633 636
634/* igb_get_i2c_clk - Gets the I2C SCL clock state 637/**
638 * igb_get_i2c_clk - Gets the I2C SCL clock state
635 * @data: pointer to hardware structure 639 * @data: pointer to hardware structure
636 * 640 *
637 * Gets the I2C clock state 641 * Gets the I2C clock state
638 */ 642 **/
639static int igb_get_i2c_clk(void *data) 643static int igb_get_i2c_clk(void *data)
640{ 644{
641 struct igb_adapter *adapter = (struct igb_adapter *)data; 645 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -655,8 +659,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = {
655}; 659};
656 660
657/** 661/**
658 * igb_get_hw_dev - return device 662 * igb_get_hw_dev - return device
659 * used by hardware layer to print debugging information 663 * @hw: pointer to hardware structure
664 *
665 * used by hardware layer to print debugging information
660 **/ 666 **/
661struct net_device *igb_get_hw_dev(struct e1000_hw *hw) 667struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
662{ 668{
@@ -665,10 +671,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
665} 671}
666 672
667/** 673/**
668 * igb_init_module - Driver Registration Routine 674 * igb_init_module - Driver Registration Routine
669 * 675 *
670 * igb_init_module is the first routine called when the driver is 676 * igb_init_module is the first routine called when the driver is
671 * loaded. All it does is register with the PCI subsystem. 677 * loaded. All it does is register with the PCI subsystem.
672 **/ 678 **/
673static int __init igb_init_module(void) 679static int __init igb_init_module(void)
674{ 680{
@@ -688,10 +694,10 @@ static int __init igb_init_module(void)
688module_init(igb_init_module); 694module_init(igb_init_module);
689 695
690/** 696/**
691 * igb_exit_module - Driver Exit Cleanup Routine 697 * igb_exit_module - Driver Exit Cleanup Routine
692 * 698 *
693 * igb_exit_module is called just before the driver is removed 699 * igb_exit_module is called just before the driver is removed
694 * from memory. 700 * from memory.
695 **/ 701 **/
696static void __exit igb_exit_module(void) 702static void __exit igb_exit_module(void)
697{ 703{
@@ -705,11 +711,11 @@ module_exit(igb_exit_module);
705 711
706#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) 712#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
707/** 713/**
708 * igb_cache_ring_register - Descriptor ring to register mapping 714 * igb_cache_ring_register - Descriptor ring to register mapping
709 * @adapter: board private structure to initialize 715 * @adapter: board private structure to initialize
710 * 716 *
711 * Once we know the feature-set enabled for the device, we'll cache 717 * Once we know the feature-set enabled for the device, we'll cache
712 * the register offset the descriptor ring is assigned to. 718 * the register offset the descriptor ring is assigned to.
713 **/ 719 **/
714static void igb_cache_ring_register(struct igb_adapter *adapter) 720static void igb_cache_ring_register(struct igb_adapter *adapter)
715{ 721{
@@ -726,11 +732,12 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
726 if (adapter->vfs_allocated_count) { 732 if (adapter->vfs_allocated_count) {
727 for (; i < adapter->rss_queues; i++) 733 for (; i < adapter->rss_queues; i++)
728 adapter->rx_ring[i]->reg_idx = rbase_offset + 734 adapter->rx_ring[i]->reg_idx = rbase_offset +
729 Q_IDX_82576(i); 735 Q_IDX_82576(i);
730 } 736 }
731 case e1000_82575: 737 case e1000_82575:
732 case e1000_82580: 738 case e1000_82580:
733 case e1000_i350: 739 case e1000_i350:
740 case e1000_i354:
734 case e1000_i210: 741 case e1000_i210:
735 case e1000_i211: 742 case e1000_i211:
736 default: 743 default:
@@ -785,9 +792,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
785 switch (hw->mac.type) { 792 switch (hw->mac.type) {
786 case e1000_82575: 793 case e1000_82575:
787 /* The 82575 assigns vectors using a bitmask, which matches the 794 /* The 82575 assigns vectors using a bitmask, which matches the
788 bitmask for the EICR/EIMS/EIMC registers. To assign one 795 * bitmask for the EICR/EIMS/EIMC registers. To assign one
789 or more queues to a vector, we write the appropriate bits 796 * or more queues to a vector, we write the appropriate bits
790 into the MSIXBM register for that vector. */ 797 * into the MSIXBM register for that vector.
798 */
791 if (rx_queue > IGB_N0_QUEUE) 799 if (rx_queue > IGB_N0_QUEUE)
792 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 800 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
793 if (tx_queue > IGB_N0_QUEUE) 801 if (tx_queue > IGB_N0_QUEUE)
@@ -798,8 +806,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
798 q_vector->eims_value = msixbm; 806 q_vector->eims_value = msixbm;
799 break; 807 break;
800 case e1000_82576: 808 case e1000_82576:
801 /* 809 /* 82576 uses a table that essentially consists of 2 columns
802 * 82576 uses a table that essentially consists of 2 columns
803 * with 8 rows. The ordering is column-major so we use the 810 * with 8 rows. The ordering is column-major so we use the
804 * lower 3 bits as the row index, and the 4th bit as the 811 * lower 3 bits as the row index, and the 4th bit as the
805 * column offset. 812 * column offset.
@@ -816,10 +823,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
816 break; 823 break;
817 case e1000_82580: 824 case e1000_82580:
818 case e1000_i350: 825 case e1000_i350:
826 case e1000_i354:
819 case e1000_i210: 827 case e1000_i210:
820 case e1000_i211: 828 case e1000_i211:
821 /* 829 /* On 82580 and newer adapters the scheme is similar to 82576
822 * On 82580 and newer adapters the scheme is similar to 82576
823 * however instead of ordering column-major we have things 830 * however instead of ordering column-major we have things
824 * ordered row-major. So we traverse the table by using 831 * ordered row-major. So we traverse the table by using
825 * bit 0 as the column offset, and the remaining bits as the 832 * bit 0 as the column offset, and the remaining bits as the
@@ -848,10 +855,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
848} 855}
849 856
850/** 857/**
851 * igb_configure_msix - Configure MSI-X hardware 858 * igb_configure_msix - Configure MSI-X hardware
859 * @adapter: board private structure to initialize
852 * 860 *
853 * igb_configure_msix sets up the hardware to properly 861 * igb_configure_msix sets up the hardware to properly
854 * generate MSI-X interrupts. 862 * generate MSI-X interrupts.
855 **/ 863 **/
856static void igb_configure_msix(struct igb_adapter *adapter) 864static void igb_configure_msix(struct igb_adapter *adapter)
857{ 865{
@@ -875,8 +883,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
875 wr32(E1000_CTRL_EXT, tmp); 883 wr32(E1000_CTRL_EXT, tmp);
876 884
877 /* enable msix_other interrupt */ 885 /* enable msix_other interrupt */
878 array_wr32(E1000_MSIXBM(0), vector++, 886 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
879 E1000_EIMS_OTHER);
880 adapter->eims_other = E1000_EIMS_OTHER; 887 adapter->eims_other = E1000_EIMS_OTHER;
881 888
882 break; 889 break;
@@ -884,13 +891,15 @@ static void igb_configure_msix(struct igb_adapter *adapter)
884 case e1000_82576: 891 case e1000_82576:
885 case e1000_82580: 892 case e1000_82580:
886 case e1000_i350: 893 case e1000_i350:
894 case e1000_i354:
887 case e1000_i210: 895 case e1000_i210:
888 case e1000_i211: 896 case e1000_i211:
889 /* Turn on MSI-X capability first, or our settings 897 /* Turn on MSI-X capability first, or our settings
890 * won't stick. And it will take days to debug. */ 898 * won't stick. And it will take days to debug.
899 */
891 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 900 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
892 E1000_GPIE_PBA | E1000_GPIE_EIAME | 901 E1000_GPIE_PBA | E1000_GPIE_EIAME |
893 E1000_GPIE_NSICR); 902 E1000_GPIE_NSICR);
894 903
895 /* enable msix_other interrupt */ 904 /* enable msix_other interrupt */
896 adapter->eims_other = 1 << vector; 905 adapter->eims_other = 1 << vector;
@@ -912,10 +921,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
912} 921}
913 922
914/** 923/**
915 * igb_request_msix - Initialize MSI-X interrupts 924 * igb_request_msix - Initialize MSI-X interrupts
925 * @adapter: board private structure to initialize
916 * 926 *
917 * igb_request_msix allocates MSI-X vectors and requests interrupts from the 927 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
918 * kernel. 928 * kernel.
919 **/ 929 **/
920static int igb_request_msix(struct igb_adapter *adapter) 930static int igb_request_msix(struct igb_adapter *adapter)
921{ 931{
@@ -924,7 +934,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
924 int i, err = 0, vector = 0, free_vector = 0; 934 int i, err = 0, vector = 0, free_vector = 0;
925 935
926 err = request_irq(adapter->msix_entries[vector].vector, 936 err = request_irq(adapter->msix_entries[vector].vector,
927 igb_msix_other, 0, netdev->name, adapter); 937 igb_msix_other, 0, netdev->name, adapter);
928 if (err) 938 if (err)
929 goto err_out; 939 goto err_out;
930 940
@@ -948,8 +958,8 @@ static int igb_request_msix(struct igb_adapter *adapter)
948 sprintf(q_vector->name, "%s-unused", netdev->name); 958 sprintf(q_vector->name, "%s-unused", netdev->name);
949 959
950 err = request_irq(adapter->msix_entries[vector].vector, 960 err = request_irq(adapter->msix_entries[vector].vector,
951 igb_msix_ring, 0, q_vector->name, 961 igb_msix_ring, 0, q_vector->name,
952 q_vector); 962 q_vector);
953 if (err) 963 if (err)
954 goto err_free; 964 goto err_free;
955 } 965 }
@@ -982,13 +992,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
982} 992}
983 993
984/** 994/**
985 * igb_free_q_vector - Free memory allocated for specific interrupt vector 995 * igb_free_q_vector - Free memory allocated for specific interrupt vector
986 * @adapter: board private structure to initialize 996 * @adapter: board private structure to initialize
987 * @v_idx: Index of vector to be freed 997 * @v_idx: Index of vector to be freed
988 * 998 *
989 * This function frees the memory allocated to the q_vector. In addition if 999 * This function frees the memory allocated to the q_vector. In addition if
990 * NAPI is enabled it will delete any references to the NAPI struct prior 1000 * NAPI is enabled it will delete any references to the NAPI struct prior
991 * to freeing the q_vector. 1001 * to freeing the q_vector.
992 **/ 1002 **/
993static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) 1003static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
994{ 1004{
@@ -1003,20 +1013,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1003 adapter->q_vector[v_idx] = NULL; 1013 adapter->q_vector[v_idx] = NULL;
1004 netif_napi_del(&q_vector->napi); 1014 netif_napi_del(&q_vector->napi);
1005 1015
1006 /* 1016 /* ixgbe_get_stats64() might access the rings on this vector,
1007 * ixgbe_get_stats64() might access the rings on this vector,
1008 * we must wait a grace period before freeing it. 1017 * we must wait a grace period before freeing it.
1009 */ 1018 */
1010 kfree_rcu(q_vector, rcu); 1019 kfree_rcu(q_vector, rcu);
1011} 1020}
1012 1021
1013/** 1022/**
1014 * igb_free_q_vectors - Free memory allocated for interrupt vectors 1023 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1015 * @adapter: board private structure to initialize 1024 * @adapter: board private structure to initialize
1016 * 1025 *
1017 * This function frees the memory allocated to the q_vectors. In addition if 1026 * This function frees the memory allocated to the q_vectors. In addition if
1018 * NAPI is enabled it will delete any references to the NAPI struct prior 1027 * NAPI is enabled it will delete any references to the NAPI struct prior
1019 * to freeing the q_vector. 1028 * to freeing the q_vector.
1020 **/ 1029 **/
1021static void igb_free_q_vectors(struct igb_adapter *adapter) 1030static void igb_free_q_vectors(struct igb_adapter *adapter)
1022{ 1031{
@@ -1031,10 +1040,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
1031} 1040}
1032 1041
1033/** 1042/**
1034 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts 1043 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1044 * @adapter: board private structure to initialize
1035 * 1045 *
1036 * This function resets the device so that it has 0 rx queues, tx queues, and 1046 * This function resets the device so that it has 0 Rx queues, Tx queues, and
1037 * MSI-X interrupts allocated. 1047 * MSI-X interrupts allocated.
1038 */ 1048 */
1039static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 1049static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1040{ 1050{
@@ -1043,10 +1053,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1043} 1053}
1044 1054
1045/** 1055/**
1046 * igb_set_interrupt_capability - set MSI or MSI-X if supported 1056 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1057 * @adapter: board private structure to initialize
1058 * @msix: boolean value of MSIX capability
1047 * 1059 *
1048 * Attempt to configure interrupts using the best available 1060 * Attempt to configure interrupts using the best available
1049 * capabilities of the hardware and kernel. 1061 * capabilities of the hardware and kernel.
1050 **/ 1062 **/
1051static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) 1063static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1052{ 1064{
@@ -1063,10 +1075,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1063 else 1075 else
1064 adapter->num_tx_queues = adapter->rss_queues; 1076 adapter->num_tx_queues = adapter->rss_queues;
1065 1077
1066 /* start with one vector for every rx queue */ 1078 /* start with one vector for every Rx queue */
1067 numvecs = adapter->num_rx_queues; 1079 numvecs = adapter->num_rx_queues;
1068 1080
1069 /* if tx handler is separate add 1 for every tx queue */ 1081 /* if Tx handler is separate add 1 for every Tx queue */
1070 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1082 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1071 numvecs += adapter->num_tx_queues; 1083 numvecs += adapter->num_tx_queues;
1072 1084
@@ -1128,16 +1140,16 @@ static void igb_add_ring(struct igb_ring *ring,
1128} 1140}
1129 1141
1130/** 1142/**
1131 * igb_alloc_q_vector - Allocate memory for a single interrupt vector 1143 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1132 * @adapter: board private structure to initialize 1144 * @adapter: board private structure to initialize
1133 * @v_count: q_vectors allocated on adapter, used for ring interleaving 1145 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1134 * @v_idx: index of vector in adapter struct 1146 * @v_idx: index of vector in adapter struct
1135 * @txr_count: total number of Tx rings to allocate 1147 * @txr_count: total number of Tx rings to allocate
1136 * @txr_idx: index of first Tx ring to allocate 1148 * @txr_idx: index of first Tx ring to allocate
1137 * @rxr_count: total number of Rx rings to allocate 1149 * @rxr_count: total number of Rx rings to allocate
1138 * @rxr_idx: index of first Rx ring to allocate 1150 * @rxr_idx: index of first Rx ring to allocate
1139 * 1151 *
1140 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1152 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1141 **/ 1153 **/
1142static int igb_alloc_q_vector(struct igb_adapter *adapter, 1154static int igb_alloc_q_vector(struct igb_adapter *adapter,
1143 int v_count, int v_idx, 1155 int v_count, int v_idx,
@@ -1179,6 +1191,17 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1179 /* initialize pointer to rings */ 1191 /* initialize pointer to rings */
1180 ring = q_vector->ring; 1192 ring = q_vector->ring;
1181 1193
1194 /* intialize ITR */
1195 if (rxr_count) {
1196 /* rx or rx/tx vector */
1197 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1198 q_vector->itr_val = adapter->rx_itr_setting;
1199 } else {
1200 /* tx only vector */
1201 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1202 q_vector->itr_val = adapter->tx_itr_setting;
1203 }
1204
1182 if (txr_count) { 1205 if (txr_count) {
1183 /* assign generic ring traits */ 1206 /* assign generic ring traits */
1184 ring->dev = &adapter->pdev->dev; 1207 ring->dev = &adapter->pdev->dev;
@@ -1221,9 +1244,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1221 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 1244 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1222 1245
1223 /* 1246 /*
1224 * On i350, i210, and i211, loopback VLAN packets 1247 * On i350, i354, i210, and i211, loopback VLAN packets
1225 * have the tag byte-swapped. 1248 * have the tag byte-swapped.
1226 * */ 1249 */
1227 if (adapter->hw.mac.type >= e1000_i350) 1250 if (adapter->hw.mac.type >= e1000_i350)
1228 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); 1251 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1229 1252
@@ -1240,11 +1263,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1240 1263
1241 1264
1242/** 1265/**
1243 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1266 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1244 * @adapter: board private structure to initialize 1267 * @adapter: board private structure to initialize
1245 * 1268 *
1246 * We allocate one q_vector per queue interrupt. If allocation fails we 1269 * We allocate one q_vector per queue interrupt. If allocation fails we
1247 * return -ENOMEM. 1270 * return -ENOMEM.
1248 **/ 1271 **/
1249static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1272static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1250{ 1273{
@@ -1298,9 +1321,11 @@ err_out:
1298} 1321}
1299 1322
1300/** 1323/**
1301 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 1324 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1325 * @adapter: board private structure to initialize
1326 * @msix: boolean value of MSIX capability
1302 * 1327 *
1303 * This function initializes the interrupts and allocates all of the queues. 1328 * This function initializes the interrupts and allocates all of the queues.
1304 **/ 1329 **/
1305static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) 1330static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1306{ 1331{
@@ -1325,10 +1350,11 @@ err_alloc_q_vectors:
1325} 1350}
1326 1351
1327/** 1352/**
1328 * igb_request_irq - initialize interrupts 1353 * igb_request_irq - initialize interrupts
1354 * @adapter: board private structure to initialize
1329 * 1355 *
1330 * Attempts to configure interrupts using the best available 1356 * Attempts to configure interrupts using the best available
1331 * capabilities of the hardware and kernel. 1357 * capabilities of the hardware and kernel.
1332 **/ 1358 **/
1333static int igb_request_irq(struct igb_adapter *adapter) 1359static int igb_request_irq(struct igb_adapter *adapter)
1334{ 1360{
@@ -1394,15 +1420,14 @@ static void igb_free_irq(struct igb_adapter *adapter)
1394} 1420}
1395 1421
1396/** 1422/**
1397 * igb_irq_disable - Mask off interrupt generation on the NIC 1423 * igb_irq_disable - Mask off interrupt generation on the NIC
1398 * @adapter: board private structure 1424 * @adapter: board private structure
1399 **/ 1425 **/
1400static void igb_irq_disable(struct igb_adapter *adapter) 1426static void igb_irq_disable(struct igb_adapter *adapter)
1401{ 1427{
1402 struct e1000_hw *hw = &adapter->hw; 1428 struct e1000_hw *hw = &adapter->hw;
1403 1429
1404 /* 1430 /* we need to be careful when disabling interrupts. The VFs are also
1405 * we need to be careful when disabling interrupts. The VFs are also
1406 * mapped into these registers and so clearing the bits can cause 1431 * mapped into these registers and so clearing the bits can cause
1407 * issues on the VF drivers so we only need to clear what we set 1432 * issues on the VF drivers so we only need to clear what we set
1408 */ 1433 */
@@ -1427,8 +1452,8 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1427} 1452}
1428 1453
1429/** 1454/**
1430 * igb_irq_enable - Enable default interrupt generation settings 1455 * igb_irq_enable - Enable default interrupt generation settings
1431 * @adapter: board private structure 1456 * @adapter: board private structure
1432 **/ 1457 **/
1433static void igb_irq_enable(struct igb_adapter *adapter) 1458static void igb_irq_enable(struct igb_adapter *adapter)
1434{ 1459{
@@ -1477,13 +1502,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
1477} 1502}
1478 1503
1479/** 1504/**
1480 * igb_release_hw_control - release control of the h/w to f/w 1505 * igb_release_hw_control - release control of the h/w to f/w
1481 * @adapter: address of board private structure 1506 * @adapter: address of board private structure
1482 *
1483 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1484 * For ASF and Pass Through versions of f/w this means that the
1485 * driver is no longer loaded.
1486 * 1507 *
1508 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1509 * For ASF and Pass Through versions of f/w this means that the
1510 * driver is no longer loaded.
1487 **/ 1511 **/
1488static void igb_release_hw_control(struct igb_adapter *adapter) 1512static void igb_release_hw_control(struct igb_adapter *adapter)
1489{ 1513{
@@ -1497,13 +1521,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
1497} 1521}
1498 1522
1499/** 1523/**
1500 * igb_get_hw_control - get control of the h/w from f/w 1524 * igb_get_hw_control - get control of the h/w from f/w
1501 * @adapter: address of board private structure 1525 * @adapter: address of board private structure
1502 *
1503 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1504 * For ASF and Pass Through versions of f/w this means that
1505 * the driver is loaded.
1506 * 1526 *
1527 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1528 * For ASF and Pass Through versions of f/w this means that
1529 * the driver is loaded.
1507 **/ 1530 **/
1508static void igb_get_hw_control(struct igb_adapter *adapter) 1531static void igb_get_hw_control(struct igb_adapter *adapter)
1509{ 1532{
@@ -1517,8 +1540,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
1517} 1540}
1518 1541
1519/** 1542/**
1520 * igb_configure - configure the hardware for RX and TX 1543 * igb_configure - configure the hardware for RX and TX
1521 * @adapter: private board structure 1544 * @adapter: private board structure
1522 **/ 1545 **/
1523static void igb_configure(struct igb_adapter *adapter) 1546static void igb_configure(struct igb_adapter *adapter)
1524{ 1547{
@@ -1541,7 +1564,8 @@ static void igb_configure(struct igb_adapter *adapter)
1541 1564
1542 /* call igb_desc_unused which always leaves 1565 /* call igb_desc_unused which always leaves
1543 * at least 1 descriptor unused to make sure 1566 * at least 1 descriptor unused to make sure
1544 * next_to_use != next_to_clean */ 1567 * next_to_use != next_to_clean
1568 */
1545 for (i = 0; i < adapter->num_rx_queues; i++) { 1569 for (i = 0; i < adapter->num_rx_queues; i++) {
1546 struct igb_ring *ring = adapter->rx_ring[i]; 1570 struct igb_ring *ring = adapter->rx_ring[i];
1547 igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); 1571 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
@@ -1549,8 +1573,8 @@ static void igb_configure(struct igb_adapter *adapter)
1549} 1573}
1550 1574
1551/** 1575/**
1552 * igb_power_up_link - Power up the phy/serdes link 1576 * igb_power_up_link - Power up the phy/serdes link
1553 * @adapter: address of board private structure 1577 * @adapter: address of board private structure
1554 **/ 1578 **/
1555void igb_power_up_link(struct igb_adapter *adapter) 1579void igb_power_up_link(struct igb_adapter *adapter)
1556{ 1580{
@@ -1563,8 +1587,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
1563} 1587}
1564 1588
1565/** 1589/**
1566 * igb_power_down_link - Power down the phy/serdes link 1590 * igb_power_down_link - Power down the phy/serdes link
1567 * @adapter: address of board private structure 1591 * @adapter: address of board private structure
1568 */ 1592 */
1569static void igb_power_down_link(struct igb_adapter *adapter) 1593static void igb_power_down_link(struct igb_adapter *adapter)
1570{ 1594{
@@ -1575,8 +1599,8 @@ static void igb_power_down_link(struct igb_adapter *adapter)
1575} 1599}
1576 1600
1577/** 1601/**
1578 * igb_up - Open the interface and prepare it to handle traffic 1602 * igb_up - Open the interface and prepare it to handle traffic
1579 * @adapter: board private structure 1603 * @adapter: board private structure
1580 **/ 1604 **/
1581int igb_up(struct igb_adapter *adapter) 1605int igb_up(struct igb_adapter *adapter)
1582{ 1606{
@@ -1624,7 +1648,8 @@ void igb_down(struct igb_adapter *adapter)
1624 int i; 1648 int i;
1625 1649
1626 /* signal that we're down so the interrupt handler does not 1650 /* signal that we're down so the interrupt handler does not
1627 * reschedule our watchdog timer */ 1651 * reschedule our watchdog timer
1652 */
1628 set_bit(__IGB_DOWN, &adapter->state); 1653 set_bit(__IGB_DOWN, &adapter->state);
1629 1654
1630 /* disable receives in the hardware */ 1655 /* disable receives in the hardware */
@@ -1694,6 +1719,7 @@ void igb_reset(struct igb_adapter *adapter)
1694 */ 1719 */
1695 switch (mac->type) { 1720 switch (mac->type) {
1696 case e1000_i350: 1721 case e1000_i350:
1722 case e1000_i354:
1697 case e1000_82580: 1723 case e1000_82580:
1698 pba = rd32(E1000_RXPBS); 1724 pba = rd32(E1000_RXPBS);
1699 pba = igb_rxpbs_adjust_82580(pba); 1725 pba = igb_rxpbs_adjust_82580(pba);
@@ -1720,14 +1746,16 @@ void igb_reset(struct igb_adapter *adapter)
1720 * rounded up to the next 1KB and expressed in KB. Likewise, 1746 * rounded up to the next 1KB and expressed in KB. Likewise,
1721 * the Rx FIFO should be large enough to accommodate at least 1747 * the Rx FIFO should be large enough to accommodate at least
1722 * one full receive packet and is similarly rounded up and 1748 * one full receive packet and is similarly rounded up and
1723 * expressed in KB. */ 1749 * expressed in KB.
1750 */
1724 pba = rd32(E1000_PBA); 1751 pba = rd32(E1000_PBA);
1725 /* upper 16 bits has Tx packet buffer allocation size in KB */ 1752 /* upper 16 bits has Tx packet buffer allocation size in KB */
1726 tx_space = pba >> 16; 1753 tx_space = pba >> 16;
1727 /* lower 16 bits has Rx packet buffer allocation size in KB */ 1754 /* lower 16 bits has Rx packet buffer allocation size in KB */
1728 pba &= 0xffff; 1755 pba &= 0xffff;
1729 /* the tx fifo also stores 16 bytes of information about the tx 1756 /* the Tx fifo also stores 16 bytes of information about the Tx
1730 * but don't include ethernet FCS because hardware appends it */ 1757 * but don't include ethernet FCS because hardware appends it
1758 */
1731 min_tx_space = (adapter->max_frame_size + 1759 min_tx_space = (adapter->max_frame_size +
1732 sizeof(union e1000_adv_tx_desc) - 1760 sizeof(union e1000_adv_tx_desc) -
1733 ETH_FCS_LEN) * 2; 1761 ETH_FCS_LEN) * 2;
@@ -1740,13 +1768,15 @@ void igb_reset(struct igb_adapter *adapter)
1740 1768
1741 /* If current Tx allocation is less than the min Tx FIFO size, 1769 /* If current Tx allocation is less than the min Tx FIFO size,
1742 * and the min Tx FIFO size is less than the current Rx FIFO 1770 * and the min Tx FIFO size is less than the current Rx FIFO
1743 * allocation, take space away from current Rx allocation */ 1771 * allocation, take space away from current Rx allocation
1772 */
1744 if (tx_space < min_tx_space && 1773 if (tx_space < min_tx_space &&
1745 ((min_tx_space - tx_space) < pba)) { 1774 ((min_tx_space - tx_space) < pba)) {
1746 pba = pba - (min_tx_space - tx_space); 1775 pba = pba - (min_tx_space - tx_space);
1747 1776
1748 /* if short on rx space, rx wins and must trump tx 1777 /* if short on Rx space, Rx wins and must trump Tx
1749 * adjustment */ 1778 * adjustment
1779 */
1750 if (pba < min_rx_space) 1780 if (pba < min_rx_space)
1751 pba = min_rx_space; 1781 pba = min_rx_space;
1752 } 1782 }
@@ -1758,7 +1788,8 @@ void igb_reset(struct igb_adapter *adapter)
1758 * (or the size used for early receive) above it in the Rx FIFO. 1788 * (or the size used for early receive) above it in the Rx FIFO.
1759 * Set it to the lower of: 1789 * Set it to the lower of:
1760 * - 90% of the Rx FIFO size, or 1790 * - 90% of the Rx FIFO size, or
1761 * - the full Rx FIFO size minus one full frame */ 1791 * - the full Rx FIFO size minus one full frame
1792 */
1762 hwm = min(((pba << 10) * 9 / 10), 1793 hwm = min(((pba << 10) * 9 / 10),
1763 ((pba << 10) - 2 * adapter->max_frame_size)); 1794 ((pba << 10) - 2 * adapter->max_frame_size));
1764 1795
@@ -1789,8 +1820,7 @@ void igb_reset(struct igb_adapter *adapter)
1789 if (hw->mac.ops.init_hw(hw)) 1820 if (hw->mac.ops.init_hw(hw))
1790 dev_err(&pdev->dev, "Hardware Error\n"); 1821 dev_err(&pdev->dev, "Hardware Error\n");
1791 1822
1792 /* 1823 /* Flow control settings reset on hardware reset, so guarantee flow
1793 * Flow control settings reset on hardware reset, so guarantee flow
1794 * control is off when forcing speed. 1824 * control is off when forcing speed.
1795 */ 1825 */
1796 if (!hw->mac.autoneg) 1826 if (!hw->mac.autoneg)
@@ -1826,14 +1856,13 @@ void igb_reset(struct igb_adapter *adapter)
1826static netdev_features_t igb_fix_features(struct net_device *netdev, 1856static netdev_features_t igb_fix_features(struct net_device *netdev,
1827 netdev_features_t features) 1857 netdev_features_t features)
1828{ 1858{
1829 /* 1859 /* Since there is no support for separate Rx/Tx vlan accel
1830 * Since there is no support for separate rx/tx vlan accel 1860 * enable/disable make sure Tx flag is always in same state as Rx.
1831 * enable/disable make sure tx flag is always in same state as rx.
1832 */ 1861 */
1833 if (features & NETIF_F_HW_VLAN_RX) 1862 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1834 features |= NETIF_F_HW_VLAN_TX; 1863 features |= NETIF_F_HW_VLAN_CTAG_TX;
1835 else 1864 else
1836 features &= ~NETIF_F_HW_VLAN_TX; 1865 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1837 1866
1838 return features; 1867 return features;
1839} 1868}
@@ -1844,7 +1873,7 @@ static int igb_set_features(struct net_device *netdev,
1844 netdev_features_t changed = netdev->features ^ features; 1873 netdev_features_t changed = netdev->features ^ features;
1845 struct igb_adapter *adapter = netdev_priv(netdev); 1874 struct igb_adapter *adapter = netdev_priv(netdev);
1846 1875
1847 if (changed & NETIF_F_HW_VLAN_RX) 1876 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1848 igb_vlan_mode(netdev, features); 1877 igb_vlan_mode(netdev, features);
1849 1878
1850 if (!(changed & NETIF_F_RXALL)) 1879 if (!(changed & NETIF_F_RXALL))
@@ -1876,6 +1905,7 @@ static const struct net_device_ops igb_netdev_ops = {
1876 .ndo_set_vf_mac = igb_ndo_set_vf_mac, 1905 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1877 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, 1906 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1878 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, 1907 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1908 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
1879 .ndo_get_vf_config = igb_ndo_get_vf_config, 1909 .ndo_get_vf_config = igb_ndo_get_vf_config,
1880#ifdef CONFIG_NET_POLL_CONTROLLER 1910#ifdef CONFIG_NET_POLL_CONTROLLER
1881 .ndo_poll_controller = igb_netpoll, 1911 .ndo_poll_controller = igb_netpoll,
@@ -1887,7 +1917,6 @@ static const struct net_device_ops igb_netdev_ops = {
1887/** 1917/**
1888 * igb_set_fw_version - Configure version string for ethtool 1918 * igb_set_fw_version - Configure version string for ethtool
1889 * @adapter: adapter struct 1919 * @adapter: adapter struct
1890 *
1891 **/ 1920 **/
1892void igb_set_fw_version(struct igb_adapter *adapter) 1921void igb_set_fw_version(struct igb_adapter *adapter)
1893{ 1922{
@@ -1923,10 +1952,10 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1923 return; 1952 return;
1924} 1953}
1925 1954
1926/* igb_init_i2c - Init I2C interface 1955/**
1956 * igb_init_i2c - Init I2C interface
1927 * @adapter: pointer to adapter structure 1957 * @adapter: pointer to adapter structure
1928 * 1958 **/
1929 */
1930static s32 igb_init_i2c(struct igb_adapter *adapter) 1959static s32 igb_init_i2c(struct igb_adapter *adapter)
1931{ 1960{
1932 s32 status = E1000_SUCCESS; 1961 s32 status = E1000_SUCCESS;
@@ -1951,15 +1980,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
1951} 1980}
1952 1981
1953/** 1982/**
1954 * igb_probe - Device Initialization Routine 1983 * igb_probe - Device Initialization Routine
1955 * @pdev: PCI device information struct 1984 * @pdev: PCI device information struct
1956 * @ent: entry in igb_pci_tbl 1985 * @ent: entry in igb_pci_tbl
1957 * 1986 *
1958 * Returns 0 on success, negative on failure 1987 * Returns 0 on success, negative on failure
1959 * 1988 *
1960 * igb_probe initializes an adapter identified by a pci_dev structure. 1989 * igb_probe initializes an adapter identified by a pci_dev structure.
1961 * The OS initialization, configuring of the adapter private structure, 1990 * The OS initialization, configuring of the adapter private structure,
1962 * and a hardware reset occur. 1991 * and a hardware reset occur.
1963 **/ 1992 **/
1964static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1993static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1965{ 1994{
@@ -1996,18 +2025,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1996 } else { 2025 } else {
1997 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2026 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1998 if (err) { 2027 if (err) {
1999 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2028 err = dma_set_coherent_mask(&pdev->dev,
2029 DMA_BIT_MASK(32));
2000 if (err) { 2030 if (err) {
2001 dev_err(&pdev->dev, "No usable DMA " 2031 dev_err(&pdev->dev,
2002 "configuration, aborting\n"); 2032 "No usable DMA configuration, aborting\n");
2003 goto err_dma; 2033 goto err_dma;
2004 } 2034 }
2005 } 2035 }
2006 } 2036 }
2007 2037
2008 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 2038 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2009 IORESOURCE_MEM), 2039 IORESOURCE_MEM),
2010 igb_driver_name); 2040 igb_driver_name);
2011 if (err) 2041 if (err)
2012 goto err_pci_reg; 2042 goto err_pci_reg;
2013 2043
@@ -2085,8 +2115,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2085 dev_info(&pdev->dev, 2115 dev_info(&pdev->dev,
2086 "PHY reset is blocked due to SOL/IDER session.\n"); 2116 "PHY reset is blocked due to SOL/IDER session.\n");
2087 2117
2088 /* 2118 /* features is initialized to 0 in allocation, it might have bits
2089 * features is initialized to 0 in allocation, it might have bits
2090 * set by igb_sw_init so we should use an or instead of an 2119 * set by igb_sw_init so we should use an or instead of an
2091 * assignment. 2120 * assignment.
2092 */ 2121 */
@@ -2097,15 +2126,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2097 NETIF_F_TSO6 | 2126 NETIF_F_TSO6 |
2098 NETIF_F_RXHASH | 2127 NETIF_F_RXHASH |
2099 NETIF_F_RXCSUM | 2128 NETIF_F_RXCSUM |
2100 NETIF_F_HW_VLAN_RX | 2129 NETIF_F_HW_VLAN_CTAG_RX |
2101 NETIF_F_HW_VLAN_TX; 2130 NETIF_F_HW_VLAN_CTAG_TX;
2102 2131
2103 /* copy netdev features into list of user selectable features */ 2132 /* copy netdev features into list of user selectable features */
2104 netdev->hw_features |= netdev->features; 2133 netdev->hw_features |= netdev->features;
2105 netdev->hw_features |= NETIF_F_RXALL; 2134 netdev->hw_features |= NETIF_F_RXALL;
2106 2135
2107 /* set this bit last since it cannot be part of hw_features */ 2136 /* set this bit last since it cannot be part of hw_features */
2108 netdev->features |= NETIF_F_HW_VLAN_FILTER; 2137 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2109 2138
2110 netdev->vlan_features |= NETIF_F_TSO | 2139 netdev->vlan_features |= NETIF_F_TSO |
2111 NETIF_F_TSO6 | 2140 NETIF_F_TSO6 |
@@ -2130,11 +2159,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2130 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); 2159 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2131 2160
2132 /* before reading the NVM, reset the controller to put the device in a 2161 /* before reading the NVM, reset the controller to put the device in a
2133 * known good starting state */ 2162 * known good starting state
2163 */
2134 hw->mac.ops.reset_hw(hw); 2164 hw->mac.ops.reset_hw(hw);
2135 2165
2136 /* 2166 /* make sure the NVM is good , i211 parts have special NVM that
2137 * make sure the NVM is good , i211 parts have special NVM that
2138 * doesn't contain a checksum 2167 * doesn't contain a checksum
2139 */ 2168 */
2140 if (hw->mac.type != e1000_i211) { 2169 if (hw->mac.type != e1000_i211) {
@@ -2161,9 +2190,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2161 igb_set_fw_version(adapter); 2190 igb_set_fw_version(adapter);
2162 2191
2163 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2192 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2164 (unsigned long) adapter); 2193 (unsigned long) adapter);
2165 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2194 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2166 (unsigned long) adapter); 2195 (unsigned long) adapter);
2167 2196
2168 INIT_WORK(&adapter->reset_task, igb_reset_task); 2197 INIT_WORK(&adapter->reset_task, igb_reset_task);
2169 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 2198 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -2185,8 +2214,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2185 /* Check the NVM for wake support on non-port A ports */ 2214 /* Check the NVM for wake support on non-port A ports */
2186 if (hw->mac.type >= e1000_82580) 2215 if (hw->mac.type >= e1000_82580)
2187 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2216 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2188 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2217 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2189 &eeprom_data); 2218 &eeprom_data);
2190 else if (hw->bus.func == 1) 2219 else if (hw->bus.func == 1)
2191 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 2220 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2192 2221
@@ -2195,7 +2224,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2195 2224
2196 /* now that we have the eeprom settings, apply the special cases where 2225 /* now that we have the eeprom settings, apply the special cases where
2197 * the eeprom may be wrong or the board simply won't support wake on 2226 * the eeprom may be wrong or the board simply won't support wake on
2198 * lan on a particular port */ 2227 * lan on a particular port
2228 */
2199 switch (pdev->device) { 2229 switch (pdev->device) {
2200 case E1000_DEV_ID_82575GB_QUAD_COPPER: 2230 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2201 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2231 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
@@ -2204,7 +2234,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2204 case E1000_DEV_ID_82576_FIBER: 2234 case E1000_DEV_ID_82576_FIBER:
2205 case E1000_DEV_ID_82576_SERDES: 2235 case E1000_DEV_ID_82576_SERDES:
2206 /* Wake events only supported on port A for dual fiber 2236 /* Wake events only supported on port A for dual fiber
2207 * regardless of eeprom setting */ 2237 * regardless of eeprom setting
2238 */
2208 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 2239 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2209 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2240 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2210 break; 2241 break;
@@ -2274,8 +2305,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2274 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { 2305 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2275 u16 ets_word; 2306 u16 ets_word;
2276 2307
2277 /* 2308 /* Read the NVM to determine if this i350 device supports an
2278 * Read the NVM to determine if this i350 device supports an
2279 * external thermal sensor. 2309 * external thermal sensor.
2280 */ 2310 */
2281 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); 2311 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
@@ -2294,17 +2324,20 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2294 igb_ptp_init(adapter); 2324 igb_ptp_init(adapter);
2295 2325
2296 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2326 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2297 /* print bus type/speed/width info */ 2327 /* print bus type/speed/width info, not applicable to i354 */
2298 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2328 if (hw->mac.type != e1000_i354) {
2299 netdev->name, 2329 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2300 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 2330 netdev->name,
2301 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : 2331 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2302 "unknown"), 2332 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2303 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 2333 "unknown"),
2304 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 2334 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
2305 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 2335 "Width x4" :
2306 "unknown"), 2336 (hw->bus.width == e1000_bus_width_pcie_x2) ?
2307 netdev->dev_addr); 2337 "Width x2" :
2338 (hw->bus.width == e1000_bus_width_pcie_x1) ?
2339 "Width x1" : "unknown"), netdev->dev_addr);
2340 }
2308 2341
2309 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); 2342 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2310 if (ret_val) 2343 if (ret_val)
@@ -2321,6 +2354,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2321 case e1000_i211: 2354 case e1000_i211:
2322 igb_set_eee_i350(hw); 2355 igb_set_eee_i350(hw);
2323 break; 2356 break;
2357 case e1000_i354:
2358 if (hw->phy.media_type == e1000_media_type_copper) {
2359 if ((rd32(E1000_CTRL_EXT) &
2360 E1000_CTRL_EXT_LINK_MODE_SGMII))
2361 igb_set_eee_i354(hw);
2362 }
2363 break;
2324 default: 2364 default:
2325 break; 2365 break;
2326 } 2366 }
@@ -2344,7 +2384,7 @@ err_ioremap:
2344 free_netdev(netdev); 2384 free_netdev(netdev);
2345err_alloc_etherdev: 2385err_alloc_etherdev:
2346 pci_release_selected_regions(pdev, 2386 pci_release_selected_regions(pdev,
2347 pci_select_bars(pdev, IORESOURCE_MEM)); 2387 pci_select_bars(pdev, IORESOURCE_MEM));
2348err_pci_reg: 2388err_pci_reg:
2349err_dma: 2389err_dma:
2350 pci_disable_device(pdev); 2390 pci_disable_device(pdev);
@@ -2361,7 +2401,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
2361 /* reclaim resources allocated to VFs */ 2401 /* reclaim resources allocated to VFs */
2362 if (adapter->vf_data) { 2402 if (adapter->vf_data) {
2363 /* disable iov and allow time for transactions to clear */ 2403 /* disable iov and allow time for transactions to clear */
2364 if (igb_vfs_are_assigned(adapter)) { 2404 if (pci_vfs_assigned(pdev)) {
2365 dev_warn(&pdev->dev, 2405 dev_warn(&pdev->dev,
2366 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); 2406 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2367 return -EPERM; 2407 return -EPERM;
@@ -2444,26 +2484,24 @@ out:
2444} 2484}
2445 2485
2446#endif 2486#endif
2447/* 2487/**
2448 * igb_remove_i2c - Cleanup I2C interface 2488 * igb_remove_i2c - Cleanup I2C interface
2449 * @adapter: pointer to adapter structure 2489 * @adapter: pointer to adapter structure
2450 * 2490 **/
2451 */
2452static void igb_remove_i2c(struct igb_adapter *adapter) 2491static void igb_remove_i2c(struct igb_adapter *adapter)
2453{ 2492{
2454
2455 /* free the adapter bus structure */ 2493 /* free the adapter bus structure */
2456 i2c_del_adapter(&adapter->i2c_adap); 2494 i2c_del_adapter(&adapter->i2c_adap);
2457} 2495}
2458 2496
2459/** 2497/**
2460 * igb_remove - Device Removal Routine 2498 * igb_remove - Device Removal Routine
2461 * @pdev: PCI device information struct 2499 * @pdev: PCI device information struct
2462 * 2500 *
2463 * igb_remove is called by the PCI subsystem to alert the driver 2501 * igb_remove is called by the PCI subsystem to alert the driver
2464 * that it should release a PCI device. The could be caused by a 2502 * that it should release a PCI device. The could be caused by a
2465 * Hot-Plug event, or because the driver is going to be removed from 2503 * Hot-Plug event, or because the driver is going to be removed from
2466 * memory. 2504 * memory.
2467 **/ 2505 **/
2468static void igb_remove(struct pci_dev *pdev) 2506static void igb_remove(struct pci_dev *pdev)
2469{ 2507{
@@ -2477,8 +2515,7 @@ static void igb_remove(struct pci_dev *pdev)
2477#endif 2515#endif
2478 igb_remove_i2c(adapter); 2516 igb_remove_i2c(adapter);
2479 igb_ptp_stop(adapter); 2517 igb_ptp_stop(adapter);
2480 /* 2518 /* The watchdog timer may be rescheduled, so explicitly
2481 * The watchdog timer may be rescheduled, so explicitly
2482 * disable watchdog from being rescheduled. 2519 * disable watchdog from being rescheduled.
2483 */ 2520 */
2484 set_bit(__IGB_DOWN, &adapter->state); 2521 set_bit(__IGB_DOWN, &adapter->state);
@@ -2498,7 +2535,8 @@ static void igb_remove(struct pci_dev *pdev)
2498#endif 2535#endif
2499 2536
2500 /* Release control of h/w to f/w. If f/w is AMT enabled, this 2537 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2501 * would have already happened in close and is redundant. */ 2538 * would have already happened in close and is redundant.
2539 */
2502 igb_release_hw_control(adapter); 2540 igb_release_hw_control(adapter);
2503 2541
2504 unregister_netdev(netdev); 2542 unregister_netdev(netdev);
@@ -2513,7 +2551,7 @@ static void igb_remove(struct pci_dev *pdev)
2513 if (hw->flash_address) 2551 if (hw->flash_address)
2514 iounmap(hw->flash_address); 2552 iounmap(hw->flash_address);
2515 pci_release_selected_regions(pdev, 2553 pci_release_selected_regions(pdev,
2516 pci_select_bars(pdev, IORESOURCE_MEM)); 2554 pci_select_bars(pdev, IORESOURCE_MEM));
2517 2555
2518 kfree(adapter->shadow_vfta); 2556 kfree(adapter->shadow_vfta);
2519 free_netdev(netdev); 2557 free_netdev(netdev);
@@ -2524,13 +2562,13 @@ static void igb_remove(struct pci_dev *pdev)
2524} 2562}
2525 2563
2526/** 2564/**
2527 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space 2565 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2528 * @adapter: board private structure to initialize 2566 * @adapter: board private structure to initialize
2529 * 2567 *
2530 * This function initializes the vf specific data storage and then attempts to 2568 * This function initializes the vf specific data storage and then attempts to
2531 * allocate the VFs. The reason for ordering it this way is because it is much 2569 * allocate the VFs. The reason for ordering it this way is because it is much
2532 * mor expensive time wise to disable SR-IOV than it is to allocate and free 2570 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2533 * the memory for the VFs. 2571 * the memory for the VFs.
2534 **/ 2572 **/
2535static void igb_probe_vfs(struct igb_adapter *adapter) 2573static void igb_probe_vfs(struct igb_adapter *adapter)
2536{ 2574{
@@ -2576,6 +2614,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2576 } 2614 }
2577 /* fall through */ 2615 /* fall through */
2578 case e1000_82580: 2616 case e1000_82580:
2617 case e1000_i354:
2579 default: 2618 default:
2580 max_rss_queues = IGB_MAX_RX_QUEUES; 2619 max_rss_queues = IGB_MAX_RX_QUEUES;
2581 break; 2620 break;
@@ -2590,8 +2629,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2590 /* Device supports enough interrupts without queue pairing. */ 2629 /* Device supports enough interrupts without queue pairing. */
2591 break; 2630 break;
2592 case e1000_82576: 2631 case e1000_82576:
2593 /* 2632 /* If VFs are going to be allocated with RSS queues then we
2594 * If VFs are going to be allocated with RSS queues then we
2595 * should pair the queues in order to conserve interrupts due 2633 * should pair the queues in order to conserve interrupts due
2596 * to limited supply. 2634 * to limited supply.
2597 */ 2635 */
@@ -2601,10 +2639,10 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2601 /* fall through */ 2639 /* fall through */
2602 case e1000_82580: 2640 case e1000_82580:
2603 case e1000_i350: 2641 case e1000_i350:
2642 case e1000_i354:
2604 case e1000_i210: 2643 case e1000_i210:
2605 default: 2644 default:
2606 /* 2645 /* If rss_queues > half of max_rss_queues, pair the queues in
2607 * If rss_queues > half of max_rss_queues, pair the queues in
2608 * order to conserve interrupts due to limited supply. 2646 * order to conserve interrupts due to limited supply.
2609 */ 2647 */
2610 if (adapter->rss_queues > (max_rss_queues / 2)) 2648 if (adapter->rss_queues > (max_rss_queues / 2))
@@ -2614,12 +2652,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2614} 2652}
2615 2653
2616/** 2654/**
2617 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2655 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2618 * @adapter: board private structure to initialize 2656 * @adapter: board private structure to initialize
2619 * 2657 *
2620 * igb_sw_init initializes the Adapter private data structure. 2658 * igb_sw_init initializes the Adapter private data structure.
2621 * Fields are initialized based on PCI device information and 2659 * Fields are initialized based on PCI device information and
2622 * OS network device settings (MTU size). 2660 * OS network device settings (MTU size).
2623 **/ 2661 **/
2624static int igb_sw_init(struct igb_adapter *adapter) 2662static int igb_sw_init(struct igb_adapter *adapter)
2625{ 2663{
@@ -2689,16 +2727,16 @@ static int igb_sw_init(struct igb_adapter *adapter)
2689} 2727}
2690 2728
2691/** 2729/**
2692 * igb_open - Called when a network interface is made active 2730 * igb_open - Called when a network interface is made active
2693 * @netdev: network interface device structure 2731 * @netdev: network interface device structure
2694 * 2732 *
2695 * Returns 0 on success, negative value on failure 2733 * Returns 0 on success, negative value on failure
2696 * 2734 *
2697 * The open entry point is called when a network interface is made 2735 * The open entry point is called when a network interface is made
2698 * active by the system (IFF_UP). At this point all resources needed 2736 * active by the system (IFF_UP). At this point all resources needed
2699 * for transmit and receive operations are allocated, the interrupt 2737 * for transmit and receive operations are allocated, the interrupt
2700 * handler is registered with the OS, the watchdog timer is started, 2738 * handler is registered with the OS, the watchdog timer is started,
2701 * and the stack is notified that the interface is ready. 2739 * and the stack is notified that the interface is ready.
2702 **/ 2740 **/
2703static int __igb_open(struct net_device *netdev, bool resuming) 2741static int __igb_open(struct net_device *netdev, bool resuming)
2704{ 2742{
@@ -2734,7 +2772,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2734 /* before we allocate an interrupt, we must be ready to handle it. 2772 /* before we allocate an interrupt, we must be ready to handle it.
2735 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2773 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2736 * as soon as we call pci_request_irq, so we have to setup our 2774 * as soon as we call pci_request_irq, so we have to setup our
2737 * clean_rx handler before we do so. */ 2775 * clean_rx handler before we do so.
2776 */
2738 igb_configure(adapter); 2777 igb_configure(adapter);
2739 2778
2740 err = igb_request_irq(adapter); 2779 err = igb_request_irq(adapter);
@@ -2803,15 +2842,15 @@ static int igb_open(struct net_device *netdev)
2803} 2842}
2804 2843
2805/** 2844/**
2806 * igb_close - Disables a network interface 2845 * igb_close - Disables a network interface
2807 * @netdev: network interface device structure 2846 * @netdev: network interface device structure
2808 * 2847 *
2809 * Returns 0, this is not allowed to fail 2848 * Returns 0, this is not allowed to fail
2810 * 2849 *
2811 * The close entry point is called when an interface is de-activated 2850 * The close entry point is called when an interface is de-activated
2812 * by the OS. The hardware is still under the driver's control, but 2851 * by the OS. The hardware is still under the driver's control, but
2813 * needs to be disabled. A global MAC reset is issued to stop the 2852 * needs to be disabled. A global MAC reset is issued to stop the
2814 * hardware, and all transmit and receive resources are freed. 2853 * hardware, and all transmit and receive resources are freed.
2815 **/ 2854 **/
2816static int __igb_close(struct net_device *netdev, bool suspending) 2855static int __igb_close(struct net_device *netdev, bool suspending)
2817{ 2856{
@@ -2840,10 +2879,10 @@ static int igb_close(struct net_device *netdev)
2840} 2879}
2841 2880
2842/** 2881/**
2843 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 2882 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2844 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2883 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2845 * 2884 *
2846 * Return 0 on success, negative on failure 2885 * Return 0 on success, negative on failure
2847 **/ 2886 **/
2848int igb_setup_tx_resources(struct igb_ring *tx_ring) 2887int igb_setup_tx_resources(struct igb_ring *tx_ring)
2849{ 2888{
@@ -2878,11 +2917,11 @@ err:
2878} 2917}
2879 2918
2880/** 2919/**
2881 * igb_setup_all_tx_resources - wrapper to allocate Tx resources 2920 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2882 * (Descriptors) for all queues 2921 * (Descriptors) for all queues
2883 * @adapter: board private structure 2922 * @adapter: board private structure
2884 * 2923 *
2885 * Return 0 on success, negative on failure 2924 * Return 0 on success, negative on failure
2886 **/ 2925 **/
2887static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2926static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2888{ 2927{
@@ -2904,8 +2943,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2904} 2943}
2905 2944
2906/** 2945/**
2907 * igb_setup_tctl - configure the transmit control registers 2946 * igb_setup_tctl - configure the transmit control registers
2908 * @adapter: Board private structure 2947 * @adapter: Board private structure
2909 **/ 2948 **/
2910void igb_setup_tctl(struct igb_adapter *adapter) 2949void igb_setup_tctl(struct igb_adapter *adapter)
2911{ 2950{
@@ -2930,11 +2969,11 @@ void igb_setup_tctl(struct igb_adapter *adapter)
2930} 2969}
2931 2970
2932/** 2971/**
2933 * igb_configure_tx_ring - Configure transmit ring after Reset 2972 * igb_configure_tx_ring - Configure transmit ring after Reset
2934 * @adapter: board private structure 2973 * @adapter: board private structure
2935 * @ring: tx ring to configure 2974 * @ring: tx ring to configure
2936 * 2975 *
2937 * Configure a transmit ring after a reset. 2976 * Configure a transmit ring after a reset.
2938 **/ 2977 **/
2939void igb_configure_tx_ring(struct igb_adapter *adapter, 2978void igb_configure_tx_ring(struct igb_adapter *adapter,
2940 struct igb_ring *ring) 2979 struct igb_ring *ring)
@@ -2950,9 +2989,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2950 mdelay(10); 2989 mdelay(10);
2951 2990
2952 wr32(E1000_TDLEN(reg_idx), 2991 wr32(E1000_TDLEN(reg_idx),
2953 ring->count * sizeof(union e1000_adv_tx_desc)); 2992 ring->count * sizeof(union e1000_adv_tx_desc));
2954 wr32(E1000_TDBAL(reg_idx), 2993 wr32(E1000_TDBAL(reg_idx),
2955 tdba & 0x00000000ffffffffULL); 2994 tdba & 0x00000000ffffffffULL);
2956 wr32(E1000_TDBAH(reg_idx), tdba >> 32); 2995 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2957 2996
2958 ring->tail = hw->hw_addr + E1000_TDT(reg_idx); 2997 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
@@ -2968,10 +3007,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2968} 3007}
2969 3008
2970/** 3009/**
2971 * igb_configure_tx - Configure transmit Unit after Reset 3010 * igb_configure_tx - Configure transmit Unit after Reset
2972 * @adapter: board private structure 3011 * @adapter: board private structure
2973 * 3012 *
2974 * Configure the Tx unit of the MAC after a reset. 3013 * Configure the Tx unit of the MAC after a reset.
2975 **/ 3014 **/
2976static void igb_configure_tx(struct igb_adapter *adapter) 3015static void igb_configure_tx(struct igb_adapter *adapter)
2977{ 3016{
@@ -2982,10 +3021,10 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2982} 3021}
2983 3022
2984/** 3023/**
2985 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 3024 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2986 * @rx_ring: rx descriptor ring (for a specific queue) to setup 3025 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
2987 * 3026 *
2988 * Returns 0 on success, negative on failure 3027 * Returns 0 on success, negative on failure
2989 **/ 3028 **/
2990int igb_setup_rx_resources(struct igb_ring *rx_ring) 3029int igb_setup_rx_resources(struct igb_ring *rx_ring)
2991{ 3030{
@@ -3021,11 +3060,11 @@ err:
3021} 3060}
3022 3061
3023/** 3062/**
3024 * igb_setup_all_rx_resources - wrapper to allocate Rx resources 3063 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3025 * (Descriptors) for all queues 3064 * (Descriptors) for all queues
3026 * @adapter: board private structure 3065 * @adapter: board private structure
3027 * 3066 *
3028 * Return 0 on success, negative on failure 3067 * Return 0 on success, negative on failure
3029 **/ 3068 **/
3030static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 3069static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3031{ 3070{
@@ -3047,8 +3086,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3047} 3086}
3048 3087
3049/** 3088/**
3050 * igb_setup_mrqc - configure the multiple receive queue control registers 3089 * igb_setup_mrqc - configure the multiple receive queue control registers
3051 * @adapter: Board private structure 3090 * @adapter: Board private structure
3052 **/ 3091 **/
3053static void igb_setup_mrqc(struct igb_adapter *adapter) 3092static void igb_setup_mrqc(struct igb_adapter *adapter)
3054{ 3093{
@@ -3081,8 +3120,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3081 break; 3120 break;
3082 } 3121 }
3083 3122
3084 /* 3123 /* Populate the indirection table 4 entries at a time. To do this
3085 * Populate the indirection table 4 entries at a time. To do this
3086 * we are generating the results for n and n+2 and then interleaving 3124 * we are generating the results for n and n+2 and then interleaving
3087 * those with the results with n+1 and n+3. 3125 * those with the results with n+1 and n+3.
3088 */ 3126 */
@@ -3098,8 +3136,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3098 wr32(E1000_RETA(j), reta); 3136 wr32(E1000_RETA(j), reta);
3099 } 3137 }
3100 3138
3101 /* 3139 /* Disable raw packet checksumming so that RSS hash is placed in
3102 * Disable raw packet checksumming so that RSS hash is placed in
3103 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 3140 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3104 * offloads as they are enabled by default 3141 * offloads as they are enabled by default
3105 */ 3142 */
@@ -3129,7 +3166,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3129 3166
3130 /* If VMDq is enabled then we set the appropriate mode for that, else 3167 /* If VMDq is enabled then we set the appropriate mode for that, else
3131 * we default to RSS so that an RSS hash is calculated per packet even 3168 * we default to RSS so that an RSS hash is calculated per packet even
3132 * if we are only using one queue */ 3169 * if we are only using one queue
3170 */
3133 if (adapter->vfs_allocated_count) { 3171 if (adapter->vfs_allocated_count) {
3134 if (hw->mac.type > e1000_82575) { 3172 if (hw->mac.type > e1000_82575) {
3135 /* Set the default pool for the PF's first queue */ 3173 /* Set the default pool for the PF's first queue */
@@ -3154,8 +3192,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3154} 3192}
3155 3193
3156/** 3194/**
3157 * igb_setup_rctl - configure the receive control registers 3195 * igb_setup_rctl - configure the receive control registers
3158 * @adapter: Board private structure 3196 * @adapter: Board private structure
3159 **/ 3197 **/
3160void igb_setup_rctl(struct igb_adapter *adapter) 3198void igb_setup_rctl(struct igb_adapter *adapter)
3161{ 3199{
@@ -3170,8 +3208,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3170 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 3208 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3171 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3209 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3172 3210
3173 /* 3211 /* enable stripping of CRC. It's unlikely this will break BMC
3174 * enable stripping of CRC. It's unlikely this will break BMC
3175 * redirection as it did with e1000. Newer features require 3212 * redirection as it did with e1000. Newer features require
3176 * that the HW strips the CRC. 3213 * that the HW strips the CRC.
3177 */ 3214 */
@@ -3198,7 +3235,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3198 /* This is useful for sniffing bad packets. */ 3235 /* This is useful for sniffing bad packets. */
3199 if (adapter->netdev->features & NETIF_F_RXALL) { 3236 if (adapter->netdev->features & NETIF_F_RXALL) {
3200 /* UPE and MPE will be handled by normal PROMISC logic 3237 /* UPE and MPE will be handled by normal PROMISC logic
3201 * in e1000e_set_rx_mode */ 3238 * in e1000e_set_rx_mode
3239 */
3202 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3240 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3203 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3241 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3204 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3242 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3221,7 +3259,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3221 u32 vmolr; 3259 u32 vmolr;
3222 3260
3223 /* if it isn't the PF check to see if VFs are enabled and 3261 /* if it isn't the PF check to see if VFs are enabled and
3224 * increase the size to support vlan tags */ 3262 * increase the size to support vlan tags
3263 */
3225 if (vfn < adapter->vfs_allocated_count && 3264 if (vfn < adapter->vfs_allocated_count &&
3226 adapter->vf_data[vfn].vlans_enabled) 3265 adapter->vf_data[vfn].vlans_enabled)
3227 size += VLAN_TAG_SIZE; 3266 size += VLAN_TAG_SIZE;
@@ -3235,10 +3274,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3235} 3274}
3236 3275
3237/** 3276/**
3238 * igb_rlpml_set - set maximum receive packet size 3277 * igb_rlpml_set - set maximum receive packet size
3239 * @adapter: board private structure 3278 * @adapter: board private structure
3240 * 3279 *
3241 * Configure maximum receivable packet size. 3280 * Configure maximum receivable packet size.
3242 **/ 3281 **/
3243static void igb_rlpml_set(struct igb_adapter *adapter) 3282static void igb_rlpml_set(struct igb_adapter *adapter)
3244{ 3283{
@@ -3248,8 +3287,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
3248 3287
3249 if (pf_id) { 3288 if (pf_id) {
3250 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 3289 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
3251 /* 3290 /* If we're in VMDQ or SR-IOV mode, then set global RLPML
3252 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3253 * to our max jumbo frame size, in case we need to enable 3291 * to our max jumbo frame size, in case we need to enable
3254 * jumbo frames on one of the rings later. 3292 * jumbo frames on one of the rings later.
3255 * This will not pass over-length frames into the default 3293 * This will not pass over-length frames into the default
@@ -3267,17 +3305,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3267 struct e1000_hw *hw = &adapter->hw; 3305 struct e1000_hw *hw = &adapter->hw;
3268 u32 vmolr; 3306 u32 vmolr;
3269 3307
3270 /* 3308 /* This register exists only on 82576 and newer so if we are older then
3271 * This register exists only on 82576 and newer so if we are older then
3272 * we should exit and do nothing 3309 * we should exit and do nothing
3273 */ 3310 */
3274 if (hw->mac.type < e1000_82576) 3311 if (hw->mac.type < e1000_82576)
3275 return; 3312 return;
3276 3313
3277 vmolr = rd32(E1000_VMOLR(vfn)); 3314 vmolr = rd32(E1000_VMOLR(vfn));
3278 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 3315 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3279 if (aupe) 3316 if (aupe)
3280 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 3317 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3281 else 3318 else
3282 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ 3319 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3283 3320
@@ -3286,25 +3323,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3286 3323
3287 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) 3324 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3288 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 3325 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3289 /* 3326 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
3290 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3291 * multicast packets 3327 * multicast packets
3292 */ 3328 */
3293 if (vfn <= adapter->vfs_allocated_count) 3329 if (vfn <= adapter->vfs_allocated_count)
3294 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ 3330 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3295 3331
3296 wr32(E1000_VMOLR(vfn), vmolr); 3332 wr32(E1000_VMOLR(vfn), vmolr);
3297} 3333}
3298 3334
3299/** 3335/**
3300 * igb_configure_rx_ring - Configure a receive ring after Reset 3336 * igb_configure_rx_ring - Configure a receive ring after Reset
3301 * @adapter: board private structure 3337 * @adapter: board private structure
3302 * @ring: receive ring to be configured 3338 * @ring: receive ring to be configured
3303 * 3339 *
3304 * Configure the Rx unit of the MAC after a reset. 3340 * Configure the Rx unit of the MAC after a reset.
3305 **/ 3341 **/
3306void igb_configure_rx_ring(struct igb_adapter *adapter, 3342void igb_configure_rx_ring(struct igb_adapter *adapter,
3307 struct igb_ring *ring) 3343 struct igb_ring *ring)
3308{ 3344{
3309 struct e1000_hw *hw = &adapter->hw; 3345 struct e1000_hw *hw = &adapter->hw;
3310 u64 rdba = ring->dma; 3346 u64 rdba = ring->dma;
@@ -3319,7 +3355,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3319 rdba & 0x00000000ffffffffULL); 3355 rdba & 0x00000000ffffffffULL);
3320 wr32(E1000_RDBAH(reg_idx), rdba >> 32); 3356 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3321 wr32(E1000_RDLEN(reg_idx), 3357 wr32(E1000_RDLEN(reg_idx),
3322 ring->count * sizeof(union e1000_adv_rx_desc)); 3358 ring->count * sizeof(union e1000_adv_rx_desc));
3323 3359
3324 /* initialize head and tail */ 3360 /* initialize head and tail */
3325 ring->tail = hw->hw_addr + E1000_RDT(reg_idx); 3361 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
@@ -3351,10 +3387,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3351} 3387}
3352 3388
3353/** 3389/**
3354 * igb_configure_rx - Configure receive Unit after Reset 3390 * igb_configure_rx - Configure receive Unit after Reset
3355 * @adapter: board private structure 3391 * @adapter: board private structure
3356 * 3392 *
3357 * Configure the Rx unit of the MAC after a reset. 3393 * Configure the Rx unit of the MAC after a reset.
3358 **/ 3394 **/
3359static void igb_configure_rx(struct igb_adapter *adapter) 3395static void igb_configure_rx(struct igb_adapter *adapter)
3360{ 3396{
@@ -3365,19 +3401,20 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3365 3401
3366 /* set the correct pool for the PF default MAC address in entry 0 */ 3402 /* set the correct pool for the PF default MAC address in entry 0 */
3367 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 3403 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3368 adapter->vfs_allocated_count); 3404 adapter->vfs_allocated_count);
3369 3405
3370 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3406 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3371 * the Base and Length of the Rx Descriptor Ring */ 3407 * the Base and Length of the Rx Descriptor Ring
3408 */
3372 for (i = 0; i < adapter->num_rx_queues; i++) 3409 for (i = 0; i < adapter->num_rx_queues; i++)
3373 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); 3410 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3374} 3411}
3375 3412
3376/** 3413/**
3377 * igb_free_tx_resources - Free Tx Resources per Queue 3414 * igb_free_tx_resources - Free Tx Resources per Queue
3378 * @tx_ring: Tx descriptor ring for a specific queue 3415 * @tx_ring: Tx descriptor ring for a specific queue
3379 * 3416 *
3380 * Free all transmit software resources 3417 * Free all transmit software resources
3381 **/ 3418 **/
3382void igb_free_tx_resources(struct igb_ring *tx_ring) 3419void igb_free_tx_resources(struct igb_ring *tx_ring)
3383{ 3420{
@@ -3397,10 +3434,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
3397} 3434}
3398 3435
3399/** 3436/**
3400 * igb_free_all_tx_resources - Free Tx Resources for All Queues 3437 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3401 * @adapter: board private structure 3438 * @adapter: board private structure
3402 * 3439 *
3403 * Free all transmit software resources 3440 * Free all transmit software resources
3404 **/ 3441 **/
3405static void igb_free_all_tx_resources(struct igb_adapter *adapter) 3442static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3406{ 3443{
@@ -3433,8 +3470,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3433} 3470}
3434 3471
3435/** 3472/**
3436 * igb_clean_tx_ring - Free Tx Buffers 3473 * igb_clean_tx_ring - Free Tx Buffers
3437 * @tx_ring: ring to be cleaned 3474 * @tx_ring: ring to be cleaned
3438 **/ 3475 **/
3439static void igb_clean_tx_ring(struct igb_ring *tx_ring) 3476static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3440{ 3477{
@@ -3464,8 +3501,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3464} 3501}
3465 3502
3466/** 3503/**
3467 * igb_clean_all_tx_rings - Free Tx Buffers for all queues 3504 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3468 * @adapter: board private structure 3505 * @adapter: board private structure
3469 **/ 3506 **/
3470static void igb_clean_all_tx_rings(struct igb_adapter *adapter) 3507static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3471{ 3508{
@@ -3476,10 +3513,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3476} 3513}
3477 3514
3478/** 3515/**
3479 * igb_free_rx_resources - Free Rx Resources 3516 * igb_free_rx_resources - Free Rx Resources
3480 * @rx_ring: ring to clean the resources from 3517 * @rx_ring: ring to clean the resources from
3481 * 3518 *
3482 * Free all receive software resources 3519 * Free all receive software resources
3483 **/ 3520 **/
3484void igb_free_rx_resources(struct igb_ring *rx_ring) 3521void igb_free_rx_resources(struct igb_ring *rx_ring)
3485{ 3522{
@@ -3499,10 +3536,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
3499} 3536}
3500 3537
3501/** 3538/**
3502 * igb_free_all_rx_resources - Free Rx Resources for All Queues 3539 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3503 * @adapter: board private structure 3540 * @adapter: board private structure
3504 * 3541 *
3505 * Free all receive software resources 3542 * Free all receive software resources
3506 **/ 3543 **/
3507static void igb_free_all_rx_resources(struct igb_adapter *adapter) 3544static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3508{ 3545{
@@ -3513,8 +3550,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3513} 3550}
3514 3551
3515/** 3552/**
3516 * igb_clean_rx_ring - Free Rx Buffers per Queue 3553 * igb_clean_rx_ring - Free Rx Buffers per Queue
3517 * @rx_ring: ring to free buffers from 3554 * @rx_ring: ring to free buffers from
3518 **/ 3555 **/
3519static void igb_clean_rx_ring(struct igb_ring *rx_ring) 3556static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3520{ 3557{
@@ -3556,8 +3593,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3556} 3593}
3557 3594
3558/** 3595/**
3559 * igb_clean_all_rx_rings - Free Rx Buffers for all queues 3596 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3560 * @adapter: board private structure 3597 * @adapter: board private structure
3561 **/ 3598 **/
3562static void igb_clean_all_rx_rings(struct igb_adapter *adapter) 3599static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3563{ 3600{
@@ -3568,11 +3605,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3568} 3605}
3569 3606
3570/** 3607/**
3571 * igb_set_mac - Change the Ethernet Address of the NIC 3608 * igb_set_mac - Change the Ethernet Address of the NIC
3572 * @netdev: network interface device structure 3609 * @netdev: network interface device structure
3573 * @p: pointer to an address structure 3610 * @p: pointer to an address structure
3574 * 3611 *
3575 * Returns 0 on success, negative on failure 3612 * Returns 0 on success, negative on failure
3576 **/ 3613 **/
3577static int igb_set_mac(struct net_device *netdev, void *p) 3614static int igb_set_mac(struct net_device *netdev, void *p)
3578{ 3615{
@@ -3588,19 +3625,19 @@ static int igb_set_mac(struct net_device *netdev, void *p)
3588 3625
3589 /* set the correct pool for the new PF MAC address in entry 0 */ 3626 /* set the correct pool for the new PF MAC address in entry 0 */
3590 igb_rar_set_qsel(adapter, hw->mac.addr, 0, 3627 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3591 adapter->vfs_allocated_count); 3628 adapter->vfs_allocated_count);
3592 3629
3593 return 0; 3630 return 0;
3594} 3631}
3595 3632
3596/** 3633/**
3597 * igb_write_mc_addr_list - write multicast addresses to MTA 3634 * igb_write_mc_addr_list - write multicast addresses to MTA
3598 * @netdev: network interface device structure 3635 * @netdev: network interface device structure
3599 * 3636 *
3600 * Writes multicast address list to the MTA hash table. 3637 * Writes multicast address list to the MTA hash table.
3601 * Returns: -ENOMEM on failure 3638 * Returns: -ENOMEM on failure
3602 * 0 on no addresses written 3639 * 0 on no addresses written
3603 * X on writing X addresses to MTA 3640 * X on writing X addresses to MTA
3604 **/ 3641 **/
3605static int igb_write_mc_addr_list(struct net_device *netdev) 3642static int igb_write_mc_addr_list(struct net_device *netdev)
3606{ 3643{
@@ -3633,13 +3670,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
3633} 3670}
3634 3671
3635/** 3672/**
3636 * igb_write_uc_addr_list - write unicast addresses to RAR table 3673 * igb_write_uc_addr_list - write unicast addresses to RAR table
3637 * @netdev: network interface device structure 3674 * @netdev: network interface device structure
3638 * 3675 *
3639 * Writes unicast address list to the RAR table. 3676 * Writes unicast address list to the RAR table.
3640 * Returns: -ENOMEM on failure/insufficient address space 3677 * Returns: -ENOMEM on failure/insufficient address space
3641 * 0 on no addresses written 3678 * 0 on no addresses written
3642 * X on writing X addresses to the RAR table 3679 * X on writing X addresses to the RAR table
3643 **/ 3680 **/
3644static int igb_write_uc_addr_list(struct net_device *netdev) 3681static int igb_write_uc_addr_list(struct net_device *netdev)
3645{ 3682{
@@ -3660,8 +3697,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3660 if (!rar_entries) 3697 if (!rar_entries)
3661 break; 3698 break;
3662 igb_rar_set_qsel(adapter, ha->addr, 3699 igb_rar_set_qsel(adapter, ha->addr,
3663 rar_entries--, 3700 rar_entries--,
3664 vfn); 3701 vfn);
3665 count++; 3702 count++;
3666 } 3703 }
3667 } 3704 }
@@ -3676,13 +3713,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3676} 3713}
3677 3714
3678/** 3715/**
3679 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3716 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3680 * @netdev: network interface device structure 3717 * @netdev: network interface device structure
3681 * 3718 *
3682 * The set_rx_mode entry point is called whenever the unicast or multicast 3719 * The set_rx_mode entry point is called whenever the unicast or multicast
3683 * address lists or the network interface flags are updated. This routine is 3720 * address lists or the network interface flags are updated. This routine is
3684 * responsible for configuring the hardware for proper unicast, multicast, 3721 * responsible for configuring the hardware for proper unicast, multicast,
3685 * promiscuous mode, and all-multi behavior. 3722 * promiscuous mode, and all-multi behavior.
3686 **/ 3723 **/
3687static void igb_set_rx_mode(struct net_device *netdev) 3724static void igb_set_rx_mode(struct net_device *netdev)
3688{ 3725{
@@ -3699,6 +3736,10 @@ static void igb_set_rx_mode(struct net_device *netdev)
3699 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); 3736 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3700 3737
3701 if (netdev->flags & IFF_PROMISC) { 3738 if (netdev->flags & IFF_PROMISC) {
3739 u32 mrqc = rd32(E1000_MRQC);
3740 /* retain VLAN HW filtering if in VT mode */
3741 if (mrqc & E1000_MRQC_ENABLE_VMDQ)
3742 rctl |= E1000_RCTL_VFE;
3702 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3743 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3703 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); 3744 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3704 } else { 3745 } else {
@@ -3706,8 +3747,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3706 rctl |= E1000_RCTL_MPE; 3747 rctl |= E1000_RCTL_MPE;
3707 vmolr |= E1000_VMOLR_MPME; 3748 vmolr |= E1000_VMOLR_MPME;
3708 } else { 3749 } else {
3709 /* 3750 /* Write addresses to the MTA, if the attempt fails
3710 * Write addresses to the MTA, if the attempt fails
3711 * then we should just turn on promiscuous mode so 3751 * then we should just turn on promiscuous mode so
3712 * that we can at least receive multicast traffic 3752 * that we can at least receive multicast traffic
3713 */ 3753 */
@@ -3719,8 +3759,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3719 vmolr |= E1000_VMOLR_ROMPE; 3759 vmolr |= E1000_VMOLR_ROMPE;
3720 } 3760 }
3721 } 3761 }
3722 /* 3762 /* Write addresses to available RAR registers, if there is not
3723 * Write addresses to available RAR registers, if there is not
3724 * sufficient space to store all the addresses then enable 3763 * sufficient space to store all the addresses then enable
3725 * unicast promiscuous mode 3764 * unicast promiscuous mode
3726 */ 3765 */
@@ -3733,8 +3772,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3733 } 3772 }
3734 wr32(E1000_RCTL, rctl); 3773 wr32(E1000_RCTL, rctl);
3735 3774
3736 /* 3775 /* In order to support SR-IOV and eventually VMDq it is necessary to set
3737 * In order to support SR-IOV and eventually VMDq it is necessary to set
3738 * the VMOLR to enable the appropriate modes. Without this workaround 3776 * the VMOLR to enable the appropriate modes. Without this workaround
3739 * we will have issues with VLAN tag stripping not being done for frames 3777 * we will have issues with VLAN tag stripping not being done for frames
3740 * that are only arriving because we are the default pool 3778 * that are only arriving because we are the default pool
@@ -3743,7 +3781,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3743 return; 3781 return;
3744 3782
3745 vmolr |= rd32(E1000_VMOLR(vfn)) & 3783 vmolr |= rd32(E1000_VMOLR(vfn)) &
3746 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); 3784 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3747 wr32(E1000_VMOLR(vfn), vmolr); 3785 wr32(E1000_VMOLR(vfn), vmolr);
3748 igb_restore_vf_multicasts(adapter); 3786 igb_restore_vf_multicasts(adapter);
3749} 3787}
@@ -3788,7 +3826,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
3788} 3826}
3789 3827
3790/* Need to wait a few seconds after link up to get diagnostic information from 3828/* Need to wait a few seconds after link up to get diagnostic information from
3791 * the phy */ 3829 * the phy
3830 */
3792static void igb_update_phy_info(unsigned long data) 3831static void igb_update_phy_info(unsigned long data)
3793{ 3832{
3794 struct igb_adapter *adapter = (struct igb_adapter *) data; 3833 struct igb_adapter *adapter = (struct igb_adapter *) data;
@@ -3796,8 +3835,8 @@ static void igb_update_phy_info(unsigned long data)
3796} 3835}
3797 3836
3798/** 3837/**
3799 * igb_has_link - check shared code for link and determine up/down 3838 * igb_has_link - check shared code for link and determine up/down
3800 * @adapter: pointer to driver private info 3839 * @adapter: pointer to driver private info
3801 **/ 3840 **/
3802bool igb_has_link(struct igb_adapter *adapter) 3841bool igb_has_link(struct igb_adapter *adapter)
3803{ 3842{
@@ -3842,17 +3881,16 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3842 ctrl_ext = rd32(E1000_CTRL_EXT); 3881 ctrl_ext = rd32(E1000_CTRL_EXT);
3843 3882
3844 if ((hw->phy.media_type == e1000_media_type_copper) && 3883 if ((hw->phy.media_type == e1000_media_type_copper) &&
3845 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { 3884 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
3846 ret = !!(thstat & event); 3885 ret = !!(thstat & event);
3847 }
3848 } 3886 }
3849 3887
3850 return ret; 3888 return ret;
3851} 3889}
3852 3890
3853/** 3891/**
3854 * igb_watchdog - Timer Call-back 3892 * igb_watchdog - Timer Call-back
3855 * @data: pointer to adapter cast into an unsigned long 3893 * @data: pointer to adapter cast into an unsigned long
3856 **/ 3894 **/
3857static void igb_watchdog(unsigned long data) 3895static void igb_watchdog(unsigned long data)
3858{ 3896{
@@ -3864,9 +3902,10 @@ static void igb_watchdog(unsigned long data)
3864static void igb_watchdog_task(struct work_struct *work) 3902static void igb_watchdog_task(struct work_struct *work)
3865{ 3903{
3866 struct igb_adapter *adapter = container_of(work, 3904 struct igb_adapter *adapter = container_of(work,
3867 struct igb_adapter, 3905 struct igb_adapter,
3868 watchdog_task); 3906 watchdog_task);
3869 struct e1000_hw *hw = &adapter->hw; 3907 struct e1000_hw *hw = &adapter->hw;
3908 struct e1000_phy_info *phy = &hw->phy;
3870 struct net_device *netdev = adapter->netdev; 3909 struct net_device *netdev = adapter->netdev;
3871 u32 link; 3910 u32 link;
3872 int i; 3911 int i;
@@ -3879,8 +3918,8 @@ static void igb_watchdog_task(struct work_struct *work)
3879 if (!netif_carrier_ok(netdev)) { 3918 if (!netif_carrier_ok(netdev)) {
3880 u32 ctrl; 3919 u32 ctrl;
3881 hw->mac.ops.get_speed_and_duplex(hw, 3920 hw->mac.ops.get_speed_and_duplex(hw,
3882 &adapter->link_speed, 3921 &adapter->link_speed,
3883 &adapter->link_duplex); 3922 &adapter->link_duplex);
3884 3923
3885 ctrl = rd32(E1000_CTRL); 3924 ctrl = rd32(E1000_CTRL);
3886 /* Links status message must follow this format */ 3925 /* Links status message must follow this format */
@@ -3895,6 +3934,11 @@ static void igb_watchdog_task(struct work_struct *work)
3895 (ctrl & E1000_CTRL_RFCE) ? "RX" : 3934 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3896 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); 3935 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
3897 3936
3937 /* check if SmartSpeed worked */
3938 igb_check_downshift(hw);
3939 if (phy->speed_downgraded)
3940 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
3941
3898 /* check for thermal sensor event */ 3942 /* check for thermal sensor event */
3899 if (igb_thermal_sensor_event(hw, 3943 if (igb_thermal_sensor_event(hw,
3900 E1000_THSTAT_LINK_THROTTLE)) { 3944 E1000_THSTAT_LINK_THROTTLE)) {
@@ -3963,7 +4007,8 @@ static void igb_watchdog_task(struct work_struct *work)
3963 /* We've lost link, so the controller stops DMA, 4007 /* We've lost link, so the controller stops DMA,
3964 * but we've got queued Tx work that's never going 4008 * but we've got queued Tx work that's never going
3965 * to get done, so reset controller to flush Tx. 4009 * to get done, so reset controller to flush Tx.
3966 * (Do the reset outside of interrupt context). */ 4010 * (Do the reset outside of interrupt context).
4011 */
3967 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 4012 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3968 adapter->tx_timeout_count++; 4013 adapter->tx_timeout_count++;
3969 schedule_work(&adapter->reset_task); 4014 schedule_work(&adapter->reset_task);
@@ -3976,7 +4021,7 @@ static void igb_watchdog_task(struct work_struct *work)
3976 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 4021 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3977 } 4022 }
3978 4023
3979 /* Cause software interrupt to ensure rx ring is cleaned */ 4024 /* Cause software interrupt to ensure Rx ring is cleaned */
3980 if (adapter->msix_entries) { 4025 if (adapter->msix_entries) {
3981 u32 eics = 0; 4026 u32 eics = 0;
3982 for (i = 0; i < adapter->num_q_vectors; i++) 4027 for (i = 0; i < adapter->num_q_vectors; i++)
@@ -4003,20 +4048,20 @@ enum latency_range {
4003}; 4048};
4004 4049
4005/** 4050/**
4006 * igb_update_ring_itr - update the dynamic ITR value based on packet size 4051 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4052 * @q_vector: pointer to q_vector
4007 * 4053 *
4008 * Stores a new ITR value based on strictly on packet size. This 4054 * Stores a new ITR value based on strictly on packet size. This
4009 * algorithm is less sophisticated than that used in igb_update_itr, 4055 * algorithm is less sophisticated than that used in igb_update_itr,
4010 * due to the difficulty of synchronizing statistics across multiple 4056 * due to the difficulty of synchronizing statistics across multiple
4011 * receive rings. The divisors and thresholds used by this function 4057 * receive rings. The divisors and thresholds used by this function
4012 * were determined based on theoretical maximum wire speed and testing 4058 * were determined based on theoretical maximum wire speed and testing
4013 * data, in order to minimize response time while increasing bulk 4059 * data, in order to minimize response time while increasing bulk
4014 * throughput. 4060 * throughput.
4015 * This functionality is controlled by the InterruptThrottleRate module 4061 * This functionality is controlled by the InterruptThrottleRate module
4016 * parameter (see igb_param.c) 4062 * parameter (see igb_param.c)
4017 * NOTE: This function is called only when operating in a multiqueue 4063 * NOTE: This function is called only when operating in a multiqueue
4018 * receive environment. 4064 * receive environment.
4019 * @q_vector: pointer to q_vector
4020 **/ 4065 **/
4021static void igb_update_ring_itr(struct igb_q_vector *q_vector) 4066static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4022{ 4067{
@@ -4077,20 +4122,21 @@ clear_counts:
4077} 4122}
4078 4123
4079/** 4124/**
4080 * igb_update_itr - update the dynamic ITR value based on statistics 4125 * igb_update_itr - update the dynamic ITR value based on statistics
4081 * Stores a new ITR value based on packets and byte 4126 * @q_vector: pointer to q_vector
4082 * counts during the last interrupt. The advantage of per interrupt 4127 * @ring_container: ring info to update the itr for
4083 * computation is faster updates and more accurate ITR for the current 4128 *
4084 * traffic pattern. Constants in this function were computed 4129 * Stores a new ITR value based on packets and byte
4085 * based on theoretical maximum wire speed and thresholds were set based 4130 * counts during the last interrupt. The advantage of per interrupt
4086 * on testing data as well as attempting to minimize response time 4131 * computation is faster updates and more accurate ITR for the current
4087 * while increasing bulk throughput. 4132 * traffic pattern. Constants in this function were computed
4088 * this functionality is controlled by the InterruptThrottleRate module 4133 * based on theoretical maximum wire speed and thresholds were set based
4089 * parameter (see igb_param.c) 4134 * on testing data as well as attempting to minimize response time
4090 * NOTE: These calculations are only valid when operating in a single- 4135 * while increasing bulk throughput.
4091 * queue environment. 4136 * this functionality is controlled by the InterruptThrottleRate module
4092 * @q_vector: pointer to q_vector 4137 * parameter (see igb_param.c)
4093 * @ring_container: ring info to update the itr for 4138 * NOTE: These calculations are only valid when operating in a single-
4139 * queue environment.
4094 **/ 4140 **/
4095static void igb_update_itr(struct igb_q_vector *q_vector, 4141static void igb_update_itr(struct igb_q_vector *q_vector,
4096 struct igb_ring_container *ring_container) 4142 struct igb_ring_container *ring_container)
@@ -4188,12 +4234,12 @@ set_itr_now:
4188 if (new_itr != q_vector->itr_val) { 4234 if (new_itr != q_vector->itr_val) {
4189 /* this attempts to bias the interrupt rate towards Bulk 4235 /* this attempts to bias the interrupt rate towards Bulk
4190 * by adding intermediate steps when interrupt rate is 4236 * by adding intermediate steps when interrupt rate is
4191 * increasing */ 4237 * increasing
4238 */
4192 new_itr = new_itr > q_vector->itr_val ? 4239 new_itr = new_itr > q_vector->itr_val ?
4193 max((new_itr * q_vector->itr_val) / 4240 max((new_itr * q_vector->itr_val) /
4194 (new_itr + (q_vector->itr_val >> 2)), 4241 (new_itr + (q_vector->itr_val >> 2)),
4195 new_itr) : 4242 new_itr) : new_itr;
4196 new_itr;
4197 /* Don't write the value here; it resets the adapter's 4243 /* Don't write the value here; it resets the adapter's
4198 * internal timer, and causes us to delay far longer than 4244 * internal timer, and causes us to delay far longer than
4199 * we should between interrupts. Instead, we write the ITR 4245 * we should between interrupts. Instead, we write the ITR
@@ -4320,8 +4366,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4320 default: 4366 default:
4321 if (unlikely(net_ratelimit())) { 4367 if (unlikely(net_ratelimit())) {
4322 dev_warn(tx_ring->dev, 4368 dev_warn(tx_ring->dev,
4323 "partial checksum but proto=%x!\n", 4369 "partial checksum but proto=%x!\n",
4324 first->protocol); 4370 first->protocol);
4325 } 4371 }
4326 break; 4372 break;
4327 } 4373 }
@@ -4344,8 +4390,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4344 default: 4390 default:
4345 if (unlikely(net_ratelimit())) { 4391 if (unlikely(net_ratelimit())) {
4346 dev_warn(tx_ring->dev, 4392 dev_warn(tx_ring->dev,
4347 "partial checksum but l4 proto=%x!\n", 4393 "partial checksum but l4 proto=%x!\n",
4348 l4_hdr); 4394 l4_hdr);
4349 } 4395 }
4350 break; 4396 break;
4351 } 4397 }
@@ -4497,8 +4543,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4497 /* set the timestamp */ 4543 /* set the timestamp */
4498 first->time_stamp = jiffies; 4544 first->time_stamp = jiffies;
4499 4545
4500 /* 4546 /* Force memory writes to complete before letting h/w know there
4501 * Force memory writes to complete before letting h/w know there
4502 * are new descriptors to fetch. (Only applicable for weak-ordered 4547 * are new descriptors to fetch. (Only applicable for weak-ordered
4503 * memory model archs, such as IA-64). 4548 * memory model archs, such as IA-64).
4504 * 4549 *
@@ -4519,7 +4564,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4519 writel(i, tx_ring->tail); 4564 writel(i, tx_ring->tail);
4520 4565
4521 /* we need this if more than one processor can write to our tail 4566 /* we need this if more than one processor can write to our tail
4522 * at a time, it syncronizes IO on IA64/Altix systems */ 4567 * at a time, it synchronizes IO on IA64/Altix systems
4568 */
4523 mmiowb(); 4569 mmiowb();
4524 4570
4525 return; 4571 return;
@@ -4549,11 +4595,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4549 4595
4550 /* Herbert's original patch had: 4596 /* Herbert's original patch had:
4551 * smp_mb__after_netif_stop_queue(); 4597 * smp_mb__after_netif_stop_queue();
4552 * but since that doesn't exist yet, just open code it. */ 4598 * but since that doesn't exist yet, just open code it.
4599 */
4553 smp_mb(); 4600 smp_mb();
4554 4601
4555 /* We need to check again in a case another CPU has just 4602 /* We need to check again in a case another CPU has just
4556 * made room available. */ 4603 * made room available.
4604 */
4557 if (igb_desc_unused(tx_ring) < size) 4605 if (igb_desc_unused(tx_ring) < size)
4558 return -EBUSY; 4606 return -EBUSY;
4559 4607
@@ -4577,7 +4625,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4577netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4625netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4578 struct igb_ring *tx_ring) 4626 struct igb_ring *tx_ring)
4579{ 4627{
4580 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4581 struct igb_tx_buffer *first; 4628 struct igb_tx_buffer *first;
4582 int tso; 4629 int tso;
4583 u32 tx_flags = 0; 4630 u32 tx_flags = 0;
@@ -4612,15 +4659,18 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4612 4659
4613 skb_tx_timestamp(skb); 4660 skb_tx_timestamp(skb);
4614 4661
4615 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4662 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4616 !(adapter->ptp_tx_skb))) { 4663 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4617 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4618 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4619 4664
4620 adapter->ptp_tx_skb = skb_get(skb); 4665 if (!(adapter->ptp_tx_skb)) {
4621 adapter->ptp_tx_start = jiffies; 4666 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4622 if (adapter->hw.mac.type == e1000_82576) 4667 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4623 schedule_work(&adapter->ptp_tx_work); 4668
4669 adapter->ptp_tx_skb = skb_get(skb);
4670 adapter->ptp_tx_start = jiffies;
4671 if (adapter->hw.mac.type == e1000_82576)
4672 schedule_work(&adapter->ptp_tx_work);
4673 }
4624 } 4674 }
4625 4675
4626 if (vlan_tx_tag_present(skb)) { 4676 if (vlan_tx_tag_present(skb)) {
@@ -4677,8 +4727,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4677 return NETDEV_TX_OK; 4727 return NETDEV_TX_OK;
4678 } 4728 }
4679 4729
4680 /* 4730 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
4681 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4682 * in order to meet this minimum size requirement. 4731 * in order to meet this minimum size requirement.
4683 */ 4732 */
4684 if (unlikely(skb->len < 17)) { 4733 if (unlikely(skb->len < 17)) {
@@ -4692,8 +4741,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4692} 4741}
4693 4742
4694/** 4743/**
4695 * igb_tx_timeout - Respond to a Tx Hang 4744 * igb_tx_timeout - Respond to a Tx Hang
4696 * @netdev: network interface device structure 4745 * @netdev: network interface device structure
4697 **/ 4746 **/
4698static void igb_tx_timeout(struct net_device *netdev) 4747static void igb_tx_timeout(struct net_device *netdev)
4699{ 4748{
@@ -4722,13 +4771,12 @@ static void igb_reset_task(struct work_struct *work)
4722} 4771}
4723 4772
4724/** 4773/**
4725 * igb_get_stats64 - Get System Network Statistics 4774 * igb_get_stats64 - Get System Network Statistics
4726 * @netdev: network interface device structure 4775 * @netdev: network interface device structure
4727 * @stats: rtnl_link_stats64 pointer 4776 * @stats: rtnl_link_stats64 pointer
4728 *
4729 **/ 4777 **/
4730static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, 4778static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4731 struct rtnl_link_stats64 *stats) 4779 struct rtnl_link_stats64 *stats)
4732{ 4780{
4733 struct igb_adapter *adapter = netdev_priv(netdev); 4781 struct igb_adapter *adapter = netdev_priv(netdev);
4734 4782
@@ -4741,11 +4789,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4741} 4789}
4742 4790
4743/** 4791/**
4744 * igb_change_mtu - Change the Maximum Transfer Unit 4792 * igb_change_mtu - Change the Maximum Transfer Unit
4745 * @netdev: network interface device structure 4793 * @netdev: network interface device structure
4746 * @new_mtu: new value for maximum frame size 4794 * @new_mtu: new value for maximum frame size
4747 * 4795 *
4748 * Returns 0 on success, negative on failure 4796 * Returns 0 on success, negative on failure
4749 **/ 4797 **/
4750static int igb_change_mtu(struct net_device *netdev, int new_mtu) 4798static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4751{ 4799{
@@ -4788,10 +4836,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4788} 4836}
4789 4837
4790/** 4838/**
4791 * igb_update_stats - Update the board statistics counters 4839 * igb_update_stats - Update the board statistics counters
4792 * @adapter: board private structure 4840 * @adapter: board private structure
4793 **/ 4841 **/
4794
4795void igb_update_stats(struct igb_adapter *adapter, 4842void igb_update_stats(struct igb_adapter *adapter,
4796 struct rtnl_link_stats64 *net_stats) 4843 struct rtnl_link_stats64 *net_stats)
4797{ 4844{
@@ -4806,8 +4853,7 @@ void igb_update_stats(struct igb_adapter *adapter,
4806 4853
4807#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 4854#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4808 4855
4809 /* 4856 /* Prevent stats update while adapter is being reset, or if the pci
4810 * Prevent stats update while adapter is being reset, or if the pci
4811 * connection is down. 4857 * connection is down.
4812 */ 4858 */
4813 if (adapter->link_speed == 0) 4859 if (adapter->link_speed == 0)
@@ -4941,7 +4987,8 @@ void igb_update_stats(struct igb_adapter *adapter,
4941 /* Rx Errors */ 4987 /* Rx Errors */
4942 4988
4943 /* RLEC on some newer hardware can be incorrect so build 4989 /* RLEC on some newer hardware can be incorrect so build
4944 * our own version based on RUC and ROC */ 4990 * our own version based on RUC and ROC
4991 */
4945 net_stats->rx_errors = adapter->stats.rxerrc + 4992 net_stats->rx_errors = adapter->stats.rxerrc +
4946 adapter->stats.crcerrs + adapter->stats.algnerrc + 4993 adapter->stats.crcerrs + adapter->stats.algnerrc +
4947 adapter->stats.ruc + adapter->stats.roc + 4994 adapter->stats.ruc + adapter->stats.roc +
@@ -5000,7 +5047,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5000 adapter->stats.doosync++; 5047 adapter->stats.doosync++;
5001 /* The DMA Out of Sync is also indication of a spoof event 5048 /* The DMA Out of Sync is also indication of a spoof event
5002 * in IOV mode. Check the Wrong VM Behavior register to 5049 * in IOV mode. Check the Wrong VM Behavior register to
5003 * see if it is really a spoof event. */ 5050 * see if it is really a spoof event.
5051 */
5004 igb_check_wvbr(adapter); 5052 igb_check_wvbr(adapter);
5005 } 5053 }
5006 5054
@@ -5074,8 +5122,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
5074 if (hw->mac.type != e1000_82575) 5122 if (hw->mac.type != e1000_82575)
5075 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; 5123 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5076 5124
5077 /* 5125 /* We can enable relaxed ordering for reads, but not writes when
5078 * We can enable relaxed ordering for reads, but not writes when
5079 * DCA is enabled. This is due to a known issue in some chipsets 5126 * DCA is enabled. This is due to a known issue in some chipsets
5080 * which will cause the DCA tag to be cleared. 5127 * which will cause the DCA tag to be cleared.
5081 */ 5128 */
@@ -5096,8 +5143,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,
5096 if (hw->mac.type != e1000_82575) 5143 if (hw->mac.type != e1000_82575)
5097 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; 5144 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5098 5145
5099 /* 5146 /* We can enable relaxed ordering for reads, but not writes when
5100 * We can enable relaxed ordering for reads, but not writes when
5101 * DCA is enabled. This is due to a known issue in some chipsets 5147 * DCA is enabled. This is due to a known issue in some chipsets
5102 * which will cause the DCA tag to be cleared. 5148 * which will cause the DCA tag to be cleared.
5103 */ 5149 */
@@ -5166,7 +5212,8 @@ static int __igb_notify_dca(struct device *dev, void *data)
5166 case DCA_PROVIDER_REMOVE: 5212 case DCA_PROVIDER_REMOVE:
5167 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 5213 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5168 /* without this a class_device is left 5214 /* without this a class_device is left
5169 * hanging around in the sysfs model */ 5215 * hanging around in the sysfs model
5216 */
5170 dca_remove_requester(dev); 5217 dca_remove_requester(dev);
5171 dev_info(&pdev->dev, "DCA disabled\n"); 5218 dev_info(&pdev->dev, "DCA disabled\n");
5172 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 5219 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -5179,12 +5226,12 @@ static int __igb_notify_dca(struct device *dev, void *data)
5179} 5226}
5180 5227
5181static int igb_notify_dca(struct notifier_block *nb, unsigned long event, 5228static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5182 void *p) 5229 void *p)
5183{ 5230{
5184 int ret_val; 5231 int ret_val;
5185 5232
5186 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, 5233 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5187 __igb_notify_dca); 5234 __igb_notify_dca);
5188 5235
5189 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 5236 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5190} 5237}
@@ -5198,40 +5245,10 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5198 eth_zero_addr(mac_addr); 5245 eth_zero_addr(mac_addr);
5199 igb_set_vf_mac(adapter, vf, mac_addr); 5246 igb_set_vf_mac(adapter, vf, mac_addr);
5200 5247
5201 return 0; 5248 /* By default spoof check is enabled for all VFs */
5202} 5249 adapter->vf_data[vf].spoofchk_enabled = true;
5203
5204static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
5205{
5206 struct pci_dev *pdev = adapter->pdev;
5207 struct pci_dev *vfdev;
5208 int dev_id;
5209
5210 switch (adapter->hw.mac.type) {
5211 case e1000_82576:
5212 dev_id = IGB_82576_VF_DEV_ID;
5213 break;
5214 case e1000_i350:
5215 dev_id = IGB_I350_VF_DEV_ID;
5216 break;
5217 default:
5218 return false;
5219 }
5220
5221 /* loop through all the VFs to see if we own any that are assigned */
5222 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5223 while (vfdev) {
5224 /* if we don't own it we don't care */
5225 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5226 /* if it is assigned we cannot release it */
5227 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
5228 return true;
5229 }
5230
5231 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
5232 }
5233 5250
5234 return false; 5251 return 0;
5235} 5252}
5236 5253
5237#endif 5254#endif
@@ -5256,7 +5273,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5256 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 5273 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5257 5274
5258 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | 5275 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5259 IGB_VF_FLAG_MULTI_PROMISC); 5276 IGB_VF_FLAG_MULTI_PROMISC);
5260 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 5277 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5261 5278
5262 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 5279 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
@@ -5264,8 +5281,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5264 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; 5281 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5265 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 5282 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5266 } else { 5283 } else {
5267 /* 5284 /* if we have hashes and we are clearing a multicast promisc
5268 * if we have hashes and we are clearing a multicast promisc
5269 * flag we need to write the hashes to the MTA as this step 5285 * flag we need to write the hashes to the MTA as this step
5270 * was previously skipped 5286 * was previously skipped
5271 */ 5287 */
@@ -5286,7 +5302,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5286 return -EINVAL; 5302 return -EINVAL;
5287 5303
5288 return 0; 5304 return 0;
5289
5290} 5305}
5291 5306
5292static int igb_set_vf_multicasts(struct igb_adapter *adapter, 5307static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@ -5493,30 +5508,91 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5493 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); 5508 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5494 if (test_bit(__IGB_DOWN, &adapter->state)) { 5509 if (test_bit(__IGB_DOWN, &adapter->state)) {
5495 dev_warn(&adapter->pdev->dev, 5510 dev_warn(&adapter->pdev->dev,
5496 "The VF VLAN has been set," 5511 "The VF VLAN has been set, but the PF device is not up.\n");
5497 " but the PF device is not up.\n");
5498 dev_warn(&adapter->pdev->dev, 5512 dev_warn(&adapter->pdev->dev,
5499 "Bring the PF device up before" 5513 "Bring the PF device up before attempting to use the VF device.\n");
5500 " attempting to use the VF device.\n");
5501 } 5514 }
5502 } else { 5515 } else {
5503 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, 5516 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5504 false, vf); 5517 false, vf);
5505 igb_set_vmvir(adapter, vlan, vf); 5518 igb_set_vmvir(adapter, vlan, vf);
5506 igb_set_vmolr(adapter, vf, true); 5519 igb_set_vmolr(adapter, vf, true);
5507 adapter->vf_data[vf].pf_vlan = 0; 5520 adapter->vf_data[vf].pf_vlan = 0;
5508 adapter->vf_data[vf].pf_qos = 0; 5521 adapter->vf_data[vf].pf_qos = 0;
5509 } 5522 }
5510out: 5523out:
5511 return err; 5524 return err;
5525}
5526
5527static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
5528{
5529 struct e1000_hw *hw = &adapter->hw;
5530 int i;
5531 u32 reg;
5532
5533 /* Find the vlan filter for this id */
5534 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5535 reg = rd32(E1000_VLVF(i));
5536 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5537 vid == (reg & E1000_VLVF_VLANID_MASK))
5538 break;
5539 }
5540
5541 if (i >= E1000_VLVF_ARRAY_SIZE)
5542 i = -1;
5543
5544 return i;
5512} 5545}
5513 5546
5514static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 5547static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5515{ 5548{
5549 struct e1000_hw *hw = &adapter->hw;
5516 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; 5550 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5517 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); 5551 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5552 int err = 0;
5553
5554 /* If in promiscuous mode we need to make sure the PF also has
5555 * the VLAN filter set.
5556 */
5557 if (add && (adapter->netdev->flags & IFF_PROMISC))
5558 err = igb_vlvf_set(adapter, vid, add,
5559 adapter->vfs_allocated_count);
5560 if (err)
5561 goto out;
5518 5562
5519 return igb_vlvf_set(adapter, vid, add, vf); 5563 err = igb_vlvf_set(adapter, vid, add, vf);
5564
5565 if (err)
5566 goto out;
5567
5568 /* Go through all the checks to see if the VLAN filter should
5569 * be wiped completely.
5570 */
5571 if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
5572 u32 vlvf, bits;
5573
5574 int regndx = igb_find_vlvf_entry(adapter, vid);
5575 if (regndx < 0)
5576 goto out;
5577 /* See if any other pools are set for this VLAN filter
5578 * entry other than the PF.
5579 */
5580 vlvf = bits = rd32(E1000_VLVF(regndx));
5581 bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
5582 adapter->vfs_allocated_count);
5583 /* If the filter was removed then ensure PF pool bit
5584 * is cleared if the PF only added itself to the pool
5585 * because the PF is in promiscuous mode.
5586 */
5587 if ((vlvf & VLAN_VID_MASK) == vid &&
5588 !test_bit(vid, adapter->active_vlans) &&
5589 !bits)
5590 igb_vlvf_set(adapter, vid, add,
5591 adapter->vfs_allocated_count);
5592 }
5593
5594out:
5595 return err;
5520} 5596}
5521 5597
5522static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) 5598static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
@@ -5586,8 +5662,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5586 5662
5587static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 5663static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5588{ 5664{
5589 /* 5665 /* The VF MAC Address is stored in a packed array of bytes
5590 * The VF MAC Address is stored in a packed array of bytes
5591 * starting at the second 32 bit word of the msg array 5666 * starting at the second 32 bit word of the msg array
5592 */ 5667 */
5593 unsigned char *addr = (char *)&msg[1]; 5668 unsigned char *addr = (char *)&msg[1];
@@ -5636,11 +5711,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5636 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 5711 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
5637 return; 5712 return;
5638 5713
5639 /* 5714 /* until the vf completes a reset it should not be
5640 * until the vf completes a reset it should not be
5641 * allowed to start any configuration. 5715 * allowed to start any configuration.
5642 */ 5716 */
5643
5644 if (msgbuf[0] == E1000_VF_RESET) { 5717 if (msgbuf[0] == E1000_VF_RESET) {
5645 igb_vf_reset_msg(adapter, vf); 5718 igb_vf_reset_msg(adapter, vf);
5646 return; 5719 return;
@@ -5660,9 +5733,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5660 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 5733 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5661 else 5734 else
5662 dev_warn(&pdev->dev, 5735 dev_warn(&pdev->dev,
5663 "VF %d attempted to override administratively " 5736 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
5664 "set MAC address\nReload the VF driver to " 5737 vf);
5665 "resume operations\n", vf);
5666 break; 5738 break;
5667 case E1000_VF_SET_PROMISC: 5739 case E1000_VF_SET_PROMISC:
5668 retval = igb_set_vf_promisc(adapter, msgbuf, vf); 5740 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5677,9 +5749,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5677 retval = -1; 5749 retval = -1;
5678 if (vf_data->pf_vlan) 5750 if (vf_data->pf_vlan)
5679 dev_warn(&pdev->dev, 5751 dev_warn(&pdev->dev,
5680 "VF %d attempted to override administratively " 5752 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
5681 "set VLAN tag\nReload the VF driver to " 5753 vf);
5682 "resume operations\n", vf);
5683 else 5754 else
5684 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 5755 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5685 break; 5756 break;
@@ -5748,9 +5819,9 @@ static void igb_set_uta(struct igb_adapter *adapter)
5748} 5819}
5749 5820
5750/** 5821/**
5751 * igb_intr_msi - Interrupt Handler 5822 * igb_intr_msi - Interrupt Handler
5752 * @irq: interrupt number 5823 * @irq: interrupt number
5753 * @data: pointer to a network interface device structure 5824 * @data: pointer to a network interface device structure
5754 **/ 5825 **/
5755static irqreturn_t igb_intr_msi(int irq, void *data) 5826static irqreturn_t igb_intr_msi(int irq, void *data)
5756{ 5827{
@@ -5793,9 +5864,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5793} 5864}
5794 5865
5795/** 5866/**
5796 * igb_intr - Legacy Interrupt Handler 5867 * igb_intr - Legacy Interrupt Handler
5797 * @irq: interrupt number 5868 * @irq: interrupt number
5798 * @data: pointer to a network interface device structure 5869 * @data: pointer to a network interface device structure
5799 **/ 5870 **/
5800static irqreturn_t igb_intr(int irq, void *data) 5871static irqreturn_t igb_intr(int irq, void *data)
5801{ 5872{
@@ -5803,11 +5874,13 @@ static irqreturn_t igb_intr(int irq, void *data)
5803 struct igb_q_vector *q_vector = adapter->q_vector[0]; 5874 struct igb_q_vector *q_vector = adapter->q_vector[0];
5804 struct e1000_hw *hw = &adapter->hw; 5875 struct e1000_hw *hw = &adapter->hw;
5805 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 5876 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5806 * need for the IMC write */ 5877 * need for the IMC write
5878 */
5807 u32 icr = rd32(E1000_ICR); 5879 u32 icr = rd32(E1000_ICR);
5808 5880
5809 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5881 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5810 * not set, then the adapter didn't send an interrupt */ 5882 * not set, then the adapter didn't send an interrupt
5883 */
5811 if (!(icr & E1000_ICR_INT_ASSERTED)) 5884 if (!(icr & E1000_ICR_INT_ASSERTED))
5812 return IRQ_NONE; 5885 return IRQ_NONE;
5813 5886
@@ -5866,15 +5939,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5866} 5939}
5867 5940
5868/** 5941/**
5869 * igb_poll - NAPI Rx polling callback 5942 * igb_poll - NAPI Rx polling callback
5870 * @napi: napi polling structure 5943 * @napi: napi polling structure
5871 * @budget: count of how many packets we should handle 5944 * @budget: count of how many packets we should handle
5872 **/ 5945 **/
5873static int igb_poll(struct napi_struct *napi, int budget) 5946static int igb_poll(struct napi_struct *napi, int budget)
5874{ 5947{
5875 struct igb_q_vector *q_vector = container_of(napi, 5948 struct igb_q_vector *q_vector = container_of(napi,
5876 struct igb_q_vector, 5949 struct igb_q_vector,
5877 napi); 5950 napi);
5878 bool clean_complete = true; 5951 bool clean_complete = true;
5879 5952
5880#ifdef CONFIG_IGB_DCA 5953#ifdef CONFIG_IGB_DCA
@@ -5899,10 +5972,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
5899} 5972}
5900 5973
5901/** 5974/**
5902 * igb_clean_tx_irq - Reclaim resources after transmit completes 5975 * igb_clean_tx_irq - Reclaim resources after transmit completes
5903 * @q_vector: pointer to q_vector containing needed info 5976 * @q_vector: pointer to q_vector containing needed info
5904 * 5977 *
5905 * returns true if ring is completely cleaned 5978 * returns true if ring is completely cleaned
5906 **/ 5979 **/
5907static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 5980static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5908{ 5981{
@@ -6008,7 +6081,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6008 struct e1000_hw *hw = &adapter->hw; 6081 struct e1000_hw *hw = &adapter->hw;
6009 6082
6010 /* Detect a transmit hang in hardware, this serializes the 6083 /* Detect a transmit hang in hardware, this serializes the
6011 * check with the clearing of time_stamp and movement of i */ 6084 * check with the clearing of time_stamp and movement of i
6085 */
6012 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 6086 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6013 if (tx_buffer->next_to_watch && 6087 if (tx_buffer->next_to_watch &&
6014 time_after(jiffies, tx_buffer->time_stamp + 6088 time_after(jiffies, tx_buffer->time_stamp +
@@ -6047,8 +6121,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6047 6121
6048#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 6122#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
6049 if (unlikely(total_packets && 6123 if (unlikely(total_packets &&
6050 netif_carrier_ok(tx_ring->netdev) && 6124 netif_carrier_ok(tx_ring->netdev) &&
6051 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 6125 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6052 /* Make sure that anybody stopping the queue after this 6126 /* Make sure that anybody stopping the queue after this
6053 * sees the new next_to_clean. 6127 * sees the new next_to_clean.
6054 */ 6128 */
@@ -6069,11 +6143,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6069} 6143}
6070 6144
6071/** 6145/**
6072 * igb_reuse_rx_page - page flip buffer and store it back on the ring 6146 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6073 * @rx_ring: rx descriptor ring to store buffers on 6147 * @rx_ring: rx descriptor ring to store buffers on
6074 * @old_buff: donor buffer to have page reused 6148 * @old_buff: donor buffer to have page reused
6075 * 6149 *
6076 * Synchronizes page for reuse by the adapter 6150 * Synchronizes page for reuse by the adapter
6077 **/ 6151 **/
6078static void igb_reuse_rx_page(struct igb_ring *rx_ring, 6152static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6079 struct igb_rx_buffer *old_buff) 6153 struct igb_rx_buffer *old_buff)
@@ -6133,19 +6207,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6133} 6207}
6134 6208
6135/** 6209/**
6136 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff 6210 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6137 * @rx_ring: rx descriptor ring to transact packets on 6211 * @rx_ring: rx descriptor ring to transact packets on
6138 * @rx_buffer: buffer containing page to add 6212 * @rx_buffer: buffer containing page to add
6139 * @rx_desc: descriptor containing length of buffer written by hardware 6213 * @rx_desc: descriptor containing length of buffer written by hardware
6140 * @skb: sk_buff to place the data into 6214 * @skb: sk_buff to place the data into
6141 * 6215 *
6142 * This function will add the data contained in rx_buffer->page to the skb. 6216 * This function will add the data contained in rx_buffer->page to the skb.
6143 * This is done either through a direct copy if the data in the buffer is 6217 * This is done either through a direct copy if the data in the buffer is
6144 * less than the skb header size, otherwise it will just attach the page as 6218 * less than the skb header size, otherwise it will just attach the page as
6145 * a frag to the skb. 6219 * a frag to the skb.
6146 * 6220 *
6147 * The function will then update the page offset if necessary and return 6221 * The function will then update the page offset if necessary and return
6148 * true if the buffer can be reused by the adapter. 6222 * true if the buffer can be reused by the adapter.
6149 **/ 6223 **/
6150static bool igb_add_rx_frag(struct igb_ring *rx_ring, 6224static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6151 struct igb_rx_buffer *rx_buffer, 6225 struct igb_rx_buffer *rx_buffer,
@@ -6216,8 +6290,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6216 return NULL; 6290 return NULL;
6217 } 6291 }
6218 6292
6219 /* 6293 /* we will be copying header into skb->data in
6220 * we will be copying header into skb->data in
6221 * pskb_may_pull so it is in our interest to prefetch 6294 * pskb_may_pull so it is in our interest to prefetch
6222 * it now to avoid a possible cache miss 6295 * it now to avoid a possible cache miss
6223 */ 6296 */
@@ -6265,8 +6338,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
6265 if (igb_test_staterr(rx_desc, 6338 if (igb_test_staterr(rx_desc,
6266 E1000_RXDEXT_STATERR_TCPE | 6339 E1000_RXDEXT_STATERR_TCPE |
6267 E1000_RXDEXT_STATERR_IPE)) { 6340 E1000_RXDEXT_STATERR_IPE)) {
6268 /* 6341 /* work around errata with sctp packets where the TCPE aka
6269 * work around errata with sctp packets where the TCPE aka
6270 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 6342 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
6271 * packets, (aka let the stack check the crc32c) 6343 * packets, (aka let the stack check the crc32c)
6272 */ 6344 */
@@ -6297,15 +6369,15 @@ static inline void igb_rx_hash(struct igb_ring *ring,
6297} 6369}
6298 6370
6299/** 6371/**
6300 * igb_is_non_eop - process handling of non-EOP buffers 6372 * igb_is_non_eop - process handling of non-EOP buffers
6301 * @rx_ring: Rx ring being processed 6373 * @rx_ring: Rx ring being processed
6302 * @rx_desc: Rx descriptor for current buffer 6374 * @rx_desc: Rx descriptor for current buffer
6303 * @skb: current socket buffer containing buffer in progress 6375 * @skb: current socket buffer containing buffer in progress
6304 * 6376 *
6305 * This function updates next to clean. If the buffer is an EOP buffer 6377 * This function updates next to clean. If the buffer is an EOP buffer
6306 * this function exits returning false, otherwise it will place the 6378 * this function exits returning false, otherwise it will place the
6307 * sk_buff in the next buffer to be chained and return true indicating 6379 * sk_buff in the next buffer to be chained and return true indicating
6308 * that this is in fact a non-EOP buffer. 6380 * that this is in fact a non-EOP buffer.
6309 **/ 6381 **/
6310static bool igb_is_non_eop(struct igb_ring *rx_ring, 6382static bool igb_is_non_eop(struct igb_ring *rx_ring,
6311 union e1000_adv_rx_desc *rx_desc) 6383 union e1000_adv_rx_desc *rx_desc)
@@ -6325,15 +6397,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
6325} 6397}
6326 6398
6327/** 6399/**
6328 * igb_get_headlen - determine size of header for LRO/GRO 6400 * igb_get_headlen - determine size of header for LRO/GRO
6329 * @data: pointer to the start of the headers 6401 * @data: pointer to the start of the headers
6330 * @max_len: total length of section to find headers in 6402 * @max_len: total length of section to find headers in
6331 * 6403 *
6332 * This function is meant to determine the length of headers that will 6404 * This function is meant to determine the length of headers that will
6333 * be recognized by hardware for LRO, and GRO offloads. The main 6405 * be recognized by hardware for LRO, and GRO offloads. The main
6334 * motivation of doing this is to only perform one pull for IPv4 TCP 6406 * motivation of doing this is to only perform one pull for IPv4 TCP
6335 * packets so that we can do basic things like calculating the gso_size 6407 * packets so that we can do basic things like calculating the gso_size
6336 * based on the average data per packet. 6408 * based on the average data per packet.
6337 **/ 6409 **/
6338static unsigned int igb_get_headlen(unsigned char *data, 6410static unsigned int igb_get_headlen(unsigned char *data,
6339 unsigned int max_len) 6411 unsigned int max_len)
@@ -6384,7 +6456,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6384 return hdr.network - data; 6456 return hdr.network - data;
6385 6457
6386 /* record next protocol if header is present */ 6458 /* record next protocol if header is present */
6387 if (!hdr.ipv4->frag_off) 6459 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
6388 nexthdr = hdr.ipv4->protocol; 6460 nexthdr = hdr.ipv4->protocol;
6389 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 6461 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6390 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 6462 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
@@ -6420,8 +6492,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6420 hdr.network += sizeof(struct udphdr); 6492 hdr.network += sizeof(struct udphdr);
6421 } 6493 }
6422 6494
6423 /* 6495 /* If everything has gone correctly hdr.network should be the
6424 * If everything has gone correctly hdr.network should be the
6425 * data section of the packet and will be the end of the header. 6496 * data section of the packet and will be the end of the header.
6426 * If not then it probably represents the end of the last recognized 6497 * If not then it probably represents the end of the last recognized
6427 * header. 6498 * header.
@@ -6433,17 +6504,17 @@ static unsigned int igb_get_headlen(unsigned char *data,
6433} 6504}
6434 6505
6435/** 6506/**
6436 * igb_pull_tail - igb specific version of skb_pull_tail 6507 * igb_pull_tail - igb specific version of skb_pull_tail
6437 * @rx_ring: rx descriptor ring packet is being transacted on 6508 * @rx_ring: rx descriptor ring packet is being transacted on
6438 * @rx_desc: pointer to the EOP Rx descriptor 6509 * @rx_desc: pointer to the EOP Rx descriptor
6439 * @skb: pointer to current skb being adjusted 6510 * @skb: pointer to current skb being adjusted
6440 * 6511 *
6441 * This function is an igb specific version of __pskb_pull_tail. The 6512 * This function is an igb specific version of __pskb_pull_tail. The
6442 * main difference between this version and the original function is that 6513 * main difference between this version and the original function is that
6443 * this function can make several assumptions about the state of things 6514 * this function can make several assumptions about the state of things
6444 * that allow for significant optimizations versus the standard function. 6515 * that allow for significant optimizations versus the standard function.
6445 * As a result we can do things like drop a frag and maintain an accurate 6516 * As a result we can do things like drop a frag and maintain an accurate
6446 * truesize for the skb. 6517 * truesize for the skb.
6447 */ 6518 */
6448static void igb_pull_tail(struct igb_ring *rx_ring, 6519static void igb_pull_tail(struct igb_ring *rx_ring,
6449 union e1000_adv_rx_desc *rx_desc, 6520 union e1000_adv_rx_desc *rx_desc,
@@ -6453,8 +6524,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6453 unsigned char *va; 6524 unsigned char *va;
6454 unsigned int pull_len; 6525 unsigned int pull_len;
6455 6526
6456 /* 6527 /* it is valid to use page_address instead of kmap since we are
6457 * it is valid to use page_address instead of kmap since we are
6458 * working with pages allocated out of the lomem pool per 6528 * working with pages allocated out of the lomem pool per
6459 * alloc_page(GFP_ATOMIC) 6529 * alloc_page(GFP_ATOMIC)
6460 */ 6530 */
@@ -6474,8 +6544,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6474 va += IGB_TS_HDR_LEN; 6544 va += IGB_TS_HDR_LEN;
6475 } 6545 }
6476 6546
6477 /* 6547 /* we need the header to contain the greater of either ETH_HLEN or
6478 * we need the header to contain the greater of either ETH_HLEN or
6479 * 60 bytes if the skb->len is less than 60 for skb_pad. 6548 * 60 bytes if the skb->len is less than 60 for skb_pad.
6480 */ 6549 */
6481 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); 6550 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
@@ -6491,24 +6560,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6491} 6560}
6492 6561
6493/** 6562/**
6494 * igb_cleanup_headers - Correct corrupted or empty headers 6563 * igb_cleanup_headers - Correct corrupted or empty headers
6495 * @rx_ring: rx descriptor ring packet is being transacted on 6564 * @rx_ring: rx descriptor ring packet is being transacted on
6496 * @rx_desc: pointer to the EOP Rx descriptor 6565 * @rx_desc: pointer to the EOP Rx descriptor
6497 * @skb: pointer to current skb being fixed 6566 * @skb: pointer to current skb being fixed
6498 * 6567 *
6499 * Address the case where we are pulling data in on pages only 6568 * Address the case where we are pulling data in on pages only
6500 * and as such no data is present in the skb header. 6569 * and as such no data is present in the skb header.
6501 * 6570 *
6502 * In addition if skb is not at least 60 bytes we need to pad it so that 6571 * In addition if skb is not at least 60 bytes we need to pad it so that
6503 * it is large enough to qualify as a valid Ethernet frame. 6572 * it is large enough to qualify as a valid Ethernet frame.
6504 * 6573 *
6505 * Returns true if an error was encountered and skb was freed. 6574 * Returns true if an error was encountered and skb was freed.
6506 **/ 6575 **/
6507static bool igb_cleanup_headers(struct igb_ring *rx_ring, 6576static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6508 union e1000_adv_rx_desc *rx_desc, 6577 union e1000_adv_rx_desc *rx_desc,
6509 struct sk_buff *skb) 6578 struct sk_buff *skb)
6510{ 6579{
6511
6512 if (unlikely((igb_test_staterr(rx_desc, 6580 if (unlikely((igb_test_staterr(rx_desc,
6513 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { 6581 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6514 struct net_device *netdev = rx_ring->netdev; 6582 struct net_device *netdev = rx_ring->netdev;
@@ -6535,14 +6603,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6535} 6603}
6536 6604
6537/** 6605/**
6538 * igb_process_skb_fields - Populate skb header fields from Rx descriptor 6606 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6539 * @rx_ring: rx descriptor ring packet is being transacted on 6607 * @rx_ring: rx descriptor ring packet is being transacted on
6540 * @rx_desc: pointer to the EOP Rx descriptor 6608 * @rx_desc: pointer to the EOP Rx descriptor
6541 * @skb: pointer to current skb being populated 6609 * @skb: pointer to current skb being populated
6542 * 6610 *
6543 * This function checks the ring, descriptor, and packet information in 6611 * This function checks the ring, descriptor, and packet information in
6544 * order to populate the hash, checksum, VLAN, timestamp, protocol, and 6612 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6545 * other fields within the skb. 6613 * other fields within the skb.
6546 **/ 6614 **/
6547static void igb_process_skb_fields(struct igb_ring *rx_ring, 6615static void igb_process_skb_fields(struct igb_ring *rx_ring,
6548 union e1000_adv_rx_desc *rx_desc, 6616 union e1000_adv_rx_desc *rx_desc,
@@ -6556,7 +6624,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6556 6624
6557 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); 6625 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
6558 6626
6559 if ((dev->features & NETIF_F_HW_VLAN_RX) && 6627 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6560 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6628 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6561 u16 vid; 6629 u16 vid;
6562 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && 6630 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
@@ -6565,7 +6633,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
6565 else 6633 else
6566 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 6634 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6567 6635
6568 __vlan_hwaccel_put_tag(skb, vid); 6636 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
6569 } 6637 }
6570 6638
6571 skb_record_rx_queue(skb, rx_ring->queue_index); 6639 skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -6670,8 +6738,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6670 /* map page for use */ 6738 /* map page for use */
6671 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 6739 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6672 6740
6673 /* 6741 /* if mapping failed free memory back to system since
6674 * if mapping failed free memory back to system since
6675 * there isn't much point in holding memory we can't use 6742 * there isn't much point in holding memory we can't use
6676 */ 6743 */
6677 if (dma_mapping_error(rx_ring->dev, dma)) { 6744 if (dma_mapping_error(rx_ring->dev, dma)) {
@@ -6689,8 +6756,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6689} 6756}
6690 6757
6691/** 6758/**
6692 * igb_alloc_rx_buffers - Replace used receive buffers; packet split 6759 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
6693 * @adapter: address of board private structure 6760 * @adapter: address of board private structure
6694 **/ 6761 **/
6695void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) 6762void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6696{ 6763{
@@ -6710,8 +6777,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6710 if (!igb_alloc_mapped_page(rx_ring, bi)) 6777 if (!igb_alloc_mapped_page(rx_ring, bi))
6711 break; 6778 break;
6712 6779
6713 /* 6780 /* Refresh the desc even if buffer_addrs didn't change
6714 * Refresh the desc even if buffer_addrs didn't change
6715 * because each write-back erases this info. 6781 * because each write-back erases this info.
6716 */ 6782 */
6717 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 6783 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
@@ -6740,8 +6806,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6740 /* update next to alloc since we have filled the ring */ 6806 /* update next to alloc since we have filled the ring */
6741 rx_ring->next_to_alloc = i; 6807 rx_ring->next_to_alloc = i;
6742 6808
6743 /* 6809 /* Force memory writes to complete before letting h/w
6744 * Force memory writes to complete before letting h/w
6745 * know there are new descriptors to fetch. (Only 6810 * know there are new descriptors to fetch. (Only
6746 * applicable for weak-ordered memory model archs, 6811 * applicable for weak-ordered memory model archs,
6747 * such as IA-64). 6812 * such as IA-64).
@@ -6826,7 +6891,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
6826 struct igb_adapter *adapter = netdev_priv(netdev); 6891 struct igb_adapter *adapter = netdev_priv(netdev);
6827 struct e1000_hw *hw = &adapter->hw; 6892 struct e1000_hw *hw = &adapter->hw;
6828 u32 ctrl, rctl; 6893 u32 ctrl, rctl;
6829 bool enable = !!(features & NETIF_F_HW_VLAN_RX); 6894 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
6830 6895
6831 if (enable) { 6896 if (enable) {
6832 /* enable VLAN tag insert/strip */ 6897 /* enable VLAN tag insert/strip */
@@ -6848,7 +6913,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
6848 igb_rlpml_set(adapter); 6913 igb_rlpml_set(adapter);
6849} 6914}
6850 6915
6851static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 6916static int igb_vlan_rx_add_vid(struct net_device *netdev,
6917 __be16 proto, u16 vid)
6852{ 6918{
6853 struct igb_adapter *adapter = netdev_priv(netdev); 6919 struct igb_adapter *adapter = netdev_priv(netdev);
6854 struct e1000_hw *hw = &adapter->hw; 6920 struct e1000_hw *hw = &adapter->hw;
@@ -6865,7 +6931,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6865 return 0; 6931 return 0;
6866} 6932}
6867 6933
6868static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 6934static int igb_vlan_rx_kill_vid(struct net_device *netdev,
6935 __be16 proto, u16 vid)
6869{ 6936{
6870 struct igb_adapter *adapter = netdev_priv(netdev); 6937 struct igb_adapter *adapter = netdev_priv(netdev);
6871 struct e1000_hw *hw = &adapter->hw; 6938 struct e1000_hw *hw = &adapter->hw;
@@ -6891,7 +6958,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
6891 igb_vlan_mode(adapter->netdev, adapter->netdev->features); 6958 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6892 6959
6893 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 6960 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6894 igb_vlan_rx_add_vid(adapter->netdev, vid); 6961 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
6895} 6962}
6896 6963
6897int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) 6964int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
@@ -6902,15 +6969,24 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6902 mac->autoneg = 0; 6969 mac->autoneg = 0;
6903 6970
6904 /* Make sure dplx is at most 1 bit and lsb of speed is not set 6971 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6905 * for the switch() below to work */ 6972 * for the switch() below to work
6973 */
6906 if ((spd & 1) || (dplx & ~1)) 6974 if ((spd & 1) || (dplx & ~1))
6907 goto err_inval; 6975 goto err_inval;
6908 6976
6909 /* Fiber NIC's only allow 1000 Gbps Full duplex */ 6977 /* Fiber NIC's only allow 1000 gbps Full duplex
6910 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && 6978 * and 100Mbps Full duplex for 100baseFx sfp
6911 spd != SPEED_1000 && 6979 */
6912 dplx != DUPLEX_FULL) 6980 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
6913 goto err_inval; 6981 switch (spd + dplx) {
6982 case SPEED_10 + DUPLEX_HALF:
6983 case SPEED_10 + DUPLEX_FULL:
6984 case SPEED_100 + DUPLEX_HALF:
6985 goto err_inval;
6986 default:
6987 break;
6988 }
6989 }
6914 6990
6915 switch (spd + dplx) { 6991 switch (spd + dplx) {
6916 case SPEED_10 + DUPLEX_HALF: 6992 case SPEED_10 + DUPLEX_HALF:
@@ -7009,7 +7085,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7009 igb_power_up_link(adapter); 7085 igb_power_up_link(adapter);
7010 7086
7011 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7087 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7012 * would have already happened in close and is redundant. */ 7088 * would have already happened in close and is redundant.
7089 */
7013 igb_release_hw_control(adapter); 7090 igb_release_hw_control(adapter);
7014 7091
7015 pci_disable_device(pdev); 7092 pci_disable_device(pdev);
@@ -7071,7 +7148,8 @@ static int igb_resume(struct device *dev)
7071 igb_reset(adapter); 7148 igb_reset(adapter);
7072 7149
7073 /* let the f/w know that the h/w is now under the control of the 7150 /* let the f/w know that the h/w is now under the control of the
7074 * driver. */ 7151 * driver.
7152 */
7075 igb_get_hw_control(adapter); 7153 igb_get_hw_control(adapter);
7076 7154
7077 wr32(E1000_WUS, ~0); 7155 wr32(E1000_WUS, ~0);
@@ -7207,8 +7285,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7207} 7285}
7208 7286
7209#ifdef CONFIG_NET_POLL_CONTROLLER 7287#ifdef CONFIG_NET_POLL_CONTROLLER
7210/* 7288/* Polling 'interrupt' - used by things like netconsole to send skbs
7211 * Polling 'interrupt' - used by things like netconsole to send skbs
7212 * without having to re-enable interrupts. It's not called while 7289 * without having to re-enable interrupts. It's not called while
7213 * the interrupt routine is executing. 7290 * the interrupt routine is executing.
7214 */ 7291 */
@@ -7231,13 +7308,13 @@ static void igb_netpoll(struct net_device *netdev)
7231#endif /* CONFIG_NET_POLL_CONTROLLER */ 7308#endif /* CONFIG_NET_POLL_CONTROLLER */
7232 7309
7233/** 7310/**
7234 * igb_io_error_detected - called when PCI error is detected 7311 * igb_io_error_detected - called when PCI error is detected
7235 * @pdev: Pointer to PCI device 7312 * @pdev: Pointer to PCI device
7236 * @state: The current pci connection state 7313 * @state: The current pci connection state
7237 * 7314 *
7238 * This function is called after a PCI bus error affecting 7315 * This function is called after a PCI bus error affecting
7239 * this device has been detected. 7316 * this device has been detected.
7240 */ 7317 **/
7241static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, 7318static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7242 pci_channel_state_t state) 7319 pci_channel_state_t state)
7243{ 7320{
@@ -7258,12 +7335,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7258} 7335}
7259 7336
7260/** 7337/**
7261 * igb_io_slot_reset - called after the pci bus has been reset. 7338 * igb_io_slot_reset - called after the pci bus has been reset.
7262 * @pdev: Pointer to PCI device 7339 * @pdev: Pointer to PCI device
7263 * 7340 *
7264 * Restart the card from scratch, as if from a cold-boot. Implementation 7341 * Restart the card from scratch, as if from a cold-boot. Implementation
7265 * resembles the first-half of the igb_resume routine. 7342 * resembles the first-half of the igb_resume routine.
7266 */ 7343 **/
7267static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) 7344static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7268{ 7345{
7269 struct net_device *netdev = pci_get_drvdata(pdev); 7346 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7291,8 +7368,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7291 7368
7292 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7369 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7293 if (err) { 7370 if (err) {
7294 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " 7371 dev_err(&pdev->dev,
7295 "failed 0x%0x\n", err); 7372 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7373 err);
7296 /* non-fatal, continue */ 7374 /* non-fatal, continue */
7297 } 7375 }
7298 7376
@@ -7300,12 +7378,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7300} 7378}
7301 7379
7302/** 7380/**
7303 * igb_io_resume - called when traffic can start flowing again. 7381 * igb_io_resume - called when traffic can start flowing again.
7304 * @pdev: Pointer to PCI device 7382 * @pdev: Pointer to PCI device
7305 * 7383 *
7306 * This callback is called when the error recovery driver tells us that 7384 * This callback is called when the error recovery driver tells us that
7307 * its OK to resume normal operation. Implementation resembles the 7385 * its OK to resume normal operation. Implementation resembles the
7308 * second-half of the igb_resume routine. 7386 * second-half of the igb_resume routine.
7309 */ 7387 */
7310static void igb_io_resume(struct pci_dev *pdev) 7388static void igb_io_resume(struct pci_dev *pdev)
7311{ 7389{
@@ -7322,12 +7400,13 @@ static void igb_io_resume(struct pci_dev *pdev)
7322 netif_device_attach(netdev); 7400 netif_device_attach(netdev);
7323 7401
7324 /* let the f/w know that the h/w is now under the control of the 7402 /* let the f/w know that the h/w is now under the control of the
7325 * driver. */ 7403 * driver.
7404 */
7326 igb_get_hw_control(adapter); 7405 igb_get_hw_control(adapter);
7327} 7406}
7328 7407
7329static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, 7408static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7330 u8 qsel) 7409 u8 qsel)
7331{ 7410{
7332 u32 rar_low, rar_high; 7411 u32 rar_low, rar_high;
7333 struct e1000_hw *hw = &adapter->hw; 7412 struct e1000_hw *hw = &adapter->hw;
@@ -7336,7 +7415,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7336 * from network order (big endian) to little endian 7415 * from network order (big endian) to little endian
7337 */ 7416 */
7338 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | 7417 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7339 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 7418 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7340 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 7419 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7341 7420
7342 /* Indicate to hardware the Address is Valid. */ 7421 /* Indicate to hardware the Address is Valid. */
@@ -7354,11 +7433,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7354} 7433}
7355 7434
7356static int igb_set_vf_mac(struct igb_adapter *adapter, 7435static int igb_set_vf_mac(struct igb_adapter *adapter,
7357 int vf, unsigned char *mac_addr) 7436 int vf, unsigned char *mac_addr)
7358{ 7437{
7359 struct e1000_hw *hw = &adapter->hw; 7438 struct e1000_hw *hw = &adapter->hw;
7360 /* VF MAC addresses start at end of receive addresses and moves 7439 /* VF MAC addresses start at end of receive addresses and moves
7361 * torwards the first, as a result a collision should not be possible */ 7440 * towards the first, as a result a collision should not be possible
7441 */
7362 int rar_entry = hw->mac.rar_entry_count - (vf + 1); 7442 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
7363 7443
7364 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 7444 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
@@ -7375,13 +7455,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7375 return -EINVAL; 7455 return -EINVAL;
7376 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; 7456 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7377 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); 7457 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7378 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" 7458 dev_info(&adapter->pdev->dev,
7379 " change effective."); 7459 "Reload the VF driver to make this change effective.");
7380 if (test_bit(__IGB_DOWN, &adapter->state)) { 7460 if (test_bit(__IGB_DOWN, &adapter->state)) {
7381 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," 7461 dev_warn(&adapter->pdev->dev,
7382 " but the PF device is not up.\n"); 7462 "The VF MAC address has been set, but the PF device is not up.\n");
7383 dev_warn(&adapter->pdev->dev, "Bring the PF device up before" 7463 dev_warn(&adapter->pdev->dev,
7384 " attempting to use the VF device.\n"); 7464 "Bring the PF device up before attempting to use the VF device.\n");
7385 } 7465 }
7386 return igb_set_vf_mac(adapter, vf, mac); 7466 return igb_set_vf_mac(adapter, vf, mac);
7387} 7467}
@@ -7408,19 +7488,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7408 /* Calculate the rate factor values to set */ 7488 /* Calculate the rate factor values to set */
7409 rf_int = link_speed / tx_rate; 7489 rf_int = link_speed / tx_rate;
7410 rf_dec = (link_speed - (rf_int * tx_rate)); 7490 rf_dec = (link_speed - (rf_int * tx_rate));
7411 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; 7491 rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
7492 tx_rate;
7412 7493
7413 bcnrc_val = E1000_RTTBCNRC_RS_ENA; 7494 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7414 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & 7495 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
7415 E1000_RTTBCNRC_RF_INT_MASK); 7496 E1000_RTTBCNRC_RF_INT_MASK);
7416 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); 7497 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7417 } else { 7498 } else {
7418 bcnrc_val = 0; 7499 bcnrc_val = 0;
7419 } 7500 }
7420 7501
7421 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 7502 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7422 /* 7503 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7423 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7424 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. 7504 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7425 */ 7505 */
7426 wr32(E1000_RTTBCNRM, 0x14); 7506 wr32(E1000_RTTBCNRM, 0x14);
@@ -7442,8 +7522,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7442 reset_rate = true; 7522 reset_rate = true;
7443 adapter->vf_rate_link_speed = 0; 7523 adapter->vf_rate_link_speed = 0;
7444 dev_info(&adapter->pdev->dev, 7524 dev_info(&adapter->pdev->dev,
7445 "Link speed has been changed. VF Transmit " 7525 "Link speed has been changed. VF Transmit rate is disabled\n");
7446 "rate is disabled\n");
7447 } 7526 }
7448 7527
7449 for (i = 0; i < adapter->vfs_allocated_count; i++) { 7528 for (i = 0; i < adapter->vfs_allocated_count; i++) {
@@ -7451,8 +7530,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7451 adapter->vf_data[i].tx_rate = 0; 7530 adapter->vf_data[i].tx_rate = 0;
7452 7531
7453 igb_set_vf_rate_limit(&adapter->hw, i, 7532 igb_set_vf_rate_limit(&adapter->hw, i,
7454 adapter->vf_data[i].tx_rate, 7533 adapter->vf_data[i].tx_rate,
7455 actual_link_speed); 7534 actual_link_speed);
7456 } 7535 }
7457} 7536}
7458 7537
@@ -7478,6 +7557,33 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7478 return 0; 7557 return 0;
7479} 7558}
7480 7559
7560static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
7561 bool setting)
7562{
7563 struct igb_adapter *adapter = netdev_priv(netdev);
7564 struct e1000_hw *hw = &adapter->hw;
7565 u32 reg_val, reg_offset;
7566
7567 if (!adapter->vfs_allocated_count)
7568 return -EOPNOTSUPP;
7569
7570 if (vf >= adapter->vfs_allocated_count)
7571 return -EINVAL;
7572
7573 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
7574 reg_val = rd32(reg_offset);
7575 if (setting)
7576 reg_val |= ((1 << vf) |
7577 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7578 else
7579 reg_val &= ~((1 << vf) |
7580 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7581 wr32(reg_offset, reg_val);
7582
7583 adapter->vf_data[vf].spoofchk_enabled = setting;
7584 return E1000_SUCCESS;
7585}
7586
7481static int igb_ndo_get_vf_config(struct net_device *netdev, 7587static int igb_ndo_get_vf_config(struct net_device *netdev,
7482 int vf, struct ifla_vf_info *ivi) 7588 int vf, struct ifla_vf_info *ivi)
7483{ 7589{
@@ -7489,6 +7595,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
7489 ivi->tx_rate = adapter->vf_data[vf].tx_rate; 7595 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
7490 ivi->vlan = adapter->vf_data[vf].pf_vlan; 7596 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7491 ivi->qos = adapter->vf_data[vf].pf_qos; 7597 ivi->qos = adapter->vf_data[vf].pf_qos;
7598 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
7492 return 0; 7599 return 0;
7493} 7600}
7494 7601
@@ -7501,6 +7608,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7501 case e1000_82575: 7608 case e1000_82575:
7502 case e1000_i210: 7609 case e1000_i210:
7503 case e1000_i211: 7610 case e1000_i211:
7611 case e1000_i354:
7504 default: 7612 default:
7505 /* replication is not supported for 82575 */ 7613 /* replication is not supported for 82575 */
7506 return; 7614 return;
@@ -7523,7 +7631,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7523 igb_vmdq_set_loopback_pf(hw, true); 7631 igb_vmdq_set_loopback_pf(hw, true);
7524 igb_vmdq_set_replication_pf(hw, true); 7632 igb_vmdq_set_replication_pf(hw, true);
7525 igb_vmdq_set_anti_spoofing_pf(hw, true, 7633 igb_vmdq_set_anti_spoofing_pf(hw, true,
7526 adapter->vfs_allocated_count); 7634 adapter->vfs_allocated_count);
7527 } else { 7635 } else {
7528 igb_vmdq_set_loopback_pf(hw, false); 7636 igb_vmdq_set_loopback_pf(hw, false);
7529 igb_vmdq_set_replication_pf(hw, false); 7637 igb_vmdq_set_replication_pf(hw, false);
@@ -7543,8 +7651,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7543 /* force threshold to 0. */ 7651 /* force threshold to 0. */
7544 wr32(E1000_DMCTXTH, 0); 7652 wr32(E1000_DMCTXTH, 0);
7545 7653
7546 /* 7654 /* DMA Coalescing high water mark needs to be greater
7547 * DMA Coalescing high water mark needs to be greater
7548 * than the Rx threshold. Set hwm to PBA - max frame 7655 * than the Rx threshold. Set hwm to PBA - max frame
7549 * size in 16B units, capping it at PBA - 6KB. 7656 * size in 16B units, capping it at PBA - 6KB.
7550 */ 7657 */
@@ -7557,8 +7664,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7557 & E1000_FCRTC_RTH_COAL_MASK); 7664 & E1000_FCRTC_RTH_COAL_MASK);
7558 wr32(E1000_FCRTC, reg); 7665 wr32(E1000_FCRTC, reg);
7559 7666
7560 /* 7667 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
7561 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7562 * frame size, capping it at PBA - 10KB. 7668 * frame size, capping it at PBA - 10KB.
7563 */ 7669 */
7564 dmac_thr = pba - adapter->max_frame_size / 512; 7670 dmac_thr = pba - adapter->max_frame_size / 512;
@@ -7576,11 +7682,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7576 reg |= (1000 >> 5); 7682 reg |= (1000 >> 5);
7577 7683
7578 /* Disable BMC-to-OS Watchdog Enable */ 7684 /* Disable BMC-to-OS Watchdog Enable */
7579 reg &= ~E1000_DMACR_DC_BMC2OSW_EN; 7685 if (hw->mac.type != e1000_i354)
7686 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7687
7580 wr32(E1000_DMACR, reg); 7688 wr32(E1000_DMACR, reg);
7581 7689
7582 /* 7690 /* no lower threshold to disable
7583 * no lower threshold to disable
7584 * coalescing(smart fifb)-UTRESH=0 7691 * coalescing(smart fifb)-UTRESH=0
7585 */ 7692 */
7586 wr32(E1000_DMCRTRH, 0); 7693 wr32(E1000_DMCRTRH, 0);
@@ -7589,15 +7696,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7589 7696
7590 wr32(E1000_DMCTLX, reg); 7697 wr32(E1000_DMCTLX, reg);
7591 7698
7592 /* 7699 /* free space in tx packet buffer to wake from
7593 * free space in tx packet buffer to wake from
7594 * DMA coal 7700 * DMA coal
7595 */ 7701 */
7596 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - 7702 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7597 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); 7703 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7598 7704
7599 /* 7705 /* make low power state decision controlled
7600 * make low power state decision controlled
7601 * by DMA coal 7706 * by DMA coal
7602 */ 7707 */
7603 reg = rd32(E1000_PCIEMISC); 7708 reg = rd32(E1000_PCIEMISC);
@@ -7611,7 +7716,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7611 } 7716 }
7612} 7717}
7613 7718
7614/* igb_read_i2c_byte - Reads 8 bit word over I2C 7719/**
7720 * igb_read_i2c_byte - Reads 8 bit word over I2C
7615 * @hw: pointer to hardware structure 7721 * @hw: pointer to hardware structure
7616 * @byte_offset: byte offset to read 7722 * @byte_offset: byte offset to read
7617 * @dev_addr: device address 7723 * @dev_addr: device address
@@ -7619,9 +7725,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7619 * 7725 *
7620 * Performs byte read operation over I2C interface at 7726 * Performs byte read operation over I2C interface at
7621 * a specified device address. 7727 * a specified device address.
7622 */ 7728 **/
7623s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7729s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7624 u8 dev_addr, u8 *data) 7730 u8 dev_addr, u8 *data)
7625{ 7731{
7626 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7732 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7627 struct i2c_client *this_client = adapter->i2c_client; 7733 struct i2c_client *this_client = adapter->i2c_client;
@@ -7648,7 +7754,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7648 } 7754 }
7649} 7755}
7650 7756
7651/* igb_write_i2c_byte - Writes 8 bit word over I2C 7757/**
7758 * igb_write_i2c_byte - Writes 8 bit word over I2C
7652 * @hw: pointer to hardware structure 7759 * @hw: pointer to hardware structure
7653 * @byte_offset: byte offset to write 7760 * @byte_offset: byte offset to write
7654 * @dev_addr: device address 7761 * @dev_addr: device address
@@ -7656,9 +7763,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7656 * 7763 *
7657 * Performs byte write operation over I2C interface at 7764 * Performs byte write operation over I2C interface at
7658 * a specified device address. 7765 * a specified device address.
7659 */ 7766 **/
7660s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7767s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7661 u8 dev_addr, u8 data) 7768 u8 dev_addr, u8 data)
7662{ 7769{
7663 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7770 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7664 struct i2c_client *this_client = adapter->i2c_client; 7771 struct i2c_client *this_client = adapter->i2c_client;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0a237507ee85..7e8c477b0ab9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,5 +1,4 @@
1/* 1/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
2 * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
3 * 2 *
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> 3 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 * 4 *
@@ -27,8 +26,7 @@
27#define INCVALUE_MASK 0x7fffffff 26#define INCVALUE_MASK 0x7fffffff
28#define ISGN 0x80000000 27#define ISGN 0x80000000
29 28
30/* 29/* The 82580 timesync updates the system timer every 8ns by 8ns,
31 * The 82580 timesync updates the system timer every 8ns by 8ns,
32 * and this update value cannot be reprogrammed. 30 * and this update value cannot be reprogrammed.
33 * 31 *
34 * Neither the 82576 nor the 82580 offer registers wide enough to hold 32 * Neither the 82576 nor the 82580 offer registers wide enough to hold
@@ -77,10 +75,7 @@
77#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
78#define IGB_NBITS_82580 40 76#define IGB_NBITS_82580 40
79 77
80/* 78/* SYSTIM read access for the 82576 */
81 * SYSTIM read access for the 82576
82 */
83
84static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 79static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
85{ 80{
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 81 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -97,10 +92,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
97 return val; 92 return val;
98} 93}
99 94
100/* 95/* SYSTIM read access for the 82580 */
101 * SYSTIM read access for the 82580
102 */
103
104static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) 96static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
105{ 97{
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 98 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -108,8 +100,7 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
108 u64 val; 100 u64 val;
109 u32 lo, hi, jk; 101 u32 lo, hi, jk;
110 102
111 /* 103 /* The timestamp latches on lowest register read. For the 82580
112 * The timestamp latches on lowest register read. For the 82580
113 * the lowest register is SYSTIMR instead of SYSTIML. However we only 104 * the lowest register is SYSTIMR instead of SYSTIML. However we only
114 * need to provide nanosecond resolution, so we just ignore it. 105 * need to provide nanosecond resolution, so we just ignore it.
115 */ 106 */
@@ -123,17 +114,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
123 return val; 114 return val;
124} 115}
125 116
126/* 117/* SYSTIM read access for I210/I211 */
127 * SYSTIM read access for I210/I211
128 */
129
130static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) 118static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
131{ 119{
132 struct e1000_hw *hw = &adapter->hw; 120 struct e1000_hw *hw = &adapter->hw;
133 u32 sec, nsec, jk; 121 u32 sec, nsec, jk;
134 122
135 /* 123 /* The timestamp latches on lowest register read. For I210/I211, the
136 * The timestamp latches on lowest register read. For I210/I211, the
137 * lowest register is SYSTIMR. Since we only need to provide nanosecond 124 * lowest register is SYSTIMR. Since we only need to provide nanosecond
138 * resolution, we can ignore it. 125 * resolution, we can ignore it.
139 */ 126 */
@@ -150,8 +137,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
150{ 137{
151 struct e1000_hw *hw = &adapter->hw; 138 struct e1000_hw *hw = &adapter->hw;
152 139
153 /* 140 /* Writing the SYSTIMR register is not necessary as it only provides
154 * Writing the SYSTIMR register is not necessary as it only provides
155 * sub-nanosecond resolution. 141 * sub-nanosecond resolution.
156 */ 142 */
157 wr32(E1000_SYSTIML, ts->tv_nsec); 143 wr32(E1000_SYSTIML, ts->tv_nsec);
@@ -185,6 +171,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
185 switch (adapter->hw.mac.type) { 171 switch (adapter->hw.mac.type) {
186 case e1000_82576: 172 case e1000_82576:
187 case e1000_82580: 173 case e1000_82580:
174 case e1000_i354:
188 case e1000_i350: 175 case e1000_i350:
189 spin_lock_irqsave(&adapter->tmreg_lock, flags); 176 spin_lock_irqsave(&adapter->tmreg_lock, flags);
190 177
@@ -207,10 +194,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
207 } 194 }
208} 195}
209 196
210/* 197/* PTP clock operations */
211 * PTP clock operations
212 */
213
214static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) 198static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
215{ 199{
216 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, 200 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
@@ -387,7 +371,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
387 * 371 *
388 * This work function polls the TSYNCTXCTL valid bit to determine when a 372 * This work function polls the TSYNCTXCTL valid bit to determine when a
389 * timestamp has been taken for the current stored skb. 373 * timestamp has been taken for the current stored skb.
390 */ 374 **/
391void igb_ptp_tx_work(struct work_struct *work) 375void igb_ptp_tx_work(struct work_struct *work)
392{ 376{
393 struct igb_adapter *adapter = container_of(work, struct igb_adapter, 377 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
@@ -437,7 +421,7 @@ static void igb_ptp_overflow_check(struct work_struct *work)
437 * dropped an Rx packet that was timestamped when the ring is full. The 421 * dropped an Rx packet that was timestamped when the ring is full. The
438 * particular error is rare but leaves the device in a state unable to timestamp 422 * particular error is rare but leaves the device in a state unable to timestamp
439 * any future packets. 423 * any future packets.
440 */ 424 **/
441void igb_ptp_rx_hang(struct igb_adapter *adapter) 425void igb_ptp_rx_hang(struct igb_adapter *adapter)
442{ 426{
443 struct e1000_hw *hw = &adapter->hw; 427 struct e1000_hw *hw = &adapter->hw;
@@ -481,7 +465,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
481 * If we were asked to do hardware stamping and such a time stamp is 465 * If we were asked to do hardware stamping and such a time stamp is
482 * available, then it must have been for this skb here because we only 466 * available, then it must have been for this skb here because we only
483 * allow only one such packet into the queue. 467 * allow only one such packet into the queue.
484 */ 468 **/
485void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) 469void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
486{ 470{
487 struct e1000_hw *hw = &adapter->hw; 471 struct e1000_hw *hw = &adapter->hw;
@@ -506,15 +490,14 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
506 * This function is meant to retrieve a timestamp from the first buffer of an 490 * This function is meant to retrieve a timestamp from the first buffer of an
507 * incoming frame. The value is stored in little endian format starting on 491 * incoming frame. The value is stored in little endian format starting on
508 * byte 8. 492 * byte 8.
509 */ 493 **/
510void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 494void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
511 unsigned char *va, 495 unsigned char *va,
512 struct sk_buff *skb) 496 struct sk_buff *skb)
513{ 497{
514 __le64 *regval = (__le64 *)va; 498 __le64 *regval = (__le64 *)va;
515 499
516 /* 500 /* The timestamp is recorded in little endian format.
517 * The timestamp is recorded in little endian format.
518 * DWORD: 0 1 2 3 501 * DWORD: 0 1 2 3
519 * Field: Reserved Reserved SYSTIML SYSTIMH 502 * Field: Reserved Reserved SYSTIML SYSTIMH
520 */ 503 */
@@ -529,7 +512,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
529 * 512 *
530 * This function is meant to retrieve a timestamp from the internal registers 513 * This function is meant to retrieve a timestamp from the internal registers
531 * of the adapter and store it in the skb. 514 * of the adapter and store it in the skb.
532 */ 515 **/
533void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, 516void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
534 struct sk_buff *skb) 517 struct sk_buff *skb)
535{ 518{
@@ -537,8 +520,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
537 struct e1000_hw *hw = &adapter->hw; 520 struct e1000_hw *hw = &adapter->hw;
538 u64 regval; 521 u64 regval;
539 522
540 /* 523 /* If this bit is set, then the RX registers contain the time stamp. No
541 * If this bit is set, then the RX registers contain the time stamp. No
542 * other packet will be time stamped until we read these registers, so 524 * other packet will be time stamped until we read these registers, so
543 * read the registers to make them available again. Because only one 525 * read the registers to make them available again. Because only one
544 * packet can be time stamped at a time, we know that the register 526 * packet can be time stamped at a time, we know that the register
@@ -574,7 +556,6 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
574 * type has to be specified. Matching the kind of event packet is 556 * type has to be specified. Matching the kind of event packet is
575 * not supported, with the exception of "all V2 events regardless of 557 * not supported, with the exception of "all V2 events regardless of
576 * level 2 or 4". 558 * level 2 or 4".
577 *
578 **/ 559 **/
579int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 560int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
580 struct ifreq *ifr, int cmd) 561 struct ifreq *ifr, int cmd)
@@ -655,10 +636,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
655 return 0; 636 return 0;
656 } 637 }
657 638
658 /* 639 /* Per-packet timestamping only works if all packets are
659 * Per-packet timestamping only works if all packets are
660 * timestamped, so enable timestamping in all packets as 640 * timestamped, so enable timestamping in all packets as
661 * long as one rx filter was configured. 641 * long as one Rx filter was configured.
662 */ 642 */
663 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { 643 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
664 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 644 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
@@ -756,6 +736,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
756 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); 736 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
757 break; 737 break;
758 case e1000_82580: 738 case e1000_82580:
739 case e1000_i354:
759 case e1000_i350: 740 case e1000_i350:
760 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
761 adapter->ptp_caps.owner = THIS_MODULE; 742 adapter->ptp_caps.owner = THIS_MODULE;
@@ -844,6 +825,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
844 switch (adapter->hw.mac.type) { 825 switch (adapter->hw.mac.type) {
845 case e1000_82576: 826 case e1000_82576:
846 case e1000_82580: 827 case e1000_82580:
828 case e1000_i354:
847 case e1000_i350: 829 case e1000_i350:
848 cancel_delayed_work_sync(&adapter->ptp_overflow_work); 830 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
849 break; 831 break;
@@ -888,6 +870,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
888 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); 870 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
889 break; 871 break;
890 case e1000_82580: 872 case e1000_82580:
873 case e1000_i354:
891 case e1000_i350: 874 case e1000_i350:
892 case e1000_i210: 875 case e1000_i210:
893 case e1000_i211: 876 case e1000_i211:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d60cd4393415..93eb7ee06d3e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
116 else 116 else
117 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 117 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
118 if (test_bit(vid, adapter->active_vlans)) 118 if (test_bit(vid, adapter->active_vlans))
119 __vlan_hwaccel_put_tag(skb, vid); 119 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
120 } 120 }
121 121
122 napi_gro_receive(&adapter->rx_ring->napi, skb); 122 napi_gro_receive(&adapter->rx_ring->napi, skb);
@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
447 447
448 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 448 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
449 &tx_ring->dma, GFP_KERNEL); 449 &tx_ring->dma, GFP_KERNEL);
450
451 if (!tx_ring->desc) 450 if (!tx_ring->desc)
452 goto err; 451 goto err;
453 452
@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
488 487
489 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 488 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
490 &rx_ring->dma, GFP_KERNEL); 489 &rx_ring->dma, GFP_KERNEL);
491
492 if (!rx_ring->desc) 490 if (!rx_ring->desc)
493 goto err; 491 goto err;
494 492
@@ -1232,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1232 e1000_rlpml_set_vf(hw, max_frame_size); 1230 e1000_rlpml_set_vf(hw, max_frame_size);
1233} 1231}
1234 1232
1235static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1233static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
1234 __be16 proto, u16 vid)
1236{ 1235{
1237 struct igbvf_adapter *adapter = netdev_priv(netdev); 1236 struct igbvf_adapter *adapter = netdev_priv(netdev);
1238 struct e1000_hw *hw = &adapter->hw; 1237 struct e1000_hw *hw = &adapter->hw;
@@ -1245,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1245 return 0; 1244 return 0;
1246} 1245}
1247 1246
1248static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1247static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
1248 __be16 proto, u16 vid)
1249{ 1249{
1250 struct igbvf_adapter *adapter = netdev_priv(netdev); 1250 struct igbvf_adapter *adapter = netdev_priv(netdev);
1251 struct e1000_hw *hw = &adapter->hw; 1251 struct e1000_hw *hw = &adapter->hw;
@@ -1264,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1264 u16 vid; 1264 u16 vid;
1265 1265
1266 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1266 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1267 igbvf_vlan_rx_add_vid(adapter->netdev, vid); 1267 igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
1268} 1268}
1269 1269
1270/** 1270/**
@@ -2724,9 +2724,9 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2724 NETIF_F_RXCSUM; 2724 NETIF_F_RXCSUM;
2725 2725
2726 netdev->features = netdev->hw_features | 2726 netdev->features = netdev->hw_features |
2727 NETIF_F_HW_VLAN_TX | 2727 NETIF_F_HW_VLAN_CTAG_TX |
2728 NETIF_F_HW_VLAN_RX | 2728 NETIF_F_HW_VLAN_CTAG_RX |
2729 NETIF_F_HW_VLAN_FILTER; 2729 NETIF_F_HW_VLAN_CTAG_FILTER;
2730 2730
2731 if (pci_using_dac) 2731 if (pci_using_dac)
2732 netdev->features |= NETIF_F_HIGHDMA; 2732 netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index b5f94abe3cff..fce3e92f9d11 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,10 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
101 101
102static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); 102static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
103static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); 103static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
104static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 104static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
105static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 105 __be16 proto, u16 vid);
106static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
107 __be16 proto, u16 vid);
106static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 108static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
107 109
108#ifdef CONFIG_NET_POLL_CONTROLLER 110#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -332,8 +334,8 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
332 * Tx VLAN insertion does not work per HW design when Rx stripping is 334 * Tx VLAN insertion does not work per HW design when Rx stripping is
333 * disabled. 335 * disabled.
334 */ 336 */
335 if (!(features & NETIF_F_HW_VLAN_RX)) 337 if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
336 features &= ~NETIF_F_HW_VLAN_TX; 338 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
337 339
338 return features; 340 return features;
339} 341}
@@ -344,7 +346,7 @@ ixgb_set_features(struct net_device *netdev, netdev_features_t features)
344 struct ixgb_adapter *adapter = netdev_priv(netdev); 346 struct ixgb_adapter *adapter = netdev_priv(netdev);
345 netdev_features_t changed = features ^ netdev->features; 347 netdev_features_t changed = features ^ netdev->features;
346 348
347 if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX))) 349 if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
348 return 0; 350 return 0;
349 351
350 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 352 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
@@ -479,10 +481,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
479 netdev->hw_features = NETIF_F_SG | 481 netdev->hw_features = NETIF_F_SG |
480 NETIF_F_TSO | 482 NETIF_F_TSO |
481 NETIF_F_HW_CSUM | 483 NETIF_F_HW_CSUM |
482 NETIF_F_HW_VLAN_TX | 484 NETIF_F_HW_VLAN_CTAG_TX |
483 NETIF_F_HW_VLAN_RX; 485 NETIF_F_HW_VLAN_CTAG_RX;
484 netdev->features = netdev->hw_features | 486 netdev->features = netdev->hw_features |
485 NETIF_F_HW_VLAN_FILTER; 487 NETIF_F_HW_VLAN_CTAG_FILTER;
486 netdev->hw_features |= NETIF_F_RXCSUM; 488 netdev->hw_features |= NETIF_F_RXCSUM;
487 489
488 if (pci_using_dac) { 490 if (pci_using_dac) {
@@ -717,14 +719,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
717 txdr->size = ALIGN(txdr->size, 4096); 719 txdr->size = ALIGN(txdr->size, 4096);
718 720
719 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 721 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
720 GFP_KERNEL); 722 GFP_KERNEL | __GFP_ZERO);
721 if (!txdr->desc) { 723 if (!txdr->desc) {
722 vfree(txdr->buffer_info); 724 vfree(txdr->buffer_info);
723 netif_err(adapter, probe, adapter->netdev,
724 "Unable to allocate transmit descriptor memory\n");
725 return -ENOMEM; 725 return -ENOMEM;
726 } 726 }
727 memset(txdr->desc, 0, txdr->size);
728 727
729 txdr->next_to_use = 0; 728 txdr->next_to_use = 0;
730 txdr->next_to_clean = 0; 729 txdr->next_to_clean = 0;
@@ -807,8 +806,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
807 806
808 if (!rxdr->desc) { 807 if (!rxdr->desc) {
809 vfree(rxdr->buffer_info); 808 vfree(rxdr->buffer_info);
810 netif_err(adapter, probe, adapter->netdev,
811 "Unable to allocate receive descriptors\n");
812 return -ENOMEM; 809 return -ENOMEM;
813 } 810 }
814 memset(rxdr->desc, 0, rxdr->size); 811 memset(rxdr->desc, 0, rxdr->size);
@@ -1145,7 +1142,7 @@ ixgb_set_multi(struct net_device *netdev)
1145 } 1142 }
1146 1143
1147alloc_failed: 1144alloc_failed:
1148 if (netdev->features & NETIF_F_HW_VLAN_RX) 1145 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1149 ixgb_vlan_strip_enable(adapter); 1146 ixgb_vlan_strip_enable(adapter);
1150 else 1147 else
1151 ixgb_vlan_strip_disable(adapter); 1148 ixgb_vlan_strip_disable(adapter);
@@ -2085,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
2085 2082
2086 skb->protocol = eth_type_trans(skb, netdev); 2083 skb->protocol = eth_type_trans(skb, netdev);
2087 if (status & IXGB_RX_DESC_STATUS_VP) 2084 if (status & IXGB_RX_DESC_STATUS_VP)
2088 __vlan_hwaccel_put_tag(skb, 2085 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2089 le16_to_cpu(rx_desc->special)); 2086 le16_to_cpu(rx_desc->special));
2090 2087
2091 netif_receive_skb(skb); 2088 netif_receive_skb(skb);
2092 2089
@@ -2214,7 +2211,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2214} 2211}
2215 2212
2216static int 2213static int
2217ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2214ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2218{ 2215{
2219 struct ixgb_adapter *adapter = netdev_priv(netdev); 2216 struct ixgb_adapter *adapter = netdev_priv(netdev);
2220 u32 vfta, index; 2217 u32 vfta, index;
@@ -2231,7 +2228,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2231} 2228}
2232 2229
2233static int 2230static int
2234ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2231ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2235{ 2232{
2236 struct ixgb_adapter *adapter = netdev_priv(netdev); 2233 struct ixgb_adapter *adapter = netdev_priv(netdev);
2237 u32 vfta, index; 2234 u32 vfta, index;
@@ -2253,7 +2250,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2253 u16 vid; 2250 u16 vid;
2254 2251
2255 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2252 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2256 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2253 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2257} 2254}
2258 2255
2259#ifdef CONFIG_NET_POLL_CONTROLLER 2256#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8e10cff7a89..ca932387a80f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -740,6 +740,11 @@ extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
740extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); 740extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
741extern void ixgbe_dbg_init(void); 741extern void ixgbe_dbg_init(void);
742extern void ixgbe_dbg_exit(void); 742extern void ixgbe_dbg_exit(void);
743#else
744static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
745static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
746static inline void ixgbe_dbg_init(void) {}
747static inline void ixgbe_dbg_exit(void) {}
743#endif /* CONFIG_DEBUG_FS */ 748#endif /* CONFIG_DEBUG_FS */
744static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) 749static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
745{ 750{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d0113fc97b6f..4a5bfb6b3af0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1305,6 +1305,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1305 .release_swfw_sync = &ixgbe_release_swfw_sync, 1305 .release_swfw_sync = &ixgbe_release_swfw_sync,
1306 .get_thermal_sensor_data = NULL, 1306 .get_thermal_sensor_data = NULL,
1307 .init_thermal_sensor_thresh = NULL, 1307 .init_thermal_sensor_thresh = NULL,
1308 .mng_fw_enabled = NULL,
1308}; 1309};
1309 1310
1310static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1311static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 203a00c24330..0b82d38bc97d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -59,12 +59,34 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
59 bool autoneg_wait_to_complete); 59 bool autoneg_wait_to_complete);
60static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 60static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
61 61
62static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
63{
64 u32 fwsm, manc, factps;
65
66 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
67 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
68 return false;
69
70 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
71 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
72 return false;
73
74 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
75 if (factps & IXGBE_FACTPS_MNGCG)
76 return false;
77
78 return true;
79}
80
62static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 81static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
63{ 82{
64 struct ixgbe_mac_info *mac = &hw->mac; 83 struct ixgbe_mac_info *mac = &hw->mac;
65 84
66 /* enable the laser control functions for SFP+ fiber */ 85 /* enable the laser control functions for SFP+ fiber
67 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { 86 * and MNG not enabled
87 */
88 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
89 !hw->mng_fw_enabled) {
68 mac->ops.disable_tx_laser = 90 mac->ops.disable_tx_laser =
69 &ixgbe_disable_tx_laser_multispeed_fiber; 91 &ixgbe_disable_tx_laser_multispeed_fiber;
70 mac->ops.enable_tx_laser = 92 mac->ops.enable_tx_laser =
@@ -145,9 +167,9 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
145 } 167 }
146 168
147 /* Restart DSP and set SFI mode */ 169 /* Restart DSP and set SFI mode */
148 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, 170 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
149 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL)); 171 IXGBE_AUTOC_LMS_10G_SERIAL));
150 172 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
151 ret_val = ixgbe_reset_pipeline_82599(hw); 173 ret_val = ixgbe_reset_pipeline_82599(hw);
152 174
153 if (got_lock) { 175 if (got_lock) {
@@ -244,6 +266,8 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
244 /* Determine 1G link capabilities off of SFP+ type */ 266 /* Determine 1G link capabilities off of SFP+ type */
245 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 267 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
246 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 268 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
269 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
270 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
247 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 271 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
248 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { 272 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
249 *speed = IXGBE_LINK_SPEED_1GB_FULL; 273 *speed = IXGBE_LINK_SPEED_1GB_FULL;
@@ -563,7 +587,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
563 return status; 587 return status;
564 588
565 /* Flap the tx laser if it has not already been done */ 589 /* Flap the tx laser if it has not already been done */
566 hw->mac.ops.flap_tx_laser(hw); 590 if (hw->mac.ops.flap_tx_laser)
591 hw->mac.ops.flap_tx_laser(hw);
567 592
568 /* 593 /*
569 * Wait for the controller to acquire link. Per IEEE 802.3ap, 594 * Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -615,7 +640,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
615 return status; 640 return status;
616 641
617 /* Flap the tx laser if it has not already been done */ 642 /* Flap the tx laser if it has not already been done */
618 hw->mac.ops.flap_tx_laser(hw); 643 if (hw->mac.ops.flap_tx_laser)
644 hw->mac.ops.flap_tx_laser(hw);
619 645
620 /* Wait for the link partner to also set speed */ 646 /* Wait for the link partner to also set speed */
621 msleep(100); 647 msleep(100);
@@ -777,12 +803,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
777 bool autoneg_wait_to_complete) 803 bool autoneg_wait_to_complete)
778{ 804{
779 s32 status = 0; 805 s32 status = 0;
780 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 806 u32 autoc, pma_pmd_1g, link_mode, start_autoc;
781 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 807 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
782 u32 start_autoc = autoc;
783 u32 orig_autoc = 0; 808 u32 orig_autoc = 0;
784 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
785 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
786 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 809 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
787 u32 links_reg; 810 u32 links_reg;
788 u32 i; 811 u32 i;
@@ -805,9 +828,14 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
805 828
806 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 829 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
807 if (hw->mac.orig_link_settings_stored) 830 if (hw->mac.orig_link_settings_stored)
808 orig_autoc = hw->mac.orig_autoc; 831 autoc = hw->mac.orig_autoc;
809 else 832 else
810 orig_autoc = autoc; 833 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
834
835 orig_autoc = autoc;
836 start_autoc = hw->mac.cached_autoc;
837 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
838 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
811 839
812 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 840 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
813 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 841 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
@@ -861,6 +889,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
861 889
862 /* Restart link */ 890 /* Restart link */
863 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 891 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
892 hw->mac.cached_autoc = autoc;
864 ixgbe_reset_pipeline_82599(hw); 893 ixgbe_reset_pipeline_82599(hw);
865 894
866 if (got_lock) 895 if (got_lock)
@@ -932,7 +961,8 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
932{ 961{
933 ixgbe_link_speed link_speed; 962 ixgbe_link_speed link_speed;
934 s32 status; 963 s32 status;
935 u32 ctrl, i, autoc, autoc2; 964 u32 ctrl, i, autoc2;
965 u32 curr_lms;
936 bool link_up = false; 966 bool link_up = false;
937 967
938 /* Call adapter stop to disable tx/rx and clear interrupts */ 968 /* Call adapter stop to disable tx/rx and clear interrupts */
@@ -964,6 +994,13 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
964 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 994 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
965 hw->phy.ops.reset(hw); 995 hw->phy.ops.reset(hw);
966 996
997 /* remember AUTOC from before we reset */
998 if (hw->mac.cached_autoc)
999 curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
1000 else
1001 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
1002 IXGBE_AUTOC_LMS_MASK;
1003
967mac_reset_top: 1004mac_reset_top:
968 /* 1005 /*
969 * Issue global reset to the MAC. Needs to be SW reset if link is up. 1006 * Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -1012,14 +1049,35 @@ mac_reset_top:
1012 * stored off yet. Otherwise restore the stored original 1049 * stored off yet. Otherwise restore the stored original
1013 * values since the reset operation sets back to defaults. 1050 * values since the reset operation sets back to defaults.
1014 */ 1051 */
1015 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1052 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1016 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1053 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1054
1055 /* Enable link if disabled in NVM */
1056 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1057 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1058 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1059 IXGBE_WRITE_FLUSH(hw);
1060 }
1061
1017 if (hw->mac.orig_link_settings_stored == false) { 1062 if (hw->mac.orig_link_settings_stored == false) {
1018 hw->mac.orig_autoc = autoc; 1063 hw->mac.orig_autoc = hw->mac.cached_autoc;
1019 hw->mac.orig_autoc2 = autoc2; 1064 hw->mac.orig_autoc2 = autoc2;
1020 hw->mac.orig_link_settings_stored = true; 1065 hw->mac.orig_link_settings_stored = true;
1021 } else { 1066 } else {
1022 if (autoc != hw->mac.orig_autoc) { 1067
1068 /* If MNG FW is running on a multi-speed device that
1069 * doesn't autoneg with out driver support we need to
1070 * leave LMS in the state it was before we MAC reset.
1071 * Likewise if we support WoL we don't want change the
1072 * LMS state either.
1073 */
1074 if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
1075 hw->wol_enabled)
1076 hw->mac.orig_autoc =
1077 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1078 curr_lms;
1079
1080 if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
1023 /* Need SW/FW semaphore around AUTOC writes if LESM is 1081 /* Need SW/FW semaphore around AUTOC writes if LESM is
1024 * on, likewise reset_pipeline requires us to hold 1082 * on, likewise reset_pipeline requires us to hold
1025 * this lock as it also writes to AUTOC. 1083 * this lock as it also writes to AUTOC.
@@ -1035,6 +1093,7 @@ mac_reset_top:
1035 } 1093 }
1036 1094
1037 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 1095 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1096 hw->mac.cached_autoc = hw->mac.orig_autoc;
1038 ixgbe_reset_pipeline_82599(hw); 1097 ixgbe_reset_pipeline_82599(hw);
1039 1098
1040 if (got_lock) 1099 if (got_lock)
@@ -2135,10 +2194,19 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2135 **/ 2194 **/
2136s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) 2195s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2137{ 2196{
2138 s32 i, autoc_reg, ret_val; 2197 s32 ret_val;
2139 s32 anlp1_reg = 0; 2198 u32 anlp1_reg = 0;
2199 u32 i, autoc_reg, autoc2_reg;
2200
2201 /* Enable link if disabled in NVM */
2202 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2203 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2204 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2205 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2206 IXGBE_WRITE_FLUSH(hw);
2207 }
2140 2208
2141 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2209 autoc_reg = hw->mac.cached_autoc;
2142 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2210 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2143 2211
2144 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ 2212 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
@@ -2216,7 +2284,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2216 .release_swfw_sync = &ixgbe_release_swfw_sync, 2284 .release_swfw_sync = &ixgbe_release_swfw_sync,
2217 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, 2285 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2218 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, 2286 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2219 2287 .mng_fw_enabled = &ixgbe_mng_enabled,
2220}; 2288};
2221 2289
2222static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2290static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 99e472ebaa75..9bcdeb89af5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -592,6 +592,36 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
592 return 0; 592 return 0;
593} 593}
594 594
595enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
596{
597 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
598 case IXGBE_PCI_LINK_WIDTH_1:
599 return ixgbe_bus_width_pcie_x1;
600 case IXGBE_PCI_LINK_WIDTH_2:
601 return ixgbe_bus_width_pcie_x2;
602 case IXGBE_PCI_LINK_WIDTH_4:
603 return ixgbe_bus_width_pcie_x4;
604 case IXGBE_PCI_LINK_WIDTH_8:
605 return ixgbe_bus_width_pcie_x8;
606 default:
607 return ixgbe_bus_width_unknown;
608 }
609}
610
611enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
612{
613 switch (link_status & IXGBE_PCI_LINK_SPEED) {
614 case IXGBE_PCI_LINK_SPEED_2500:
615 return ixgbe_bus_speed_2500;
616 case IXGBE_PCI_LINK_SPEED_5000:
617 return ixgbe_bus_speed_5000;
618 case IXGBE_PCI_LINK_SPEED_8000:
619 return ixgbe_bus_speed_8000;
620 default:
621 return ixgbe_bus_speed_unknown;
622 }
623}
624
595/** 625/**
596 * ixgbe_get_bus_info_generic - Generic set PCI bus info 626 * ixgbe_get_bus_info_generic - Generic set PCI bus info
597 * @hw: pointer to hardware structure 627 * @hw: pointer to hardware structure
@@ -610,35 +640,8 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
610 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, 640 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
611 &link_status); 641 &link_status);
612 642
613 switch (link_status & IXGBE_PCI_LINK_WIDTH) { 643 hw->bus.width = ixgbe_convert_bus_width(link_status);
614 case IXGBE_PCI_LINK_WIDTH_1: 644 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
615 hw->bus.width = ixgbe_bus_width_pcie_x1;
616 break;
617 case IXGBE_PCI_LINK_WIDTH_2:
618 hw->bus.width = ixgbe_bus_width_pcie_x2;
619 break;
620 case IXGBE_PCI_LINK_WIDTH_4:
621 hw->bus.width = ixgbe_bus_width_pcie_x4;
622 break;
623 case IXGBE_PCI_LINK_WIDTH_8:
624 hw->bus.width = ixgbe_bus_width_pcie_x8;
625 break;
626 default:
627 hw->bus.width = ixgbe_bus_width_unknown;
628 break;
629 }
630
631 switch (link_status & IXGBE_PCI_LINK_SPEED) {
632 case IXGBE_PCI_LINK_SPEED_2500:
633 hw->bus.speed = ixgbe_bus_speed_2500;
634 break;
635 case IXGBE_PCI_LINK_SPEED_5000:
636 hw->bus.speed = ixgbe_bus_speed_5000;
637 break;
638 default:
639 hw->bus.speed = ixgbe_bus_speed_unknown;
640 break;
641 }
642 645
643 mac->ops.set_lan_id(hw); 646 mac->ops.set_lan_id(hw);
644 647
@@ -1125,7 +1128,7 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1125 } 1128 }
1126 1129
1127 for (i = 0; i < words; i++) { 1130 for (i = 0; i < words; i++) {
1128 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + 1131 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1129 IXGBE_EEPROM_RW_REG_START; 1132 IXGBE_EEPROM_RW_REG_START;
1130 1133
1131 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1134 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index bc3948ead6e0..22eee38868f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -40,6 +40,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
40s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 40s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
41 u32 pba_num_size); 41 u32 pba_num_size);
42s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 42s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
43enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
44enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
43s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 45s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
44void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); 46void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
45s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); 47s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c3f1afd86906..d3754722adb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -231,6 +231,10 @@ static int ixgbe_get_settings(struct net_device *netdev,
231 case ixgbe_sfp_type_lr: 231 case ixgbe_sfp_type_lr:
232 case ixgbe_sfp_type_srlr_core0: 232 case ixgbe_sfp_type_srlr_core0:
233 case ixgbe_sfp_type_srlr_core1: 233 case ixgbe_sfp_type_srlr_core1:
234 case ixgbe_sfp_type_1g_sx_core0:
235 case ixgbe_sfp_type_1g_sx_core1:
236 case ixgbe_sfp_type_1g_lx_core0:
237 case ixgbe_sfp_type_1g_lx_core1:
234 ecmd->supported |= SUPPORTED_FIBRE; 238 ecmd->supported |= SUPPORTED_FIBRE;
235 ecmd->advertising |= ADVERTISED_FIBRE; 239 ecmd->advertising |= ADVERTISED_FIBRE;
236 ecmd->port = PORT_FIBRE; 240 ecmd->port = PORT_FIBRE;
@@ -246,12 +250,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
246 ecmd->advertising |= ADVERTISED_TP; 250 ecmd->advertising |= ADVERTISED_TP;
247 ecmd->port = PORT_TP; 251 ecmd->port = PORT_TP;
248 break; 252 break;
249 case ixgbe_sfp_type_1g_sx_core0:
250 case ixgbe_sfp_type_1g_sx_core1:
251 ecmd->supported |= SUPPORTED_FIBRE;
252 ecmd->advertising |= ADVERTISED_FIBRE;
253 ecmd->port = PORT_FIBRE;
254 break;
255 case ixgbe_sfp_type_unknown: 253 case ixgbe_sfp_type_unknown:
256 default: 254 default:
257 ecmd->supported |= SUPPORTED_FIBRE; 255 ecmd->supported |= SUPPORTED_FIBRE;
@@ -442,7 +440,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
442 440
443 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 441 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
444 442
445 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 443 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
444 hw->device_id;
446 445
447 /* General Registers */ 446 /* General Registers */
448 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 447 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -1611,16 +1610,9 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1611 struct ixgbe_hw *hw = &adapter->hw; 1610 struct ixgbe_hw *hw = &adapter->hw;
1612 u32 reg_data; 1611 u32 reg_data;
1613 1612
1614 /* X540 needs to set the MACC.FLU bit to force link up */
1615 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1616 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1617 reg_data |= IXGBE_MACC_FLU;
1618 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1619 }
1620 1613
1621 /* right now we only support MAC loopback in the driver */
1622 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1623 /* Setup MAC loopback */ 1614 /* Setup MAC loopback */
1615 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1624 reg_data |= IXGBE_HLREG0_LPBK; 1616 reg_data |= IXGBE_HLREG0_LPBK;
1625 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); 1617 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1626 1618
@@ -1628,10 +1620,19 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1628 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; 1620 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1629 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); 1621 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1630 1622
1631 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1623 /* X540 needs to set the MACC.FLU bit to force link up */
1632 reg_data &= ~IXGBE_AUTOC_LMS_MASK; 1624 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1633 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1625 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); 1626 reg_data |= IXGBE_MACC_FLU;
1627 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1628 } else {
1629 if (hw->mac.orig_autoc) {
1630 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1631 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1632 } else {
1633 return 10;
1634 }
1635 }
1635 IXGBE_WRITE_FLUSH(hw); 1636 IXGBE_WRITE_FLUSH(hw);
1636 usleep_range(10000, 20000); 1637 usleep_range(10000, 20000);
1637 1638
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 79f4a26ea6cc..d30fbdd81fca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
63static char ixgbe_default_device_descr[] = 63static char ixgbe_default_device_descr[] =
64 "Intel(R) 10 Gigabit Network Connection"; 64 "Intel(R) 10 Gigabit Network Connection";
65#endif 65#endif
66#define DRV_VERSION "3.11.33-k" 66#define DRV_VERSION "3.13.10-k"
67const char ixgbe_driver_version[] = DRV_VERSION; 67const char ixgbe_driver_version[] = DRV_VERSION;
68static const char ixgbe_copyright[] = 68static const char ixgbe_copyright[] =
69 "Copyright (c) 1999-2013 Intel Corporation."; 69 "Copyright (c) 1999-2013 Intel Corporation.";
@@ -149,6 +149,52 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
149MODULE_LICENSE("GPL"); 149MODULE_LICENSE("GPL");
150MODULE_VERSION(DRV_VERSION); 150MODULE_VERSION(DRV_VERSION);
151 151
152static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
153 u32 reg, u16 *value)
154{
155 int pos = 0;
156 struct pci_dev *parent_dev;
157 struct pci_bus *parent_bus;
158
159 parent_bus = adapter->pdev->bus->parent;
160 if (!parent_bus)
161 return -1;
162
163 parent_dev = parent_bus->self;
164 if (!parent_dev)
165 return -1;
166
167 pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
168 if (!pos)
169 return -1;
170
171 pci_read_config_word(parent_dev, pos + reg, value);
172 return 0;
173}
174
175static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
176{
177 struct ixgbe_hw *hw = &adapter->hw;
178 u16 link_status = 0;
179 int err;
180
181 hw->bus.type = ixgbe_bus_type_pci_express;
182
183 /* Get the negotiated link width and speed from PCI config space of the
184 * parent, as this device is behind a switch
185 */
186 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
187
188 /* assume caller will handle error case */
189 if (err)
190 return err;
191
192 hw->bus.width = ixgbe_convert_bus_width(link_status);
193 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
194
195 return 0;
196}
197
152static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) 198static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
153{ 199{
154 if (!test_bit(__IXGBE_DOWN, &adapter->state) && 200 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -1337,7 +1383,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1337 return hdr.network - data; 1383 return hdr.network - data;
1338 1384
1339 /* record next protocol if header is present */ 1385 /* record next protocol if header is present */
1340 if (!hdr.ipv4->frag_off) 1386 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
1341 nexthdr = hdr.ipv4->protocol; 1387 nexthdr = hdr.ipv4->protocol;
1342 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 1388 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1343 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 1389 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
@@ -1442,10 +1488,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1442 1488
1443 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 1489 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1444 1490
1445 if ((dev->features & NETIF_F_HW_VLAN_RX) && 1491 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1446 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1492 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1447 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1493 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1448 __vlan_hwaccel_put_tag(skb, vid); 1494 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1449 } 1495 }
1450 1496
1451 skb_record_rx_queue(skb, rx_ring->queue_index); 1497 skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -2049,6 +2095,9 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2049 */ 2095 */
2050 /* what was last interrupt timeslice? */ 2096 /* what was last interrupt timeslice? */
2051 timepassed_us = q_vector->itr >> 2; 2097 timepassed_us = q_vector->itr >> 2;
2098 if (timepassed_us == 0)
2099 return;
2100
2052 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 2101 bytes_perint = bytes / timepassed_us; /* bytes/usec */
2053 2102
2054 switch (itr_setting) { 2103 switch (itr_setting) {
@@ -2405,6 +2454,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
2405 * with the write to EICR. 2454 * with the write to EICR.
2406 */ 2455 */
2407 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 2456 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2457
2458 /* The lower 16bits of the EICR register are for the queue interrupts
2459 * which should be masked here in order to not accidently clear them if
2460 * the bits are high when ixgbe_msix_other is called. There is a race
2461 * condition otherwise which results in possible performance loss
2462 * especially if the ixgbe_msix_other interrupt is triggering
2463 * consistently (as it would when PPS is turned on for the X540 device)
2464 */
2465 eicr &= 0xFFFF0000;
2466
2408 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 2467 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2409 2468
2410 if (eicr & IXGBE_EICR_LSC) 2469 if (eicr & IXGBE_EICR_LSC)
@@ -3421,7 +3480,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3421 hw->mac.ops.enable_rx_dma(hw, rxctrl); 3480 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3422} 3481}
3423 3482
3424static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 3483static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3484 __be16 proto, u16 vid)
3425{ 3485{
3426 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3486 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3427 struct ixgbe_hw *hw = &adapter->hw; 3487 struct ixgbe_hw *hw = &adapter->hw;
@@ -3433,7 +3493,8 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3433 return 0; 3493 return 0;
3434} 3494}
3435 3495
3436static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 3496static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3497 __be16 proto, u16 vid)
3437{ 3498{
3438 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3499 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3439 struct ixgbe_hw *hw = &adapter->hw; 3500 struct ixgbe_hw *hw = &adapter->hw;
@@ -3538,10 +3599,10 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3538{ 3599{
3539 u16 vid; 3600 u16 vid;
3540 3601
3541 ixgbe_vlan_rx_add_vid(adapter->netdev, 0); 3602 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
3542 3603
3543 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 3604 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3544 ixgbe_vlan_rx_add_vid(adapter->netdev, vid); 3605 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
3545} 3606}
3546 3607
3547/** 3608/**
@@ -3676,7 +3737,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3676 3737
3677 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3738 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3678 3739
3679 if (netdev->features & NETIF_F_HW_VLAN_RX) 3740 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3680 ixgbe_vlan_strip_enable(adapter); 3741 ixgbe_vlan_strip_enable(adapter);
3681 else 3742 else
3682 ixgbe_vlan_strip_disable(adapter); 3743 ixgbe_vlan_strip_disable(adapter);
@@ -5077,14 +5138,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5077 5138
5078 netif_device_detach(netdev); 5139 netif_device_detach(netdev);
5079 5140
5141 rtnl_lock();
5080 if (netif_running(netdev)) { 5142 if (netif_running(netdev)) {
5081 rtnl_lock();
5082 ixgbe_down(adapter); 5143 ixgbe_down(adapter);
5083 ixgbe_free_irq(adapter); 5144 ixgbe_free_irq(adapter);
5084 ixgbe_free_all_tx_resources(adapter); 5145 ixgbe_free_all_tx_resources(adapter);
5085 ixgbe_free_all_rx_resources(adapter); 5146 ixgbe_free_all_rx_resources(adapter);
5086 rtnl_unlock();
5087 } 5147 }
5148 rtnl_unlock();
5088 5149
5089 ixgbe_clear_interrupt_scheme(adapter); 5150 ixgbe_clear_interrupt_scheme(adapter);
5090 5151
@@ -6425,9 +6486,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6425 struct ixgbe_tx_buffer *first; 6486 struct ixgbe_tx_buffer *first;
6426 int tso; 6487 int tso;
6427 u32 tx_flags = 0; 6488 u32 tx_flags = 0;
6428#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6429 unsigned short f; 6489 unsigned short f;
6430#endif
6431 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 6490 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6432 __be16 protocol = skb->protocol; 6491 __be16 protocol = skb->protocol;
6433 u8 hdr_len = 0; 6492 u8 hdr_len = 0;
@@ -6439,12 +6498,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6439 * + 1 desc for context descriptor, 6498 * + 1 desc for context descriptor,
6440 * otherwise try next time 6499 * otherwise try next time
6441 */ 6500 */
6442#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6443 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6501 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6444 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6502 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6445#else 6503
6446 count += skb_shinfo(skb)->nr_frags;
6447#endif
6448 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { 6504 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6449 tx_ring->tx_stats.tx_busy++; 6505 tx_ring->tx_stats.tx_busy++;
6450 return NETDEV_TX_BUSY; 6506 return NETDEV_TX_BUSY;
@@ -6983,7 +7039,7 @@ static int ixgbe_set_features(struct net_device *netdev,
6983 break; 7039 break;
6984 } 7040 }
6985 7041
6986 if (features & NETIF_F_HW_VLAN_RX) 7042 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6987 ixgbe_vlan_strip_enable(adapter); 7043 ixgbe_vlan_strip_enable(adapter);
6988 else 7044 else
6989 ixgbe_vlan_strip_disable(adapter); 7045 ixgbe_vlan_strip_disable(adapter);
@@ -7007,7 +7063,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7007 int err; 7063 int err;
7008 7064
7009 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 7065 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7010 return -EOPNOTSUPP; 7066 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7011 7067
7012 /* Hardware does not support aging addresses so if a 7068 /* Hardware does not support aging addresses so if a
7013 * ndm_state is given only allow permanent addresses 7069 * ndm_state is given only allow permanent addresses
@@ -7038,44 +7094,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7038 return err; 7094 return err;
7039} 7095}
7040 7096
7041static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
7042 struct net_device *dev,
7043 const unsigned char *addr)
7044{
7045 struct ixgbe_adapter *adapter = netdev_priv(dev);
7046 int err = -EOPNOTSUPP;
7047
7048 if (ndm->ndm_state & NUD_PERMANENT) {
7049 pr_info("%s: FDB only supports static addresses\n",
7050 ixgbe_driver_name);
7051 return -EINVAL;
7052 }
7053
7054 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
7055 if (is_unicast_ether_addr(addr))
7056 err = dev_uc_del(dev, addr);
7057 else if (is_multicast_ether_addr(addr))
7058 err = dev_mc_del(dev, addr);
7059 else
7060 err = -EINVAL;
7061 }
7062
7063 return err;
7064}
7065
7066static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
7067 struct netlink_callback *cb,
7068 struct net_device *dev,
7069 int idx)
7070{
7071 struct ixgbe_adapter *adapter = netdev_priv(dev);
7072
7073 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7074 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
7075
7076 return idx;
7077}
7078
7079static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7097static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7080 struct nlmsghdr *nlh) 7098 struct nlmsghdr *nlh)
7081{ 7099{
@@ -7171,8 +7189,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7171 .ndo_set_features = ixgbe_set_features, 7189 .ndo_set_features = ixgbe_set_features,
7172 .ndo_fix_features = ixgbe_fix_features, 7190 .ndo_fix_features = ixgbe_fix_features,
7173 .ndo_fdb_add = ixgbe_ndo_fdb_add, 7191 .ndo_fdb_add = ixgbe_ndo_fdb_add,
7174 .ndo_fdb_del = ixgbe_ndo_fdb_del,
7175 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
7176 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, 7192 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
7177 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7193 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7178}; 7194};
@@ -7202,9 +7218,19 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7202 /* only support first port */ 7218 /* only support first port */
7203 if (hw->bus.func != 0) 7219 if (hw->bus.func != 0)
7204 break; 7220 break;
7221 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
7205 case IXGBE_SUBDEV_ID_82599_SFP: 7222 case IXGBE_SUBDEV_ID_82599_SFP:
7206 case IXGBE_SUBDEV_ID_82599_RNDC: 7223 case IXGBE_SUBDEV_ID_82599_RNDC:
7207 case IXGBE_SUBDEV_ID_82599_ECNA_DP: 7224 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
7225 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
7226 is_wol_supported = 1;
7227 break;
7228 }
7229 break;
7230 case IXGBE_DEV_ID_82599EN_SFP:
7231 /* Only this subdevice supports WOL */
7232 switch (subdevice_id) {
7233 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
7208 is_wol_supported = 1; 7234 is_wol_supported = 1;
7209 break; 7235 break;
7210 } 7236 }
@@ -7369,6 +7395,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7369 if (err) 7395 if (err)
7370 goto err_sw_init; 7396 goto err_sw_init;
7371 7397
7398 /* Cache if MNG FW is up so we don't have to read the REG later */
7399 if (hw->mac.ops.mng_fw_enabled)
7400 hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
7401
7372 /* Make it possible the adapter to be woken up via WOL */ 7402 /* Make it possible the adapter to be woken up via WOL */
7373 switch (adapter->hw.mac.type) { 7403 switch (adapter->hw.mac.type) {
7374 case ixgbe_mac_82599EB: 7404 case ixgbe_mac_82599EB:
@@ -7425,9 +7455,9 @@ skip_sriov:
7425 netdev->features = NETIF_F_SG | 7455 netdev->features = NETIF_F_SG |
7426 NETIF_F_IP_CSUM | 7456 NETIF_F_IP_CSUM |
7427 NETIF_F_IPV6_CSUM | 7457 NETIF_F_IPV6_CSUM |
7428 NETIF_F_HW_VLAN_TX | 7458 NETIF_F_HW_VLAN_CTAG_TX |
7429 NETIF_F_HW_VLAN_RX | 7459 NETIF_F_HW_VLAN_CTAG_RX |
7430 NETIF_F_HW_VLAN_FILTER | 7460 NETIF_F_HW_VLAN_CTAG_FILTER |
7431 NETIF_F_TSO | 7461 NETIF_F_TSO |
7432 NETIF_F_TSO6 | 7462 NETIF_F_TSO6 |
7433 NETIF_F_RXHASH | 7463 NETIF_F_RXHASH |
@@ -7521,7 +7551,9 @@ skip_sriov:
7521 /* WOL not supported for all devices */ 7551 /* WOL not supported for all devices */
7522 adapter->wol = 0; 7552 adapter->wol = 0;
7523 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); 7553 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7524 if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) 7554 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
7555 pdev->subsystem_device);
7556 if (hw->wol_enabled)
7525 adapter->wol = IXGBE_WUFC_MAG; 7557 adapter->wol = IXGBE_WUFC_MAG;
7526 7558
7527 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 7559 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
@@ -7532,10 +7564,13 @@ skip_sriov:
7532 7564
7533 /* pick up the PCI bus settings for reporting later */ 7565 /* pick up the PCI bus settings for reporting later */
7534 hw->mac.ops.get_bus_info(hw); 7566 hw->mac.ops.get_bus_info(hw);
7567 if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
7568 ixgbe_get_parent_bus_info(adapter);
7535 7569
7536 /* print bus type/speed/width info */ 7570 /* print bus type/speed/width info */
7537 e_dev_info("(PCI Express:%s:%s) %pM\n", 7571 e_dev_info("(PCI Express:%s:%s) %pM\n",
7538 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : 7572 (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
7573 hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
7539 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : 7574 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
7540 "Unknown"), 7575 "Unknown"),
7541 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : 7576 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
@@ -7615,9 +7650,13 @@ skip_sriov:
7615 e_err(probe, "failed to allocate sysfs resources\n"); 7650 e_err(probe, "failed to allocate sysfs resources\n");
7616#endif /* CONFIG_IXGBE_HWMON */ 7651#endif /* CONFIG_IXGBE_HWMON */
7617 7652
7618#ifdef CONFIG_DEBUG_FS
7619 ixgbe_dbg_adapter_init(adapter); 7653 ixgbe_dbg_adapter_init(adapter);
7620#endif /* CONFIG_DEBUG_FS */ 7654
7655 /* Need link setup for MNG FW, else wait for IXGBE_UP */
7656 if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
7657 hw->mac.ops.setup_link(hw,
7658 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
7659 true);
7621 7660
7622 return 0; 7661 return 0;
7623 7662
@@ -7653,9 +7692,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
7653 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 7692 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7654 struct net_device *netdev = adapter->netdev; 7693 struct net_device *netdev = adapter->netdev;
7655 7694
7656#ifdef CONFIG_DEBUG_FS
7657 ixgbe_dbg_adapter_exit(adapter); 7695 ixgbe_dbg_adapter_exit(adapter);
7658#endif /*CONFIG_DEBUG_FS */
7659 7696
7660 set_bit(__IXGBE_DOWN, &adapter->state); 7697 set_bit(__IXGBE_DOWN, &adapter->state);
7661 cancel_work_sync(&adapter->service_task); 7698 cancel_work_sync(&adapter->service_task);
@@ -7918,15 +7955,11 @@ static int __init ixgbe_init_module(void)
7918 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); 7955 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7919 pr_info("%s\n", ixgbe_copyright); 7956 pr_info("%s\n", ixgbe_copyright);
7920 7957
7921#ifdef CONFIG_DEBUG_FS
7922 ixgbe_dbg_init(); 7958 ixgbe_dbg_init();
7923#endif /* CONFIG_DEBUG_FS */
7924 7959
7925 ret = pci_register_driver(&ixgbe_driver); 7960 ret = pci_register_driver(&ixgbe_driver);
7926 if (ret) { 7961 if (ret) {
7927#ifdef CONFIG_DEBUG_FS
7928 ixgbe_dbg_exit(); 7962 ixgbe_dbg_exit();
7929#endif /* CONFIG_DEBUG_FS */
7930 return ret; 7963 return ret;
7931 } 7964 }
7932 7965
@@ -7952,9 +7985,7 @@ static void __exit ixgbe_exit_module(void)
7952#endif 7985#endif
7953 pci_unregister_driver(&ixgbe_driver); 7986 pci_unregister_driver(&ixgbe_driver);
7954 7987
7955#ifdef CONFIG_DEBUG_FS
7956 ixgbe_dbg_exit(); 7988 ixgbe_dbg_exit();
7957#endif /* CONFIG_DEBUG_FS */
7958 7989
7959 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 7990 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7960} 7991}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 060d2ad2ac96..e5691ccbce9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -956,6 +956,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
956 else 956 else
957 hw->phy.sfp_type = 957 hw->phy.sfp_type =
958 ixgbe_sfp_type_1g_sx_core1; 958 ixgbe_sfp_type_1g_sx_core1;
959 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
960 if (hw->bus.lan_id == 0)
961 hw->phy.sfp_type =
962 ixgbe_sfp_type_1g_lx_core0;
963 else
964 hw->phy.sfp_type =
965 ixgbe_sfp_type_1g_lx_core1;
959 } else { 966 } else {
960 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 967 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
961 } 968 }
@@ -1043,6 +1050,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1043 if (comp_codes_10g == 0 && 1050 if (comp_codes_10g == 0 &&
1044 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1051 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1045 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 1052 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1053 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1054 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1046 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 1055 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1047 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { 1056 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1048 hw->phy.type = ixgbe_phy_sfp_unsupported; 1057 hw->phy.type = ixgbe_phy_sfp_unsupported;
@@ -1058,10 +1067,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1058 1067
1059 hw->mac.ops.get_device_caps(hw, &enforce_sfp); 1068 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1060 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && 1069 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1061 !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || 1070 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1062 (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) || 1071 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1063 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) || 1072 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1064 (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) { 1073 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1074 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1075 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1065 /* Make sure we're a supported PHY type */ 1076 /* Make sure we're a supported PHY type */
1066 if (hw->phy.type == ixgbe_phy_sfp_intel) { 1077 if (hw->phy.type == ixgbe_phy_sfp_intel) {
1067 status = 0; 1078 status = 0;
@@ -1125,10 +1136,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1125 * SR modules 1136 * SR modules
1126 */ 1137 */
1127 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || 1138 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1139 sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1128 sfp_type == ixgbe_sfp_type_1g_cu_core0 || 1140 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1129 sfp_type == ixgbe_sfp_type_1g_sx_core0) 1141 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1130 sfp_type = ixgbe_sfp_type_srlr_core0; 1142 sfp_type = ixgbe_sfp_type_srlr_core0;
1131 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || 1143 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1144 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1132 sfp_type == ixgbe_sfp_type_1g_cu_core1 || 1145 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1133 sfp_type == ixgbe_sfp_type_1g_sx_core1) 1146 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1134 sfp_type = ixgbe_sfp_type_srlr_core1; 1147 sfp_type = ixgbe_sfp_type_srlr_core1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97e33669c0b9..1e7d587c4e57 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -35,7 +35,7 @@
35#include <linux/ip.h> 35#include <linux/ip.h>
36#include <linux/tcp.h> 36#include <linux/tcp.h>
37#include <linux/ipv6.h> 37#include <linux/ipv6.h>
38#ifdef NETIF_F_HW_VLAN_TX 38#ifdef NETIF_F_HW_VLAN_CTAG_TX
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#endif 40#endif
41 41
@@ -661,13 +661,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
661 bool enable = ((event_mask & 0x10000000U) != 0); 661 bool enable = ((event_mask & 0x10000000U) != 0);
662 662
663 if (enable) { 663 if (enable) {
664 eth_random_addr(vf_mac_addr); 664 eth_zero_addr(vf_mac_addr);
665 e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
666 vfn, vf_mac_addr);
667 /*
668 * Store away the VF "permananet" MAC address, it will ask
669 * for it later.
670 */
671 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); 665 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
672 } 666 }
673 667
@@ -688,7 +682,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
688 ixgbe_vf_reset_event(adapter, vf); 682 ixgbe_vf_reset_event(adapter, vf);
689 683
690 /* set vf mac address */ 684 /* set vf mac address */
691 ixgbe_set_vf_mac(adapter, vf, vf_mac); 685 if (!is_zero_ether_addr(vf_mac))
686 ixgbe_set_vf_mac(adapter, vf, vf_mac);
692 687
693 vf_shift = vf % 32; 688 vf_shift = vf % 32;
694 reg_offset = vf / 32; 689 reg_offset = vf / 32;
@@ -729,8 +724,16 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
729 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); 724 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
730 725
731 /* reply to reset with ack and vf mac address */ 726 /* reply to reset with ack and vf mac address */
732 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; 727 msgbuf[0] = IXGBE_VF_RESET;
733 memcpy(addr, vf_mac, ETH_ALEN); 728 if (!is_zero_ether_addr(vf_mac)) {
729 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
730 memcpy(addr, vf_mac, ETH_ALEN);
731 } else {
732 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
733 dev_warn(&adapter->pdev->dev,
734 "VF %d has no MAC address assigned, you may have to assign one manually\n",
735 vf);
736 }
734 737
735 /* 738 /*
736 * Piggyback the multicast filter type so VF can compute the 739 * Piggyback the multicast filter type so VF can compute the
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6652e96c352d..70c6aa3d3f95 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,10 +56,13 @@
56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
59#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
59#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 60#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
61#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 62#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D 63#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
62#define IXGBE_DEV_ID_82599EN_SFP 0x1557 64#define IXGBE_DEV_ID_82599EN_SFP 0x1557
65#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001
63#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 66#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
64#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 67#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
65#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C 68#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
@@ -729,6 +732,13 @@ struct ixgbe_thermal_sensor_data {
729#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ 732#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
730#define IXGBE_LSWFW 0x15014 733#define IXGBE_LSWFW 0x15014
731 734
735/* Management Bit Fields and Masks */
736#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
737
738/* Firmware Semaphore Register */
739#define IXGBE_FWSM_MODE_MASK 0xE
740#define IXGBE_FWSM_FW_MODE_PT 0x4
741
732/* ARC Subsystem registers */ 742/* ARC Subsystem registers */
733#define IXGBE_HICR 0x15F00 743#define IXGBE_HICR 0x15F00
734#define IXGBE_FWSTS 0x15F0C 744#define IXGBE_FWSTS 0x15F0C
@@ -1019,6 +1029,7 @@ struct ixgbe_thermal_sensor_data {
1019#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) 1029#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
1020 1030
1021/* FACTPS */ 1031/* FACTPS */
1032#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */
1022#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ 1033#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
1023 1034
1024/* MHADD Bit Masks */ 1035/* MHADD Bit Masks */
@@ -1582,6 +1593,7 @@ enum {
1582#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1593#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1583#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1594#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1584#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1595#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1596#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
1585 1597
1586#define IXGBE_MACC_FLU 0x00000001 1598#define IXGBE_MACC_FLU 0x00000001
1587#define IXGBE_MACC_FSV_10G 0x00030000 1599#define IXGBE_MACC_FSV_10G 0x00030000
@@ -1827,6 +1839,7 @@ enum {
1827#define IXGBE_PCI_LINK_SPEED 0xF 1839#define IXGBE_PCI_LINK_SPEED 0xF
1828#define IXGBE_PCI_LINK_SPEED_2500 0x1 1840#define IXGBE_PCI_LINK_SPEED_2500 0x1
1829#define IXGBE_PCI_LINK_SPEED_5000 0x2 1841#define IXGBE_PCI_LINK_SPEED_5000 0x2
1842#define IXGBE_PCI_LINK_SPEED_8000 0x3
1830#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E 1843#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
1831#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1844#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
1832#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 1845#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -2600,6 +2613,8 @@ enum ixgbe_sfp_type {
2600 ixgbe_sfp_type_1g_cu_core1 = 10, 2613 ixgbe_sfp_type_1g_cu_core1 = 10,
2601 ixgbe_sfp_type_1g_sx_core0 = 11, 2614 ixgbe_sfp_type_1g_sx_core0 = 11,
2602 ixgbe_sfp_type_1g_sx_core1 = 12, 2615 ixgbe_sfp_type_1g_sx_core1 = 12,
2616 ixgbe_sfp_type_1g_lx_core0 = 13,
2617 ixgbe_sfp_type_1g_lx_core1 = 14,
2603 ixgbe_sfp_type_not_present = 0xFFFE, 2618 ixgbe_sfp_type_not_present = 0xFFFE,
2604 ixgbe_sfp_type_unknown = 0xFFFF 2619 ixgbe_sfp_type_unknown = 0xFFFF
2605}; 2620};
@@ -2650,6 +2665,7 @@ enum ixgbe_bus_speed {
2650 ixgbe_bus_speed_133 = 133, 2665 ixgbe_bus_speed_133 = 133,
2651 ixgbe_bus_speed_2500 = 2500, 2666 ixgbe_bus_speed_2500 = 2500,
2652 ixgbe_bus_speed_5000 = 5000, 2667 ixgbe_bus_speed_5000 = 5000,
2668 ixgbe_bus_speed_8000 = 8000,
2653 ixgbe_bus_speed_reserved 2669 ixgbe_bus_speed_reserved
2654}; 2670};
2655 2671
@@ -2859,6 +2875,7 @@ struct ixgbe_mac_operations {
2859 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 2875 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
2860 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); 2876 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
2861 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); 2877 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
2878 bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
2862}; 2879};
2863 2880
2864struct ixgbe_phy_operations { 2881struct ixgbe_phy_operations {
@@ -2912,6 +2929,7 @@ struct ixgbe_mac_info {
2912 u32 max_tx_queues; 2929 u32 max_tx_queues;
2913 u32 max_rx_queues; 2930 u32 max_rx_queues;
2914 u32 orig_autoc; 2931 u32 orig_autoc;
2932 u32 cached_autoc;
2915 u32 orig_autoc2; 2933 u32 orig_autoc2;
2916 bool orig_link_settings_stored; 2934 bool orig_link_settings_stored;
2917 bool autotry_restart; 2935 bool autotry_restart;
@@ -2986,6 +3004,8 @@ struct ixgbe_hw {
2986 bool adapter_stopped; 3004 bool adapter_stopped;
2987 bool force_full_reset; 3005 bool force_full_reset;
2988 bool allow_unsupported_sfp; 3006 bool allow_unsupported_sfp;
3007 bool mng_fw_enabled;
3008 bool wol_enabled;
2989}; 3009};
2990 3010
2991struct ixgbe_info { 3011struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 66c5e946284e..389324f5929a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -854,6 +854,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
854 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 854 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
855 .get_thermal_sensor_data = NULL, 855 .get_thermal_sensor_data = NULL,
856 .init_thermal_sensor_thresh = NULL, 856 .init_thermal_sensor_thresh = NULL,
857 .mng_fw_enabled = NULL,
857}; 858};
858 859
859static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 860static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fc0af9a3bb35..fff0d9867529 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
44 struct sk_buff *skb; 44 struct sk_buff *skb;
45 dma_addr_t dma; 45 dma_addr_t dma;
46 unsigned long time_stamp; 46 unsigned long time_stamp;
47 union ixgbe_adv_tx_desc *next_to_watch;
47 u16 length; 48 u16 length;
48 u16 next_to_watch;
49 u16 mapped_as_page; 49 u16 mapped_as_page;
50}; 50};
51 51
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2b6cb5ca48ee..1f5166ad6bb5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) } 77 * Class, Class Mask, private data (not used) }
78 */ 78 */
79static struct pci_device_id ixgbevf_pci_tbl[] = { 79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 board_82599_vf}, 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
83 board_X540_vf},
84
85 /* required last entry */ 82 /* required last entry */
86 {0, } 83 {0, }
87}; 84};
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
190 struct ixgbevf_adapter *adapter = q_vector->adapter; 187 struct ixgbevf_adapter *adapter = q_vector->adapter;
191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 188 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
192 struct ixgbevf_tx_buffer *tx_buffer_info; 189 struct ixgbevf_tx_buffer *tx_buffer_info;
193 unsigned int i, eop, count = 0; 190 unsigned int i, count = 0;
194 unsigned int total_bytes = 0, total_packets = 0; 191 unsigned int total_bytes = 0, total_packets = 0;
195 192
196 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
197 return true; 194 return true;
198 195
199 i = tx_ring->next_to_clean; 196 i = tx_ring->next_to_clean;
200 eop = tx_ring->tx_buffer_info[i].next_to_watch; 197 tx_buffer_info = &tx_ring->tx_buffer_info[i];
201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 198 eop_desc = tx_buffer_info->next_to_watch;
202 199
203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 200 do {
204 (count < tx_ring->count)) {
205 bool cleaned = false; 201 bool cleaned = false;
206 rmb(); /* read buffer_info after eop_desc */ 202
207 /* eop could change between read and DD-check */ 203 /* if next_to_watch is not set then there is no work pending */
208 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 204 if (!eop_desc)
209 goto cont_loop; 205 break;
206
207 /* prevent any other reads prior to eop_desc */
208 read_barrier_depends();
209
210 /* if DD is not set pending work has not been completed */
211 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
212 break;
213
214 /* clear next_to_watch to prevent false hangs */
215 tx_buffer_info->next_to_watch = NULL;
216
210 for ( ; !cleaned; count++) { 217 for ( ; !cleaned; count++) {
211 struct sk_buff *skb; 218 struct sk_buff *skb;
212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 219 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
213 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 220 cleaned = (tx_desc == eop_desc);
214 cleaned = (i == eop);
215 skb = tx_buffer_info->skb; 221 skb = tx_buffer_info->skb;
216 222
217 if (cleaned && skb) { 223 if (cleaned && skb) {
@@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
234 i++; 240 i++;
235 if (i == tx_ring->count) 241 if (i == tx_ring->count)
236 i = 0; 242 i = 0;
243
244 tx_buffer_info = &tx_ring->tx_buffer_info[i];
237 } 245 }
238 246
239cont_loop: 247 eop_desc = tx_buffer_info->next_to_watch;
240 eop = tx_ring->tx_buffer_info[i].next_to_watch; 248 } while (count < tx_ring->count);
241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
242 }
243 249
244 tx_ring->next_to_clean = i; 250 tx_ring->next_to_clean = i;
245 251
@@ -285,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
285 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 291 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
286 292
287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 293 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
288 __vlan_hwaccel_put_tag(skb, tag); 294 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
289 295
290 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 296 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
291 napi_gro_receive(&q_vector->napi, skb); 297 napi_gro_receive(&q_vector->napi, skb);
@@ -1173,7 +1179,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1173 } 1179 }
1174} 1180}
1175 1181
1176static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1182static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1183 __be16 proto, u16 vid)
1177{ 1184{
1178 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1185 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1179 struct ixgbe_hw *hw = &adapter->hw; 1186 struct ixgbe_hw *hw = &adapter->hw;
@@ -1198,7 +1205,8 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1198 return err; 1205 return err;
1199} 1206}
1200 1207
1201static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1208static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1209 __be16 proto, u16 vid)
1202{ 1210{
1203 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1211 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1204 struct ixgbe_hw *hw = &adapter->hw; 1212 struct ixgbe_hw *hw = &adapter->hw;
@@ -1221,7 +1229,8 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1221 u16 vid; 1229 u16 vid;
1222 1230
1223 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1231 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1224 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1232 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1233 htons(ETH_P_8021Q), vid);
1225} 1234}
1226 1235
1227static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1236static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
@@ -2046,6 +2055,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2046{ 2055{
2047 struct ixgbe_hw *hw = &adapter->hw; 2056 struct ixgbe_hw *hw = &adapter->hw;
2048 struct pci_dev *pdev = adapter->pdev; 2057 struct pci_dev *pdev = adapter->pdev;
2058 struct net_device *netdev = adapter->netdev;
2049 int err; 2059 int err;
2050 2060
2051 /* PCI config space info */ 2061 /* PCI config space info */
@@ -2065,18 +2075,26 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2065 err = hw->mac.ops.reset_hw(hw); 2075 err = hw->mac.ops.reset_hw(hw);
2066 if (err) { 2076 if (err) {
2067 dev_info(&pdev->dev, 2077 dev_info(&pdev->dev,
2068 "PF still in reset state, assigning new address\n"); 2078 "PF still in reset state. Is the PF interface up?\n");
2069 eth_hw_addr_random(adapter->netdev);
2070 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2071 adapter->netdev->addr_len);
2072 } else { 2079 } else {
2073 err = hw->mac.ops.init_hw(hw); 2080 err = hw->mac.ops.init_hw(hw);
2074 if (err) { 2081 if (err) {
2075 pr_err("init_shared_code failed: %d\n", err); 2082 pr_err("init_shared_code failed: %d\n", err);
2076 goto out; 2083 goto out;
2077 } 2084 }
2078 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2085 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2079 adapter->netdev->addr_len); 2086 if (err)
2087 dev_info(&pdev->dev, "Error reading MAC address\n");
2088 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2089 dev_info(&pdev->dev,
2090 "MAC address not assigned by administrator.\n");
2091 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2092 }
2093
2094 if (!is_valid_ether_addr(netdev->dev_addr)) {
2095 dev_info(&pdev->dev, "Assigning random MAC address\n");
2096 eth_hw_addr_random(netdev);
2097 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2080 } 2098 }
2081 2099
2082 /* lock to protect mailbox accesses */ 2100 /* lock to protect mailbox accesses */
@@ -2425,9 +2443,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2425 &rx_ring->dma, GFP_KERNEL); 2443 &rx_ring->dma, GFP_KERNEL);
2426 2444
2427 if (!rx_ring->desc) { 2445 if (!rx_ring->desc) {
2428 hw_dbg(&adapter->hw,
2429 "Unable to allocate memory for "
2430 "the receive descriptor ring\n");
2431 vfree(rx_ring->rx_buffer_info); 2446 vfree(rx_ring->rx_buffer_info);
2432 rx_ring->rx_buffer_info = NULL; 2447 rx_ring->rx_buffer_info = NULL;
2433 goto alloc_failed; 2448 goto alloc_failed;
@@ -2822,8 +2837,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2822} 2837}
2823 2838
2824static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2839static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2825 struct sk_buff *skb, u32 tx_flags, 2840 struct sk_buff *skb, u32 tx_flags)
2826 unsigned int first)
2827{ 2841{
2828 struct ixgbevf_tx_buffer *tx_buffer_info; 2842 struct ixgbevf_tx_buffer *tx_buffer_info;
2829 unsigned int len; 2843 unsigned int len;
@@ -2848,7 +2862,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2848 size, DMA_TO_DEVICE); 2862 size, DMA_TO_DEVICE);
2849 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2863 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2850 goto dma_error; 2864 goto dma_error;
2851 tx_buffer_info->next_to_watch = i;
2852 2865
2853 len -= size; 2866 len -= size;
2854 total -= size; 2867 total -= size;
@@ -2878,7 +2891,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2878 tx_buffer_info->dma)) 2891 tx_buffer_info->dma))
2879 goto dma_error; 2892 goto dma_error;
2880 tx_buffer_info->mapped_as_page = true; 2893 tx_buffer_info->mapped_as_page = true;
2881 tx_buffer_info->next_to_watch = i;
2882 2894
2883 len -= size; 2895 len -= size;
2884 total -= size; 2896 total -= size;
@@ -2897,8 +2909,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2897 else 2909 else
2898 i = i - 1; 2910 i = i - 1;
2899 tx_ring->tx_buffer_info[i].skb = skb; 2911 tx_ring->tx_buffer_info[i].skb = skb;
2900 tx_ring->tx_buffer_info[first].next_to_watch = i;
2901 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2902 2912
2903 return count; 2913 return count;
2904 2914
@@ -2907,7 +2917,6 @@ dma_error:
2907 2917
2908 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2918 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2909 tx_buffer_info->dma = 0; 2919 tx_buffer_info->dma = 0;
2910 tx_buffer_info->next_to_watch = 0;
2911 count--; 2920 count--;
2912 2921
2913 /* clear timestamp and dma mappings for remaining portion of packet */ 2922 /* clear timestamp and dma mappings for remaining portion of packet */
@@ -2924,7 +2933,8 @@ dma_error:
2924} 2933}
2925 2934
2926static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2935static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2927 int count, u32 paylen, u8 hdr_len) 2936 int count, unsigned int first, u32 paylen,
2937 u8 hdr_len)
2928{ 2938{
2929 union ixgbe_adv_tx_desc *tx_desc = NULL; 2939 union ixgbe_adv_tx_desc *tx_desc = NULL;
2930 struct ixgbevf_tx_buffer *tx_buffer_info; 2940 struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2975,6 +2985,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2975 2985
2976 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2986 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2977 2987
2988 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2989
2990 /* Force memory writes to complete before letting h/w
2991 * know there are new descriptors to fetch. (Only
2992 * applicable for weak-ordered memory model archs,
2993 * such as IA-64).
2994 */
2995 wmb();
2996
2997 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
2978 tx_ring->next_to_use = i; 2998 tx_ring->next_to_use = i;
2979} 2999}
2980 3000
@@ -3066,15 +3086,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3066 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3086 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3067 3087
3068 ixgbevf_tx_queue(tx_ring, tx_flags, 3088 ixgbevf_tx_queue(tx_ring, tx_flags,
3069 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3089 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3070 skb->len, hdr_len); 3090 first, skb->len, hdr_len);
3071 /*
3072 * Force memory writes to complete before letting h/w
3073 * know there are new descriptors to fetch. (Only
3074 * applicable for weak-ordered memory model archs,
3075 * such as IA-64).
3076 */
3077 wmb();
3078 3091
3079 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3092 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3080 3093
@@ -3400,9 +3413,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3400 NETIF_F_RXCSUM; 3413 NETIF_F_RXCSUM;
3401 3414
3402 netdev->features = netdev->hw_features | 3415 netdev->features = netdev->hw_features |
3403 NETIF_F_HW_VLAN_TX | 3416 NETIF_F_HW_VLAN_CTAG_TX |
3404 NETIF_F_HW_VLAN_RX | 3417 NETIF_F_HW_VLAN_CTAG_RX |
3405 NETIF_F_HW_VLAN_FILTER; 3418 NETIF_F_HW_VLAN_CTAG_FILTER;
3406 3419
3407 netdev->vlan_features |= NETIF_F_TSO; 3420 netdev->vlan_features |= NETIF_F_TSO;
3408 netdev->vlan_features |= NETIF_F_TSO6; 3421 netdev->vlan_features |= NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c94557b53df..387b52635bc0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -109,7 +109,12 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
109 if (ret_val) 109 if (ret_val)
110 return ret_val; 110 return ret_val;
111 111
112 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) 112 /* New versions of the PF may NACK the reset return message
113 * to indicate that no MAC address has yet been assigned for
114 * the VF.
115 */
116 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
117 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
113 return IXGBE_ERR_INVALID_MAC_ADDR; 118 return IXGBE_ERR_INVALID_MAC_ADDR;
114 119
115 memcpy(hw->mac.perm_addr, addr, ETH_ALEN); 120 memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 0519afa413d2..070a6f1a0577 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1059 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 1059 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
1060 u16 vid = le16_to_cpu(rxdesc->descwb.vlan); 1060 u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
1061 1061
1062 __vlan_hwaccel_put_tag(skb, vid); 1062 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1063 NET_STAT(jme).rx_bytes += 4; 1063 NET_STAT(jme).rx_bytes += 4;
1064 } 1064 }
1065 jme->jme_rx(skb); 1065 jme->jme_rx(skb);
@@ -3030,8 +3030,8 @@ jme_init_one(struct pci_dev *pdev,
3030 NETIF_F_SG | 3030 NETIF_F_SG |
3031 NETIF_F_TSO | 3031 NETIF_F_TSO |
3032 NETIF_F_TSO6 | 3032 NETIF_F_TSO6 |
3033 NETIF_F_HW_VLAN_TX | 3033 NETIF_F_HW_VLAN_CTAG_TX |
3034 NETIF_F_HW_VLAN_RX; 3034 NETIF_F_HW_VLAN_CTAG_RX;
3035 if (using_dac) 3035 if (using_dac)
3036 netdev->features |= NETIF_F_HIGHDMA; 3036 netdev->features |= NETIF_F_HIGHDMA;
3037 3037
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 434e33c527df..a49e81bdf8e8 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -21,8 +21,8 @@ if NET_VENDOR_MARVELL
21config MV643XX_ETH 21config MV643XX_ETH
22 tristate "Marvell Discovery (643XX) and Orion ethernet support" 22 tristate "Marvell Discovery (643XX) and Orion ethernet support"
23 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET 23 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
24 select INET_LRO
25 select PHYLIB 24 select PHYLIB
25 select MVMDIO
26 ---help--- 26 ---help---
27 This driver supports the gigabit ethernet MACs in the 27 This driver supports the gigabit ethernet MACs in the
28 Marvell Discovery PPC/MIPS chipset family (MV643XX) and 28 Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -39,9 +39,7 @@ config MVMDIO
39 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, 39 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
40 Dove, Armada 370 and Armada XP). 40 Dove, Armada 370 and Armada XP).
41 41
42 For now, this driver is only needed for the MVNETA driver 42 This driver is used by the MV643XX_ETH and MVNETA drivers.
43 (used on Armada 370 and XP), but it could be used in the
44 future by the MV643XX_ETH driver.
45 43
46config MVNETA 44config MVNETA
47 tristate "Marvell Armada 370/XP network interface support" 45 tristate "Marvell Armada 370/XP network interface support"
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 7f63b4aac434..5c4a7765ff0e 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -2,8 +2,8 @@
2# Makefile for the Marvell device drivers. 2# Makefile for the Marvell device drivers.
3# 3#
4 4
5obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
6obj-$(CONFIG_MVMDIO) += mvmdio.o 5obj-$(CONFIG_MVMDIO) += mvmdio.o
6obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
7obj-$(CONFIG_MVNETA) += mvneta.o 7obj-$(CONFIG_MVNETA) += mvneta.o
8obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o 8obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
9obj-$(CONFIG_SKGE) += skge.o 9obj-$(CONFIG_SKGE) += skge.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6562c736a1d8..d0afeea181fb 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -20,6 +20,8 @@
20 * Copyright (C) 2007-2008 Marvell Semiconductor 20 * Copyright (C) 2007-2008 Marvell Semiconductor
21 * Lennert Buytenhek <buytenh@marvell.com> 21 * Lennert Buytenhek <buytenh@marvell.com>
22 * 22 *
23 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
24 *
23 * This program is free software; you can redistribute it and/or 25 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License 26 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version 2 27 * as published by the Free Software Foundation; either version 2
@@ -54,8 +56,8 @@
54#include <linux/phy.h> 56#include <linux/phy.h>
55#include <linux/mv643xx_eth.h> 57#include <linux/mv643xx_eth.h>
56#include <linux/io.h> 58#include <linux/io.h>
59#include <linux/interrupt.h>
57#include <linux/types.h> 60#include <linux/types.h>
58#include <linux/inet_lro.h>
59#include <linux/slab.h> 61#include <linux/slab.h>
60#include <linux/clk.h> 62#include <linux/clk.h>
61 63
@@ -67,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4";
67 * Registers shared between all ports. 69 * Registers shared between all ports.
68 */ 70 */
69#define PHY_ADDR 0x0000 71#define PHY_ADDR 0x0000
70#define SMI_REG 0x0004
71#define SMI_BUSY 0x10000000
72#define SMI_READ_VALID 0x08000000
73#define SMI_OPCODE_READ 0x04000000
74#define SMI_OPCODE_WRITE 0x00000000
75#define ERR_INT_CAUSE 0x0080
76#define ERR_INT_SMI_DONE 0x00000010
77#define ERR_INT_MASK 0x0084
78#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 72#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
79#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 73#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
80#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 74#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
@@ -264,25 +258,6 @@ struct mv643xx_eth_shared_private {
264 void __iomem *base; 258 void __iomem *base;
265 259
266 /* 260 /*
267 * Points at the right SMI instance to use.
268 */
269 struct mv643xx_eth_shared_private *smi;
270
271 /*
272 * Provides access to local SMI interface.
273 */
274 struct mii_bus *smi_bus;
275
276 /*
277 * If we have access to the error interrupt pin (which is
278 * somewhat misnamed as it not only reflects internal errors
279 * but also reflects SMI completion), use that to wait for
280 * SMI access completion instead of polling the SMI busy bit.
281 */
282 int err_interrupt;
283 wait_queue_head_t smi_busy_wait;
284
285 /*
286 * Per-port MBUS window access register value. 261 * Per-port MBUS window access register value.
287 */ 262 */
288 u32 win_protect; 263 u32 win_protect;
@@ -293,7 +268,7 @@ struct mv643xx_eth_shared_private {
293 int extended_rx_coal_limit; 268 int extended_rx_coal_limit;
294 int tx_bw_control; 269 int tx_bw_control;
295 int tx_csum_limit; 270 int tx_csum_limit;
296 271 struct clk *clk;
297}; 272};
298 273
299#define TX_BW_CONTROL_ABSENT 0 274#define TX_BW_CONTROL_ABSENT 0
@@ -341,12 +316,6 @@ struct mib_counters {
341 u32 rx_overrun; 316 u32 rx_overrun;
342}; 317};
343 318
344struct lro_counters {
345 u32 lro_aggregated;
346 u32 lro_flushed;
347 u32 lro_no_desc;
348};
349
350struct rx_queue { 319struct rx_queue {
351 int index; 320 int index;
352 321
@@ -360,9 +329,6 @@ struct rx_queue {
360 dma_addr_t rx_desc_dma; 329 dma_addr_t rx_desc_dma;
361 int rx_desc_area_size; 330 int rx_desc_area_size;
362 struct sk_buff **rx_skb; 331 struct sk_buff **rx_skb;
363
364 struct net_lro_mgr lro_mgr;
365 struct net_lro_desc lro_arr[8];
366}; 332};
367 333
368struct tx_queue { 334struct tx_queue {
@@ -398,8 +364,6 @@ struct mv643xx_eth_private {
398 spinlock_t mib_counters_lock; 364 spinlock_t mib_counters_lock;
399 struct mib_counters mib_counters; 365 struct mib_counters mib_counters;
400 366
401 struct lro_counters lro_counters;
402
403 struct work_struct tx_timeout_task; 367 struct work_struct tx_timeout_task;
404 368
405 struct napi_struct napi; 369 struct napi_struct napi;
@@ -435,9 +399,7 @@ struct mv643xx_eth_private {
435 /* 399 /*
436 * Hardware-specific parameters. 400 * Hardware-specific parameters.
437 */ 401 */
438#if defined(CONFIG_HAVE_CLK)
439 struct clk *clk; 402 struct clk *clk;
440#endif
441 unsigned int t_clk; 403 unsigned int t_clk;
442}; 404};
443 405
@@ -530,42 +492,12 @@ static void txq_maybe_wake(struct tx_queue *txq)
530 } 492 }
531} 493}
532 494
533
534/* rx napi ******************************************************************/
535static int
536mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
537 u64 *hdr_flags, void *priv)
538{
539 unsigned long cmd_sts = (unsigned long)priv;
540
541 /*
542 * Make sure that this packet is Ethernet II, is not VLAN
543 * tagged, is IPv4, has a valid IP header, and is TCP.
544 */
545 if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
546 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
547 RX_PKT_IS_VLAN_TAGGED)) !=
548 (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
549 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
550 return -1;
551
552 skb_reset_network_header(skb);
553 skb_set_transport_header(skb, ip_hdrlen(skb));
554 *iphdr = ip_hdr(skb);
555 *tcph = tcp_hdr(skb);
556 *hdr_flags = LRO_IPV4 | LRO_TCP;
557
558 return 0;
559}
560
561static int rxq_process(struct rx_queue *rxq, int budget) 495static int rxq_process(struct rx_queue *rxq, int budget)
562{ 496{
563 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 497 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
564 struct net_device_stats *stats = &mp->dev->stats; 498 struct net_device_stats *stats = &mp->dev->stats;
565 int lro_flush_needed;
566 int rx; 499 int rx;
567 500
568 lro_flush_needed = 0;
569 rx = 0; 501 rx = 0;
570 while (rx < budget && rxq->rx_desc_count) { 502 while (rx < budget && rxq->rx_desc_count) {
571 struct rx_desc *rx_desc; 503 struct rx_desc *rx_desc;
@@ -626,12 +558,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
626 skb->ip_summed = CHECKSUM_UNNECESSARY; 558 skb->ip_summed = CHECKSUM_UNNECESSARY;
627 skb->protocol = eth_type_trans(skb, mp->dev); 559 skb->protocol = eth_type_trans(skb, mp->dev);
628 560
629 if (skb->dev->features & NETIF_F_LRO && 561 napi_gro_receive(&mp->napi, skb);
630 skb->ip_summed == CHECKSUM_UNNECESSARY) {
631 lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
632 lro_flush_needed = 1;
633 } else
634 netif_receive_skb(skb);
635 562
636 continue; 563 continue;
637 564
@@ -651,9 +578,6 @@ err:
651 dev_kfree_skb(skb); 578 dev_kfree_skb(skb);
652 } 579 }
653 580
654 if (lro_flush_needed)
655 lro_flush_all(&rxq->lro_mgr);
656
657 if (rx < budget) 581 if (rx < budget)
658 mp->work_rx &= ~(1 << rxq->index); 582 mp->work_rx &= ~(1 << rxq->index);
659 583
@@ -1120,97 +1044,6 @@ out_write:
1120 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 1044 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1121} 1045}
1122 1046
1123static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
1124{
1125 struct mv643xx_eth_shared_private *msp = dev_id;
1126
1127 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
1128 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
1129 wake_up(&msp->smi_busy_wait);
1130 return IRQ_HANDLED;
1131 }
1132
1133 return IRQ_NONE;
1134}
1135
1136static int smi_is_done(struct mv643xx_eth_shared_private *msp)
1137{
1138 return !(readl(msp->base + SMI_REG) & SMI_BUSY);
1139}
1140
1141static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1142{
1143 if (msp->err_interrupt == NO_IRQ) {
1144 int i;
1145
1146 for (i = 0; !smi_is_done(msp); i++) {
1147 if (i == 10)
1148 return -ETIMEDOUT;
1149 msleep(10);
1150 }
1151
1152 return 0;
1153 }
1154
1155 if (!smi_is_done(msp)) {
1156 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1157 msecs_to_jiffies(100));
1158 if (!smi_is_done(msp))
1159 return -ETIMEDOUT;
1160 }
1161
1162 return 0;
1163}
1164
1165static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1166{
1167 struct mv643xx_eth_shared_private *msp = bus->priv;
1168 void __iomem *smi_reg = msp->base + SMI_REG;
1169 int ret;
1170
1171 if (smi_wait_ready(msp)) {
1172 pr_warn("SMI bus busy timeout\n");
1173 return -ETIMEDOUT;
1174 }
1175
1176 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1177
1178 if (smi_wait_ready(msp)) {
1179 pr_warn("SMI bus busy timeout\n");
1180 return -ETIMEDOUT;
1181 }
1182
1183 ret = readl(smi_reg);
1184 if (!(ret & SMI_READ_VALID)) {
1185 pr_warn("SMI bus read not valid\n");
1186 return -ENODEV;
1187 }
1188
1189 return ret & 0xffff;
1190}
1191
1192static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1193{
1194 struct mv643xx_eth_shared_private *msp = bus->priv;
1195 void __iomem *smi_reg = msp->base + SMI_REG;
1196
1197 if (smi_wait_ready(msp)) {
1198 pr_warn("SMI bus busy timeout\n");
1199 return -ETIMEDOUT;
1200 }
1201
1202 writel(SMI_OPCODE_WRITE | (reg << 21) |
1203 (addr << 16) | (val & 0xffff), smi_reg);
1204
1205 if (smi_wait_ready(msp)) {
1206 pr_warn("SMI bus busy timeout\n");
1207 return -ETIMEDOUT;
1208 }
1209
1210 return 0;
1211}
1212
1213
1214/* statistics ***************************************************************/ 1047/* statistics ***************************************************************/
1215static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1048static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1216{ 1049{
@@ -1236,26 +1069,6 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1236 return stats; 1069 return stats;
1237} 1070}
1238 1071
1239static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
1240{
1241 u32 lro_aggregated = 0;
1242 u32 lro_flushed = 0;
1243 u32 lro_no_desc = 0;
1244 int i;
1245
1246 for (i = 0; i < mp->rxq_count; i++) {
1247 struct rx_queue *rxq = mp->rxq + i;
1248
1249 lro_aggregated += rxq->lro_mgr.stats.aggregated;
1250 lro_flushed += rxq->lro_mgr.stats.flushed;
1251 lro_no_desc += rxq->lro_mgr.stats.no_desc;
1252 }
1253
1254 mp->lro_counters.lro_aggregated = lro_aggregated;
1255 mp->lro_counters.lro_flushed = lro_flushed;
1256 mp->lro_counters.lro_no_desc = lro_no_desc;
1257}
1258
1259static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1072static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1260{ 1073{
1261 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1074 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1419,10 +1232,6 @@ struct mv643xx_eth_stats {
1419 { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1232 { #m, FIELD_SIZEOF(struct mib_counters, m), \
1420 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1233 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1421 1234
1422#define LROSTAT(m) \
1423 { #m, FIELD_SIZEOF(struct lro_counters, m), \
1424 -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
1425
1426static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1235static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1427 SSTAT(rx_packets), 1236 SSTAT(rx_packets),
1428 SSTAT(tx_packets), 1237 SSTAT(tx_packets),
@@ -1464,9 +1273,6 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1464 MIBSTAT(late_collision), 1273 MIBSTAT(late_collision),
1465 MIBSTAT(rx_discard), 1274 MIBSTAT(rx_discard),
1466 MIBSTAT(rx_overrun), 1275 MIBSTAT(rx_overrun),
1467 LROSTAT(lro_aggregated),
1468 LROSTAT(lro_flushed),
1469 LROSTAT(lro_no_desc),
1470}; 1276};
1471 1277
1472static int 1278static int
@@ -1523,6 +1329,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1523 return 0; 1329 return 0;
1524} 1330}
1525 1331
1332static void
1333mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1334{
1335 struct mv643xx_eth_private *mp = netdev_priv(dev);
1336 wol->supported = 0;
1337 wol->wolopts = 0;
1338 if (mp->phy)
1339 phy_ethtool_get_wol(mp->phy, wol);
1340}
1341
1342static int
1343mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1344{
1345 struct mv643xx_eth_private *mp = netdev_priv(dev);
1346 int err;
1347
1348 if (mp->phy == NULL)
1349 return -EOPNOTSUPP;
1350
1351 err = phy_ethtool_set_wol(mp->phy, wol);
1352 /* Given that mv643xx_eth works without the marvell-specific PHY driver,
1353 * this debugging hint is useful to have.
1354 */
1355 if (err == -EOPNOTSUPP)
1356 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1357 return err;
1358}
1359
1526static int 1360static int
1527mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1361mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1528{ 1362{
@@ -1668,7 +1502,6 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1668 1502
1669 mv643xx_eth_get_stats(dev); 1503 mv643xx_eth_get_stats(dev);
1670 mib_counters_update(mp); 1504 mib_counters_update(mp);
1671 mv643xx_eth_grab_lro_stats(mp);
1672 1505
1673 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1506 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1674 const struct mv643xx_eth_stats *stat; 1507 const struct mv643xx_eth_stats *stat;
@@ -1708,6 +1541,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1708 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1541 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1709 .get_sset_count = mv643xx_eth_get_sset_count, 1542 .get_sset_count = mv643xx_eth_get_sset_count,
1710 .get_ts_info = ethtool_op_get_ts_info, 1543 .get_ts_info = ethtool_op_get_ts_info,
1544 .get_wol = mv643xx_eth_get_wol,
1545 .set_wol = mv643xx_eth_set_wol,
1711}; 1546};
1712 1547
1713 1548
@@ -1939,19 +1774,6 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1939 nexti * sizeof(struct rx_desc); 1774 nexti * sizeof(struct rx_desc);
1940 } 1775 }
1941 1776
1942 rxq->lro_mgr.dev = mp->dev;
1943 memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
1944 rxq->lro_mgr.features = LRO_F_NAPI;
1945 rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1946 rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1947 rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
1948 rxq->lro_mgr.max_aggr = 32;
1949 rxq->lro_mgr.frag_align_pad = 0;
1950 rxq->lro_mgr.lro_arr = rxq->lro_arr;
1951 rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
1952
1953 memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
1954
1955 return 0; 1777 return 0;
1956 1778
1957 1779
@@ -2635,66 +2457,26 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2635 struct mv643xx_eth_shared_private *msp; 2457 struct mv643xx_eth_shared_private *msp;
2636 const struct mbus_dram_target_info *dram; 2458 const struct mbus_dram_target_info *dram;
2637 struct resource *res; 2459 struct resource *res;
2638 int ret;
2639 2460
2640 if (!mv643xx_eth_version_printed++) 2461 if (!mv643xx_eth_version_printed++)
2641 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", 2462 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2642 mv643xx_eth_driver_version); 2463 mv643xx_eth_driver_version);
2643 2464
2644 ret = -EINVAL;
2645 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2465 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2646 if (res == NULL) 2466 if (res == NULL)
2647 goto out; 2467 return -EINVAL;
2648 2468
2649 ret = -ENOMEM; 2469 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
2650 msp = kzalloc(sizeof(*msp), GFP_KERNEL);
2651 if (msp == NULL) 2470 if (msp == NULL)
2652 goto out; 2471 return -ENOMEM;
2653 2472
2654 msp->base = ioremap(res->start, resource_size(res)); 2473 msp->base = ioremap(res->start, resource_size(res));
2655 if (msp->base == NULL) 2474 if (msp->base == NULL)
2656 goto out_free; 2475 return -ENOMEM;
2657
2658 /*
2659 * Set up and register SMI bus.
2660 */
2661 if (pd == NULL || pd->shared_smi == NULL) {
2662 msp->smi_bus = mdiobus_alloc();
2663 if (msp->smi_bus == NULL)
2664 goto out_unmap;
2665
2666 msp->smi_bus->priv = msp;
2667 msp->smi_bus->name = "mv643xx_eth smi";
2668 msp->smi_bus->read = smi_bus_read;
2669 msp->smi_bus->write = smi_bus_write,
2670 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
2671 pdev->name, pdev->id);
2672 msp->smi_bus->parent = &pdev->dev;
2673 msp->smi_bus->phy_mask = 0xffffffff;
2674 if (mdiobus_register(msp->smi_bus) < 0)
2675 goto out_free_mii_bus;
2676 msp->smi = msp;
2677 } else {
2678 msp->smi = platform_get_drvdata(pd->shared_smi);
2679 }
2680
2681 msp->err_interrupt = NO_IRQ;
2682 init_waitqueue_head(&msp->smi_busy_wait);
2683 2476
2684 /* 2477 msp->clk = devm_clk_get(&pdev->dev, NULL);
2685 * Check whether the error interrupt is hooked up. 2478 if (!IS_ERR(msp->clk))
2686 */ 2479 clk_prepare_enable(msp->clk);
2687 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2688 if (res != NULL) {
2689 int err;
2690
2691 err = request_irq(res->start, mv643xx_eth_err_irq,
2692 IRQF_SHARED, "mv643xx_eth", msp);
2693 if (!err) {
2694 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2695 msp->err_interrupt = res->start;
2696 }
2697 }
2698 2480
2699 /* 2481 /*
2700 * (Re-)program MBUS remapping windows if we are asked to. 2482 * (Re-)program MBUS remapping windows if we are asked to.
@@ -2710,30 +2492,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2710 platform_set_drvdata(pdev, msp); 2492 platform_set_drvdata(pdev, msp);
2711 2493
2712 return 0; 2494 return 0;
2713
2714out_free_mii_bus:
2715 mdiobus_free(msp->smi_bus);
2716out_unmap:
2717 iounmap(msp->base);
2718out_free:
2719 kfree(msp);
2720out:
2721 return ret;
2722} 2495}
2723 2496
2724static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2497static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2725{ 2498{
2726 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2499 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2727 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2728 2500
2729 if (pd == NULL || pd->shared_smi == NULL) {
2730 mdiobus_unregister(msp->smi_bus);
2731 mdiobus_free(msp->smi_bus);
2732 }
2733 if (msp->err_interrupt != NO_IRQ)
2734 free_irq(msp->err_interrupt, msp);
2735 iounmap(msp->base); 2501 iounmap(msp->base);
2736 kfree(msp); 2502 if (!IS_ERR(msp->clk))
2503 clk_disable_unprepare(msp->clk);
2737 2504
2738 return 0; 2505 return 0;
2739} 2506}
@@ -2794,14 +2561,21 @@ static void set_params(struct mv643xx_eth_private *mp,
2794 mp->txq_count = pd->tx_queue_count ? : 1; 2561 mp->txq_count = pd->tx_queue_count ? : 1;
2795} 2562}
2796 2563
2564static void mv643xx_eth_adjust_link(struct net_device *dev)
2565{
2566 struct mv643xx_eth_private *mp = netdev_priv(dev);
2567
2568 mv643xx_adjust_pscr(mp);
2569}
2570
2797static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2571static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2798 int phy_addr) 2572 int phy_addr)
2799{ 2573{
2800 struct mii_bus *bus = mp->shared->smi->smi_bus;
2801 struct phy_device *phydev; 2574 struct phy_device *phydev;
2802 int start; 2575 int start;
2803 int num; 2576 int num;
2804 int i; 2577 int i;
2578 char phy_id[MII_BUS_ID_SIZE + 3];
2805 2579
2806 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2580 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2807 start = phy_addr_get(mp) & 0x1f; 2581 start = phy_addr_get(mp) & 0x1f;
@@ -2811,17 +2585,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2811 num = 1; 2585 num = 1;
2812 } 2586 }
2813 2587
2814 phydev = NULL; 2588 /* Attempt to connect to the PHY using orion-mdio */
2589 phydev = ERR_PTR(-ENODEV);
2815 for (i = 0; i < num; i++) { 2590 for (i = 0; i < num; i++) {
2816 int addr = (start + i) & 0x1f; 2591 int addr = (start + i) & 0x1f;
2817 2592
2818 if (bus->phy_map[addr] == NULL) 2593 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2819 mdiobus_scan(bus, addr); 2594 "orion-mdio-mii", addr);
2820 2595
2821 if (phydev == NULL) { 2596 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
2822 phydev = bus->phy_map[addr]; 2597 PHY_INTERFACE_MODE_GMII);
2823 if (phydev != NULL) 2598 if (!IS_ERR(phydev)) {
2824 phy_addr_set(mp, addr); 2599 phy_addr_set(mp, addr);
2600 break;
2825 } 2601 }
2826 } 2602 }
2827 2603
@@ -2834,8 +2610,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2834 2610
2835 phy_reset(mp); 2611 phy_reset(mp);
2836 2612
2837 phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
2838
2839 if (speed == 0) { 2613 if (speed == 0) {
2840 phy->autoneg = AUTONEG_ENABLE; 2614 phy->autoneg = AUTONEG_ENABLE;
2841 phy->speed = 0; 2615 phy->speed = 0;
@@ -2932,22 +2706,27 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2932 * it to override the default. 2706 * it to override the default.
2933 */ 2707 */
2934 mp->t_clk = 133000000; 2708 mp->t_clk = 133000000;
2935#if defined(CONFIG_HAVE_CLK) 2709 mp->clk = devm_clk_get(&pdev->dev, NULL);
2936 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
2937 if (!IS_ERR(mp->clk)) { 2710 if (!IS_ERR(mp->clk)) {
2938 clk_prepare_enable(mp->clk); 2711 clk_prepare_enable(mp->clk);
2939 mp->t_clk = clk_get_rate(mp->clk); 2712 mp->t_clk = clk_get_rate(mp->clk);
2940 } 2713 }
2941#endif 2714
2942 set_params(mp, pd); 2715 set_params(mp, pd);
2943 netif_set_real_num_tx_queues(dev, mp->txq_count); 2716 netif_set_real_num_tx_queues(dev, mp->txq_count);
2944 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2717 netif_set_real_num_rx_queues(dev, mp->rxq_count);
2945 2718
2946 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2719 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
2947 mp->phy = phy_scan(mp, pd->phy_addr); 2720 mp->phy = phy_scan(mp, pd->phy_addr);
2948 2721
2949 if (mp->phy != NULL) 2722 if (IS_ERR(mp->phy)) {
2723 err = PTR_ERR(mp->phy);
2724 if (err == -ENODEV)
2725 err = -EPROBE_DEFER;
2726 goto out;
2727 }
2950 phy_init(mp, pd->speed, pd->duplex); 2728 phy_init(mp, pd->speed, pd->duplex);
2729 }
2951 2730
2952 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2731 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2953 2732
@@ -2982,8 +2761,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2982 dev->watchdog_timeo = 2 * HZ; 2761 dev->watchdog_timeo = 2 * HZ;
2983 dev->base_addr = 0; 2762 dev->base_addr = 0;
2984 2763
2985 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 2764 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2986 NETIF_F_RXCSUM | NETIF_F_LRO;
2987 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 2765 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2988 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2766 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2989 2767
@@ -3014,12 +2792,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
3014 return 0; 2792 return 0;
3015 2793
3016out: 2794out:
3017#if defined(CONFIG_HAVE_CLK) 2795 if (!IS_ERR(mp->clk))
3018 if (!IS_ERR(mp->clk)) {
3019 clk_disable_unprepare(mp->clk); 2796 clk_disable_unprepare(mp->clk);
3020 clk_put(mp->clk);
3021 }
3022#endif
3023 free_netdev(dev); 2797 free_netdev(dev);
3024 2798
3025 return err; 2799 return err;
@@ -3034,12 +2808,8 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
3034 phy_detach(mp->phy); 2808 phy_detach(mp->phy);
3035 cancel_work_sync(&mp->tx_timeout_task); 2809 cancel_work_sync(&mp->tx_timeout_task);
3036 2810
3037#if defined(CONFIG_HAVE_CLK) 2811 if (!IS_ERR(mp->clk))
3038 if (!IS_ERR(mp->clk)) {
3039 clk_disable_unprepare(mp->clk); 2812 clk_disable_unprepare(mp->clk);
3040 clk_put(mp->clk);
3041 }
3042#endif
3043 2813
3044 free_netdev(mp->dev); 2814 free_netdev(mp->dev);
3045 2815
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 77b7c80262f4..e2f662660313 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -24,10 +24,14 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/phy.h> 26#include <linux/phy.h>
27#include <linux/of_address.h> 27#include <linux/interrupt.h>
28#include <linux/of_mdio.h>
29#include <linux/platform_device.h> 28#include <linux/platform_device.h>
30#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/io.h>
31#include <linux/clk.h>
32#include <linux/of_mdio.h>
33#include <linux/sched.h>
34#include <linux/wait.h>
31 35
32#define MVMDIO_SMI_DATA_SHIFT 0 36#define MVMDIO_SMI_DATA_SHIFT 0
33#define MVMDIO_SMI_PHY_ADDR_SHIFT 16 37#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
@@ -36,33 +40,59 @@
36#define MVMDIO_SMI_WRITE_OPERATION 0 40#define MVMDIO_SMI_WRITE_OPERATION 0
37#define MVMDIO_SMI_READ_VALID BIT(27) 41#define MVMDIO_SMI_READ_VALID BIT(27)
38#define MVMDIO_SMI_BUSY BIT(28) 42#define MVMDIO_SMI_BUSY BIT(28)
43#define MVMDIO_ERR_INT_CAUSE 0x007C
44#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
45#define MVMDIO_ERR_INT_MASK 0x0080
39 46
40struct orion_mdio_dev { 47struct orion_mdio_dev {
41 struct mutex lock; 48 struct mutex lock;
42 void __iomem *smireg; 49 void __iomem *regs;
50 struct clk *clk;
51 /*
52 * If we have access to the error interrupt pin (which is
53 * somewhat misnamed as it not only reflects internal errors
54 * but also reflects SMI completion), use that to wait for
55 * SMI access completion instead of polling the SMI busy bit.
56 */
57 int err_interrupt;
58 wait_queue_head_t smi_busy_wait;
43}; 59};
44 60
61static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
62{
63 return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
64}
65
45/* Wait for the SMI unit to be ready for another operation 66/* Wait for the SMI unit to be ready for another operation
46 */ 67 */
47static int orion_mdio_wait_ready(struct mii_bus *bus) 68static int orion_mdio_wait_ready(struct mii_bus *bus)
48{ 69{
49 struct orion_mdio_dev *dev = bus->priv; 70 struct orion_mdio_dev *dev = bus->priv;
50 int count; 71 int count;
51 u32 val;
52 72
53 count = 0; 73 if (dev->err_interrupt <= 0) {
54 while (1) { 74 count = 0;
55 val = readl(dev->smireg); 75 while (1) {
56 if (!(val & MVMDIO_SMI_BUSY)) 76 if (orion_mdio_smi_is_done(dev))
57 break; 77 break;
58 78
59 if (count > 100) { 79 if (count > 100) {
60 dev_err(bus->parent, "Timeout: SMI busy for too long\n"); 80 dev_err(bus->parent,
61 return -ETIMEDOUT; 81 "Timeout: SMI busy for too long\n");
62 } 82 return -ETIMEDOUT;
83 }
63 84
64 udelay(10); 85 udelay(10);
65 count++; 86 count++;
87 }
88 } else {
89 if (!orion_mdio_smi_is_done(dev)) {
90 wait_event_timeout(dev->smi_busy_wait,
91 orion_mdio_smi_is_done(dev),
92 msecs_to_jiffies(100));
93 if (!orion_mdio_smi_is_done(dev))
94 return -ETIMEDOUT;
95 }
66 } 96 }
67 97
68 return 0; 98 return 0;
@@ -87,12 +117,12 @@ static int orion_mdio_read(struct mii_bus *bus, int mii_id,
87 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | 117 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
88 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | 118 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
89 MVMDIO_SMI_READ_OPERATION), 119 MVMDIO_SMI_READ_OPERATION),
90 dev->smireg); 120 dev->regs);
91 121
92 /* Wait for the value to become available */ 122 /* Wait for the value to become available */
93 count = 0; 123 count = 0;
94 while (1) { 124 while (1) {
95 val = readl(dev->smireg); 125 val = readl(dev->regs);
96 if (val & MVMDIO_SMI_READ_VALID) 126 if (val & MVMDIO_SMI_READ_VALID)
97 break; 127 break;
98 128
@@ -129,7 +159,7 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
129 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | 159 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
130 MVMDIO_SMI_WRITE_OPERATION | 160 MVMDIO_SMI_WRITE_OPERATION |
131 (value << MVMDIO_SMI_DATA_SHIFT)), 161 (value << MVMDIO_SMI_DATA_SHIFT)),
132 dev->smireg); 162 dev->regs);
133 163
134 mutex_unlock(&dev->lock); 164 mutex_unlock(&dev->lock);
135 165
@@ -141,13 +171,34 @@ static int orion_mdio_reset(struct mii_bus *bus)
141 return 0; 171 return 0;
142} 172}
143 173
174static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
175{
176 struct orion_mdio_dev *dev = dev_id;
177
178 if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
179 MVMDIO_ERR_INT_SMI_DONE) {
180 writel(~MVMDIO_ERR_INT_SMI_DONE,
181 dev->regs + MVMDIO_ERR_INT_CAUSE);
182 wake_up(&dev->smi_busy_wait);
183 return IRQ_HANDLED;
184 }
185
186 return IRQ_NONE;
187}
188
144static int orion_mdio_probe(struct platform_device *pdev) 189static int orion_mdio_probe(struct platform_device *pdev)
145{ 190{
146 struct device_node *np = pdev->dev.of_node; 191 struct resource *r;
147 struct mii_bus *bus; 192 struct mii_bus *bus;
148 struct orion_mdio_dev *dev; 193 struct orion_mdio_dev *dev;
149 int i, ret; 194 int i, ret;
150 195
196 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
197 if (!r) {
198 dev_err(&pdev->dev, "No SMI register address given\n");
199 return -ENODEV;
200 }
201
151 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); 202 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
152 if (!bus) { 203 if (!bus) {
153 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); 204 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
@@ -172,36 +223,66 @@ static int orion_mdio_probe(struct platform_device *pdev)
172 bus->irq[i] = PHY_POLL; 223 bus->irq[i] = PHY_POLL;
173 224
174 dev = bus->priv; 225 dev = bus->priv;
175 dev->smireg = of_iomap(pdev->dev.of_node, 0); 226 dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
176 if (!dev->smireg) { 227 if (!dev->regs) {
177 dev_err(&pdev->dev, "No SMI register address given in DT\n"); 228 dev_err(&pdev->dev, "Unable to remap SMI register\n");
178 kfree(bus->irq); 229 ret = -ENODEV;
179 mdiobus_free(bus); 230 goto out_mdio;
180 return -ENODEV; 231 }
232
233 init_waitqueue_head(&dev->smi_busy_wait);
234
235 dev->clk = devm_clk_get(&pdev->dev, NULL);
236 if (!IS_ERR(dev->clk))
237 clk_prepare_enable(dev->clk);
238
239 dev->err_interrupt = platform_get_irq(pdev, 0);
240 if (dev->err_interrupt != -ENXIO) {
241 ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
242 orion_mdio_err_irq,
243 IRQF_SHARED, pdev->name, dev);
244 if (ret)
245 goto out_mdio;
246
247 writel(MVMDIO_ERR_INT_SMI_DONE,
248 dev->regs + MVMDIO_ERR_INT_MASK);
181 } 249 }
182 250
183 mutex_init(&dev->lock); 251 mutex_init(&dev->lock);
184 252
185 ret = of_mdiobus_register(bus, np); 253 if (pdev->dev.of_node)
254 ret = of_mdiobus_register(bus, pdev->dev.of_node);
255 else
256 ret = mdiobus_register(bus);
186 if (ret < 0) { 257 if (ret < 0) {
187 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); 258 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
188 iounmap(dev->smireg); 259 goto out_mdio;
189 kfree(bus->irq);
190 mdiobus_free(bus);
191 return ret;
192 } 260 }
193 261
194 platform_set_drvdata(pdev, bus); 262 platform_set_drvdata(pdev, bus);
195 263
196 return 0; 264 return 0;
265
266out_mdio:
267 if (!IS_ERR(dev->clk))
268 clk_disable_unprepare(dev->clk);
269 kfree(bus->irq);
270 mdiobus_free(bus);
271 return ret;
197} 272}
198 273
199static int orion_mdio_remove(struct platform_device *pdev) 274static int orion_mdio_remove(struct platform_device *pdev)
200{ 275{
201 struct mii_bus *bus = platform_get_drvdata(pdev); 276 struct mii_bus *bus = platform_get_drvdata(pdev);
277 struct orion_mdio_dev *dev = bus->priv;
278
279 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
202 mdiobus_unregister(bus); 280 mdiobus_unregister(bus);
203 kfree(bus->irq); 281 kfree(bus->irq);
204 mdiobus_free(bus); 282 mdiobus_free(bus);
283 if (!IS_ERR(dev->clk))
284 clk_disable_unprepare(dev->clk);
285
205 return 0; 286 return 0;
206} 287}
207 288
@@ -225,3 +306,4 @@ module_platform_driver(orion_mdio_driver);
225MODULE_DESCRIPTION("Marvell MDIO interface driver"); 306MODULE_DESCRIPTION("Marvell MDIO interface driver");
226MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 307MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
227MODULE_LICENSE("GPL"); 308MODULE_LICENSE("GPL");
309MODULE_ALIAS("platform:orion-mdio");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a47a097c21e1..c96678555233 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
1969 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, 1969 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
1970 rxq->size * MVNETA_DESC_ALIGNED_SIZE, 1970 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1971 &rxq->descs_phys, GFP_KERNEL); 1971 &rxq->descs_phys, GFP_KERNEL);
1972 if (rxq->descs == NULL) { 1972 if (rxq->descs == NULL)
1973 netdev_err(pp->dev,
1974 "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
1975 rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1976 rxq->size);
1977 return -ENOMEM; 1973 return -ENOMEM;
1978 }
1979 1974
1980 BUG_ON(rxq->descs != 1975 BUG_ON(rxq->descs !=
1981 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); 1976 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2029 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, 2024 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2030 txq->size * MVNETA_DESC_ALIGNED_SIZE, 2025 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2031 &txq->descs_phys, GFP_KERNEL); 2026 &txq->descs_phys, GFP_KERNEL);
2032 if (txq->descs == NULL) { 2027 if (txq->descs == NULL)
2033 netdev_err(pp->dev,
2034 "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
2035 txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
2036 txq->size);
2037 return -ENOMEM; 2028 return -ENOMEM;
2038 }
2039 2029
2040 /* Make sure descriptor address is cache line size aligned */ 2030 /* Make sure descriptor address is cache line size aligned */
2041 BUG_ON(txq->descs != 2031 BUG_ON(txq->descs !=
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 037ed866c22f..339bb323cb0c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
584 */ 584 */
585 if (pep->htpr == NULL) { 585 if (pep->htpr == NULL) {
586 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, 586 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
587 HASH_ADDR_TABLE_SIZE, 587 HASH_ADDR_TABLE_SIZE,
588 &pep->htpr_dma, GFP_KERNEL); 588 &pep->htpr_dma,
589 GFP_KERNEL | __GFP_ZERO);
589 if (pep->htpr == NULL) 590 if (pep->htpr == NULL)
590 return -ENOMEM; 591 return -ENOMEM;
592 } else {
593 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
591 } 594 }
592 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
593 wrl(pep, HTPR, pep->htpr_dma); 595 wrl(pep, HTPR, pep->htpr_dma);
594 return 0; 596 return 0;
595} 597}
@@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev)
1023 size = pep->rx_ring_size * sizeof(struct rx_desc); 1025 size = pep->rx_ring_size * sizeof(struct rx_desc);
1024 pep->rx_desc_area_size = size; 1026 pep->rx_desc_area_size = size;
1025 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1027 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1026 &pep->rx_desc_dma, GFP_KERNEL); 1028 &pep->rx_desc_dma,
1027 if (!pep->p_rx_desc_area) { 1029 GFP_KERNEL | __GFP_ZERO);
1028 printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", 1030 if (!pep->p_rx_desc_area)
1029 dev->name, size);
1030 goto out; 1031 goto out;
1031 } 1032
1032 memset((void *)pep->p_rx_desc_area, 0, size);
1033 /* initialize the next_desc_ptr links in the Rx descriptors ring */ 1033 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1034 p_rx_desc = pep->p_rx_desc_area; 1034 p_rx_desc = pep->p_rx_desc_area;
1035 for (i = 0; i < rx_desc_num; i++) { 1035 for (i = 0; i < rx_desc_num; i++) {
@@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev)
1086 size = pep->tx_ring_size * sizeof(struct tx_desc); 1086 size = pep->tx_ring_size * sizeof(struct tx_desc);
1087 pep->tx_desc_area_size = size; 1087 pep->tx_desc_area_size = size;
1088 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, 1088 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1089 &pep->tx_desc_dma, GFP_KERNEL); 1089 &pep->tx_desc_dma,
1090 if (!pep->p_tx_desc_area) { 1090 GFP_KERNEL | __GFP_ZERO);
1091 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", 1091 if (!pep->p_tx_desc_area)
1092 dev->name, size);
1093 goto out; 1092 goto out;
1094 }
1095 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1096 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1093 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1097 p_tx_desc = pep->p_tx_desc_area; 1094 p_tx_desc = pep->p_tx_desc_area;
1098 for (i = 0; i < tx_desc_num; i++) { 1095 for (i = 0; i < tx_desc_num; i++) {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 6a0e671fcecd..256ae789c143 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1421,14 +1421,14 @@ static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
1421 struct sky2_hw *hw = sky2->hw; 1421 struct sky2_hw *hw = sky2->hw;
1422 u16 port = sky2->port; 1422 u16 port = sky2->port;
1423 1423
1424 if (features & NETIF_F_HW_VLAN_RX) 1424 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1425 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1425 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1426 RX_VLAN_STRIP_ON); 1426 RX_VLAN_STRIP_ON);
1427 else 1427 else
1428 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1428 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1429 RX_VLAN_STRIP_OFF); 1429 RX_VLAN_STRIP_OFF);
1430 1430
1431 if (features & NETIF_F_HW_VLAN_TX) { 1431 if (features & NETIF_F_HW_VLAN_CTAG_TX) {
1432 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1432 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1433 TX_VLAN_TAG_ON); 1433 TX_VLAN_TAG_ON);
1434 1434
@@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
2713 struct sk_buff *skb; 2713 struct sk_buff *skb;
2714 2714
2715 skb = sky2->rx_ring[sky2->rx_next].skb; 2715 skb = sky2->rx_ring[sky2->rx_next].skb;
2716 __vlan_hwaccel_put_tag(skb, be16_to_cpu(length)); 2716 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
2717} 2717}
2718 2718
2719static void sky2_rx_hash(struct sky2_port *sky2, u32 status) 2719static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
@@ -4406,7 +4406,7 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4406 if (changed & NETIF_F_RXHASH) 4406 if (changed & NETIF_F_RXHASH)
4407 rx_set_rss(dev, features); 4407 rx_set_rss(dev, features);
4408 4408
4409 if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) 4409 if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4410 sky2_vlan_mode(dev, features); 4410 sky2_vlan_mode(dev, features);
4411 4411
4412 return 0; 4412 return 0;
@@ -4793,7 +4793,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4793 dev->hw_features |= NETIF_F_RXHASH; 4793 dev->hw_features |= NETIF_F_RXHASH;
4794 4794
4795 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { 4795 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
4796 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4796 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
4797 NETIF_F_HW_VLAN_CTAG_RX;
4797 dev->vlan_features |= SKY2_VLAN_OFFLOADS; 4798 dev->vlan_features |= SKY2_VLAN_OFFLOADS;
4798 } 4799 }
4799 4800
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 293127d28b33..3e9c70f15b42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -6,5 +6,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o en_selftest.o 9 en_resources.o en_netdev.o en_selftest.o en_clock.o
10mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o 10mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fdc5f23d8e9f..1df56cc50ee9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1490,6 +1490,69 @@ out:
1490 return ret; 1490 return ret;
1491} 1491}
1492 1492
1493static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1494{
1495 int port, err;
1496 struct mlx4_vport_state *vp_admin;
1497 struct mlx4_vport_oper_state *vp_oper;
1498
1499 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1500 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1501 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1502 vp_oper->state = *vp_admin;
1503 if (MLX4_VGT != vp_admin->default_vlan) {
1504 err = __mlx4_register_vlan(&priv->dev, port,
1505 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1506 if (err) {
1507 vp_oper->vlan_idx = NO_INDX;
1508 mlx4_warn((&priv->dev),
1509 "No vlan resorces slave %d, port %d\n",
1510 slave, port);
1511 return err;
1512 }
1513 mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
1514 (int)(vp_oper->state.default_vlan),
1515 vp_oper->vlan_idx, slave, port);
1516 }
1517 if (vp_admin->spoofchk) {
1518 vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1519 port,
1520 vp_admin->mac);
1521 if (0 > vp_oper->mac_idx) {
1522 err = vp_oper->mac_idx;
1523 vp_oper->mac_idx = NO_INDX;
1524 mlx4_warn((&priv->dev),
1525 "No mac resorces slave %d, port %d\n",
1526 slave, port);
1527 return err;
1528 }
1529 mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
1530 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1531 }
1532 }
1533 return 0;
1534}
1535
1536static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1537{
1538 int port;
1539 struct mlx4_vport_oper_state *vp_oper;
1540
1541 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1542 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1543 if (NO_INDX != vp_oper->vlan_idx) {
1544 __mlx4_unregister_vlan(&priv->dev,
1545 port, vp_oper->vlan_idx);
1546 vp_oper->vlan_idx = NO_INDX;
1547 }
1548 if (NO_INDX != vp_oper->mac_idx) {
1549 __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
1550 vp_oper->mac_idx = NO_INDX;
1551 }
1552 }
1553 return;
1554}
1555
1493static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, 1556static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1494 u16 param, u8 toggle) 1557 u16 param, u8 toggle)
1495{ 1558{
@@ -1510,6 +1573,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1510 if (cmd == MLX4_COMM_CMD_RESET) { 1573 if (cmd == MLX4_COMM_CMD_RESET) {
1511 mlx4_warn(dev, "Received reset from slave:%d\n", slave); 1574 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1512 slave_state[slave].active = false; 1575 slave_state[slave].active = false;
1576 mlx4_master_deactivate_admin_state(priv, slave);
1513 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { 1577 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1514 slave_state[slave].event_eq[i].eqn = -1; 1578 slave_state[slave].event_eq[i].eqn = -1;
1515 slave_state[slave].event_eq[i].token = 0; 1579 slave_state[slave].event_eq[i].token = 0;
@@ -1556,6 +1620,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1556 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) 1620 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1557 goto reset_slave; 1621 goto reset_slave;
1558 slave_state[slave].vhcr_dma |= param; 1622 slave_state[slave].vhcr_dma |= param;
1623 if (mlx4_master_activate_admin_state(priv, slave))
1624 goto reset_slave;
1559 slave_state[slave].active = true; 1625 slave_state[slave].active = true;
1560 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); 1626 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
1561 break; 1627 break;
@@ -1732,6 +1798,18 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1732 if (!priv->mfunc.master.slave_state) 1798 if (!priv->mfunc.master.slave_state)
1733 goto err_comm; 1799 goto err_comm;
1734 1800
1801 priv->mfunc.master.vf_admin =
1802 kzalloc(dev->num_slaves *
1803 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
1804 if (!priv->mfunc.master.vf_admin)
1805 goto err_comm_admin;
1806
1807 priv->mfunc.master.vf_oper =
1808 kzalloc(dev->num_slaves *
1809 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
1810 if (!priv->mfunc.master.vf_oper)
1811 goto err_comm_oper;
1812
1735 for (i = 0; i < dev->num_slaves; ++i) { 1813 for (i = 0; i < dev->num_slaves; ++i) {
1736 s_state = &priv->mfunc.master.slave_state[i]; 1814 s_state = &priv->mfunc.master.slave_state[i];
1737 s_state->last_cmd = MLX4_COMM_CMD_RESET; 1815 s_state->last_cmd = MLX4_COMM_CMD_RESET;
@@ -1752,6 +1830,10 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1752 goto err_slaves; 1830 goto err_slaves;
1753 } 1831 }
1754 INIT_LIST_HEAD(&s_state->mcast_filters[port]); 1832 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
1833 priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
1834 priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
1835 priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
1836 priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
1755 } 1837 }
1756 spin_lock_init(&s_state->lock); 1838 spin_lock_init(&s_state->lock);
1757 } 1839 }
@@ -1800,6 +1882,10 @@ err_slaves:
1800 for (port = 1; port <= MLX4_MAX_PORTS; port++) 1882 for (port = 1; port <= MLX4_MAX_PORTS; port++)
1801 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 1883 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1802 } 1884 }
1885 kfree(priv->mfunc.master.vf_oper);
1886err_comm_oper:
1887 kfree(priv->mfunc.master.vf_admin);
1888err_comm_admin:
1803 kfree(priv->mfunc.master.slave_state); 1889 kfree(priv->mfunc.master.slave_state);
1804err_comm: 1890err_comm:
1805 iounmap(priv->mfunc.comm); 1891 iounmap(priv->mfunc.comm);
@@ -1837,10 +1923,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
1837 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 1923 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
1838 &priv->mfunc.vhcr_dma, 1924 &priv->mfunc.vhcr_dma,
1839 GFP_KERNEL); 1925 GFP_KERNEL);
1840 if (!priv->mfunc.vhcr) { 1926 if (!priv->mfunc.vhcr)
1841 mlx4_err(dev, "Couldn't allocate VHCR.\n");
1842 goto err_hcr; 1927 goto err_hcr;
1843 }
1844 } 1928 }
1845 1929
1846 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, 1930 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
@@ -1876,6 +1960,8 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
1876 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 1960 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1877 } 1961 }
1878 kfree(priv->mfunc.master.slave_state); 1962 kfree(priv->mfunc.master.slave_state);
1963 kfree(priv->mfunc.master.vf_admin);
1964 kfree(priv->mfunc.master.vf_oper);
1879 } 1965 }
1880 1966
1881 iounmap(priv->mfunc.comm); 1967 iounmap(priv->mfunc.comm);
@@ -1986,3 +2072,115 @@ u32 mlx4_comm_get_version(void)
1986{ 2072{
1987 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; 2073 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
1988} 2074}
2075
2076static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2077{
2078 if ((vf < 0) || (vf >= dev->num_vfs)) {
2079 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
2080 return -EINVAL;
2081 }
2082
2083 return vf+1;
2084}
2085
2086int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2087{
2088 struct mlx4_priv *priv = mlx4_priv(dev);
2089 struct mlx4_vport_state *s_info;
2090 int slave;
2091
2092 if (!mlx4_is_master(dev))
2093 return -EPROTONOSUPPORT;
2094
2095 slave = mlx4_get_slave_indx(dev, vf);
2096 if (slave < 0)
2097 return -EINVAL;
2098
2099 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2100 s_info->mac = mac;
2101 mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2102 vf, port, s_info->mac);
2103 return 0;
2104}
2105EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2106
2107int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2108{
2109 struct mlx4_priv *priv = mlx4_priv(dev);
2110 struct mlx4_vport_state *s_info;
2111 int slave;
2112
2113 if ((!mlx4_is_master(dev)) ||
2114 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2115 return -EPROTONOSUPPORT;
2116
2117 if ((vlan > 4095) || (qos > 7))
2118 return -EINVAL;
2119
2120 slave = mlx4_get_slave_indx(dev, vf);
2121 if (slave < 0)
2122 return -EINVAL;
2123
2124 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2125 if ((0 == vlan) && (0 == qos))
2126 s_info->default_vlan = MLX4_VGT;
2127 else
2128 s_info->default_vlan = vlan;
2129 s_info->default_qos = qos;
2130 return 0;
2131}
2132EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2133
2134int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2135{
2136 struct mlx4_priv *priv = mlx4_priv(dev);
2137 struct mlx4_vport_state *s_info;
2138 int slave;
2139
2140 if ((!mlx4_is_master(dev)) ||
2141 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2142 return -EPROTONOSUPPORT;
2143
2144 slave = mlx4_get_slave_indx(dev, vf);
2145 if (slave < 0)
2146 return -EINVAL;
2147
2148 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2149 s_info->spoofchk = setting;
2150
2151 return 0;
2152}
2153EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2154
2155int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2156{
2157 struct mlx4_priv *priv = mlx4_priv(dev);
2158 struct mlx4_vport_state *s_info;
2159 int slave;
2160
2161 if (!mlx4_is_master(dev))
2162 return -EPROTONOSUPPORT;
2163
2164 slave = mlx4_get_slave_indx(dev, vf);
2165 if (slave < 0)
2166 return -EINVAL;
2167
2168 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2169 ivf->vf = vf;
2170
2171 /* need to convert it to a func */
2172 ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2173 ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2174 ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2175 ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2176 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2177 ivf->mac[5] = ((s_info->mac) & 0xff);
2178
2179 ivf->vlan = s_info->default_vlan;
2180 ivf->qos = s_info->default_qos;
2181 ivf->tx_rate = s_info->tx_rate;
2182 ivf->spoofchk = s_info->spoofchk;
2183
2184 return 0;
2185}
2186EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 0706623cfb96..004e4231af67 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -240,9 +240,10 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
240 __mlx4_cq_free_icm(dev, cqn); 240 __mlx4_cq_free_icm(dev, cqn);
241} 241}
242 242
243int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 243int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
244 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 244 struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
245 unsigned vector, int collapsed) 245 struct mlx4_cq *cq, unsigned vector, int collapsed,
246 int timestamp_en)
246{ 247{
247 struct mlx4_priv *priv = mlx4_priv(dev); 248 struct mlx4_priv *priv = mlx4_priv(dev);
248 struct mlx4_cq_table *cq_table = &priv->cq_table; 249 struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -276,6 +277,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
276 memset(cq_context, 0, sizeof *cq_context); 277 memset(cq_context, 0, sizeof *cq_context);
277 278
278 cq_context->flags = cpu_to_be32(!!collapsed << 18); 279 cq_context->flags = cpu_to_be32(!!collapsed << 18);
280 if (timestamp_en)
281 cq_context->flags |= cpu_to_be32(1 << 19);
282
279 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 283 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
280 cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; 284 cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
281 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 285 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
new file mode 100644
index 000000000000..fd6441071319
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/device.h>
35
36#include "mlx4_en.h"
37
38int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
39{
40 struct mlx4_en_priv *priv = netdev_priv(dev);
41 struct mlx4_en_dev *mdev = priv->mdev;
42 int port_up = 0;
43 int err = 0;
44
45 mutex_lock(&mdev->state_lock);
46 if (priv->port_up) {
47 port_up = 1;
48 mlx4_en_stop_port(dev, 1);
49 }
50
51 mlx4_en_free_resources(priv);
52
53 en_warn(priv, "Changing Time Stamp configuration\n");
54
55 priv->hwtstamp_config.tx_type = tx_type;
56 priv->hwtstamp_config.rx_filter = rx_filter;
57
58 if (rx_filter != HWTSTAMP_FILTER_NONE)
59 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
60 else
61 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
62
63 err = mlx4_en_alloc_resources(priv);
64 if (err) {
65 en_err(priv, "Failed reallocating port resources\n");
66 goto out;
67 }
68 if (port_up) {
69 err = mlx4_en_start_port(dev);
70 if (err)
71 en_err(priv, "Failed starting port\n");
72 }
73
74out:
75 mutex_unlock(&mdev->state_lock);
76 netdev_features_change(dev);
77 return err;
78}
79
80/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
81 */
82static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
83{
84 struct mlx4_en_dev *mdev =
85 container_of(tc, struct mlx4_en_dev, cycles);
86 struct mlx4_dev *dev = mdev->dev;
87
88 return mlx4_read_clock(dev) & tc->mask;
89}
90
91u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
92{
93 u64 hi, lo;
94 struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
95
96 lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
97 hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;
98
99 return hi | lo;
100}
101
102void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
103 struct skb_shared_hwtstamps *hwts,
104 u64 timestamp)
105{
106 u64 nsec;
107
108 nsec = timecounter_cyc2time(&mdev->clock, timestamp);
109
110 memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
111 hwts->hwtstamp = ns_to_ktime(nsec);
112}
113
114void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
115{
116 struct mlx4_dev *dev = mdev->dev;
117 u64 ns;
118
119 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
120 mdev->cycles.read = mlx4_en_read_clock;
121 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
122 /* Using shift to make calculation more accurate. Since current HW
123 * clock frequency is 427 MHz, and cycles are given using a 48 bits
124 * register, the biggest shift when calculating using u64, is 14
125 * (max_cycles * multiplier < 2^64)
126 */
127 mdev->cycles.shift = 14;
128 mdev->cycles.mult =
129 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
130
131 timecounter_init(&mdev->clock, &mdev->cycles,
132 ktime_to_ns(ktime_get_real()));
133
134 /* Calculate period in seconds to call the overflow watchdog - to make
135 * sure counter is checked at least once every wrap around.
136 */
137 ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
138 do_div(ns, NSEC_PER_SEC / 2 / HZ);
139 mdev->overflow_period = ns;
140}
141
142void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
143{
144 bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
145 mdev->overflow_period);
146
147 if (timeout) {
148 timecounter_read(&mdev->clock);
149 mdev->last_overflow_check = jiffies;
150 }
151}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index b8d0854a7ad1..1e6c594d6d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
77 struct mlx4_en_dev *mdev = priv->mdev; 77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err = 0; 78 int err = 0;
79 char name[25]; 79 char name[25];
80 int timestamp_en = 0;
80 struct cpu_rmap *rmap = 81 struct cpu_rmap *rmap =
81#ifdef CONFIG_RFS_ACCEL 82#ifdef CONFIG_RFS_ACCEL
82 priv->dev->rx_cpu_rmap; 83 priv->dev->rx_cpu_rmap;
@@ -123,8 +124,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
123 if (!cq->is_tx) 124 if (!cq->is_tx)
124 cq->size = priv->rx_ring[cq->ring].actual_size; 125 cq->size = priv->rx_ring[cq->ring].actual_size;
125 126
126 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, 127 if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
127 cq->wqres.db.dma, &cq->mcq, cq->vector, 0); 128 (!cq->is_tx && priv->hwtstamp_config.rx_filter))
129 timestamp_en = 1;
130
131 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
132 &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
133 cq->vector, 0, timestamp_en);
128 if (err) 134 if (err)
129 return err; 135 return err;
130 136
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b799ab12a291..0f91222ea3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -186,7 +186,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
186 186
187static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) 187static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
188{ 188{
189 return DCB_CAP_DCBX_VER_IEEE; 189 return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
190} 190}
191 191
192static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) 192static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -253,3 +253,11 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
253 .getdcbx = mlx4_en_dcbnl_getdcbx, 253 .getdcbx = mlx4_en_dcbnl_getdcbx,
254 .setdcbx = mlx4_en_dcbnl_setdcbx, 254 .setdcbx = mlx4_en_dcbnl_setdcbx,
255}; 255};
256
257const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
258 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
259 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
260
261 .getdcbx = mlx4_en_dcbnl_getdcbx,
262 .setdcbx = mlx4_en_dcbnl_setdcbx,
263};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 00f25b5f297f..bcf4d118e98c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1147,6 +1147,35 @@ out:
1147 return err; 1147 return err;
1148} 1148}
1149 1149
1150static int mlx4_en_get_ts_info(struct net_device *dev,
1151 struct ethtool_ts_info *info)
1152{
1153 struct mlx4_en_priv *priv = netdev_priv(dev);
1154 struct mlx4_en_dev *mdev = priv->mdev;
1155 int ret;
1156
1157 ret = ethtool_op_get_ts_info(dev, info);
1158 if (ret)
1159 return ret;
1160
1161 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
1162 info->so_timestamping |=
1163 SOF_TIMESTAMPING_TX_HARDWARE |
1164 SOF_TIMESTAMPING_RX_HARDWARE |
1165 SOF_TIMESTAMPING_RAW_HARDWARE;
1166
1167 info->tx_types =
1168 (1 << HWTSTAMP_TX_OFF) |
1169 (1 << HWTSTAMP_TX_ON);
1170
1171 info->rx_filters =
1172 (1 << HWTSTAMP_FILTER_NONE) |
1173 (1 << HWTSTAMP_FILTER_ALL);
1174 }
1175
1176 return ret;
1177}
1178
1150const struct ethtool_ops mlx4_en_ethtool_ops = { 1179const struct ethtool_ops mlx4_en_ethtool_ops = {
1151 .get_drvinfo = mlx4_en_get_drvinfo, 1180 .get_drvinfo = mlx4_en_get_drvinfo,
1152 .get_settings = mlx4_en_get_settings, 1181 .get_settings = mlx4_en_get_settings,
@@ -1173,6 +1202,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
1173 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1202 .set_rxfh_indir = mlx4_en_set_rxfh_indir,
1174 .get_channels = mlx4_en_get_channels, 1203 .get_channels = mlx4_en_get_channels,
1175 .set_channels = mlx4_en_set_channels, 1204 .set_channels = mlx4_en_set_channels,
1205 .get_ts_info = mlx4_en_get_ts_info,
1176}; 1206};
1177 1207
1178 1208
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index fc27800e9c38..a5c9df07a7d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -300,6 +300,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
300 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) 300 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
301 mdev->pndev[i] = NULL; 301 mdev->pndev[i] = NULL;
302 } 302 }
303
304 /* Initialize time stamp mechanism */
305 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
306 mlx4_en_init_timestamp(mdev);
307
303 return mdev; 308 return mdev;
304 309
305err_mr: 310err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 30d78f806dc3..a69a908614e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -356,7 +356,8 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
356} 356}
357#endif 357#endif
358 358
359static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 359static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
360 __be16 proto, u16 vid)
360{ 361{
361 struct mlx4_en_priv *priv = netdev_priv(dev); 362 struct mlx4_en_priv *priv = netdev_priv(dev);
362 struct mlx4_en_dev *mdev = priv->mdev; 363 struct mlx4_en_dev *mdev = priv->mdev;
@@ -381,7 +382,8 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
381 return 0; 382 return 0;
382} 383}
383 384
384static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 385static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
386 __be16 proto, u16 vid)
385{ 387{
386 struct mlx4_en_priv *priv = netdev_priv(dev); 388 struct mlx4_en_priv *priv = netdev_priv(dev);
387 struct mlx4_en_dev *mdev = priv->mdev; 389 struct mlx4_en_dev *mdev = priv->mdev;
@@ -1359,6 +1361,27 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
1359 mutex_unlock(&mdev->state_lock); 1361 mutex_unlock(&mdev->state_lock);
1360} 1362}
1361 1363
1364/* mlx4_en_service_task - Run service task for tasks that needed to be done
1365 * periodically
1366 */
1367static void mlx4_en_service_task(struct work_struct *work)
1368{
1369 struct delayed_work *delay = to_delayed_work(work);
1370 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1371 service_task);
1372 struct mlx4_en_dev *mdev = priv->mdev;
1373
1374 mutex_lock(&mdev->state_lock);
1375 if (mdev->device_up) {
1376 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1377 mlx4_en_ptp_overflow_check(mdev);
1378
1379 queue_delayed_work(mdev->workqueue, &priv->service_task,
1380 SERVICE_TASK_DELAY);
1381 }
1382 mutex_unlock(&mdev->state_lock);
1383}
1384
1362static void mlx4_en_linkstate(struct work_struct *work) 1385static void mlx4_en_linkstate(struct work_struct *work)
1363{ 1386{
1364 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1387 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
@@ -1863,6 +1886,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
1863 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 1886 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1864 1887
1865 cancel_delayed_work(&priv->stats_task); 1888 cancel_delayed_work(&priv->stats_task);
1889 cancel_delayed_work(&priv->service_task);
1866 /* flush any pending task for this netdev */ 1890 /* flush any pending task for this netdev */
1867 flush_workqueue(mdev->workqueue); 1891 flush_workqueue(mdev->workqueue);
1868 1892
@@ -1914,6 +1938,75 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1914 return 0; 1938 return 0;
1915} 1939}
1916 1940
1941static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1942{
1943 struct mlx4_en_priv *priv = netdev_priv(dev);
1944 struct mlx4_en_dev *mdev = priv->mdev;
1945 struct hwtstamp_config config;
1946
1947 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1948 return -EFAULT;
1949
1950 /* reserved for future extensions */
1951 if (config.flags)
1952 return -EINVAL;
1953
1954 /* device doesn't support time stamping */
1955 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
1956 return -EINVAL;
1957
1958 /* TX HW timestamp */
1959 switch (config.tx_type) {
1960 case HWTSTAMP_TX_OFF:
1961 case HWTSTAMP_TX_ON:
1962 break;
1963 default:
1964 return -ERANGE;
1965 }
1966
1967 /* RX HW timestamp */
1968 switch (config.rx_filter) {
1969 case HWTSTAMP_FILTER_NONE:
1970 break;
1971 case HWTSTAMP_FILTER_ALL:
1972 case HWTSTAMP_FILTER_SOME:
1973 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1974 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1975 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1976 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1977 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1978 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1979 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1980 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1981 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1982 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1983 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1984 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1985 config.rx_filter = HWTSTAMP_FILTER_ALL;
1986 break;
1987 default:
1988 return -ERANGE;
1989 }
1990
1991 if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
1992 config.tx_type = HWTSTAMP_TX_OFF;
1993 config.rx_filter = HWTSTAMP_FILTER_NONE;
1994 }
1995
1996 return copy_to_user(ifr->ifr_data, &config,
1997 sizeof(config)) ? -EFAULT : 0;
1998}
1999
2000static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2001{
2002 switch (cmd) {
2003 case SIOCSHWTSTAMP:
2004 return mlx4_en_hwtstamp_ioctl(dev, ifr);
2005 default:
2006 return -EOPNOTSUPP;
2007 }
2008}
2009
1917static int mlx4_en_set_features(struct net_device *netdev, 2010static int mlx4_en_set_features(struct net_device *netdev,
1918 netdev_features_t features) 2011 netdev_features_t features)
1919{ 2012{
@@ -1931,77 +2024,40 @@ static int mlx4_en_set_features(struct net_device *netdev,
1931 2024
1932} 2025}
1933 2026
1934static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 2027static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
1935 struct net_device *dev,
1936 const unsigned char *addr, u16 flags)
1937{ 2028{
1938 struct mlx4_en_priv *priv = netdev_priv(dev); 2029 struct mlx4_en_priv *en_priv = netdev_priv(dev);
1939 struct mlx4_dev *mdev = priv->mdev->dev; 2030 struct mlx4_en_dev *mdev = en_priv->mdev;
1940 int err; 2031 u64 mac_u64 = mlx4_en_mac_to_u64(mac);
1941
1942 if (!mlx4_is_mfunc(mdev))
1943 return -EOPNOTSUPP;
1944 2032
1945 /* Hardware does not support aging addresses, allow only 2033 if (!is_valid_ether_addr(mac))
1946 * permanent addresses if ndm_state is given
1947 */
1948 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
1949 en_info(priv, "Add FDB only supports static addresses\n");
1950 return -EINVAL; 2034 return -EINVAL;
1951 }
1952 2035
1953 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2036 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
1954 err = dev_uc_add_excl(dev, addr);
1955 else if (is_multicast_ether_addr(addr))
1956 err = dev_mc_add_excl(dev, addr);
1957 else
1958 err = -EINVAL;
1959
1960 /* Only return duplicate errors if NLM_F_EXCL is set */
1961 if (err == -EEXIST && !(flags & NLM_F_EXCL))
1962 err = 0;
1963
1964 return err;
1965} 2037}
1966 2038
1967static int mlx4_en_fdb_del(struct ndmsg *ndm, 2039static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
1968 struct nlattr *tb[],
1969 struct net_device *dev,
1970 const unsigned char *addr)
1971{ 2040{
1972 struct mlx4_en_priv *priv = netdev_priv(dev); 2041 struct mlx4_en_priv *en_priv = netdev_priv(dev);
1973 struct mlx4_dev *mdev = priv->mdev->dev; 2042 struct mlx4_en_dev *mdev = en_priv->mdev;
1974 int err;
1975
1976 if (!mlx4_is_mfunc(mdev))
1977 return -EOPNOTSUPP;
1978 2043
1979 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 2044 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
1980 en_info(priv, "Del FDB only supports static addresses\n"); 2045}
1981 return -EINVAL;
1982 }
1983 2046
1984 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2047static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
1985 err = dev_uc_del(dev, addr); 2048{
1986 else if (is_multicast_ether_addr(addr)) 2049 struct mlx4_en_priv *en_priv = netdev_priv(dev);
1987 err = dev_mc_del(dev, addr); 2050 struct mlx4_en_dev *mdev = en_priv->mdev;
1988 else
1989 err = -EINVAL;
1990 2051
1991 return err; 2052 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
1992} 2053}
1993 2054
1994static int mlx4_en_fdb_dump(struct sk_buff *skb, 2055static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
1995 struct netlink_callback *cb,
1996 struct net_device *dev, int idx)
1997{ 2056{
1998 struct mlx4_en_priv *priv = netdev_priv(dev); 2057 struct mlx4_en_priv *en_priv = netdev_priv(dev);
1999 struct mlx4_dev *mdev = priv->mdev->dev; 2058 struct mlx4_en_dev *mdev = en_priv->mdev;
2000
2001 if (mlx4_is_mfunc(mdev))
2002 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
2003 2059
2004 return idx; 2060 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2005} 2061}
2006 2062
2007static const struct net_device_ops mlx4_netdev_ops = { 2063static const struct net_device_ops mlx4_netdev_ops = {
@@ -2014,6 +2070,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2014 .ndo_set_mac_address = mlx4_en_set_mac, 2070 .ndo_set_mac_address = mlx4_en_set_mac,
2015 .ndo_validate_addr = eth_validate_addr, 2071 .ndo_validate_addr = eth_validate_addr,
2016 .ndo_change_mtu = mlx4_en_change_mtu, 2072 .ndo_change_mtu = mlx4_en_change_mtu,
2073 .ndo_do_ioctl = mlx4_en_ioctl,
2017 .ndo_tx_timeout = mlx4_en_tx_timeout, 2074 .ndo_tx_timeout = mlx4_en_tx_timeout,
2018 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2075 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2019 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2076 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
@@ -2025,9 +2082,33 @@ static const struct net_device_ops mlx4_netdev_ops = {
2025#ifdef CONFIG_RFS_ACCEL 2082#ifdef CONFIG_RFS_ACCEL
2026 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2083 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2027#endif 2084#endif
2028 .ndo_fdb_add = mlx4_en_fdb_add, 2085};
2029 .ndo_fdb_del = mlx4_en_fdb_del, 2086
2030 .ndo_fdb_dump = mlx4_en_fdb_dump, 2087static const struct net_device_ops mlx4_netdev_ops_master = {
2088 .ndo_open = mlx4_en_open,
2089 .ndo_stop = mlx4_en_close,
2090 .ndo_start_xmit = mlx4_en_xmit,
2091 .ndo_select_queue = mlx4_en_select_queue,
2092 .ndo_get_stats = mlx4_en_get_stats,
2093 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2094 .ndo_set_mac_address = mlx4_en_set_mac,
2095 .ndo_validate_addr = eth_validate_addr,
2096 .ndo_change_mtu = mlx4_en_change_mtu,
2097 .ndo_tx_timeout = mlx4_en_tx_timeout,
2098 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2099 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2100 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2101 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2102 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2103 .ndo_get_vf_config = mlx4_en_get_vf_config,
2104#ifdef CONFIG_NET_POLL_CONTROLLER
2105 .ndo_poll_controller = mlx4_en_netpoll,
2106#endif
2107 .ndo_set_features = mlx4_en_set_features,
2108 .ndo_setup_tc = mlx4_en_setup_tc,
2109#ifdef CONFIG_RFS_ACCEL
2110 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2111#endif
2031}; 2112};
2032 2113
2033int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2114int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2088,9 +2169,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2088 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2169 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2089 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2170 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2090 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2171 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2172 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2091#ifdef CONFIG_MLX4_EN_DCB 2173#ifdef CONFIG_MLX4_EN_DCB
2092 if (!mlx4_is_slave(priv->mdev->dev)) 2174 if (!mlx4_is_slave(priv->mdev->dev)) {
2093 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2175 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
2176 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2177 } else {
2178 en_info(priv, "enabling only PFC DCB ops\n");
2179 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2180 }
2181 }
2094#endif 2182#endif
2095 2183
2096 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 2184 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
@@ -2122,6 +2210,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2122 spin_lock_init(&priv->filters_lock); 2210 spin_lock_init(&priv->filters_lock);
2123#endif 2211#endif
2124 2212
2213 /* Initialize time stamping config */
2214 priv->hwtstamp_config.flags = 0;
2215 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2216 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2217
2125 /* Allocate page for receive rings */ 2218 /* Allocate page for receive rings */
2126 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 2219 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2127 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 2220 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
@@ -2134,7 +2227,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2134 /* 2227 /*
2135 * Initialize netdev entry points 2228 * Initialize netdev entry points
2136 */ 2229 */
2137 dev->netdev_ops = &mlx4_netdev_ops; 2230 if (mlx4_is_master(priv->mdev->dev))
2231 dev->netdev_ops = &mlx4_netdev_ops_master;
2232 else
2233 dev->netdev_ops = &mlx4_netdev_ops;
2138 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 2234 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
2139 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 2235 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2140 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 2236 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -2152,8 +2248,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2152 2248
2153 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; 2249 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2154 dev->features = dev->hw_features | NETIF_F_HIGHDMA | 2250 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
2155 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2251 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2156 NETIF_F_HW_VLAN_FILTER; 2252 NETIF_F_HW_VLAN_CTAG_FILTER;
2157 dev->hw_features |= NETIF_F_LOOPBACK; 2253 dev->hw_features |= NETIF_F_LOOPBACK;
2158 2254
2159 if (mdev->dev->caps.steering_mode == 2255 if (mdev->dev->caps.steering_mode ==
@@ -2199,6 +2295,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2199 } 2295 }
2200 mlx4_en_set_default_moderation(priv); 2296 mlx4_en_set_default_moderation(priv);
2201 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2297 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
2298
2299 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2300 queue_delayed_work(mdev->workqueue, &priv->service_task,
2301 SERVICE_TASK_DELAY);
2302
2202 return 0; 2303 return 0;
2203 2304
2204out: 2305out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 10c24c784b70..91f2b2c43c12 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -42,6 +42,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
42 int user_prio, struct mlx4_qp_context *context) 42 int user_prio, struct mlx4_qp_context *context)
43{ 43{
44 struct mlx4_en_dev *mdev = priv->mdev; 44 struct mlx4_en_dev *mdev = priv->mdev;
45 struct net_device *dev = priv->dev;
45 46
46 memset(context, 0, sizeof *context); 47 memset(context, 0, sizeof *context);
47 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); 48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
@@ -65,6 +66,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
65 context->cqn_send = cpu_to_be32(cqn); 66 context->cqn_send = cpu_to_be32(cqn);
66 context->cqn_recv = cpu_to_be32(cqn); 67 context->cqn_recv = cpu_to_be32(cqn);
67 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); 68 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
69 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
70 context->param3 |= cpu_to_be32(1 << 30);
68} 71}
69 72
70 73
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c7f856308e1a..02aee1ebd203 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -320,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
320 } 320 }
321 ring->buf = ring->wqres.buf.direct.buf; 321 ring->buf = ring->wqres.buf.direct.buf;
322 322
323 ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
324
323 return 0; 325 return 0;
324 326
325err_hwq: 327err_hwq:
@@ -554,6 +556,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
554int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 556int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
555{ 557{
556 struct mlx4_en_priv *priv = netdev_priv(dev); 558 struct mlx4_en_priv *priv = netdev_priv(dev);
559 struct mlx4_en_dev *mdev = priv->mdev;
557 struct mlx4_cqe *cqe; 560 struct mlx4_cqe *cqe;
558 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 561 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
559 struct mlx4_en_rx_alloc *frags; 562 struct mlx4_en_rx_alloc *frags;
@@ -565,6 +568,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
565 int polled = 0; 568 int polled = 0;
566 int ip_summed; 569 int ip_summed;
567 int factor = priv->cqe_factor; 570 int factor = priv->cqe_factor;
571 u64 timestamp;
568 572
569 if (!priv->port_up) 573 if (!priv->port_up)
570 return 0; 574 return 0;
@@ -669,19 +673,27 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
669 gro_skb->data_len = length; 673 gro_skb->data_len = length;
670 gro_skb->ip_summed = CHECKSUM_UNNECESSARY; 674 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
671 675
672 if (cqe->vlan_my_qpn & 676 if ((cqe->vlan_my_qpn &
673 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) { 677 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
678 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
674 u16 vid = be16_to_cpu(cqe->sl_vid); 679 u16 vid = be16_to_cpu(cqe->sl_vid);
675 680
676 __vlan_hwaccel_put_tag(gro_skb, vid); 681 __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
677 } 682 }
678 683
679 if (dev->features & NETIF_F_RXHASH) 684 if (dev->features & NETIF_F_RXHASH)
680 gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); 685 gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
681 686
682 skb_record_rx_queue(gro_skb, cq->ring); 687 skb_record_rx_queue(gro_skb, cq->ring);
683 napi_gro_frags(&cq->napi);
684 688
689 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
690 timestamp = mlx4_en_get_cqe_ts(cqe);
691 mlx4_en_fill_hwtstamps(mdev,
692 skb_hwtstamps(gro_skb),
693 timestamp);
694 }
695
696 napi_gro_frags(&cq->napi);
685 goto next; 697 goto next;
686 } 698 }
687 699
@@ -714,9 +726,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
714 if (dev->features & NETIF_F_RXHASH) 726 if (dev->features & NETIF_F_RXHASH)
715 skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); 727 skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
716 728
717 if (be32_to_cpu(cqe->vlan_my_qpn) & 729 if ((be32_to_cpu(cqe->vlan_my_qpn) &
718 MLX4_CQE_VLAN_PRESENT_MASK) 730 MLX4_CQE_VLAN_PRESENT_MASK) &&
719 __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid)); 731 (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
732 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
733
734 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
735 timestamp = mlx4_en_get_cqe_ts(cqe);
736 mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
737 timestamp);
738 }
720 739
721 /* Push it up the stack */ 740 /* Push it up the stack */
722 netif_receive_skb(skb); 741 netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 3488c6d9e6b5..2448f0d669e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
58 58
59 /* build the pkt before xmit */ 59 /* build the pkt before xmit */
60 skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); 60 skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
61 if (!skb) { 61 if (!skb)
62 en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
63 return -ENOMEM; 62 return -ENOMEM;
64 } 63
65 skb_reserve(skb, NET_IP_ALIGN); 64 skb_reserve(skb, NET_IP_ALIGN);
66 65
67 ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); 66 ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 49308cc65ee7..4e6877a032a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -118,6 +118,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
118 } else 118 } else
119 ring->bf_enabled = true; 119 ring->bf_enabled = true;
120 120
121 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
122
121 return 0; 123 return 0;
122 124
123err_map: 125err_map:
@@ -192,8 +194,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
192 194
193static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, 195static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
194 struct mlx4_en_tx_ring *ring, 196 struct mlx4_en_tx_ring *ring,
195 int index, u8 owner) 197 int index, u8 owner, u64 timestamp)
196{ 198{
199 struct mlx4_en_dev *mdev = priv->mdev;
197 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; 200 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
198 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; 201 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
199 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; 202 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
@@ -204,6 +207,12 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
204 int i; 207 int i;
205 __be32 *ptr = (__be32 *)tx_desc; 208 __be32 *ptr = (__be32 *)tx_desc;
206 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); 209 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
210 struct skb_shared_hwtstamps hwts;
211
212 if (timestamp) {
213 mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp);
214 skb_tstamp_tx(skb, &hwts);
215 }
207 216
208 /* Optimize the common case when there are no wraparounds */ 217 /* Optimize the common case when there are no wraparounds */
209 if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { 218 if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
@@ -289,7 +298,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
289 while (ring->cons != ring->prod) { 298 while (ring->cons != ring->prod) {
290 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, 299 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
291 ring->cons & ring->size_mask, 300 ring->cons & ring->size_mask,
292 !!(ring->cons & ring->size)); 301 !!(ring->cons & ring->size), 0);
293 ring->cons += ring->last_nr_txbb; 302 ring->cons += ring->last_nr_txbb;
294 cnt++; 303 cnt++;
295 } 304 }
@@ -318,6 +327,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
318 u32 packets = 0; 327 u32 packets = 0;
319 u32 bytes = 0; 328 u32 bytes = 0;
320 int factor = priv->cqe_factor; 329 int factor = priv->cqe_factor;
330 u64 timestamp = 0;
321 331
322 if (!priv->port_up) 332 if (!priv->port_up)
323 return; 333 return;
@@ -341,11 +351,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
341 do { 351 do {
342 txbbs_skipped += ring->last_nr_txbb; 352 txbbs_skipped += ring->last_nr_txbb;
343 ring_index = (ring_index + ring->last_nr_txbb) & size_mask; 353 ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
354 if (ring->tx_info[ring_index].ts_requested)
355 timestamp = mlx4_en_get_cqe_ts(cqe);
356
344 /* free next descriptor */ 357 /* free next descriptor */
345 ring->last_nr_txbb = mlx4_en_free_tx_desc( 358 ring->last_nr_txbb = mlx4_en_free_tx_desc(
346 priv, ring, ring_index, 359 priv, ring, ring_index,
347 !!((ring->cons + txbbs_skipped) & 360 !!((ring->cons + txbbs_skipped) &
348 ring->size)); 361 ring->size), timestamp);
349 packets++; 362 packets++;
350 bytes += ring->tx_info[ring_index].nr_bytes; 363 bytes += ring->tx_info[ring_index].nr_bytes;
351 } while (ring_index != new_index); 364 } while (ring_index != new_index);
@@ -629,6 +642,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
629 tx_info->skb = skb; 642 tx_info->skb = skb;
630 tx_info->nr_txbb = nr_txbb; 643 tx_info->nr_txbb = nr_txbb;
631 644
645 /*
646 * For timestamping add flag to skb_shinfo and
647 * set flag for further reference
648 */
649 if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
650 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
651 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
652 tx_info->ts_requested = 1;
653 }
654
632 /* Prepare ctrl segement apart opcode+ownership, which depends on 655 /* Prepare ctrl segement apart opcode+ownership, which depends on
633 * whether LSO is used */ 656 * whether LSO is used */
634 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); 657 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
@@ -729,6 +752,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
729 if (bounce) 752 if (bounce)
730 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 753 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
731 754
755 skb_tx_timestamp(skb);
756
732 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { 757 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
733 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 758 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
734 op_own |= htonl((bf_index & 0xffff) << 8); 759 op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6245579962d..b147bdd40768 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -91,7 +91,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
91 [ 8] = "P_Key violation counter", 91 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter", 92 [ 9] = "Q_Key violation counter",
93 [10] = "VMM", 93 [10] = "VMM",
94 [12] = "DPDP", 94 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers", 95 [15] = "Big LSO headers",
96 [16] = "MW support", 96 [16] = "MW support",
97 [17] = "APM support", 97 [17] = "APM support",
@@ -109,6 +109,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
109 [41] = "Unicast VEP steering support", 109 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support", 110 [42] = "Multicast VEP steering support",
111 [48] = "Counters support", 111 [48] = "Counters support",
112 [53] = "Port ETS Scheduler support",
113 [55] = "Port link type sensing support",
112 [59] = "Port management change event support", 114 [59] = "Port management change event support",
113 [61] = "64 byte EQE support", 115 [61] = "64 byte EQE support",
114 [62] = "64 byte CQE support", 116 [62] = "64 byte CQE support",
@@ -128,7 +130,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
128 [1] = "RSS Toeplitz Hash Function support", 130 [1] = "RSS Toeplitz Hash Function support",
129 [2] = "RSS XOR Hash Function support", 131 [2] = "RSS XOR Hash Function support",
130 [3] = "Device manage flow steering support", 132 [3] = "Device manage flow steering support",
131 [4] = "Automatic mac reassignment support" 133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support"
132 }; 135 };
133 int i; 136 int i;
134 137
@@ -442,6 +445,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
442#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 445#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
443#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 446#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
444#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 447#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
448#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
445#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 449#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
446#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 450#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
447#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 451#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
@@ -464,6 +468,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
464#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 468#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
465#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 469#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
466#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 470#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
471#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
467#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 472#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
468#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 473#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
469#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 474#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -558,6 +563,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
558 dev_cap->fs_max_num_qp_per_entry = field; 563 dev_cap->fs_max_num_qp_per_entry = field;
559 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 564 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
560 dev_cap->stat_rate_support = stat_rate; 565 dev_cap->stat_rate_support = stat_rate;
566 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
567 if (field & 0x80)
568 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
561 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 569 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
562 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 570 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
563 dev_cap->flags = flags | (u64)ext_flags << 32; 571 dev_cap->flags = flags | (u64)ext_flags << 32;
@@ -648,6 +656,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
648 MLX4_GET(dev_cap->max_counters, outbox, 656 MLX4_GET(dev_cap->max_counters, outbox,
649 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); 657 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
650 658
659 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
660 if (field32 & (1 << 26))
661 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
662 if (field32 & (1 << 20))
663 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
664
651 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 665 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
652 for (i = 1; i <= dev_cap->num_ports; ++i) { 666 for (i = 1; i <= dev_cap->num_ports; ++i) {
653 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 667 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -777,6 +791,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
777 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 791 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
778 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 792 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
779 793
794 /* For guests, disable timestamp */
795 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
796 field &= 0x7f;
797 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
798
780 /* For guests, report Blueflame disabled */ 799 /* For guests, report Blueflame disabled */
781 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 800 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
782 field &= 0x7f; 801 field &= 0x7f;
@@ -804,6 +823,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
804 struct mlx4_cmd_mailbox *outbox, 823 struct mlx4_cmd_mailbox *outbox,
805 struct mlx4_cmd_info *cmd) 824 struct mlx4_cmd_info *cmd)
806{ 825{
826 struct mlx4_priv *priv = mlx4_priv(dev);
807 u64 def_mac; 827 u64 def_mac;
808 u8 port_type; 828 u8 port_type;
809 u16 short_field; 829 u16 short_field;
@@ -821,6 +841,9 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
821 /* set slave default_mac address */ 841 /* set slave default_mac address */
822 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); 842 MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
823 def_mac += slave << 8; 843 def_mac += slave << 8;
844 /* if config MAC in DB use it */
845 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
846 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
824 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 847 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
825 848
826 /* get port type - currently only eth is enabled */ 849 /* get port type - currently only eth is enabled */
@@ -1006,6 +1029,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
1006#define QUERY_FW_COMM_BASE_OFFSET 0x40 1029#define QUERY_FW_COMM_BASE_OFFSET 0x40
1007#define QUERY_FW_COMM_BAR_OFFSET 0x48 1030#define QUERY_FW_COMM_BAR_OFFSET 0x48
1008 1031
1032#define QUERY_FW_CLOCK_OFFSET 0x50
1033#define QUERY_FW_CLOCK_BAR 0x58
1034
1009 mailbox = mlx4_alloc_cmd_mailbox(dev); 1035 mailbox = mlx4_alloc_cmd_mailbox(dev);
1010 if (IS_ERR(mailbox)) 1036 if (IS_ERR(mailbox))
1011 return PTR_ERR(mailbox); 1037 return PTR_ERR(mailbox);
@@ -1080,6 +1106,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
1080 fw->comm_bar, fw->comm_base); 1106 fw->comm_bar, fw->comm_base);
1081 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1107 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1082 1108
1109 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1110 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1111 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1112 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1113 fw->clock_bar, fw->clock_offset);
1114
1083 /* 1115 /*
1084 * Round up number of system pages needed in case 1116 * Round up number of system pages needed in case
1085 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 1117 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
@@ -1367,6 +1399,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1367 u8 byte_field; 1399 u8 byte_field;
1368 1400
1369#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 1401#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1402#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1370 1403
1371 mailbox = mlx4_alloc_cmd_mailbox(dev); 1404 mailbox = mlx4_alloc_cmd_mailbox(dev);
1372 if (IS_ERR(mailbox)) 1405 if (IS_ERR(mailbox))
@@ -1381,6 +1414,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1381 goto out; 1414 goto out;
1382 1415
1383 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 1416 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1417 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1384 1418
1385 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1419 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1386 1420
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 151c2bb380a6..fdf41665a059 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -162,6 +162,7 @@ struct mlx4_init_hca_param {
162 u64 global_caps; 162 u64 global_caps;
163 u16 log_mc_entry_sz; 163 u16 log_mc_entry_sz;
164 u16 log_mc_hash_sz; 164 u16 log_mc_hash_sz;
165 u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
165 u8 log_num_qps; 166 u8 log_num_qps;
166 u8 log_num_srqs; 167 u8 log_num_srqs;
167 u8 log_num_cqs; 168 u8 log_num_cqs;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 16abde20e1fc..0d32a82458bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -513,6 +513,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
513 513
514 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 514 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
515 515
516 dev->caps.hca_core_clock = hca_param.hca_core_clock;
517
516 memset(&dev_cap, 0, sizeof(dev_cap)); 518 memset(&dev_cap, 0, sizeof(dev_cap));
517 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 519 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
518 err = mlx4_dev_cap(dev, &dev_cap); 520 err = mlx4_dev_cap(dev, &dev_cap);
@@ -1226,8 +1228,53 @@ static void unmap_bf_area(struct mlx4_dev *dev)
1226 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1228 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1227} 1229}
1228 1230
1231cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1232{
1233 u32 clockhi, clocklo, clockhi1;
1234 cycle_t cycles;
1235 int i;
1236 struct mlx4_priv *priv = mlx4_priv(dev);
1237
1238 for (i = 0; i < 10; i++) {
1239 clockhi = swab32(readl(priv->clock_mapping));
1240 clocklo = swab32(readl(priv->clock_mapping + 4));
1241 clockhi1 = swab32(readl(priv->clock_mapping));
1242 if (clockhi == clockhi1)
1243 break;
1244 }
1245
1246 cycles = (u64) clockhi << 32 | (u64) clocklo;
1247
1248 return cycles;
1249}
1250EXPORT_SYMBOL_GPL(mlx4_read_clock);
1251
1252
1253static int map_internal_clock(struct mlx4_dev *dev)
1254{
1255 struct mlx4_priv *priv = mlx4_priv(dev);
1256
1257 priv->clock_mapping =
1258 ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
1259 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1260
1261 if (!priv->clock_mapping)
1262 return -ENOMEM;
1263
1264 return 0;
1265}
1266
1267static void unmap_internal_clock(struct mlx4_dev *dev)
1268{
1269 struct mlx4_priv *priv = mlx4_priv(dev);
1270
1271 if (priv->clock_mapping)
1272 iounmap(priv->clock_mapping);
1273}
1274
1229static void mlx4_close_hca(struct mlx4_dev *dev) 1275static void mlx4_close_hca(struct mlx4_dev *dev)
1230{ 1276{
1277 unmap_internal_clock(dev);
1231 unmap_bf_area(dev); 1278 unmap_bf_area(dev);
1232 if (mlx4_is_slave(dev)) 1279 if (mlx4_is_slave(dev))
1233 mlx4_slave_exit(dev); 1280 mlx4_slave_exit(dev);
@@ -1445,6 +1492,37 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1445 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1492 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
1446 goto err_free_icm; 1493 goto err_free_icm;
1447 } 1494 }
1495 /*
1496 * If TS is supported by FW
1497 * read HCA frequency by QUERY_HCA command
1498 */
1499 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
1500 memset(&init_hca, 0, sizeof(init_hca));
1501 err = mlx4_QUERY_HCA(dev, &init_hca);
1502 if (err) {
1503 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
1504 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1505 } else {
1506 dev->caps.hca_core_clock =
1507 init_hca.hca_core_clock;
1508 }
1509
1510 /* In case we got HCA frequency 0 - disable timestamping
1511 * to avoid dividing by zero
1512 */
1513 if (!dev->caps.hca_core_clock) {
1514 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1515 mlx4_err(dev,
1516 "HCA frequency is 0. Timestamping is not supported.");
1517 } else if (map_internal_clock(dev)) {
1518 /*
1519 * Map internal clock,
1520 * in case of failure disable timestamping
1521 */
1522 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1523 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
1524 }
1525 }
1448 } else { 1526 } else {
1449 err = mlx4_init_slave(dev); 1527 err = mlx4_init_slave(dev);
1450 if (err) { 1528 if (err) {
@@ -1478,6 +1556,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1478 return 0; 1556 return 0;
1479 1557
1480unmap_bf: 1558unmap_bf:
1559 unmap_internal_clock(dev);
1481 unmap_bf_area(dev); 1560 unmap_bf_area(dev);
1482 1561
1483err_close: 1562err_close:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 52685524708d..ffc78d2cb0cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1125,28 +1125,11 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1125 return err; 1125 return err;
1126} 1126}
1127 1127
1128int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1128int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
1129 u8 port, int block_mcast_loopback, 1129 u8 gid[16], u8 port,
1130 enum mlx4_protocol prot, u64 *reg_id) 1130 int block_mcast_loopback,
1131 enum mlx4_protocol prot, u64 *reg_id)
1131{ 1132{
1132
1133 switch (dev->caps.steering_mode) {
1134 case MLX4_STEERING_MODE_A0:
1135 if (prot == MLX4_PROT_ETH)
1136 return 0;
1137
1138 case MLX4_STEERING_MODE_B0:
1139 if (prot == MLX4_PROT_ETH)
1140 gid[7] |= (MLX4_MC_STEER << 1);
1141
1142 if (mlx4_is_mfunc(dev))
1143 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1144 block_mcast_loopback, prot);
1145 return mlx4_qp_attach_common(dev, qp, gid,
1146 block_mcast_loopback, prot,
1147 MLX4_MC_STEER);
1148
1149 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
1150 struct mlx4_spec_list spec = { {NULL} }; 1133 struct mlx4_spec_list spec = { {NULL} };
1151 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1134 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
1152 1135
@@ -1180,8 +1163,32 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1180 list_add_tail(&spec.list, &rule.list); 1163 list_add_tail(&spec.list, &rule.list);
1181 1164
1182 return mlx4_flow_attach(dev, &rule, reg_id); 1165 return mlx4_flow_attach(dev, &rule, reg_id);
1183 } 1166}
1184 1167
1168int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1169 u8 port, int block_mcast_loopback,
1170 enum mlx4_protocol prot, u64 *reg_id)
1171{
1172 switch (dev->caps.steering_mode) {
1173 case MLX4_STEERING_MODE_A0:
1174 if (prot == MLX4_PROT_ETH)
1175 return 0;
1176
1177 case MLX4_STEERING_MODE_B0:
1178 if (prot == MLX4_PROT_ETH)
1179 gid[7] |= (MLX4_MC_STEER << 1);
1180
1181 if (mlx4_is_mfunc(dev))
1182 return mlx4_QP_ATTACH(dev, qp, gid, 1,
1183 block_mcast_loopback, prot);
1184 return mlx4_qp_attach_common(dev, qp, gid,
1185 block_mcast_loopback, prot,
1186 MLX4_MC_STEER);
1187
1188 case MLX4_STEERING_MODE_DEVICE_MANAGED:
1189 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
1190 block_mcast_loopback,
1191 prot, reg_id);
1185 default: 1192 default:
1186 return -EINVAL; 1193 return -EINVAL;
1187 } 1194 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d738454116a0..eac3dae10efe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -87,7 +87,8 @@ enum {
87 MLX4_HCR_SIZE = 0x0001c, 87 MLX4_HCR_SIZE = 0x0001c,
88 MLX4_CLR_INT_SIZE = 0x00008, 88 MLX4_CLR_INT_SIZE = 0x00008,
89 MLX4_SLAVE_COMM_BASE = 0x0, 89 MLX4_SLAVE_COMM_BASE = 0x0,
90 MLX4_COMM_PAGESIZE = 0x1000 90 MLX4_COMM_PAGESIZE = 0x1000,
91 MLX4_CLOCK_SIZE = 0x00008
91}; 92};
92 93
93enum { 94enum {
@@ -403,6 +404,7 @@ struct mlx4_fw {
403 u64 clr_int_base; 404 u64 clr_int_base;
404 u64 catas_offset; 405 u64 catas_offset;
405 u64 comm_base; 406 u64 comm_base;
407 u64 clock_offset;
406 struct mlx4_icm *fw_icm; 408 struct mlx4_icm *fw_icm;
407 struct mlx4_icm *aux_icm; 409 struct mlx4_icm *aux_icm;
408 u32 catas_size; 410 u32 catas_size;
@@ -410,6 +412,7 @@ struct mlx4_fw {
410 u8 clr_int_bar; 412 u8 clr_int_bar;
411 u8 catas_bar; 413 u8 catas_bar;
412 u8 comm_bar; 414 u8 comm_bar;
415 u8 clock_bar;
413}; 416};
414 417
415struct mlx4_comm { 418struct mlx4_comm {
@@ -470,6 +473,30 @@ struct mlx4_slave_state {
470 enum slave_port_state port_state[MLX4_MAX_PORTS + 1]; 473 enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
471}; 474};
472 475
476#define MLX4_VGT 4095
477#define NO_INDX (-1)
478
479struct mlx4_vport_state {
480 u64 mac;
481 u16 default_vlan;
482 u8 default_qos;
483 u32 tx_rate;
484 bool spoofchk;
485};
486
487struct mlx4_vf_admin_state {
488 struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
489};
490
491struct mlx4_vport_oper_state {
492 struct mlx4_vport_state state;
493 int mac_idx;
494 int vlan_idx;
495};
496struct mlx4_vf_oper_state {
497 struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
498};
499
473struct slave_list { 500struct slave_list {
474 struct mutex mutex; 501 struct mutex mutex;
475 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; 502 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
@@ -500,6 +527,8 @@ struct mlx4_master_qp0_state {
500 527
501struct mlx4_mfunc_master_ctx { 528struct mlx4_mfunc_master_ctx {
502 struct mlx4_slave_state *slave_state; 529 struct mlx4_slave_state *slave_state;
530 struct mlx4_vf_admin_state *vf_admin;
531 struct mlx4_vf_oper_state *vf_oper;
503 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 532 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
504 int init_port_ref[MLX4_MAX_PORTS + 1]; 533 int init_port_ref[MLX4_MAX_PORTS + 1];
505 u16 max_mtu[MLX4_MAX_PORTS + 1]; 534 u16 max_mtu[MLX4_MAX_PORTS + 1];
@@ -826,6 +855,7 @@ struct mlx4_priv {
826 struct list_head bf_list; 855 struct list_head bf_list;
827 struct mutex bf_mutex; 856 struct mutex bf_mutex;
828 struct io_mapping *bf_mapping; 857 struct io_mapping *bf_mapping;
858 void __iomem *clock_mapping;
829 int reserved_mtts; 859 int reserved_mtts;
830 int fs_hash_mode; 860 int fs_hash_mode;
831 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 861 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
@@ -1127,6 +1157,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1127 1157
1128void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1158void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1129void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1159void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1160void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
1161int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1130 1162
1131int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); 1163int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
1132/* resource tracker functions*/ 1164/* resource tracker functions*/
@@ -1190,6 +1222,10 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1190int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1222int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1191 int block_mcast_loopback, enum mlx4_protocol prot, 1223 int block_mcast_loopback, enum mlx4_protocol prot,
1192 enum mlx4_steer_type steer); 1224 enum mlx4_steer_type steer);
1225int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
1226 u8 gid[16], u8 port,
1227 int block_mcast_loopback,
1228 enum mlx4_protocol prot, u64 *reg_id);
1193int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 1229int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1194 struct mlx4_vhcr *vhcr, 1230 struct mlx4_vhcr *vhcr,
1195 struct mlx4_cmd_mailbox *inbox, 1231 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f710b7ce0dcb..b1d7657b2bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
43#include <linux/net_tstamp.h>
43#ifdef CONFIG_MLX4_EN_DCB 44#ifdef CONFIG_MLX4_EN_DCB
44#include <linux/dcbnl.h> 45#include <linux/dcbnl.h>
45#endif 46#endif
@@ -77,6 +78,7 @@
77#define STAMP_SHIFT 31 78#define STAMP_SHIFT 31
78#define STAMP_VAL 0x7fffffff 79#define STAMP_VAL 0x7fffffff
79#define STATS_DELAY (HZ / 4) 80#define STATS_DELAY (HZ / 4)
81#define SERVICE_TASK_DELAY (HZ / 4)
80#define MAX_NUM_OF_FS_RULES 256 82#define MAX_NUM_OF_FS_RULES 256
81 83
82#define MLX4_EN_FILTER_HASH_SHIFT 4 84#define MLX4_EN_FILTER_HASH_SHIFT 4
@@ -207,6 +209,7 @@ struct mlx4_en_tx_info {
207 u8 linear; 209 u8 linear;
208 u8 data_offset; 210 u8 data_offset;
209 u8 inl; 211 u8 inl;
212 u8 ts_requested;
210}; 213};
211 214
212 215
@@ -262,6 +265,7 @@ struct mlx4_en_tx_ring {
262 struct mlx4_bf bf; 265 struct mlx4_bf bf;
263 bool bf_enabled; 266 bool bf_enabled;
264 struct netdev_queue *tx_queue; 267 struct netdev_queue *tx_queue;
268 int hwtstamp_tx_type;
265}; 269};
266 270
267struct mlx4_en_rx_desc { 271struct mlx4_en_rx_desc {
@@ -288,6 +292,7 @@ struct mlx4_en_rx_ring {
288 unsigned long packets; 292 unsigned long packets;
289 unsigned long csum_ok; 293 unsigned long csum_ok;
290 unsigned long csum_none; 294 unsigned long csum_none;
295 int hwtstamp_rx_filter;
291}; 296};
292 297
293struct mlx4_en_cq { 298struct mlx4_en_cq {
@@ -348,6 +353,10 @@ struct mlx4_en_dev {
348 u32 priv_pdn; 353 u32 priv_pdn;
349 spinlock_t uar_lock; 354 spinlock_t uar_lock;
350 u8 mac_removed[MLX4_MAX_PORTS + 1]; 355 u8 mac_removed[MLX4_MAX_PORTS + 1];
356 struct cyclecounter cycles;
357 struct timecounter clock;
358 unsigned long last_overflow_check;
359 unsigned long overflow_period;
351}; 360};
352 361
353 362
@@ -512,6 +521,7 @@ struct mlx4_en_priv {
512 struct work_struct watchdog_task; 521 struct work_struct watchdog_task;
513 struct work_struct linkstate_task; 522 struct work_struct linkstate_task;
514 struct delayed_work stats_task; 523 struct delayed_work stats_task;
524 struct delayed_work service_task;
515 struct mlx4_en_perf_stats pstats; 525 struct mlx4_en_perf_stats pstats;
516 struct mlx4_en_pkt_stats pkstats; 526 struct mlx4_en_pkt_stats pkstats;
517 struct mlx4_en_port_stats port_stats; 527 struct mlx4_en_port_stats port_stats;
@@ -525,6 +535,7 @@ struct mlx4_en_priv {
525 struct device *ddev; 535 struct device *ddev;
526 int base_tx_qpn; 536 int base_tx_qpn;
527 struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; 537 struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
538 struct hwtstamp_config hwtstamp_config;
528 539
529#ifdef CONFIG_MLX4_EN_DCB 540#ifdef CONFIG_MLX4_EN_DCB
530 struct ieee_ets ets; 541 struct ieee_ets ets;
@@ -624,6 +635,7 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
624 635
625#ifdef CONFIG_MLX4_EN_DCB 636#ifdef CONFIG_MLX4_EN_DCB
626extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; 637extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
638extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
627#endif 639#endif
628 640
629int mlx4_en_setup_tc(struct net_device *dev, u8 up); 641int mlx4_en_setup_tc(struct net_device *dev, u8 up);
@@ -636,9 +648,21 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
636#define MLX4_EN_NUM_SELF_TEST 5 648#define MLX4_EN_NUM_SELF_TEST 5
637void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 649void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
638u64 mlx4_en_mac_to_u64(u8 *addr); 650u64 mlx4_en_mac_to_u64(u8 *addr);
651void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
639 652
640/* 653/*
641 * Globals 654 * Functions for time stamping
655 */
656u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
657void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
658 struct skb_shared_hwtstamps *hwts,
659 u64 timestamp);
660void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
661int mlx4_en_timestamp_config(struct net_device *dev,
662 int tx_type,
663 int rx_filter);
664
665/* Globals
642 */ 666 */
643extern const struct ethtool_ops mlx4_en_ethtool_ops; 667extern const struct ethtool_ops mlx4_en_ethtool_ops;
644 668
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 10c57c86388b..946e0af5faef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/if_vlan.h>
35#include <linux/export.h> 36#include <linux/export.h>
36 37
37#include <linux/mlx4/cmd.h> 38#include <linux/mlx4/cmd.h>
@@ -140,8 +141,9 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
140 } 141 }
141 142
142 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 143 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
143 /* MAC already registered, Must not have duplicates */ 144 /* MAC already registered, increment ref count */
144 err = -EEXIST; 145 err = i;
146 ++table->refs[i];
145 goto out; 147 goto out;
146 } 148 }
147 } 149 }
@@ -164,7 +166,7 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
164 table->entries[free] = 0; 166 table->entries[free] = 0;
165 goto out; 167 goto out;
166 } 168 }
167 169 table->refs[free] = 1;
168 err = free; 170 err = free;
169 ++table->total; 171 ++table->total;
170out: 172out:
@@ -205,12 +207,16 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
205 struct mlx4_mac_table *table = &info->mac_table; 207 struct mlx4_mac_table *table = &info->mac_table;
206 int index; 208 int index;
207 209
208 index = find_index(dev, table, mac);
209
210 mutex_lock(&table->mutex); 210 mutex_lock(&table->mutex);
211 index = find_index(dev, table, mac);
211 212
212 if (validate_index(dev, table, index)) 213 if (validate_index(dev, table, index))
213 goto out; 214 goto out;
215 if (--table->refs[index]) {
216 mlx4_dbg(dev, "Have more references for index %d,"
217 "no need to modify mac table\n", index);
218 goto out;
219 }
214 220
215 table->entries[index] = 0; 221 table->entries[index] = 0;
216 mlx4_set_port_mac_table(dev, port, table->entries); 222 mlx4_set_port_mac_table(dev, port, table->entries);
@@ -304,7 +310,7 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
304} 310}
305EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); 311EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
306 312
307static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, 313int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
308 int *index) 314 int *index)
309{ 315{
310 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 316 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -378,7 +384,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
378} 384}
379EXPORT_SYMBOL_GPL(mlx4_register_vlan); 385EXPORT_SYMBOL_GPL(mlx4_register_vlan);
380 386
381static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 387void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
382{ 388{
383 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 389 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
384 390
@@ -517,7 +523,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
517 /* Mtu is configured as the max MTU among all the 523 /* Mtu is configured as the max MTU among all the
518 * the functions on the port. */ 524 * the functions on the port. */
519 mtu = be16_to_cpu(gen_context->mtu); 525 mtu = be16_to_cpu(gen_context->mtu);
520 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]); 526 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
527 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
521 prev_mtu = slave_st->mtu[port]; 528 prev_mtu = slave_st->mtu[port];
522 slave_st->mtu[port] = mtu; 529 slave_st->mtu[port] = mtu;
523 if (mtu > master->max_mtu[port]) 530 if (mtu > master->max_mtu[port])
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1391b52f443a..e12e0d2e0ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -75,6 +75,7 @@ struct res_gid {
75 u8 gid[16]; 75 u8 gid[16];
76 enum mlx4_protocol prot; 76 enum mlx4_protocol prot;
77 enum mlx4_steer_type steer; 77 enum mlx4_steer_type steer;
78 u64 reg_id;
78}; 79};
79 80
80enum res_qp_states { 81enum res_qp_states {
@@ -352,6 +353,47 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
352 } 353 }
353} 354}
354 355
356static int update_vport_qp_param(struct mlx4_dev *dev,
357 struct mlx4_cmd_mailbox *inbox,
358 u8 slave)
359{
360 struct mlx4_qp_context *qpc = inbox->buf + 8;
361 struct mlx4_vport_oper_state *vp_oper;
362 struct mlx4_priv *priv;
363 u32 qp_type;
364 int port;
365
366 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
367 priv = mlx4_priv(dev);
368 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
369
370 if (MLX4_VGT != vp_oper->state.default_vlan) {
371 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
372 if (MLX4_QP_ST_RC == qp_type)
373 return -EINVAL;
374
375 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
376 qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
377 qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
378 qpc->pri_path.sched_queue &= 0xC7;
379 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
380 mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
381 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
382 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
383 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
384 (int)(qpc->pri_path.fl));
385 }
386 if (vp_oper->state.spoofchk) {
387 qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
388 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
389 mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
390 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
391 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
392 vp_oper->mac_idx);
393 }
394 return 0;
395}
396
355static int mpt_mask(struct mlx4_dev *dev) 397static int mpt_mask(struct mlx4_dev *dev)
356{ 398{
357 return dev->caps.num_mpts - 1; 399 return dev->caps.num_mpts - 1;
@@ -2797,6 +2839,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2797 update_pkey_index(dev, slave, inbox); 2839 update_pkey_index(dev, slave, inbox);
2798 update_gid(dev, inbox, (u8)slave); 2840 update_gid(dev, inbox, (u8)slave);
2799 adjust_proxy_tun_qkey(dev, vhcr, qpc); 2841 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2842 err = update_vport_qp_param(dev, inbox, slave);
2843 if (err)
2844 return err;
2800 2845
2801 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2846 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2802} 2847}
@@ -2934,7 +2979,7 @@ static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2934 2979
2935static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 2980static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2936 u8 *gid, enum mlx4_protocol prot, 2981 u8 *gid, enum mlx4_protocol prot,
2937 enum mlx4_steer_type steer) 2982 enum mlx4_steer_type steer, u64 reg_id)
2938{ 2983{
2939 struct res_gid *res; 2984 struct res_gid *res;
2940 int err; 2985 int err;
@@ -2951,6 +2996,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2951 memcpy(res->gid, gid, 16); 2996 memcpy(res->gid, gid, 16);
2952 res->prot = prot; 2997 res->prot = prot;
2953 res->steer = steer; 2998 res->steer = steer;
2999 res->reg_id = reg_id;
2954 list_add_tail(&res->list, &rqp->mcg_list); 3000 list_add_tail(&res->list, &rqp->mcg_list);
2955 err = 0; 3001 err = 0;
2956 } 3002 }
@@ -2961,7 +3007,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2961 3007
2962static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, 3008static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2963 u8 *gid, enum mlx4_protocol prot, 3009 u8 *gid, enum mlx4_protocol prot,
2964 enum mlx4_steer_type steer) 3010 enum mlx4_steer_type steer, u64 *reg_id)
2965{ 3011{
2966 struct res_gid *res; 3012 struct res_gid *res;
2967 int err; 3013 int err;
@@ -2971,6 +3017,7 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2971 if (!res || res->prot != prot || res->steer != steer) 3017 if (!res || res->prot != prot || res->steer != steer)
2972 err = -EINVAL; 3018 err = -EINVAL;
2973 else { 3019 else {
3020 *reg_id = res->reg_id;
2974 list_del(&res->list); 3021 list_del(&res->list);
2975 kfree(res); 3022 kfree(res);
2976 err = 0; 3023 err = 0;
@@ -2980,6 +3027,37 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2980 return err; 3027 return err;
2981} 3028}
2982 3029
3030static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3031 int block_loopback, enum mlx4_protocol prot,
3032 enum mlx4_steer_type type, u64 *reg_id)
3033{
3034 switch (dev->caps.steering_mode) {
3035 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3036 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3037 block_loopback, prot,
3038 reg_id);
3039 case MLX4_STEERING_MODE_B0:
3040 return mlx4_qp_attach_common(dev, qp, gid,
3041 block_loopback, prot, type);
3042 default:
3043 return -EINVAL;
3044 }
3045}
3046
3047static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3048 enum mlx4_protocol prot, enum mlx4_steer_type type,
3049 u64 reg_id)
3050{
3051 switch (dev->caps.steering_mode) {
3052 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3053 return mlx4_flow_detach(dev, reg_id);
3054 case MLX4_STEERING_MODE_B0:
3055 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3056 default:
3057 return -EINVAL;
3058 }
3059}
3060
2983int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3061int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2984 struct mlx4_vhcr *vhcr, 3062 struct mlx4_vhcr *vhcr,
2985 struct mlx4_cmd_mailbox *inbox, 3063 struct mlx4_cmd_mailbox *inbox,
@@ -2992,14 +3070,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2992 int err; 3070 int err;
2993 int qpn; 3071 int qpn;
2994 struct res_qp *rqp; 3072 struct res_qp *rqp;
3073 u64 reg_id = 0;
2995 int attach = vhcr->op_modifier; 3074 int attach = vhcr->op_modifier;
2996 int block_loopback = vhcr->in_modifier >> 31; 3075 int block_loopback = vhcr->in_modifier >> 31;
2997 u8 steer_type_mask = 2; 3076 u8 steer_type_mask = 2;
2998 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 3077 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2999 3078
3000 if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
3001 return -EINVAL;
3002
3003 qpn = vhcr->in_modifier & 0xffffff; 3079 qpn = vhcr->in_modifier & 0xffffff;
3004 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3080 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3005 if (err) 3081 if (err)
@@ -3007,30 +3083,32 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3007 3083
3008 qp.qpn = qpn; 3084 qp.qpn = qpn;
3009 if (attach) { 3085 if (attach) {
3010 err = add_mcg_res(dev, slave, rqp, gid, prot, type); 3086 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3011 if (err) 3087 type, &reg_id);
3088 if (err) {
3089 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3012 goto ex_put; 3090 goto ex_put;
3013 3091 }
3014 err = mlx4_qp_attach_common(dev, &qp, gid, 3092 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3015 block_loopback, prot, type);
3016 if (err) 3093 if (err)
3017 goto ex_rem; 3094 goto ex_detach;
3018 } else { 3095 } else {
3019 err = rem_mcg_res(dev, slave, rqp, gid, prot, type); 3096 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3020 if (err) 3097 if (err)
3021 goto ex_put; 3098 goto ex_put;
3022 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
3023 }
3024 3099
3100 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3101 if (err)
3102 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3103 qpn, reg_id);
3104 }
3025 put_res(dev, slave, qpn, RES_QP); 3105 put_res(dev, slave, qpn, RES_QP);
3026 return 0; 3106 return err;
3027 3107
3028ex_rem: 3108ex_detach:
3029 /* ignore error return below, already in error */ 3109 qp_detach(dev, &qp, gid, prot, type, reg_id);
3030 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
3031ex_put: 3110ex_put:
3032 put_res(dev, slave, qpn, RES_QP); 3111 put_res(dev, slave, qpn, RES_QP);
3033
3034 return err; 3112 return err;
3035} 3113}
3036 3114
@@ -3266,9 +3344,16 @@ static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3266 struct mlx4_qp qp; /* dummy for calling attach/detach */ 3344 struct mlx4_qp qp; /* dummy for calling attach/detach */
3267 3345
3268 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { 3346 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3269 qp.qpn = rqp->local_qpn; 3347 switch (dev->caps.steering_mode) {
3270 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, 3348 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3271 rgid->steer); 3349 mlx4_flow_detach(dev, rgid->reg_id);
3350 break;
3351 case MLX4_STEERING_MODE_B0:
3352 qp.qpn = rqp->local_qpn;
3353 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3354 rgid->prot, rgid->steer);
3355 break;
3356 }
3272 list_del(&rgid->list); 3357 list_del(&rgid->list);
3273 kfree(rgid); 3358 kfree(rgid);
3274 } 3359 }
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 07a6ebc47c92..b6c60fdef4ff 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1622,25 +1622,7 @@ static struct platform_driver ks8695_driver = {
1622 .resume = ks8695_drv_resume, 1622 .resume = ks8695_drv_resume,
1623}; 1623};
1624 1624
1625/* Module interface */ 1625module_platform_driver(ks8695_driver);
1626
1627static int __init
1628ks8695_init(void)
1629{
1630 printk(KERN_INFO "%s Ethernet driver, V%s\n",
1631 MODULENAME, MODULEVERSION);
1632
1633 return platform_driver_register(&ks8695_driver);
1634}
1635
1636static void __exit
1637ks8695_cleanup(void)
1638{
1639 platform_driver_unregister(&ks8695_driver);
1640}
1641
1642module_init(ks8695_init);
1643module_exit(ks8695_cleanup);
1644 1626
1645MODULE_AUTHOR("Simtec Electronics"); 1627MODULE_AUTHOR("Simtec Electronics");
1646MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); 1628MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 8fb481252e2c..727b546a9eb8 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1364,37 +1364,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1364 1364
1365/* driver bus management functions */ 1365/* driver bus management functions */
1366 1366
1367#ifdef CONFIG_PM 1367#ifdef CONFIG_PM_SLEEP
1368static int ks8851_suspend(struct spi_device *spi, pm_message_t state) 1368
1369static int ks8851_suspend(struct device *dev)
1369{ 1370{
1370 struct ks8851_net *ks = dev_get_drvdata(&spi->dev); 1371 struct ks8851_net *ks = dev_get_drvdata(dev);
1371 struct net_device *dev = ks->netdev; 1372 struct net_device *netdev = ks->netdev;
1372 1373
1373 if (netif_running(dev)) { 1374 if (netif_running(netdev)) {
1374 netif_device_detach(dev); 1375 netif_device_detach(netdev);
1375 ks8851_net_stop(dev); 1376 ks8851_net_stop(netdev);
1376 } 1377 }
1377 1378
1378 return 0; 1379 return 0;
1379} 1380}
1380 1381
1381static int ks8851_resume(struct spi_device *spi) 1382static int ks8851_resume(struct device *dev)
1382{ 1383{
1383 struct ks8851_net *ks = dev_get_drvdata(&spi->dev); 1384 struct ks8851_net *ks = dev_get_drvdata(dev);
1384 struct net_device *dev = ks->netdev; 1385 struct net_device *netdev = ks->netdev;
1385 1386
1386 if (netif_running(dev)) { 1387 if (netif_running(netdev)) {
1387 ks8851_net_open(dev); 1388 ks8851_net_open(netdev);
1388 netif_device_attach(dev); 1389 netif_device_attach(netdev);
1389 } 1390 }
1390 1391
1391 return 0; 1392 return 0;
1392} 1393}
1393#else
1394#define ks8851_suspend NULL
1395#define ks8851_resume NULL
1396#endif 1394#endif
1397 1395
1396static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume);
1397
1398static int ks8851_probe(struct spi_device *spi) 1398static int ks8851_probe(struct spi_device *spi)
1399{ 1399{
1400 struct net_device *ndev; 1400 struct net_device *ndev;
@@ -1456,7 +1456,7 @@ static int ks8851_probe(struct spi_device *spi)
1456 SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops); 1456 SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
1457 SET_NETDEV_DEV(ndev, &spi->dev); 1457 SET_NETDEV_DEV(ndev, &spi->dev);
1458 1458
1459 dev_set_drvdata(&spi->dev, ks); 1459 spi_set_drvdata(spi, ks);
1460 1460
1461 ndev->if_port = IF_PORT_100BASET; 1461 ndev->if_port = IF_PORT_100BASET;
1462 ndev->netdev_ops = &ks8851_netdev_ops; 1462 ndev->netdev_ops = &ks8851_netdev_ops;
@@ -1516,7 +1516,7 @@ err_irq:
1516 1516
1517static int ks8851_remove(struct spi_device *spi) 1517static int ks8851_remove(struct spi_device *spi)
1518{ 1518{
1519 struct ks8851_net *priv = dev_get_drvdata(&spi->dev); 1519 struct ks8851_net *priv = spi_get_drvdata(spi);
1520 1520
1521 if (netif_msg_drv(priv)) 1521 if (netif_msg_drv(priv))
1522 dev_info(&spi->dev, "remove\n"); 1522 dev_info(&spi->dev, "remove\n");
@@ -1532,25 +1532,12 @@ static struct spi_driver ks8851_driver = {
1532 .driver = { 1532 .driver = {
1533 .name = "ks8851", 1533 .name = "ks8851",
1534 .owner = THIS_MODULE, 1534 .owner = THIS_MODULE,
1535 .pm = &ks8851_pm_ops,
1535 }, 1536 },
1536 .probe = ks8851_probe, 1537 .probe = ks8851_probe,
1537 .remove = ks8851_remove, 1538 .remove = ks8851_remove,
1538 .suspend = ks8851_suspend,
1539 .resume = ks8851_resume,
1540}; 1539};
1541 1540module_spi_driver(ks8851_driver);
1542static int __init ks8851_init(void)
1543{
1544 return spi_register_driver(&ks8851_driver);
1545}
1546
1547static void __exit ks8851_exit(void)
1548{
1549 spi_unregister_driver(&ks8851_driver);
1550}
1551
1552module_init(ks8851_init);
1553module_exit(ks8851_exit);
1554 1541
1555MODULE_DESCRIPTION("KS8851 Network driver"); 1542MODULE_DESCRIPTION("KS8851 Network driver");
1556MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1543MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a343066f7b43..ddaf138ce0d4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
792 792
793 frame_hdr = ks->frame_head_info; 793 frame_hdr = ks->frame_head_info;
794 while (ks->frame_cnt--) { 794 while (ks->frame_cnt--) {
795 if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
796 frame_hdr->len >= RX_BUF_SIZE ||
797 frame_hdr->len <= 0)) {
798
799 /* discard an invalid packet */
800 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
801 netdev->stats.rx_dropped++;
802 if (!(frame_hdr->sts & RXFSHR_RXFV))
803 netdev->stats.rx_frame_errors++;
804 else
805 netdev->stats.rx_length_errors++;
806 frame_hdr++;
807 continue;
808 }
809
795 skb = netdev_alloc_skb(netdev, frame_hdr->len + 16); 810 skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
796 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) && 811 if (likely(skb)) {
797 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
798 skb_reserve(skb, 2); 812 skb_reserve(skb, 2);
799 /* read data block including CRC 4 bytes */ 813 /* read data block including CRC 4 bytes */
800 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); 814 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
801 skb_put(skb, frame_hdr->len); 815 skb_put(skb, frame_hdr->len - 4);
802 skb->protocol = eth_type_trans(skb, netdev); 816 skb->protocol = eth_type_trans(skb, netdev);
803 netif_rx(skb); 817 netif_rx(skb);
818 /* exclude CRC size */
819 netdev->stats.rx_bytes += frame_hdr->len - 4;
820 netdev->stats.rx_packets++;
804 } else { 821 } else {
805 pr_err("%s: err:skb alloc\n", __func__);
806 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); 822 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
807 if (skb) 823 netdev->stats.rx_dropped++;
808 dev_kfree_skb_irq(skb);
809 } 824 }
810 frame_hdr++; 825 frame_hdr++;
811 } 826 }
@@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw)
877 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); 892 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
878 } 893 }
879 894
895 if (unlikely(status & IRQ_RXOI))
896 ks->netdev->stats.rx_over_errors++;
880 /* this should be the last in IRQ handler*/ 897 /* this should be the last in IRQ handler*/
881 ks_restore_cmd_reg(ks); 898 ks_restore_cmd_reg(ks);
882 return IRQ_HANDLED; 899 return IRQ_HANDLED;
@@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1015 1032
1016 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) { 1033 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
1017 ks_write_qmu(ks, skb->data, skb->len); 1034 ks_write_qmu(ks, skb->data, skb->len);
1035 /* add tx statistics */
1036 netdev->stats.tx_bytes += skb->len;
1037 netdev->stats.tx_packets++;
1018 dev_kfree_skb(skb); 1038 dev_kfree_skb(skb);
1019 } else 1039 } else
1020 retv = NETDEV_TX_BUSY; 1040 retv = NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 5d98a9f7bfc7..c7b40aa21f22 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1566,7 +1566,7 @@ static int enc28j60_probe(struct spi_device *spi)
1566 INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler); 1566 INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
1567 INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler); 1567 INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
1568 INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler); 1568 INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
1569 dev_set_drvdata(&spi->dev, priv); /* spi to priv reference */ 1569 spi_set_drvdata(spi, priv); /* spi to priv reference */
1570 SET_NETDEV_DEV(dev, &spi->dev); 1570 SET_NETDEV_DEV(dev, &spi->dev);
1571 1571
1572 if (!enc28j60_chipset_init(dev)) { 1572 if (!enc28j60_chipset_init(dev)) {
@@ -1618,7 +1618,7 @@ error_alloc:
1618 1618
1619static int enc28j60_remove(struct spi_device *spi) 1619static int enc28j60_remove(struct spi_device *spi)
1620{ 1620{
1621 struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); 1621 struct enc28j60_net *priv = spi_get_drvdata(spi);
1622 1622
1623 if (netif_msg_drv(priv)) 1623 if (netif_msg_drv(priv))
1624 printk(KERN_DEBUG DRV_NAME ": remove\n"); 1624 printk(KERN_DEBUG DRV_NAME ": remove\n");
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4f9937e026e5..7be9788ed0f6 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1281,7 +1281,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1281 va = addr; 1281 va = addr;
1282 va += MXGEFW_PAD; 1282 va += MXGEFW_PAD;
1283 veh = (struct vlan_ethhdr *)va; 1283 veh = (struct vlan_ethhdr *)va;
1284 if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX && 1284 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
1285 NETIF_F_HW_VLAN_CTAG_RX &&
1285 veh->h_vlan_proto == htons(ETH_P_8021Q)) { 1286 veh->h_vlan_proto == htons(ETH_P_8021Q)) {
1286 /* fixup csum if needed */ 1287 /* fixup csum if needed */
1287 if (skb->ip_summed == CHECKSUM_COMPLETE) { 1288 if (skb->ip_summed == CHECKSUM_COMPLETE) {
@@ -1289,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1289 skb->csum = csum_sub(skb->csum, vsum); 1290 skb->csum = csum_sub(skb->csum, vsum);
1290 } 1291 }
1291 /* pop tag */ 1292 /* pop tag */
1292 __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI)); 1293 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
1293 memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN); 1294 memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
1294 skb->len -= VLAN_HLEN; 1295 skb->len -= VLAN_HLEN;
1295 skb->data_len -= VLAN_HLEN; 1296 skb->data_len -= VLAN_HLEN;
@@ -3592,10 +3593,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3592 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3593 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3593 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3594 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3594 &ss->rx_done.bus, 3595 &ss->rx_done.bus,
3595 GFP_KERNEL); 3596 GFP_KERNEL | __GFP_ZERO);
3596 if (ss->rx_done.entry == NULL) 3597 if (ss->rx_done.entry == NULL)
3597 goto abort; 3598 goto abort;
3598 memset(ss->rx_done.entry, 0, bytes);
3599 bytes = sizeof(*ss->fw_stats); 3599 bytes = sizeof(*ss->fw_stats);
3600 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, 3600 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3601 &ss->fw_stats_bus, 3601 &ss->fw_stats_bus,
@@ -3888,8 +3888,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3888 netdev->mtu = myri10ge_initial_mtu; 3888 netdev->mtu = myri10ge_initial_mtu;
3889 netdev->hw_features = mgp->features | NETIF_F_RXCSUM; 3889 netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
3890 3890
3891 /* fake NETIF_F_HW_VLAN_RX for good GRO performance */ 3891 /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
3892 netdev->hw_features |= NETIF_F_HW_VLAN_RX; 3892 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3893 3893
3894 netdev->features = netdev->hw_features; 3894 netdev->features = netdev->hw_features;
3895 3895
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index b0b361546365..c20766c2f65b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
175 175
176 /* Allocate the entire chunk of memory for the descriptors. 176 /* Allocate the entire chunk of memory for the descriptors.
177 Note that this cannot cross a 64K boundary. */ 177 Note that this cannot cross a 64K boundary. */
178 if ((lp->descriptors = dma_alloc_coherent(lp->device, 178 lp->descriptors = dma_alloc_coherent(lp->device,
179 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 179 SIZEOF_SONIC_DESC *
180 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { 180 SONIC_BUS_SCALE(lp->dma_bitmode),
181 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", 181 &lp->descriptors_laddr,
182 dev_name(lp->device)); 182 GFP_KERNEL);
183 if (lp->descriptors == NULL)
183 goto out; 184 goto out;
184 }
185 185
186 /* Now set up the pointers to point to the appropriate places */ 186 /* Now set up the pointers to point to the appropriate places */
187 lp->cda = lp->descriptors; 187 lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0ffde69c8d01..346a4e025c34 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
202 202
203 /* Allocate the entire chunk of memory for the descriptors. 203 /* Allocate the entire chunk of memory for the descriptors.
204 Note that this cannot cross a 64K boundary. */ 204 Note that this cannot cross a 64K boundary. */
205 if ((lp->descriptors = dma_alloc_coherent(lp->device, 205 lp->descriptors = dma_alloc_coherent(lp->device,
206 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 206 SIZEOF_SONIC_DESC *
207 &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { 207 SONIC_BUS_SCALE(lp->dma_bitmode),
208 printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", 208 &lp->descriptors_laddr,
209 dev_name(lp->device)); 209 GFP_KERNEL);
210 if (lp->descriptors == NULL)
210 return -ENOMEM; 211 return -ENOMEM;
211 }
212 212
213 /* Now set up the pointers to point to the appropriate places */ 213 /* Now set up the pointers to point to the appropriate places */
214 lp->cda = lp->descriptors; 214 lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 77c070de621e..d3b47003a575 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev)
911 unsigned short tag; 911 unsigned short tag;
912 912
913 tag = ntohs(extsts & EXTSTS_VTG_MASK); 913 tag = ntohs(extsts & EXTSTS_VTG_MASK);
914 __vlan_hwaccel_put_tag(skb, tag); 914 __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
915 } 915 }
916#endif 916#endif
917 rx_rc = netif_rx(skb); 917 rx_rc = netif_rx(skb);
@@ -2193,7 +2193,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
2193 2193
2194#ifdef NS83820_VLAN_ACCEL_SUPPORT 2194#ifdef NS83820_VLAN_ACCEL_SUPPORT
2195 /* We also support hardware vlan acceleration */ 2195 /* We also support hardware vlan acceleration */
2196 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2196 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2197#endif 2197#endif
2198 2198
2199 if (using_dac) { 2199 if (using_dac) {
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 46795e403467..1bd419dbda6d 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev)
424 /* Malloc up new buffer. */ 424 /* Malloc up new buffer. */
425 new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); 425 new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
426 if (new_skb == NULL) { 426 if (new_skb == NULL) {
427 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
428 lp->stats.rx_dropped++; 427 lp->stats.rx_dropped++;
429 break; 428 break;
430 } 429 }
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 5e4748e855f6..c2e0256fe3df 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
197 * We also allocate extra space for a pointer to allow freeing 197 * We also allocate extra space for a pointer to allow freeing
198 * this structure later on (in xtsonic_cleanup_module()). 198 * this structure later on (in xtsonic_cleanup_module()).
199 */ 199 */
200 lp->descriptors = 200 lp->descriptors = dma_alloc_coherent(lp->device,
201 dma_alloc_coherent(lp->device, 201 SIZEOF_SONIC_DESC *
202 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 202 SONIC_BUS_SCALE(lp->dma_bitmode),
203 &lp->descriptors_laddr, GFP_KERNEL); 203 &lp->descriptors_laddr,
204 204 GFP_KERNEL);
205 if (lp->descriptors == NULL) { 205 if (lp->descriptors == NULL) {
206 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
207 " descriptors.\n", dev_name(lp->device));
208 err = -ENOMEM; 206 err = -ENOMEM;
209 goto out; 207 goto out;
210 } 208 }
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bfd887382e19..51b00941302c 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -80,6 +80,7 @@
80#include <linux/slab.h> 80#include <linux/slab.h>
81#include <linux/prefetch.h> 81#include <linux/prefetch.h>
82#include <net/tcp.h> 82#include <net/tcp.h>
83#include <net/checksum.h>
83 84
84#include <asm/div64.h> 85#include <asm/div64.h>
85#include <asm/irq.h> 86#include <asm/irq.h>
@@ -7919,7 +7920,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7919 NETIF_F_TSO | NETIF_F_TSO6 | 7920 NETIF_F_TSO | NETIF_F_TSO6 |
7920 NETIF_F_RXCSUM | NETIF_F_LRO; 7921 NETIF_F_RXCSUM | NETIF_F_LRO;
7921 dev->features |= dev->hw_features | 7922 dev->features |= dev->hw_features |
7922 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7923 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7923 if (sp->device_type & XFRAME_II_DEVICE) { 7924 if (sp->device_type & XFRAME_II_DEVICE) {
7924 dev->hw_features |= NETIF_F_UFO; 7925 dev->hw_features |= NETIF_F_UFO;
7925 if (ufo) 7926 if (ufo)
@@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8337{ 8338{
8338 struct iphdr *ip = lro->iph; 8339 struct iphdr *ip = lro->iph;
8339 struct tcphdr *tcp = lro->tcph; 8340 struct tcphdr *tcp = lro->tcph;
8340 __sum16 nchk;
8341 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 8341 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8342 8342
8343 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8343 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8344 8344
8345 /* Update L3 header */ 8345 /* Update L3 header */
8346 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8346 ip->tot_len = htons(lro->total_len); 8347 ip->tot_len = htons(lro->total_len);
8347 ip->check = 0;
8348 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8349 ip->check = nchk;
8350 8348
8351 /* Update L4 header */ 8349 /* Update L4 header */
8352 tcp->ack_seq = lro->tcp_ack; 8350 tcp->ack_seq = lro->tcp_ack;
@@ -8557,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8557 8555
8558 skb->protocol = eth_type_trans(skb, dev); 8556 skb->protocol = eth_type_trans(skb, dev);
8559 if (vlan_tag && sp->vlan_strip_flag) 8557 if (vlan_tag && sp->vlan_strip_flag)
8560 __vlan_hwaccel_put_tag(skb, vlan_tag); 8558 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8561 if (sp->config.napi) 8559 if (sp->config.napi)
8562 netif_receive_skb(skb); 8560 netif_receive_skb(skb);
8563 else 8561 else
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 794444e09492..cbfaed5f2f8d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
312 312
313 if (ext_info->vlan && 313 if (ext_info->vlan &&
314 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) 314 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
315 __vlan_hwaccel_put_tag(skb, ext_info->vlan); 315 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
316 napi_gro_receive(ring->napi_p, skb); 316 napi_gro_receive(ring->napi_p, skb);
317 317
318 vxge_debug_entryexit(VXGE_TRACE, 318 vxge_debug_entryexit(VXGE_TRACE,
@@ -3300,12 +3300,13 @@ static void vxge_tx_watchdog(struct net_device *dev)
3300/** 3300/**
3301 * vxge_vlan_rx_add_vid 3301 * vxge_vlan_rx_add_vid
3302 * @dev: net device pointer. 3302 * @dev: net device pointer.
3303 * @proto: vlan protocol
3303 * @vid: vid 3304 * @vid: vid
3304 * 3305 *
3305 * Add the vlan id to the devices vlan id table 3306 * Add the vlan id to the devices vlan id table
3306 */ 3307 */
3307static int 3308static int
3308vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 3309vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3309{ 3310{
3310 struct vxgedev *vdev = netdev_priv(dev); 3311 struct vxgedev *vdev = netdev_priv(dev);
3311 struct vxge_vpath *vpath; 3312 struct vxge_vpath *vpath;
@@ -3323,14 +3324,15 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3323} 3324}
3324 3325
3325/** 3326/**
3326 * vxge_vlan_rx_add_vid 3327 * vxge_vlan_rx_kill_vid
3327 * @dev: net device pointer. 3328 * @dev: net device pointer.
3329 * @proto: vlan protocol
3328 * @vid: vid 3330 * @vid: vid
3329 * 3331 *
3330 * Remove the vlan id from the device's vlan id table 3332 * Remove the vlan id from the device's vlan id table
3331 */ 3333 */
3332static int 3334static int
3333vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 3335vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3334{ 3336{
3335 struct vxgedev *vdev = netdev_priv(dev); 3337 struct vxgedev *vdev = netdev_priv(dev);
3336 struct vxge_vpath *vpath; 3338 struct vxge_vpath *vpath;
@@ -3415,12 +3417,12 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
3415 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | 3417 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3416 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3418 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3417 NETIF_F_TSO | NETIF_F_TSO6 | 3419 NETIF_F_TSO | NETIF_F_TSO6 |
3418 NETIF_F_HW_VLAN_TX; 3420 NETIF_F_HW_VLAN_CTAG_TX;
3419 if (vdev->config.rth_steering != NO_STEERING) 3421 if (vdev->config.rth_steering != NO_STEERING)
3420 ndev->hw_features |= NETIF_F_RXHASH; 3422 ndev->hw_features |= NETIF_F_RXHASH;
3421 3423
3422 ndev->features |= ndev->hw_features | 3424 ndev->features |= ndev->hw_features |
3423 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3425 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3424 3426
3425 3427
3426 ndev->netdev_ops = &vxge_netdev_ops; 3428 ndev->netdev_ops = &vxge_netdev_ops;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 63e7af44366f..cb9e63831500 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev)
152 152
153 skb = netdev_alloc_skb(ndev, len); 153 skb = netdev_alloc_skb(ndev, len);
154 if (unlikely(skb == NULL)) { 154 if (unlikely(skb == NULL)) {
155 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
156 ndev->name);
157 ndev->stats.rx_dropped++; 155 ndev->stats.rx_dropped++;
158 return; 156 return;
159 } 157 }
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 162da8975b05..3df8287b7452 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev)
287 ether = netdev_priv(dev); 287 ether = netdev_priv(dev);
288 pdev = ether->pdev; 288 pdev = ether->pdev;
289 289
290 ether->tdesc = (struct tran_pdesc *) 290 ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
291 dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), 291 &ether->tdesc_phys, GFP_KERNEL);
292 &ether->tdesc_phys, GFP_KERNEL); 292 if (!ether->tdesc)
293
294 if (!ether->tdesc) {
295 dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
296 return -ENOMEM; 293 return -ENOMEM;
297 }
298
299 ether->rdesc = (struct recv_pdesc *)
300 dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
301 &ether->rdesc_phys, GFP_KERNEL);
302 294
295 ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
296 &ether->rdesc_phys, GFP_KERNEL);
303 if (!ether->rdesc) { 297 if (!ether->rdesc) {
304 dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
305 dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), 298 dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
306 ether->tdesc, ether->tdesc_phys); 299 ether->tdesc, ether->tdesc_phys);
307 return -ENOMEM; 300 return -ENOMEM;
308 } 301 }
309 302
@@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev)
737 data = ether->rdesc->recv_buf[ether->cur_rx]; 730 data = ether->rdesc->recv_buf[ether->cur_rx];
738 skb = netdev_alloc_skb(dev, length + 2); 731 skb = netdev_alloc_skb(dev, length + 2);
739 if (!skb) { 732 if (!skb) {
740 dev_err(&pdev->dev, "get skb buffer error\n");
741 ether->stats.rx_dropped++; 733 ether->stats.rx_dropped++;
742 return; 734 return;
743 } 735 }
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0b8de12bcbca..b003fe53c8e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2200,6 +2200,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2200 struct ring_desc *start_tx; 2200 struct ring_desc *start_tx;
2201 struct ring_desc *prev_tx; 2201 struct ring_desc *prev_tx;
2202 struct nv_skb_map *prev_tx_ctx; 2202 struct nv_skb_map *prev_tx_ctx;
2203 struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2203 unsigned long flags; 2204 unsigned long flags;
2204 2205
2205 /* add fragments to entries count */ 2206 /* add fragments to entries count */
@@ -2261,12 +2262,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2261 do { 2262 do {
2262 prev_tx = put_tx; 2263 prev_tx = put_tx;
2263 prev_tx_ctx = np->put_tx_ctx; 2264 prev_tx_ctx = np->put_tx_ctx;
2265 if (!start_tx_ctx)
2266 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2267
2264 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; 2268 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2265 np->put_tx_ctx->dma = skb_frag_dma_map( 2269 np->put_tx_ctx->dma = skb_frag_dma_map(
2266 &np->pci_dev->dev, 2270 &np->pci_dev->dev,
2267 frag, offset, 2271 frag, offset,
2268 bcnt, 2272 bcnt,
2269 DMA_TO_DEVICE); 2273 DMA_TO_DEVICE);
2274 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2275
2276 /* Unwind the mapped fragments */
2277 do {
2278 nv_unmap_txskb(np, start_tx_ctx);
2279 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2280 tmp_tx_ctx = np->first_tx_ctx;
2281 } while (tmp_tx_ctx != np->put_tx_ctx);
2282 kfree_skb(skb);
2283 np->put_tx_ctx = start_tx_ctx;
2284 u64_stats_update_begin(&np->swstats_tx_syncp);
2285 np->stat_tx_dropped++;
2286 u64_stats_update_end(&np->swstats_tx_syncp);
2287 return NETDEV_TX_OK;
2288 }
2289
2270 np->put_tx_ctx->dma_len = bcnt; 2290 np->put_tx_ctx->dma_len = bcnt;
2271 np->put_tx_ctx->dma_single = 0; 2291 np->put_tx_ctx->dma_single = 0;
2272 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2292 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2327,7 +2347,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2327 struct ring_desc_ex *start_tx; 2347 struct ring_desc_ex *start_tx;
2328 struct ring_desc_ex *prev_tx; 2348 struct ring_desc_ex *prev_tx;
2329 struct nv_skb_map *prev_tx_ctx; 2349 struct nv_skb_map *prev_tx_ctx;
2330 struct nv_skb_map *start_tx_ctx; 2350 struct nv_skb_map *start_tx_ctx = NULL;
2351 struct nv_skb_map *tmp_tx_ctx = NULL;
2331 unsigned long flags; 2352 unsigned long flags;
2332 2353
2333 /* add fragments to entries count */ 2354 /* add fragments to entries count */
@@ -2392,11 +2413,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2392 prev_tx = put_tx; 2413 prev_tx = put_tx;
2393 prev_tx_ctx = np->put_tx_ctx; 2414 prev_tx_ctx = np->put_tx_ctx;
2394 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; 2415 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2416 if (!start_tx_ctx)
2417 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2395 np->put_tx_ctx->dma = skb_frag_dma_map( 2418 np->put_tx_ctx->dma = skb_frag_dma_map(
2396 &np->pci_dev->dev, 2419 &np->pci_dev->dev,
2397 frag, offset, 2420 frag, offset,
2398 bcnt, 2421 bcnt,
2399 DMA_TO_DEVICE); 2422 DMA_TO_DEVICE);
2423
2424 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2425
2426 /* Unwind the mapped fragments */
2427 do {
2428 nv_unmap_txskb(np, start_tx_ctx);
2429 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2430 tmp_tx_ctx = np->first_tx_ctx;
2431 } while (tmp_tx_ctx != np->put_tx_ctx);
2432 kfree_skb(skb);
2433 np->put_tx_ctx = start_tx_ctx;
2434 u64_stats_update_begin(&np->swstats_tx_syncp);
2435 np->stat_tx_dropped++;
2436 u64_stats_update_end(&np->swstats_tx_syncp);
2437 return NETDEV_TX_OK;
2438 }
2400 np->put_tx_ctx->dma_len = bcnt; 2439 np->put_tx_ctx->dma_len = bcnt;
2401 np->put_tx_ctx->dma_single = 0; 2440 np->put_tx_ctx->dma_single = 0;
2402 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2441 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -2922,15 +2961,15 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2922 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2961 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2923 2962
2924 /* 2963 /*
2925 * There's need to check for NETIF_F_HW_VLAN_RX here. 2964 * There's need to check for NETIF_F_HW_VLAN_CTAG_RX
2926 * Even if vlan rx accel is disabled, 2965 * here. Even if vlan rx accel is disabled,
2927 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. 2966 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
2928 */ 2967 */
2929 if (dev->features & NETIF_F_HW_VLAN_RX && 2968 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2930 vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2969 vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2931 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; 2970 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2932 2971
2933 __vlan_hwaccel_put_tag(skb, vid); 2972 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2934 } 2973 }
2935 napi_gro_receive(&np->napi, skb); 2974 napi_gro_receive(&np->napi, skb);
2936 u64_stats_update_begin(&np->swstats_rx_syncp); 2975 u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -4777,7 +4816,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev,
4777 netdev_features_t features) 4816 netdev_features_t features)
4778{ 4817{
4779 /* vlan is dependent on rx checksum offload */ 4818 /* vlan is dependent on rx checksum offload */
4780 if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) 4819 if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4781 features |= NETIF_F_RXCSUM; 4820 features |= NETIF_F_RXCSUM;
4782 4821
4783 return features; 4822 return features;
@@ -4789,12 +4828,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4789 4828
4790 spin_lock_irq(&np->lock); 4829 spin_lock_irq(&np->lock);
4791 4830
4792 if (features & NETIF_F_HW_VLAN_RX) 4831 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4793 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; 4832 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4794 else 4833 else
4795 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; 4834 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4796 4835
4797 if (features & NETIF_F_HW_VLAN_TX) 4836 if (features & NETIF_F_HW_VLAN_CTAG_TX)
4798 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; 4837 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4799 else 4838 else
4800 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; 4839 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
@@ -4831,7 +4870,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features)
4831 spin_unlock_irq(&np->lock); 4870 spin_unlock_irq(&np->lock);
4832 } 4871 }
4833 4872
4834 if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)) 4873 if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4835 nv_vlan_mode(dev, features); 4874 nv_vlan_mode(dev, features);
4836 4875
4837 return 0; 4876 return 0;
@@ -5025,7 +5064,6 @@ static int nv_loopback_test(struct net_device *dev)
5025 pkt_len = ETH_DATA_LEN; 5064 pkt_len = ETH_DATA_LEN;
5026 tx_skb = netdev_alloc_skb(dev, pkt_len); 5065 tx_skb = netdev_alloc_skb(dev, pkt_len);
5027 if (!tx_skb) { 5066 if (!tx_skb) {
5028 netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
5029 ret = 0; 5067 ret = 0;
5030 goto out; 5068 goto out;
5031 } 5069 }
@@ -5667,7 +5705,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5667 np->vlanctl_bits = 0; 5705 np->vlanctl_bits = 0;
5668 if (id->driver_data & DEV_HAS_VLAN) { 5706 if (id->driver_data & DEV_HAS_VLAN) {
5669 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5707 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5670 dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5708 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5709 NETIF_F_HW_VLAN_CTAG_TX;
5671 } 5710 }
5672 5711
5673 dev->features |= dev->hw_features; 5712 dev->features |= dev->hw_features;
@@ -5958,7 +5997,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5958 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5997 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5959 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5998 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5960 "csum " : "", 5999 "csum " : "",
5961 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 6000 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6001 NETIF_F_HW_VLAN_CTAG_TX) ?
5962 "vlan " : "", 6002 "vlan " : "",
5963 dev->features & (NETIF_F_LOOPBACK) ? 6003 dev->features & (NETIF_F_LOOPBACK) ?
5964 "loopback " : "", 6004 "loopback " : "",
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index efa29b712d5f..55a5548d6add 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1409 dma_alloc_coherent(&pldat->pdev->dev, 1409 dma_alloc_coherent(&pldat->pdev->dev,
1410 pldat->dma_buff_size, &dma_handle, 1410 pldat->dma_buff_size, &dma_handle,
1411 GFP_KERNEL); 1411 GFP_KERNEL);
1412
1413 if (pldat->dma_buff_base_v == NULL) { 1412 if (pldat->dma_buff_base_v == NULL) {
1414 dev_err(&pdev->dev, "error getting DMA region.\n");
1415 ret = -ENOMEM; 1413 ret = -ENOMEM;
1416 goto err_out_free_irq; 1414 goto err_out_free_irq;
1417 } 1415 }
@@ -1434,13 +1432,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1434 /* Get MAC address from current HW setting (POR state is all zeros) */ 1432 /* Get MAC address from current HW setting (POR state is all zeros) */
1435 __lpc_get_mac(pldat, ndev->dev_addr); 1433 __lpc_get_mac(pldat, ndev->dev_addr);
1436 1434
1437#ifdef CONFIG_OF_NET
1438 if (!is_valid_ether_addr(ndev->dev_addr)) { 1435 if (!is_valid_ether_addr(ndev->dev_addr)) {
1439 const char *macaddr = of_get_mac_address(pdev->dev.of_node); 1436 const char *macaddr = of_get_mac_address(pdev->dev.of_node);
1440 if (macaddr) 1437 if (macaddr)
1441 memcpy(ndev->dev_addr, macaddr, ETH_ALEN); 1438 memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
1442 } 1439 }
1443#endif
1444 if (!is_valid_ether_addr(ndev->dev_addr)) 1440 if (!is_valid_ether_addr(ndev->dev_addr))
1445 eth_hw_addr_random(ndev); 1441 eth_hw_addr_random(ndev);
1446 1442
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 73ce7dd6b954..0c1c65a9ce5e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1469 1469
1470 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1470 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1471 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, 1471 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1472 &rx_ring->rx_buff_pool_logic, 1472 &rx_ring->rx_buff_pool_logic,
1473 GFP_KERNEL); 1473 GFP_KERNEL | __GFP_ZERO);
1474 if (!rx_ring->rx_buff_pool) { 1474 if (!rx_ring->rx_buff_pool)
1475 pr_err("Unable to allocate memory for the receive pool buffer\n");
1476 return -ENOMEM; 1475 return -ENOMEM;
1477 } 1476
1478 memset(rx_ring->rx_buff_pool, 0, size);
1479 rx_ring->rx_buff_pool_size = size; 1477 rx_ring->rx_buff_pool_size = size;
1480 for (i = 0; i < rx_ring->count; i++) { 1478 for (i = 0; i < rx_ring->count; i++) {
1481 buffer_info = &rx_ring->buffer_info[i]; 1479 buffer_info = &rx_ring->buffer_info[i];
@@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1774 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1772 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1775 1773
1776 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1774 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1777 &tx_ring->dma, GFP_KERNEL); 1775 &tx_ring->dma,
1776 GFP_KERNEL | __GFP_ZERO);
1778 if (!tx_ring->desc) { 1777 if (!tx_ring->desc) {
1779 vfree(tx_ring->buffer_info); 1778 vfree(tx_ring->buffer_info);
1780 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1781 return -ENOMEM; 1779 return -ENOMEM;
1782 } 1780 }
1783 memset(tx_ring->desc, 0, tx_ring->size);
1784 1781
1785 tx_ring->next_to_use = 0; 1782 tx_ring->next_to_use = 0;
1786 tx_ring->next_to_clean = 0; 1783 tx_ring->next_to_clean = 0;
@@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1820 1817
1821 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1818 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1822 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1819 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1823 &rx_ring->dma, GFP_KERNEL); 1820 &rx_ring->dma,
1824 1821 GFP_KERNEL | __GFP_ZERO);
1825 if (!rx_ring->desc) { 1822 if (!rx_ring->desc) {
1826 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1827 vfree(rx_ring->buffer_info); 1823 vfree(rx_ring->buffer_info);
1828 return -ENOMEM; 1824 return -ENOMEM;
1829 } 1825 }
1830 memset(rx_ring->desc, 0, rx_ring->size);
1831 rx_ring->next_to_clean = 0; 1826 rx_ring->next_to_clean = 0;
1832 rx_ring->next_to_use = 0; 1827 rx_ring->next_to_use = 0;
1833 for (desNo = 0; desNo < rx_ring->count; desNo++) { 1828 for (desNo = 0; desNo < rx_ring->count; desNo++) {
@@ -2268,7 +2263,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2268 if (err) { 2263 if (err) {
2269 adapter->rx_buffer_len = old_rx_buffer_len; 2264 adapter->rx_buffer_len = old_rx_buffer_len;
2270 pch_gbe_up(adapter); 2265 pch_gbe_up(adapter);
2271 return -ENOMEM; 2266 return err;
2272 } else { 2267 } else {
2273 netdev->mtu = new_mtu; 2268 netdev->mtu = new_mtu;
2274 adapter->hw.mac.max_frame_size = max_frame; 2269 adapter->hw.mac.max_frame_size = max_frame;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index b1cfbb75ff1e..a5f0b5da6149 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
441 441
442 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, 442 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
443 RX_RING_SIZE * sizeof(u64), 443 RX_RING_SIZE * sizeof(u64),
444 &ring->buf_dma, GFP_KERNEL); 444 &ring->buf_dma,
445 GFP_KERNEL | __GFP_ZERO);
445 if (!ring->buffers) 446 if (!ring->buffers)
446 goto out_ring_desc; 447 goto out_ring_desc;
447 448
448 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
449
450 write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), 449 write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
451 PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); 450 PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
452 451
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a8669adecc97..0e1797295a48 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -35,6 +35,16 @@ config QLCNIC
35 This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet 35 This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
36 devices. 36 devices.
37 37
38config QLCNIC_SRIOV
39 bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
40 depends on QLCNIC && PCI_IOV
41 default y
42 ---help---
43 This configuration parameter enables Single Root Input Output
44 Virtualization support for QLE83XX Converged Ethernet devices.
45 This allows for virtual function acceleration in virtualized
46 environments.
47
38config QLGE 48config QLGE
39 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 49 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
40 depends on PCI 50 depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642b..322a36b76727 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -955,9 +955,10 @@ typedef struct nx_mac_list_s {
955 uint8_t mac_addr[ETH_ALEN+2]; 955 uint8_t mac_addr[ETH_ALEN+2];
956} nx_mac_list_t; 956} nx_mac_list_t;
957 957
958struct nx_vlan_ip_list { 958struct nx_ip_list {
959 struct list_head list; 959 struct list_head list;
960 __be32 ip_addr; 960 __be32 ip_addr;
961 bool master;
961}; 962};
962 963
963/* 964/*
@@ -1605,7 +1606,7 @@ struct netxen_adapter {
1605 struct net_device *netdev; 1606 struct net_device *netdev;
1606 struct pci_dev *pdev; 1607 struct pci_dev *pdev;
1607 struct list_head mac_list; 1608 struct list_head mac_list;
1608 struct list_head vlan_ip_list; 1609 struct list_head ip_list;
1609 1610
1610 spinlock_t tx_clean_lock; 1611 spinlock_t tx_clean_lock;
1611 1612
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 4782dcfde736..7692dfd4f262 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -27,6 +27,7 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30#include <net/checksum.h>
30#include "netxen_nic.h" 31#include "netxen_nic.h"
31#include "netxen_nic_hw.h" 32#include "netxen_nic_hw.h"
32 33
@@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
1641 th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); 1642 th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
1642 1643
1643 length = (iph->ihl << 2) + (th->doff << 2) + lro_length; 1644 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1645 csum_replace2(&iph->check, iph->tot_len, htons(length));
1644 iph->tot_len = htons(length); 1646 iph->tot_len = htons(length);
1645 iph->check = 0;
1646 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1647 th->psh = push; 1647 th->psh = push;
1648 th->seq = htonl(seq_number); 1648 th->seq = htonl(seq_number);
1649 1649
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 501f49207da5..af951f343ff6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
90static irqreturn_t netxen_msi_intr(int irq, void *data); 90static irqreturn_t netxen_msi_intr(int irq, void *data);
91static irqreturn_t netxen_msix_intr(int irq, void *data); 91static irqreturn_t netxen_msix_intr(int irq, void *data);
92 92
93static void netxen_free_vlan_ip_list(struct netxen_adapter *); 93static void netxen_free_ip_list(struct netxen_adapter *, bool);
94static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); 94static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
95static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, 95static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
96 struct rtnl_link_stats64 *stats); 96 struct rtnl_link_stats64 *stats);
@@ -1345,7 +1345,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1345 } 1345 }
1346 1346
1347 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) 1347 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
1348 netdev->hw_features |= NETIF_F_HW_VLAN_TX; 1348 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1349 1349
1350 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) 1350 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
1351 netdev->hw_features |= NETIF_F_LRO; 1351 netdev->hw_features |= NETIF_F_LRO;
@@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1450 1450
1451 spin_lock_init(&adapter->tx_clean_lock); 1451 spin_lock_init(&adapter->tx_clean_lock);
1452 INIT_LIST_HEAD(&adapter->mac_list); 1452 INIT_LIST_HEAD(&adapter->mac_list);
1453 INIT_LIST_HEAD(&adapter->vlan_ip_list); 1453 INIT_LIST_HEAD(&adapter->ip_list);
1454 1454
1455 err = netxen_setup_pci_map(adapter); 1455 err = netxen_setup_pci_map(adapter);
1456 if (err) 1456 if (err)
@@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
1585 1585
1586 cancel_work_sync(&adapter->tx_timeout_task); 1586 cancel_work_sync(&adapter->tx_timeout_task);
1587 1587
1588 netxen_free_vlan_ip_list(adapter); 1588 netxen_free_ip_list(adapter, false);
1589 netxen_nic_detach(adapter); 1589 netxen_nic_detach(adapter);
1590 1590
1591 nx_decr_dev_ref_cnt(adapter); 1591 nx_decr_dev_ref_cnt(adapter);
@@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
3137} 3137}
3138 3138
3139static void 3139static void
3140netxen_free_vlan_ip_list(struct netxen_adapter *adapter) 3140netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
3141{ 3141{
3142 struct nx_vlan_ip_list *cur; 3142 struct nx_ip_list *cur, *tmp_cur;
3143 struct list_head *head = &adapter->vlan_ip_list;
3144 3143
3145 while (!list_empty(head)) { 3144 list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
3146 cur = list_entry(head->next, struct nx_vlan_ip_list, list); 3145 if (master) {
3147 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); 3146 if (cur->master) {
3148 list_del(&cur->list); 3147 netxen_config_ipaddr(adapter, cur->ip_addr,
3149 kfree(cur); 3148 NX_IP_DOWN);
3149 list_del(&cur->list);
3150 kfree(cur);
3151 }
3152 } else {
3153 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
3154 list_del(&cur->list);
3155 kfree(cur);
3156 }
3150 } 3157 }
3151
3152} 3158}
3153static void 3159
3154netxen_list_config_vlan_ip(struct netxen_adapter *adapter, 3160static bool
3161netxen_list_config_ip(struct netxen_adapter *adapter,
3155 struct in_ifaddr *ifa, unsigned long event) 3162 struct in_ifaddr *ifa, unsigned long event)
3156{ 3163{
3157 struct net_device *dev; 3164 struct net_device *dev;
3158 struct nx_vlan_ip_list *cur, *tmp_cur; 3165 struct nx_ip_list *cur, *tmp_cur;
3159 struct list_head *head; 3166 struct list_head *head;
3167 bool ret = false;
3160 3168
3161 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3169 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3162 3170
3163 if (dev == NULL) 3171 if (dev == NULL)
3164 return; 3172 goto out;
3165
3166 if (!is_vlan_dev(dev))
3167 return;
3168 3173
3169 switch (event) { 3174 switch (event) {
3170 case NX_IP_UP: 3175 case NX_IP_UP:
3171 list_for_each(head, &adapter->vlan_ip_list) { 3176 list_for_each(head, &adapter->ip_list) {
3172 cur = list_entry(head, struct nx_vlan_ip_list, list); 3177 cur = list_entry(head, struct nx_ip_list, list);
3173 3178
3174 if (cur->ip_addr == ifa->ifa_address) 3179 if (cur->ip_addr == ifa->ifa_address)
3175 return; 3180 goto out;
3176 } 3181 }
3177 3182
3178 cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); 3183 cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
3179 if (cur == NULL) 3184 if (cur == NULL)
3180 return; 3185 goto out;
3181 3186 if (dev->priv_flags & IFF_802_1Q_VLAN)
3187 dev = vlan_dev_real_dev(dev);
3188 cur->master = !!netif_is_bond_master(dev);
3182 cur->ip_addr = ifa->ifa_address; 3189 cur->ip_addr = ifa->ifa_address;
3183 list_add_tail(&cur->list, &adapter->vlan_ip_list); 3190 list_add_tail(&cur->list, &adapter->ip_list);
3191 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
3192 ret = true;
3184 break; 3193 break;
3185 case NX_IP_DOWN: 3194 case NX_IP_DOWN:
3186 list_for_each_entry_safe(cur, tmp_cur, 3195 list_for_each_entry_safe(cur, tmp_cur,
3187 &adapter->vlan_ip_list, list) { 3196 &adapter->ip_list, list) {
3188 if (cur->ip_addr == ifa->ifa_address) { 3197 if (cur->ip_addr == ifa->ifa_address) {
3189 list_del(&cur->list); 3198 list_del(&cur->list);
3190 kfree(cur); 3199 kfree(cur);
3200 netxen_config_ipaddr(adapter, ifa->ifa_address,
3201 NX_IP_DOWN);
3202 ret = true;
3191 break; 3203 break;
3192 } 3204 }
3193 } 3205 }
3194 } 3206 }
3207out:
3208 return ret;
3195} 3209}
3210
3196static void 3211static void
3197netxen_config_indev_addr(struct netxen_adapter *adapter, 3212netxen_config_indev_addr(struct netxen_adapter *adapter,
3198 struct net_device *dev, unsigned long event) 3213 struct net_device *dev, unsigned long event)
@@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
3209 for_ifa(indev) { 3224 for_ifa(indev) {
3210 switch (event) { 3225 switch (event) {
3211 case NETDEV_UP: 3226 case NETDEV_UP:
3212 netxen_config_ipaddr(adapter, 3227 netxen_list_config_ip(adapter, ifa, NX_IP_UP);
3213 ifa->ifa_address, NX_IP_UP);
3214 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
3215 break; 3228 break;
3216 case NETDEV_DOWN: 3229 case NETDEV_DOWN:
3217 netxen_config_ipaddr(adapter, 3230 netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
3218 ifa->ifa_address, NX_IP_DOWN);
3219 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
3220 break; 3231 break;
3221 default: 3232 default:
3222 break; 3233 break;
@@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
3231 3242
3232{ 3243{
3233 struct netxen_adapter *adapter = netdev_priv(netdev); 3244 struct netxen_adapter *adapter = netdev_priv(netdev);
3234 struct nx_vlan_ip_list *pos, *tmp_pos; 3245 struct nx_ip_list *pos, *tmp_pos;
3235 unsigned long ip_event; 3246 unsigned long ip_event;
3236 3247
3237 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; 3248 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
3238 netxen_config_indev_addr(adapter, netdev, event); 3249 netxen_config_indev_addr(adapter, netdev, event);
3239 3250
3240 list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { 3251 list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
3241 netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); 3252 netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
3242 } 3253 }
3243} 3254}
3244 3255
3256static inline bool
3257netxen_config_checkdev(struct net_device *dev)
3258{
3259 struct netxen_adapter *adapter;
3260
3261 if (!is_netxen_netdev(dev))
3262 return false;
3263 adapter = netdev_priv(dev);
3264 if (!adapter)
3265 return false;
3266 if (!netxen_destip_supported(adapter))
3267 return false;
3268 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
3269 return false;
3270
3271 return true;
3272}
3273
3274/**
3275 * netxen_config_master - configure addresses based on master
3276 * @dev: netxen device
3277 * @event: netdev event
3278 */
3279static void netxen_config_master(struct net_device *dev, unsigned long event)
3280{
3281 struct net_device *master, *slave;
3282 struct netxen_adapter *adapter = netdev_priv(dev);
3283
3284 rcu_read_lock();
3285 master = netdev_master_upper_dev_get_rcu(dev);
3286 /*
3287 * This is the case where the netxen nic is being
3288 * enslaved and is dev_open()ed in bond_enslave()
3289 * Now we should program the bond's (and its vlans')
3290 * addresses in the netxen NIC.
3291 */
3292 if (master && netif_is_bond_master(master) &&
3293 !netif_is_bond_slave(dev)) {
3294 netxen_config_indev_addr(adapter, master, event);
3295 for_each_netdev_rcu(&init_net, slave)
3296 if (slave->priv_flags & IFF_802_1Q_VLAN &&
3297 vlan_dev_real_dev(slave) == master)
3298 netxen_config_indev_addr(adapter, slave, event);
3299 }
3300 rcu_read_unlock();
3301 /*
3302 * This is the case where the netxen nic is being
3303 * released and is dev_close()ed in bond_release()
3304 * just before IFF_BONDING is stripped.
3305 */
3306 if (!master && dev->priv_flags & IFF_BONDING)
3307 netxen_free_ip_list(adapter, true);
3308}
3309
3245static int netxen_netdev_event(struct notifier_block *this, 3310static int netxen_netdev_event(struct notifier_block *this,
3246 unsigned long event, void *ptr) 3311 unsigned long event, void *ptr)
3247{ 3312{
3248 struct netxen_adapter *adapter; 3313 struct netxen_adapter *adapter;
3249 struct net_device *dev = (struct net_device *)ptr; 3314 struct net_device *dev = (struct net_device *)ptr;
3250 struct net_device *orig_dev = dev; 3315 struct net_device *orig_dev = dev;
3316 struct net_device *slave;
3251 3317
3252recheck: 3318recheck:
3253 if (dev == NULL) 3319 if (dev == NULL)
@@ -3257,19 +3323,28 @@ recheck:
3257 dev = vlan_dev_real_dev(dev); 3323 dev = vlan_dev_real_dev(dev);
3258 goto recheck; 3324 goto recheck;
3259 } 3325 }
3260 3326 if (event == NETDEV_UP || event == NETDEV_DOWN) {
3261 if (!is_netxen_netdev(dev)) 3327 /* If this is a bonding device, look for netxen-based slaves*/
3262 goto done; 3328 if (netif_is_bond_master(dev)) {
3263 3329 rcu_read_lock();
3264 adapter = netdev_priv(dev); 3330 for_each_netdev_in_bond_rcu(dev, slave) {
3265 3331 if (!netxen_config_checkdev(slave))
3266 if (!adapter) 3332 continue;
3267 goto done; 3333 adapter = netdev_priv(slave);
3268 3334 netxen_config_indev_addr(adapter,
3269 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 3335 orig_dev, event);
3270 goto done; 3336 }
3271 3337 rcu_read_unlock();
3272 netxen_config_indev_addr(adapter, orig_dev, event); 3338 } else {
3339 if (!netxen_config_checkdev(dev))
3340 goto done;
3341 adapter = netdev_priv(dev);
3342 /* Act only if the actual netxen is the target */
3343 if (orig_dev == dev)
3344 netxen_config_master(dev, event);
3345 netxen_config_indev_addr(adapter, orig_dev, event);
3346 }
3347 }
3273done: 3348done:
3274 return NOTIFY_DONE; 3349 return NOTIFY_DONE;
3275} 3350}
@@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this,
3279 unsigned long event, void *ptr) 3354 unsigned long event, void *ptr)
3280{ 3355{
3281 struct netxen_adapter *adapter; 3356 struct netxen_adapter *adapter;
3282 struct net_device *dev; 3357 struct net_device *dev, *slave;
3283
3284 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3358 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3359 unsigned long ip_event;
3285 3360
3286 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3361 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3287 3362 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
3288recheck: 3363recheck:
3289 if (dev == NULL) 3364 if (dev == NULL)
3290 goto done; 3365 goto done;
@@ -3293,31 +3368,24 @@ recheck:
3293 dev = vlan_dev_real_dev(dev); 3368 dev = vlan_dev_real_dev(dev);
3294 goto recheck; 3369 goto recheck;
3295 } 3370 }
3296 3371 if (event == NETDEV_UP || event == NETDEV_DOWN) {
3297 if (!is_netxen_netdev(dev)) 3372 /* If this is a bonding device, look for netxen-based slaves*/
3298 goto done; 3373 if (netif_is_bond_master(dev)) {
3299 3374 rcu_read_lock();
3300 adapter = netdev_priv(dev); 3375 for_each_netdev_in_bond_rcu(dev, slave) {
3301 3376 if (!netxen_config_checkdev(slave))
3302 if (!adapter || !netxen_destip_supported(adapter)) 3377 continue;
3303 goto done; 3378 adapter = netdev_priv(slave);
3304 3379 netxen_list_config_ip(adapter, ifa, ip_event);
3305 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 3380 }
3306 goto done; 3381 rcu_read_unlock();
3307 3382 } else {
3308 switch (event) { 3383 if (!netxen_config_checkdev(dev))
3309 case NETDEV_UP: 3384 goto done;
3310 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); 3385 adapter = netdev_priv(dev);
3311 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); 3386 netxen_list_config_ip(adapter, ifa, ip_event);
3312 break; 3387 }
3313 case NETDEV_DOWN:
3314 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
3315 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
3316 break;
3317 default:
3318 break;
3319 } 3388 }
3320
3321done: 3389done:
3322 return NOTIFY_DONE; 3390 return NOTIFY_DONE;
3323} 3391}
@@ -3334,7 +3402,7 @@ static void
3334netxen_restore_indev_addr(struct net_device *dev, unsigned long event) 3402netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
3335{ } 3403{ }
3336static void 3404static void
3337netxen_free_vlan_ip_list(struct netxen_adapter *adapter) 3405netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
3338{ } 3406{ }
3339#endif 3407#endif
3340 3408
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 8fd38cb6d26a..91a8fcd6c246 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
312 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 312 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
313 qdev->lrg_buffer_len); 313 qdev->lrg_buffer_len);
314 if (unlikely(!lrg_buf_cb->skb)) { 314 if (unlikely(!lrg_buf_cb->skb)) {
315 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
316 qdev->lrg_buf_skb_check++; 315 qdev->lrg_buf_skb_check++;
317 } else { 316 } else {
318 /* 317 /*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 7722a203e388..4b1fb3faa3b7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -8,4 +8,6 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ 8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
9 qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ 9 qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
10 qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ 10 qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
11 qlcnic_minidump.o 11 qlcnic_minidump.o qlcnic_sriov_common.o
12
13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ba3c72fce1f2..90c253b145ef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,9 +37,9 @@
37#include "qlcnic_83xx_hw.h" 37#include "qlcnic_83xx_hw.h"
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 1 40#define _QLCNIC_LINUX_MINOR 2
41#define _QLCNIC_LINUX_SUBVERSION 35 41#define _QLCNIC_LINUX_SUBVERSION 42
42#define QLCNIC_LINUX_VERSIONID "5.1.35" 42#define QLCNIC_LINUX_VERSIONID "5.2.42"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -347,8 +347,14 @@ struct qlcnic_rx_buffer {
347 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is 347 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
348 * adjusted based on configured MTU. 348 * adjusted based on configured MTU.
349 */ 349 */
350#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3 350#define QLCNIC_INTR_COAL_TYPE_RX 1
351#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256 351#define QLCNIC_INTR_COAL_TYPE_TX 2
352
353#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
354#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
355
356#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64
357#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64
352 358
353#define QLCNIC_INTR_DEFAULT 0x04 359#define QLCNIC_INTR_DEFAULT 0x04
354#define QLCNIC_CONFIG_INTR_COALESCE 3 360#define QLCNIC_CONFIG_INTR_COALESCE 3
@@ -359,6 +365,8 @@ struct qlcnic_nic_intr_coalesce {
359 u8 sts_ring_mask; 365 u8 sts_ring_mask;
360 u16 rx_packets; 366 u16 rx_packets;
361 u16 rx_time_us; 367 u16 rx_time_us;
368 u16 tx_packets;
369 u16 tx_time_us;
362 u16 flag; 370 u16 flag;
363 u32 timer_out; 371 u32 timer_out;
364}; 372};
@@ -449,6 +457,7 @@ struct qlcnic_hardware_context {
449 struct qlc_83xx_idc idc; 457 struct qlc_83xx_idc idc;
450 struct qlc_83xx_fw_info fw_info; 458 struct qlc_83xx_fw_info fw_info;
451 struct qlcnic_intrpt_config *intr_tbl; 459 struct qlcnic_intrpt_config *intr_tbl;
460 struct qlcnic_sriov *sriov;
452 u32 *reg_tbl; 461 u32 *reg_tbl;
453 u32 *ext_reg_tbl; 462 u32 *ext_reg_tbl;
454 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; 463 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
@@ -510,13 +519,13 @@ struct qlcnic_host_sds_ring {
510 int irq; 519 int irq;
511 520
512 dma_addr_t phys_addr; 521 dma_addr_t phys_addr;
513 char name[IFNAMSIZ+4]; 522 char name[IFNAMSIZ + 12];
514} ____cacheline_internodealigned_in_smp; 523} ____cacheline_internodealigned_in_smp;
515 524
516struct qlcnic_host_tx_ring { 525struct qlcnic_host_tx_ring {
517 int irq; 526 int irq;
518 void __iomem *crb_intr_mask; 527 void __iomem *crb_intr_mask;
519 char name[IFNAMSIZ+4]; 528 char name[IFNAMSIZ + 12];
520 u16 ctx_id; 529 u16 ctx_id;
521 u32 producer; 530 u32 producer;
522 u32 sw_consumer; 531 u32 sw_consumer;
@@ -896,6 +905,7 @@ struct qlcnic_ipaddr {
896#define QLCNIC_FW_RESET_OWNER 0x2000 905#define QLCNIC_FW_RESET_OWNER 0x2000
897#define QLCNIC_FW_HANG 0x4000 906#define QLCNIC_FW_HANG 0x4000
898#define QLCNIC_FW_LRO_MSS_CAP 0x8000 907#define QLCNIC_FW_LRO_MSS_CAP 0x8000
908#define QLCNIC_TX_INTR_SHARED 0x10000
899#define QLCNIC_IS_MSI_FAMILY(adapter) \ 909#define QLCNIC_IS_MSI_FAMILY(adapter) \
900 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 910 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
901 911
@@ -914,7 +924,10 @@ struct qlcnic_ipaddr {
914#define __QLCNIC_AER 5 924#define __QLCNIC_AER 5
915#define __QLCNIC_DIAG_RES_ALLOC 6 925#define __QLCNIC_DIAG_RES_ALLOC 6
916#define __QLCNIC_LED_ENABLE 7 926#define __QLCNIC_LED_ENABLE 7
917#define __QLCNIC_ELB_INPROGRESS 8 927#define __QLCNIC_ELB_INPROGRESS 8
928#define __QLCNIC_SRIOV_ENABLE 10
929#define __QLCNIC_SRIOV_CAPABLE 11
930#define __QLCNIC_MBX_POLL_ENABLE 12
918 931
919#define QLCNIC_INTERRUPT_TEST 1 932#define QLCNIC_INTERRUPT_TEST 1
920#define QLCNIC_LOOPBACK_TEST 2 933#define QLCNIC_LOOPBACK_TEST 2
@@ -935,7 +948,7 @@ struct qlcnic_ipaddr {
935struct qlcnic_filter { 948struct qlcnic_filter {
936 struct hlist_node fnode; 949 struct hlist_node fnode;
937 u8 faddr[ETH_ALEN]; 950 u8 faddr[ETH_ALEN];
938 __le16 vlan_id; 951 u16 vlan_id;
939 unsigned long ftime; 952 unsigned long ftime;
940}; 953};
941 954
@@ -972,9 +985,11 @@ struct qlcnic_adapter {
972 u8 fw_fail_cnt; 985 u8 fw_fail_cnt;
973 u8 tx_timeo_cnt; 986 u8 tx_timeo_cnt;
974 u8 need_fw_reset; 987 u8 need_fw_reset;
988 u8 reset_ctx_cnt;
975 989
976 u16 is_up; 990 u16 is_up;
977 u16 pvid; 991 u16 rx_pvid;
992 u16 tx_pvid;
978 993
979 u32 irq; 994 u32 irq;
980 u32 heartbeat; 995 u32 heartbeat;
@@ -1006,9 +1021,11 @@ struct qlcnic_adapter {
1006 struct workqueue_struct *qlcnic_wq; 1021 struct workqueue_struct *qlcnic_wq;
1007 struct delayed_work fw_work; 1022 struct delayed_work fw_work;
1008 struct delayed_work idc_aen_work; 1023 struct delayed_work idc_aen_work;
1024 struct delayed_work mbx_poll_work;
1009 1025
1010 struct qlcnic_filter_hash fhash; 1026 struct qlcnic_filter_hash fhash;
1011 struct qlcnic_filter_hash rx_fhash; 1027 struct qlcnic_filter_hash rx_fhash;
1028 struct list_head vf_mc_list;
1012 1029
1013 spinlock_t tx_clean_lock; 1030 spinlock_t tx_clean_lock;
1014 spinlock_t mac_learn_lock; 1031 spinlock_t mac_learn_lock;
@@ -1051,7 +1068,11 @@ struct qlcnic_info_le {
1051 u8 total_pf; 1068 u8 total_pf;
1052 u8 total_rss_engines; 1069 u8 total_rss_engines;
1053 __le16 max_vports; 1070 __le16 max_vports;
1054 u8 reserved2[64]; 1071 __le16 linkstate_reg_offset;
1072 __le16 bit_offsets;
1073 __le16 max_local_ipv6_addrs;
1074 __le16 max_remote_ipv6_addrs;
1075 u8 reserved2[56];
1055} __packed; 1076} __packed;
1056 1077
1057struct qlcnic_info { 1078struct qlcnic_info {
@@ -1083,6 +1104,10 @@ struct qlcnic_info {
1083 u8 total_pf; 1104 u8 total_pf;
1084 u8 total_rss_engines; 1105 u8 total_rss_engines;
1085 u16 max_vports; 1106 u16 max_vports;
1107 u16 linkstate_reg_offset;
1108 u16 bit_offsets;
1109 u16 max_local_ipv6_addrs;
1110 u16 max_remote_ipv6_addrs;
1086}; 1111};
1087 1112
1088struct qlcnic_pci_info_le { 1113struct qlcnic_pci_info_le {
@@ -1348,6 +1373,7 @@ struct _cdrp_cmd {
1348struct qlcnic_cmd_args { 1373struct qlcnic_cmd_args {
1349 struct _cdrp_cmd req; 1374 struct _cdrp_cmd req;
1350 struct _cdrp_cmd rsp; 1375 struct _cdrp_cmd rsp;
1376 int op_type;
1351}; 1377};
1352 1378
1353int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); 1379int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1430,9 +1456,10 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1430 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); 1456 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
1431int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); 1457int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1432void qlcnic_set_multi(struct net_device *netdev); 1458void qlcnic_set_multi(struct net_device *netdev);
1433int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *); 1459void __qlcnic_set_multi(struct net_device *, u16);
1460int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
1434int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); 1461int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
1435void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); 1462void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
1436 1463
1437int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1464int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1438int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *); 1465int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
@@ -1455,7 +1482,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1455int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1482int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1456netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1483netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1457int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); 1484int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
1458int qlcnic_validate_max_rss(u8, u8); 1485int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
1459void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); 1486void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
1460int qlcnic_enable_msix(struct qlcnic_adapter *, u32); 1487int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
1461 1488
@@ -1509,8 +1536,13 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
1509int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); 1536int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
1510int qlcnic_reset_npar_config(struct qlcnic_adapter *); 1537int qlcnic_reset_npar_config(struct qlcnic_adapter *);
1511int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); 1538int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
1512void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, 1539void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
1513 __le16); 1540int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
1541int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1542int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1543void qlcnic_sriov_vf_schedule_multi(struct net_device *);
1544void qlcnic_vf_add_mc_list(struct net_device *, u16);
1545
1514/* 1546/*
1515 * QLOGIC Board information 1547 * QLOGIC Board information
1516 */ 1548 */
@@ -1567,11 +1599,14 @@ struct qlcnic_hardware_ops {
1567 int (*create_rx_ctx) (struct qlcnic_adapter *); 1599 int (*create_rx_ctx) (struct qlcnic_adapter *);
1568 int (*create_tx_ctx) (struct qlcnic_adapter *, 1600 int (*create_tx_ctx) (struct qlcnic_adapter *,
1569 struct qlcnic_host_tx_ring *, int); 1601 struct qlcnic_host_tx_ring *, int);
1602 void (*del_rx_ctx) (struct qlcnic_adapter *);
1603 void (*del_tx_ctx) (struct qlcnic_adapter *,
1604 struct qlcnic_host_tx_ring *);
1570 int (*setup_link_event) (struct qlcnic_adapter *, int); 1605 int (*setup_link_event) (struct qlcnic_adapter *, int);
1571 int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8); 1606 int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
1572 int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *); 1607 int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
1573 int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *); 1608 int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
1574 int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8); 1609 int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
1575 void (*napi_enable) (struct qlcnic_adapter *); 1610 void (*napi_enable) (struct qlcnic_adapter *);
1576 void (*napi_disable) (struct qlcnic_adapter *); 1611 void (*napi_disable) (struct qlcnic_adapter *);
1577 void (*config_intr_coal) (struct qlcnic_adapter *); 1612 void (*config_intr_coal) (struct qlcnic_adapter *);
@@ -1580,8 +1615,9 @@ struct qlcnic_hardware_ops {
1580 int (*config_loopback) (struct qlcnic_adapter *, u8); 1615 int (*config_loopback) (struct qlcnic_adapter *, u8);
1581 int (*clear_loopback) (struct qlcnic_adapter *, u8); 1616 int (*clear_loopback) (struct qlcnic_adapter *, u8);
1582 int (*config_promisc_mode) (struct qlcnic_adapter *, u32); 1617 int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
1583 void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16); 1618 void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
1584 int (*get_board_info) (struct qlcnic_adapter *); 1619 int (*get_board_info) (struct qlcnic_adapter *);
1620 void (*free_mac_list) (struct qlcnic_adapter *);
1585}; 1621};
1586 1622
1587extern struct qlcnic_nic_template qlcnic_vf_ops; 1623extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1635,7 +1671,10 @@ static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
1635static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1671static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1636 struct qlcnic_cmd_args *cmd) 1672 struct qlcnic_cmd_args *cmd)
1637{ 1673{
1638 return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); 1674 if (adapter->ahw->hw_ops->mbx_cmd)
1675 return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
1676
1677 return -EIO;
1639} 1678}
1640 1679
1641static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter) 1680static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
@@ -1655,12 +1694,14 @@ static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
1655 1694
1656static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter) 1695static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
1657{ 1696{
1658 adapter->ahw->hw_ops->add_sysfs(adapter); 1697 if (adapter->ahw->hw_ops->add_sysfs)
1698 adapter->ahw->hw_ops->add_sysfs(adapter);
1659} 1699}
1660 1700
1661static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter) 1701static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
1662{ 1702{
1663 adapter->ahw->hw_ops->remove_sysfs(adapter); 1703 if (adapter->ahw->hw_ops->remove_sysfs)
1704 adapter->ahw->hw_ops->remove_sysfs(adapter);
1664} 1705}
1665 1706
1666static inline void 1707static inline void
@@ -1681,6 +1722,17 @@ static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
1681 return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring); 1722 return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
1682} 1723}
1683 1724
1725static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
1726{
1727 return adapter->ahw->hw_ops->del_rx_ctx(adapter);
1728}
1729
1730static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
1731 struct qlcnic_host_tx_ring *ptr)
1732{
1733 return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
1734}
1735
1684static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, 1736static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
1685 int enable) 1737 int enable)
1686{ 1738{
@@ -1706,7 +1758,7 @@ static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
1706} 1758}
1707 1759
1708static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, 1760static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
1709 u8 *addr, __le16 id, u8 cmd) 1761 u8 *addr, u16 id, u8 cmd)
1710{ 1762{
1711 return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd); 1763 return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
1712} 1764}
@@ -1765,7 +1817,7 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
1765} 1817}
1766 1818
1767static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, 1819static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1768 u64 *addr, __le16 id) 1820 u64 *addr, u16 id)
1769{ 1821{
1770 adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); 1822 adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
1771} 1823}
@@ -1775,15 +1827,22 @@ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1775 return adapter->ahw->hw_ops->get_board_info(adapter); 1827 return adapter->ahw->hw_ops->get_board_info(adapter);
1776} 1828}
1777 1829
1830static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
1831{
1832 return adapter->ahw->hw_ops->free_mac_list(adapter);
1833}
1834
1778static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1835static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
1779 u32 key) 1836 u32 key)
1780{ 1837{
1781 adapter->nic_ops->request_reset(adapter, key); 1838 if (adapter->nic_ops->request_reset)
1839 adapter->nic_ops->request_reset(adapter, key);
1782} 1840}
1783 1841
1784static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter) 1842static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
1785{ 1843{
1786 adapter->nic_ops->cancel_idc_work(adapter); 1844 if (adapter->nic_ops->cancel_idc_work)
1845 adapter->nic_ops->cancel_idc_work(adapter);
1787} 1846}
1788 1847
1789static inline irqreturn_t 1848static inline irqreturn_t
@@ -1819,6 +1878,7 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1819 writel(0xfbff, adapter->tgt_mask_reg); 1878 writel(0xfbff, adapter->tgt_mask_reg);
1820} 1879}
1821 1880
1881extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
1822extern const struct ethtool_ops qlcnic_ethtool_ops; 1882extern const struct ethtool_ops qlcnic_ethtool_ops;
1823extern const struct ethtool_ops qlcnic_ethtool_failed_ops; 1883extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1824 1884
@@ -1830,7 +1890,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1830 } while (0) 1890 } while (0)
1831 1891
1832#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 1892#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
1893#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
1833#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 1894#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
1895
1834static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) 1896static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1835{ 1897{
1836 unsigned short device = adapter->pdev->device; 1898 unsigned short device = adapter->pdev->device;
@@ -1840,8 +1902,23 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1840static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) 1902static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
1841{ 1903{
1842 unsigned short device = adapter->pdev->device; 1904 unsigned short device = adapter->pdev->device;
1843 return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false; 1905 bool status;
1906
1907 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
1908 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
1909
1910 return status;
1844} 1911}
1845 1912
1913static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
1914{
1915 return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
1916}
1846 1917
1918static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
1919{
1920 unsigned short device = adapter->pdev->device;
1921
1922 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
1923}
1847#endif /* __QLCNIC_H_ */ 1924#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index edd63f1230f3..ea790a93ee7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "qlcnic.h" 8#include "qlcnic.h"
9#include "qlcnic_sriov.h"
9#include <linux/if_vlan.h> 10#include <linux/if_vlan.h>
10#include <linux/ipv6.h> 11#include <linux/ipv6.h>
11#include <linux/ethtool.h> 12#include <linux/ethtool.h>
@@ -13,100 +14,7 @@
13 14
14#define QLCNIC_MAX_TX_QUEUES 1 15#define QLCNIC_MAX_TX_QUEUES 1
15#define RSS_HASHTYPE_IP_TCP 0x3 16#define RSS_HASHTYPE_IP_TCP 0x3
16 17#define QLC_83XX_FW_MBX_CMD 0
17/* status descriptor mailbox data
18 * @phy_addr: physical address of buffer
19 * @sds_ring_size: buffer size
20 * @intrpt_id: interrupt id
21 * @intrpt_val: source of interrupt
22 */
23struct qlcnic_sds_mbx {
24 u64 phy_addr;
25 u8 rsvd1[16];
26 u16 sds_ring_size;
27 u16 rsvd2[3];
28 u16 intrpt_id;
29 u8 intrpt_val;
30 u8 rsvd3[5];
31} __packed;
32
33/* receive descriptor buffer data
34 * phy_addr_reg: physical address of regular buffer
35 * phy_addr_jmb: physical address of jumbo buffer
36 * reg_ring_sz: size of regular buffer
37 * reg_ring_len: no. of entries in regular buffer
38 * jmb_ring_len: no. of entries in jumbo buffer
39 * jmb_ring_sz: size of jumbo buffer
40 */
41struct qlcnic_rds_mbx {
42 u64 phy_addr_reg;
43 u64 phy_addr_jmb;
44 u16 reg_ring_sz;
45 u16 reg_ring_len;
46 u16 jmb_ring_sz;
47 u16 jmb_ring_len;
48} __packed;
49
50/* host producers for regular and jumbo rings */
51struct __host_producer_mbx {
52 u32 reg_buf;
53 u32 jmb_buf;
54} __packed;
55
56/* Receive context mailbox data outbox registers
57 * @state: state of the context
58 * @vport_id: virtual port id
59 * @context_id: receive context id
60 * @num_pci_func: number of pci functions of the port
61 * @phy_port: physical port id
62 */
63struct qlcnic_rcv_mbx_out {
64 u8 rcv_num;
65 u8 sts_num;
66 u16 ctx_id;
67 u8 state;
68 u8 num_pci_func;
69 u8 phy_port;
70 u8 vport_id;
71 u32 host_csmr[QLCNIC_MAX_RING_SETS];
72 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
73} __packed;
74
75struct qlcnic_add_rings_mbx_out {
76 u8 rcv_num;
77 u8 sts_num;
78 u16 ctx_id;
79 u32 host_csmr[QLCNIC_MAX_RING_SETS];
80 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
81} __packed;
82
83/* Transmit context mailbox inbox registers
84 * @phys_addr: DMA address of the transmit buffer
85 * @cnsmr_index: host consumer index
86 * @size: legth of transmit buffer ring
87 * @intr_id: interrput id
88 * @src: src of interrupt
89 */
90struct qlcnic_tx_mbx {
91 u64 phys_addr;
92 u64 cnsmr_index;
93 u16 size;
94 u16 intr_id;
95 u8 src;
96 u8 rsvd[3];
97} __packed;
98
99/* Transmit context mailbox outbox registers
100 * @host_prod: host producer index
101 * @ctx_id: transmit context id
102 * @state: state of the transmit context
103 */
104struct qlcnic_tx_mbx_out {
105 u32 host_prod;
106 u16 ctx_id;
107 u8 state;
108 u8 rsvd;
109} __packed;
110 18
111static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 19static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
112 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 20 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -156,9 +64,11 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
156 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, 64 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
157 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, 65 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
158 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, 66 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
67 {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
68 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
159}; 69};
160 70
161static const u32 qlcnic_83xx_ext_reg_tbl[] = { 71const u32 qlcnic_83xx_ext_reg_tbl[] = {
162 0x38CC, /* Global Reset */ 72 0x38CC, /* Global Reset */
163 0x38F0, /* Wildcard */ 73 0x38F0, /* Wildcard */
164 0x38FC, /* Informant */ 74 0x38FC, /* Informant */
@@ -204,7 +114,7 @@ static const u32 qlcnic_83xx_ext_reg_tbl[] = {
204 0x34A4, /* QLC_83XX_ASIC_TEMP */ 114 0x34A4, /* QLC_83XX_ASIC_TEMP */
205}; 115};
206 116
207static const u32 qlcnic_83xx_reg_tbl[] = { 117const u32 qlcnic_83xx_reg_tbl[] = {
208 0x34A8, /* PEG_HALT_STAT1 */ 118 0x34A8, /* PEG_HALT_STAT1 */
209 0x34AC, /* PEG_HALT_STAT2 */ 119 0x34AC, /* PEG_HALT_STAT2 */
210 0x34B0, /* FW_HEARTBEAT */ 120 0x34B0, /* FW_HEARTBEAT */
@@ -247,6 +157,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
247 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, 157 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
248 .create_rx_ctx = qlcnic_83xx_create_rx_ctx, 158 .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
249 .create_tx_ctx = qlcnic_83xx_create_tx_ctx, 159 .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
160 .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
161 .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
250 .setup_link_event = qlcnic_83xx_setup_link_event, 162 .setup_link_event = qlcnic_83xx_setup_link_event,
251 .get_nic_info = qlcnic_83xx_get_nic_info, 163 .get_nic_info = qlcnic_83xx_get_nic_info,
252 .get_pci_info = qlcnic_83xx_get_pci_info, 164 .get_pci_info = qlcnic_83xx_get_pci_info,
@@ -260,6 +172,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
260 .config_promisc_mode = qlcnic_83xx_nic_set_promisc, 172 .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
261 .change_l2_filter = qlcnic_83xx_change_l2_filter, 173 .change_l2_filter = qlcnic_83xx_change_l2_filter,
262 .get_board_info = qlcnic_83xx_get_port_info, 174 .get_board_info = qlcnic_83xx_get_port_info,
175 .free_mac_list = qlcnic_82xx_free_mac_list,
263}; 176};
264 177
265static struct qlcnic_nic_template qlcnic_83xx_ops = { 178static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -355,14 +268,20 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
355 num_intr)); 268 num_intr));
356 /* account for AEN interrupt MSI-X based interrupts */ 269 /* account for AEN interrupt MSI-X based interrupts */
357 num_msix += 1; 270 num_msix += 1;
358 num_msix += adapter->max_drv_tx_rings; 271
272 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
273 num_msix += adapter->max_drv_tx_rings;
274
359 err = qlcnic_enable_msix(adapter, num_msix); 275 err = qlcnic_enable_msix(adapter, num_msix);
360 if (err == -ENOMEM) 276 if (err == -ENOMEM)
361 return err; 277 return err;
362 if (adapter->flags & QLCNIC_MSIX_ENABLED) 278 if (adapter->flags & QLCNIC_MSIX_ENABLED)
363 num_msix = adapter->ahw->num_msix; 279 num_msix = adapter->ahw->num_msix;
364 else 280 else {
281 if (qlcnic_sriov_vf_check(adapter))
282 return -EINVAL;
365 num_msix = 1; 283 num_msix = 1;
284 }
366 /* setup interrupt mapping table for fw */ 285 /* setup interrupt mapping table for fw */
367 ahw->intr_tbl = vzalloc(num_msix * 286 ahw->intr_tbl = vzalloc(num_msix *
368 sizeof(struct qlcnic_intrpt_config)); 287 sizeof(struct qlcnic_intrpt_config));
@@ -421,12 +340,13 @@ inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
421 writel(0, adapter->ahw->pci_base0 + mask); 340 writel(0, adapter->ahw->pci_base0 + mask);
422} 341}
423 342
424inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter) 343void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
425{ 344{
426 u32 mask; 345 u32 mask;
427 346
428 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); 347 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
429 writel(1, adapter->ahw->pci_base0 + mask); 348 writel(1, adapter->ahw->pci_base0 + mask);
349 QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
430} 350}
431 351
432static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, 352static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
@@ -482,7 +402,8 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
482 402
483 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); 403 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
484 if (event & QLCNIC_MBX_ASYNC_EVENT) 404 if (event & QLCNIC_MBX_ASYNC_EVENT)
485 qlcnic_83xx_process_aen(adapter); 405 __qlcnic_83xx_process_aen(adapter);
406
486out: 407out:
487 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 408 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
488 spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); 409 spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
@@ -535,17 +456,15 @@ done:
535 456
536void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) 457void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
537{ 458{
538 u32 val = 0, num_msix = adapter->ahw->num_msix - 1; 459 u32 num_msix;
460
461 qlcnic_83xx_disable_mbx_intr(adapter);
539 462
540 if (adapter->flags & QLCNIC_MSIX_ENABLED) 463 if (adapter->flags & QLCNIC_MSIX_ENABLED)
541 num_msix = adapter->ahw->num_msix - 1; 464 num_msix = adapter->ahw->num_msix - 1;
542 else 465 else
543 num_msix = 0; 466 num_msix = 0;
544 467
545 QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
546
547 qlcnic_83xx_disable_mbx_intr(adapter);
548
549 msleep(20); 468 msleep(20);
550 synchronize_irq(adapter->msix_entries[num_msix].vector); 469 synchronize_irq(adapter->msix_entries[num_msix].vector);
551 free_irq(adapter->msix_entries[num_msix].vector, adapter); 470 free_irq(adapter->msix_entries[num_msix].vector, adapter);
@@ -595,7 +514,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
595void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) 514void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
596{ 515{
597 u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); 516 u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
598 adapter->ahw->pci_func = val & 0xf; 517 adapter->ahw->pci_func = (val >> 24) & 0xff;
599} 518}
600 519
601int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) 520int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
@@ -707,6 +626,11 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
707 ahw->fw_hal_version = 2; 626 ahw->fw_hal_version = 2;
708 qlcnic_get_func_no(adapter); 627 qlcnic_get_func_no(adapter);
709 628
629 if (qlcnic_sriov_vf_check(adapter)) {
630 qlcnic_sriov_vf_set_ops(adapter);
631 return;
632 }
633
710 /* Determine function privilege level */ 634 /* Determine function privilege level */
711 op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); 635 op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
712 if (op_mode == QLC_83XX_DEFAULT_OPMODE) 636 if (op_mode == QLC_83XX_DEFAULT_OPMODE)
@@ -722,6 +646,9 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
722 ahw->fw_hal_version); 646 ahw->fw_hal_version);
723 adapter->nic_ops = &qlcnic_vf_ops; 647 adapter->nic_ops = &qlcnic_vf_ops;
724 } else { 648 } else {
649 if (pci_find_ext_capability(adapter->pdev,
650 PCI_EXT_CAP_ID_SRIOV))
651 set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
725 adapter->nic_ops = &qlcnic_83xx_ops; 652 adapter->nic_ops = &qlcnic_83xx_ops;
726 } 653 }
727} 654}
@@ -755,7 +682,7 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
755} 682}
756 683
757/* Mailbox response for mac rcode */ 684/* Mailbox response for mac rcode */
758static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) 685u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
759{ 686{
760 u32 fw_data; 687 u32 fw_data;
761 u8 mac_cmd_rcode; 688 u8 mac_cmd_rcode;
@@ -769,7 +696,7 @@ static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
769 return 1; 696 return 1;
770} 697}
771 698
772static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) 699u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
773{ 700{
774 u32 data; 701 u32 data;
775 unsigned long wait_time = 0; 702 unsigned long wait_time = 0;
@@ -832,7 +759,7 @@ poll:
832 /* Get the FW response data */ 759 /* Get the FW response data */
833 fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); 760 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
834 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { 761 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
835 qlcnic_83xx_process_aen(adapter); 762 __qlcnic_83xx_process_aen(adapter);
836 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); 763 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
837 if (mbx_val) 764 if (mbx_val)
838 goto poll; 765 goto poll;
@@ -884,6 +811,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
884 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); 811 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
885 for (i = 0; i < size; i++) { 812 for (i = 0; i < size; i++) {
886 if (type == mbx_tbl[i].cmd) { 813 if (type == mbx_tbl[i].cmd) {
814 mbx->op_type = QLC_83XX_FW_MBX_CMD;
887 mbx->req.num = mbx_tbl[i].in_args; 815 mbx->req.num = mbx_tbl[i].in_args;
888 mbx->rsp.num = mbx_tbl[i].out_args; 816 mbx->rsp.num = mbx_tbl[i].out_args;
889 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), 817 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
@@ -901,10 +829,10 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
901 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 829 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
902 temp = adapter->ahw->fw_hal_version << 29; 830 temp = adapter->ahw->fw_hal_version << 29;
903 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); 831 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
904 break; 832 return 0;
905 } 833 }
906 } 834 }
907 return 0; 835 return -EINVAL;
908} 836}
909 837
910void qlcnic_83xx_idc_aen_work(struct work_struct *work) 838void qlcnic_83xx_idc_aen_work(struct work_struct *work)
@@ -935,7 +863,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
935 return; 863 return;
936} 864}
937 865
938void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) 866void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
939{ 867{
940 u32 event[QLC_83XX_MBX_AEN_CNT]; 868 u32 event[QLC_83XX_MBX_AEN_CNT];
941 int i; 869 int i;
@@ -960,6 +888,9 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
960 break; 888 break;
961 case QLCNIC_MBX_TIME_EXTEND_EVENT: 889 case QLCNIC_MBX_TIME_EXTEND_EVENT:
962 break; 890 break;
891 case QLCNIC_MBX_BC_EVENT:
892 qlcnic_sriov_handle_bc_event(adapter, event[1]);
893 break;
963 case QLCNIC_MBX_SFP_INSERT_EVENT: 894 case QLCNIC_MBX_SFP_INSERT_EVENT:
964 dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", 895 dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
965 QLCNIC_MBX_RSP(event[0])); 896 QLCNIC_MBX_RSP(event[0]));
@@ -977,6 +908,53 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
977 QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); 908 QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
978} 909}
979 910
911static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
912{
913 struct qlcnic_hardware_context *ahw = adapter->ahw;
914 u32 resp, event;
915 unsigned long flags;
916
917 spin_lock_irqsave(&ahw->mbx_lock, flags);
918
919 resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
920 if (resp & QLCNIC_SET_OWNER) {
921 event = readl(QLCNIC_MBX_FW(ahw, 0));
922 if (event & QLCNIC_MBX_ASYNC_EVENT)
923 __qlcnic_83xx_process_aen(adapter);
924 }
925
926 spin_unlock_irqrestore(&ahw->mbx_lock, flags);
927}
928
929static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
930{
931 struct qlcnic_adapter *adapter;
932
933 adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
934
935 if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
936 return;
937
938 qlcnic_83xx_process_aen(adapter);
939 queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
940 (HZ / 10));
941}
942
943void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
944{
945 if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
946 return;
947
948 INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
949}
950
951void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
952{
953 if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
954 return;
955 cancel_delayed_work_sync(&adapter->mbx_poll_work);
956}
957
980static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) 958static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
981{ 959{
982 int index, i, err, sds_mbx_size; 960 int index, i, err, sds_mbx_size;
@@ -1004,7 +982,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
1004 sds = &recv_ctx->sds_rings[i]; 982 sds = &recv_ctx->sds_rings[i];
1005 sds->consumer = 0; 983 sds->consumer = 0;
1006 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); 984 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
1007 sds_mbx.phy_addr = sds->phys_addr; 985 sds_mbx.phy_addr_low = LSD(sds->phys_addr);
986 sds_mbx.phy_addr_high = MSD(sds->phys_addr);
1008 sds_mbx.sds_ring_size = sds->num_desc; 987 sds_mbx.sds_ring_size = sds->num_desc;
1009 988
1010 if (adapter->flags & QLCNIC_MSIX_ENABLED) 989 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1050,6 +1029,32 @@ out:
1050 return err; 1029 return err;
1051} 1030}
1052 1031
1032void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
1033{
1034 int err;
1035 u32 temp = 0;
1036 struct qlcnic_cmd_args cmd;
1037 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1038
1039 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
1040 return;
1041
1042 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1043 cmd.req.arg[0] |= (0x3 << 29);
1044
1045 if (qlcnic_sriov_pf_check(adapter))
1046 qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
1047
1048 cmd.req.arg[1] = recv_ctx->context_id | temp;
1049 err = qlcnic_issue_cmd(adapter, &cmd);
1050 if (err)
1051 dev_err(&adapter->pdev->dev,
1052 "Failed to destroy rx ctx in firmware\n");
1053
1054 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
1055 qlcnic_free_mbx_args(&cmd);
1056}
1057
1053int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) 1058int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1054{ 1059{
1055 int i, err, index, sds_mbx_size, rds_mbx_size; 1060 int i, err, index, sds_mbx_size, rds_mbx_size;
@@ -1080,9 +1085,17 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1080 /* set mailbox hdr and capabilities */ 1085 /* set mailbox hdr and capabilities */
1081 qlcnic_alloc_mbx_args(&cmd, adapter, 1086 qlcnic_alloc_mbx_args(&cmd, adapter,
1082 QLCNIC_CMD_CREATE_RX_CTX); 1087 QLCNIC_CMD_CREATE_RX_CTX);
1088
1089 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1090 cmd.req.arg[0] |= (0x3 << 29);
1091
1083 cmd.req.arg[1] = cap; 1092 cmd.req.arg[1] = cap;
1084 cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | 1093 cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
1085 (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); 1094 (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
1095
1096 if (qlcnic_sriov_pf_check(adapter))
1097 qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
1098 &cmd.req.arg[6]);
1086 /* set up status rings, mbx 8-57/87 */ 1099 /* set up status rings, mbx 8-57/87 */
1087 index = QLC_83XX_HOST_SDS_MBX_IDX; 1100 index = QLC_83XX_HOST_SDS_MBX_IDX;
1088 for (i = 0; i < num_sds; i++) { 1101 for (i = 0; i < num_sds; i++) {
@@ -1090,7 +1103,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1090 sds = &recv_ctx->sds_rings[i]; 1103 sds = &recv_ctx->sds_rings[i];
1091 sds->consumer = 0; 1104 sds->consumer = 0;
1092 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); 1105 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
1093 sds_mbx.phy_addr = sds->phys_addr; 1106 sds_mbx.phy_addr_low = LSD(sds->phys_addr);
1107 sds_mbx.phy_addr_high = MSD(sds->phys_addr);
1094 sds_mbx.sds_ring_size = sds->num_desc; 1108 sds_mbx.sds_ring_size = sds->num_desc;
1095 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1109 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1096 intrpt_id = ahw->intr_tbl[i].id; 1110 intrpt_id = ahw->intr_tbl[i].id;
@@ -1110,13 +1124,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1110 rds = &recv_ctx->rds_rings[0]; 1124 rds = &recv_ctx->rds_rings[0];
1111 rds->producer = 0; 1125 rds->producer = 0;
1112 memset(&rds_mbx, 0, rds_mbx_size); 1126 memset(&rds_mbx, 0, rds_mbx_size);
1113 rds_mbx.phy_addr_reg = rds->phys_addr; 1127 rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
1128 rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
1114 rds_mbx.reg_ring_sz = rds->dma_size; 1129 rds_mbx.reg_ring_sz = rds->dma_size;
1115 rds_mbx.reg_ring_len = rds->num_desc; 1130 rds_mbx.reg_ring_len = rds->num_desc;
1116 /* Jumbo ring */ 1131 /* Jumbo ring */
1117 rds = &recv_ctx->rds_rings[1]; 1132 rds = &recv_ctx->rds_rings[1];
1118 rds->producer = 0; 1133 rds->producer = 0;
1119 rds_mbx.phy_addr_jmb = rds->phys_addr; 1134 rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
1135 rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
1120 rds_mbx.jmb_ring_sz = rds->dma_size; 1136 rds_mbx.jmb_ring_sz = rds->dma_size;
1121 rds_mbx.jmb_ring_len = rds->num_desc; 1137 rds_mbx.jmb_ring_len = rds->num_desc;
1122 buf = &cmd.req.arg[index]; 1138 buf = &cmd.req.arg[index];
@@ -1163,16 +1179,39 @@ out:
1163 return err; 1179 return err;
1164} 1180}
1165 1181
1182void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
1183 struct qlcnic_host_tx_ring *tx_ring)
1184{
1185 struct qlcnic_cmd_args cmd;
1186 u32 temp = 0;
1187
1188 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
1189 return;
1190
1191 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1192 cmd.req.arg[0] |= (0x3 << 29);
1193
1194 if (qlcnic_sriov_pf_check(adapter))
1195 qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
1196
1197 cmd.req.arg[1] = tx_ring->ctx_id | temp;
1198 if (qlcnic_issue_cmd(adapter, &cmd))
1199 dev_err(&adapter->pdev->dev,
1200 "Failed to destroy tx ctx in firmware\n");
1201 qlcnic_free_mbx_args(&cmd);
1202}
1203
1166int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, 1204int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1167 struct qlcnic_host_tx_ring *tx, int ring) 1205 struct qlcnic_host_tx_ring *tx, int ring)
1168{ 1206{
1169 int err; 1207 int err;
1170 u16 msix_id; 1208 u16 msix_id;
1171 u32 *buf, intr_mask; 1209 u32 *buf, intr_mask, temp = 0;
1172 struct qlcnic_cmd_args cmd; 1210 struct qlcnic_cmd_args cmd;
1173 struct qlcnic_tx_mbx mbx; 1211 struct qlcnic_tx_mbx mbx;
1174 struct qlcnic_tx_mbx_out *mbx_out; 1212 struct qlcnic_tx_mbx_out *mbx_out;
1175 struct qlcnic_hardware_context *ahw = adapter->ahw; 1213 struct qlcnic_hardware_context *ahw = adapter->ahw;
1214 u32 msix_vector;
1176 1215
1177 /* Reset host resources */ 1216 /* Reset host resources */
1178 tx->producer = 0; 1217 tx->producer = 0;
@@ -1182,13 +1221,21 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1182 memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); 1221 memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
1183 1222
1184 /* setup mailbox inbox registerss */ 1223 /* setup mailbox inbox registerss */
1185 mbx.phys_addr = tx->phys_addr; 1224 mbx.phys_addr_low = LSD(tx->phys_addr);
1186 mbx.cnsmr_index = tx->hw_cons_phys_addr; 1225 mbx.phys_addr_high = MSD(tx->phys_addr);
1226 mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
1227 mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
1187 mbx.size = tx->num_desc; 1228 mbx.size = tx->num_desc;
1188 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1229 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1189 msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; 1230 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
1190 else 1231 msix_vector = adapter->max_sds_rings + ring;
1232 else
1233 msix_vector = adapter->max_sds_rings - 1;
1234 msix_id = ahw->intr_tbl[msix_vector].id;
1235 } else {
1191 msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); 1236 msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
1237 }
1238
1192 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 1239 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
1193 mbx.intr_id = msix_id; 1240 mbx.intr_id = msix_id;
1194 else 1241 else
@@ -1196,8 +1243,15 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1196 mbx.src = 0; 1243 mbx.src = 0;
1197 1244
1198 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); 1245 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
1246
1247 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1248 cmd.req.arg[0] |= (0x3 << 29);
1249
1250 if (qlcnic_sriov_pf_check(adapter))
1251 qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
1252
1199 cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; 1253 cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
1200 cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES; 1254 cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
1201 buf = &cmd.req.arg[6]; 1255 buf = &cmd.req.arg[6];
1202 memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); 1256 memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
1203 /* send the mailbox command*/ 1257 /* send the mailbox command*/
@@ -1210,7 +1264,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1210 mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; 1264 mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
1211 tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; 1265 tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
1212 tx->ctx_id = mbx_out->ctx_id; 1266 tx->ctx_id = mbx_out->ctx_id;
1213 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1267 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1268 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1214 intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; 1269 intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
1215 tx->crb_intr_mask = ahw->pci_base0 + intr_mask; 1270 tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
1216 } 1271 }
@@ -1267,7 +1322,8 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
1267 1322
1268 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1323 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1269 /* disable and free mailbox interrupt */ 1324 /* disable and free mailbox interrupt */
1270 qlcnic_83xx_free_mbx_intr(adapter); 1325 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1326 qlcnic_83xx_free_mbx_intr(adapter);
1271 adapter->ahw->loopback_state = 0; 1327 adapter->ahw->loopback_state = 0;
1272 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1328 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1273 } 1329 }
@@ -1295,12 +1351,14 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1295 qlcnic_detach(adapter); 1351 qlcnic_detach(adapter);
1296 1352
1297 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1353 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1298 err = qlcnic_83xx_setup_mbx_intr(adapter); 1354 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1299 if (err) { 1355 err = qlcnic_83xx_setup_mbx_intr(adapter);
1300 dev_err(&adapter->pdev->dev, 1356 if (err) {
1301 "%s: failed to setup mbx interrupt\n", 1357 dev_err(&adapter->pdev->dev,
1302 __func__); 1358 "%s: failed to setup mbx interrupt\n",
1303 goto out; 1359 __func__);
1360 goto out;
1361 }
1304 } 1362 }
1305 } 1363 }
1306 adapter->ahw->diag_test = 0; 1364 adapter->ahw->diag_test = 0;
@@ -1373,12 +1431,60 @@ mbx_err:
1373 } 1431 }
1374} 1432}
1375 1433
1434int qlcnic_83xx_set_led(struct net_device *netdev,
1435 enum ethtool_phys_id_state state)
1436{
1437 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1438 int err = -EIO, active = 1;
1439
1440 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1441 netdev_warn(netdev,
1442 "LED test is not supported in non-privileged mode\n");
1443 return -EOPNOTSUPP;
1444 }
1445
1446 switch (state) {
1447 case ETHTOOL_ID_ACTIVE:
1448 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
1449 return -EBUSY;
1450
1451 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1452 break;
1453
1454 err = qlcnic_83xx_config_led(adapter, active, 0);
1455 if (err)
1456 netdev_err(netdev, "Failed to set LED blink state\n");
1457 break;
1458 case ETHTOOL_ID_INACTIVE:
1459 active = 0;
1460
1461 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1462 break;
1463
1464 err = qlcnic_83xx_config_led(adapter, active, 0);
1465 if (err)
1466 netdev_err(netdev, "Failed to reset LED blink state\n");
1467 break;
1468
1469 default:
1470 return -EINVAL;
1471 }
1472
1473 if (!active || err)
1474 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
1475
1476 return err;
1477}
1478
1376void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, 1479void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
1377 int enable) 1480 int enable)
1378{ 1481{
1379 struct qlcnic_cmd_args cmd; 1482 struct qlcnic_cmd_args cmd;
1380 int status; 1483 int status;
1381 1484
1485 if (qlcnic_sriov_vf_check(adapter))
1486 return;
1487
1382 if (enable) { 1488 if (enable) {
1383 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); 1489 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
1384 cmd.req.arg[1] = BIT_0 | BIT_31; 1490 cmd.req.arg[1] = BIT_0 | BIT_31;
@@ -1441,24 +1547,35 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
1441 return err; 1547 return err;
1442} 1548}
1443 1549
1550static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
1551 u32 *interface_id)
1552{
1553 if (qlcnic_sriov_pf_check(adapter)) {
1554 qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
1555 } else {
1556 if (!qlcnic_sriov_vf_check(adapter))
1557 *interface_id = adapter->recv_ctx->context_id << 16;
1558 }
1559}
1560
1444int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 1561int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
1445{ 1562{
1446 int err; 1563 int err;
1447 u32 temp; 1564 u32 temp = 0;
1448 struct qlcnic_cmd_args cmd; 1565 struct qlcnic_cmd_args cmd;
1449 1566
1450 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1567 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1451 return -EIO; 1568 return -EIO;
1452 1569
1453 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); 1570 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
1454 temp = adapter->recv_ctx->context_id << 16; 1571 qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
1455 cmd.req.arg[1] = (mode ? 1 : 0) | temp; 1572 cmd.req.arg[1] = (mode ? 1 : 0) | temp;
1456 err = qlcnic_issue_cmd(adapter, &cmd); 1573 err = qlcnic_issue_cmd(adapter, &cmd);
1457 if (err) 1574 if (err)
1458 dev_info(&adapter->pdev->dev, 1575 dev_info(&adapter->pdev->dev,
1459 "Promiscous mode config failed\n"); 1576 "Promiscous mode config failed\n");
1460 qlcnic_free_mbx_args(&cmd);
1461 1577
1578 qlcnic_free_mbx_args(&cmd);
1462 return err; 1579 return err;
1463} 1580}
1464 1581
@@ -1490,7 +1607,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1490 /* Poll for link up event before running traffic */ 1607 /* Poll for link up event before running traffic */
1491 do { 1608 do {
1492 msleep(500); 1609 msleep(500);
1493 qlcnic_83xx_process_aen(adapter); 1610 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1611 qlcnic_83xx_process_aen(adapter);
1612
1494 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1613 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
1495 dev_info(&adapter->pdev->dev, 1614 dev_info(&adapter->pdev->dev,
1496 "Firmware didn't sent link up event to loopback request\n"); 1615 "Firmware didn't sent link up event to loopback request\n");
@@ -1550,7 +1669,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1550 /* Wait for Link and IDC Completion AEN */ 1669 /* Wait for Link and IDC Completion AEN */
1551 do { 1670 do {
1552 msleep(300); 1671 msleep(300);
1553 qlcnic_83xx_process_aen(adapter); 1672 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1673 qlcnic_83xx_process_aen(adapter);
1674
1554 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1675 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
1555 dev_err(&adapter->pdev->dev, 1676 dev_err(&adapter->pdev->dev,
1556 "FW did not generate IDC completion AEN\n"); 1677 "FW did not generate IDC completion AEN\n");
@@ -1590,7 +1711,9 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1590 /* Wait for Link and IDC Completion AEN */ 1711 /* Wait for Link and IDC Completion AEN */
1591 do { 1712 do {
1592 msleep(300); 1713 msleep(300);
1593 qlcnic_83xx_process_aen(adapter); 1714 if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
1715 qlcnic_83xx_process_aen(adapter);
1716
1594 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { 1717 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
1595 dev_err(&adapter->pdev->dev, 1718 dev_err(&adapter->pdev->dev,
1596 "Firmware didn't sent IDC completion AEN\n"); 1719 "Firmware didn't sent IDC completion AEN\n");
@@ -1604,21 +1727,31 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1604 return status; 1727 return status;
1605} 1728}
1606 1729
1730static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
1731 u32 *interface_id)
1732{
1733 if (qlcnic_sriov_pf_check(adapter)) {
1734 qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
1735 } else {
1736 if (!qlcnic_sriov_vf_check(adapter))
1737 *interface_id = adapter->recv_ctx->context_id << 16;
1738 }
1739}
1740
1607void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, 1741void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
1608 int mode) 1742 int mode)
1609{ 1743{
1610 int err; 1744 int err;
1611 u32 temp, temp_ip; 1745 u32 temp = 0, temp_ip;
1612 struct qlcnic_cmd_args cmd; 1746 struct qlcnic_cmd_args cmd;
1613 1747
1614 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); 1748 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
1615 if (mode == QLCNIC_IP_UP) { 1749 qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
1616 temp = adapter->recv_ctx->context_id << 16; 1750
1751 if (mode == QLCNIC_IP_UP)
1617 cmd.req.arg[1] = 1 | temp; 1752 cmd.req.arg[1] = 1 | temp;
1618 } else { 1753 else
1619 temp = adapter->recv_ctx->context_id << 16;
1620 cmd.req.arg[1] = 2 | temp; 1754 cmd.req.arg[1] = 2 | temp;
1621 }
1622 1755
1623 /* 1756 /*
1624 * Adapter needs IP address in network byte order. 1757 * Adapter needs IP address in network byte order.
@@ -1635,6 +1768,7 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
1635 dev_err(&adapter->netdev->dev, 1768 dev_err(&adapter->netdev->dev,
1636 "could not notify %s IP 0x%x request\n", 1769 "could not notify %s IP 0x%x request\n",
1637 (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); 1770 (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
1771
1638 qlcnic_free_mbx_args(&cmd); 1772 qlcnic_free_mbx_args(&cmd);
1639} 1773}
1640 1774
@@ -1701,11 +1835,22 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
1701 1835
1702} 1836}
1703 1837
1838static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
1839 u32 *interface_id)
1840{
1841 if (qlcnic_sriov_pf_check(adapter)) {
1842 qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
1843 } else {
1844 if (!qlcnic_sriov_vf_check(adapter))
1845 *interface_id = adapter->recv_ctx->context_id << 16;
1846 }
1847}
1848
1704int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 1849int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1705 __le16 vlan_id, u8 op) 1850 u16 vlan_id, u8 op)
1706{ 1851{
1707 int err; 1852 int err;
1708 u32 *buf; 1853 u32 *buf, temp = 0;
1709 struct qlcnic_cmd_args cmd; 1854 struct qlcnic_cmd_args cmd;
1710 struct qlcnic_macvlan_mbx mv; 1855 struct qlcnic_macvlan_mbx mv;
1711 1856
@@ -1715,11 +1860,21 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1715 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); 1860 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
1716 if (err) 1861 if (err)
1717 return err; 1862 return err;
1718 cmd.req.arg[1] = op | (1 << 8) |
1719 (adapter->recv_ctx->context_id << 16);
1720 1863
1721 mv.vlan = le16_to_cpu(vlan_id); 1864 if (vlan_id)
1722 memcpy(&mv.mac, addr, ETH_ALEN); 1865 op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
1866 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
1867
1868 cmd.req.arg[1] = op | (1 << 8);
1869 qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
1870 cmd.req.arg[1] |= temp;
1871 mv.vlan = vlan_id;
1872 mv.mac_addr0 = addr[0];
1873 mv.mac_addr1 = addr[1];
1874 mv.mac_addr2 = addr[2];
1875 mv.mac_addr3 = addr[3];
1876 mv.mac_addr4 = addr[4];
1877 mv.mac_addr5 = addr[5];
1723 buf = &cmd.req.arg[2]; 1878 buf = &cmd.req.arg[2];
1724 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 1879 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
1725 err = qlcnic_issue_cmd(adapter, &cmd); 1880 err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1732,7 +1887,7 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1732} 1887}
1733 1888
1734void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, 1889void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
1735 __le16 vlan_id) 1890 u16 vlan_id)
1736{ 1891{
1737 u8 mac[ETH_ALEN]; 1892 u8 mac[ETH_ALEN];
1738 memcpy(&mac, addr, ETH_ALEN); 1893 memcpy(&mac, addr, ETH_ALEN);
@@ -1782,7 +1937,7 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
1782void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter) 1937void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
1783{ 1938{
1784 int err; 1939 int err;
1785 u32 temp; 1940 u16 temp;
1786 struct qlcnic_cmd_args cmd; 1941 struct qlcnic_cmd_args cmd;
1787 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; 1942 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
1788 1943
@@ -1790,10 +1945,18 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
1790 return; 1945 return;
1791 1946
1792 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); 1947 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
1793 cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16); 1948 if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
1949 temp = adapter->recv_ctx->context_id;
1950 cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
1951 temp = coal->rx_time_us;
1952 cmd.req.arg[2] = coal->rx_packets | temp << 16;
1953 } else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
1954 temp = adapter->tx_ring->ctx_id;
1955 cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
1956 temp = coal->tx_time_us;
1957 cmd.req.arg[2] = coal->tx_packets | temp << 16;
1958 }
1794 cmd.req.arg[3] = coal->flag; 1959 cmd.req.arg[3] = coal->flag;
1795 temp = coal->rx_time_us << 16;
1796 cmd.req.arg[2] = coal->rx_packets | temp;
1797 err = qlcnic_issue_cmd(adapter, &cmd); 1960 err = qlcnic_issue_cmd(adapter, &cmd);
1798 if (err != QLCNIC_RCODE_SUCCESS) 1961 if (err != QLCNIC_RCODE_SUCCESS)
1799 dev_info(&adapter->pdev->dev, 1962 dev_info(&adapter->pdev->dev,
@@ -1832,7 +1995,7 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
1832 1995
1833 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); 1996 event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
1834 if (event & QLCNIC_MBX_ASYNC_EVENT) 1997 if (event & QLCNIC_MBX_ASYNC_EVENT)
1835 qlcnic_83xx_process_aen(adapter); 1998 __qlcnic_83xx_process_aen(adapter);
1836out: 1999out:
1837 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); 2000 mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
1838 writel(0, adapter->ahw->pci_base0 + mask); 2001 writel(0, adapter->ahw->pci_base0 + mask);
@@ -2008,14 +2171,17 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2008int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) 2171int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
2009{ 2172{
2010 int i, index, err; 2173 int i, index, err;
2011 bool type;
2012 u8 max_ints; 2174 u8 max_ints;
2013 u32 val, temp; 2175 u32 val, temp, type;
2014 struct qlcnic_cmd_args cmd; 2176 struct qlcnic_cmd_args cmd;
2015 2177
2016 max_ints = adapter->ahw->num_msix - 1; 2178 max_ints = adapter->ahw->num_msix - 1;
2017 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); 2179 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
2018 cmd.req.arg[1] = max_ints; 2180 cmd.req.arg[1] = max_ints;
2181
2182 if (qlcnic_sriov_vf_check(adapter))
2183 cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
2184
2019 for (i = 0, index = 2; i < max_ints; i++) { 2185 for (i = 0, index = 2; i < max_ints; i++) {
2020 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; 2186 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
2021 val = type | (adapter->ahw->intr_tbl[i].type << 4); 2187 val = type | (adapter->ahw->intr_tbl[i].type << 4);
@@ -2169,7 +2335,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
2169 return 0; 2335 return 0;
2170} 2336}
2171 2337
2172static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) 2338int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
2173{ 2339{
2174 int ret; 2340 int ret;
2175 u32 cmd; 2341 u32 cmd;
@@ -2187,7 +2353,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
2187 return 0; 2353 return 0;
2188} 2354}
2189 2355
2190static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter) 2356int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
2191{ 2357{
2192 int ret; 2358 int ret;
2193 2359
@@ -2261,7 +2427,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
2261 return -EIO; 2427 return -EIO;
2262 2428
2263 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { 2429 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
2264 ret = qlcnic_83xx_enable_flash_write_op(adapter); 2430 ret = qlcnic_83xx_enable_flash_write(adapter);
2265 if (ret) { 2431 if (ret) {
2266 qlcnic_83xx_unlock_flash(adapter); 2432 qlcnic_83xx_unlock_flash(adapter);
2267 dev_err(&adapter->pdev->dev, 2433 dev_err(&adapter->pdev->dev,
@@ -2303,7 +2469,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
2303 } 2469 }
2304 2470
2305 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { 2471 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
2306 ret = qlcnic_83xx_disable_flash_write_op(adapter); 2472 ret = qlcnic_83xx_disable_flash_write(adapter);
2307 if (ret) { 2473 if (ret) {
2308 qlcnic_83xx_unlock_flash(adapter); 2474 qlcnic_83xx_unlock_flash(adapter);
2309 dev_err(&adapter->pdev->dev, 2475 dev_err(&adapter->pdev->dev,
@@ -2343,8 +2509,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2343 u32 temp; 2509 u32 temp;
2344 int ret = -EIO; 2510 int ret = -EIO;
2345 2511
2346 if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) || 2512 if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
2347 (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) { 2513 (count > QLC_83XX_FLASH_WRITE_MAX)) {
2348 dev_err(&adapter->pdev->dev, 2514 dev_err(&adapter->pdev->dev,
2349 "%s: Invalid word count\n", __func__); 2515 "%s: Invalid word count\n", __func__);
2350 return -EIO; 2516 return -EIO;
@@ -2622,13 +2788,19 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2622 2788
2623int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 2789int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
2624{ 2790{
2791 u8 pci_func;
2625 int err; 2792 int err;
2626 u32 config = 0, state; 2793 u32 config = 0, state;
2627 struct qlcnic_cmd_args cmd; 2794 struct qlcnic_cmd_args cmd;
2628 struct qlcnic_hardware_context *ahw = adapter->ahw; 2795 struct qlcnic_hardware_context *ahw = adapter->ahw;
2629 2796
2630 state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func)); 2797 if (qlcnic_sriov_vf_check(adapter))
2631 if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) { 2798 pci_func = adapter->portnum;
2799 else
2800 pci_func = ahw->pci_func;
2801
2802 state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
2803 if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
2632 dev_info(&adapter->pdev->dev, "link state down\n"); 2804 dev_info(&adapter->pdev->dev, "link state down\n");
2633 return config; 2805 return config;
2634 } 2806 }
@@ -2758,6 +2930,9 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
2758 /* fill in MAC rx frame stats */ 2930 /* fill in MAC rx frame stats */
2759 for (k += 6; k < 80; k += 2) 2931 for (k += 6; k < 80; k += 2)
2760 data = qlcnic_83xx_copy_stats(cmd, data, k); 2932 data = qlcnic_83xx_copy_stats(cmd, data, k);
2933 /* fill in eSwitch stats */
2934 for (; k < total_regs; k += 2)
2935 data = qlcnic_83xx_copy_stats(cmd, data, k);
2761 break; 2936 break;
2762 case QLC_83XX_STAT_RX: 2937 case QLC_83XX_STAT_RX:
2763 for (k = 2; k < 8; k += 2) 2938 for (k = 2; k < 8; k += 2)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 61f81f6c84a9..1f1d85e6f2af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -12,6 +12,8 @@
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include "qlcnic_hw.h" 13#include "qlcnic_hw.h"
14 14
15#define QLCNIC_83XX_BAR0_LENGTH 0x4000
16
15/* Directly mapped registers */ 17/* Directly mapped registers */
16#define QLC_83XX_CRB_WIN_BASE 0x3800 18#define QLC_83XX_CRB_WIN_BASE 0x3800
17#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4)) 19#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
@@ -86,6 +88,153 @@
86 88
87#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 89#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
88 90
91/* status descriptor mailbox data
92 * @phy_addr_{low|high}: physical address of buffer
93 * @sds_ring_size: buffer size
94 * @intrpt_id: interrupt id
95 * @intrpt_val: source of interrupt
96 */
97struct qlcnic_sds_mbx {
98 u32 phy_addr_low;
99 u32 phy_addr_high;
100 u32 rsvd1[4];
101#if defined(__LITTLE_ENDIAN)
102 u16 sds_ring_size;
103 u16 rsvd2;
104 u16 rsvd3[2];
105 u16 intrpt_id;
106 u8 intrpt_val;
107 u8 rsvd4;
108#elif defined(__BIG_ENDIAN)
109 u16 rsvd2;
110 u16 sds_ring_size;
111 u16 rsvd3[2];
112 u8 rsvd4;
113 u8 intrpt_val;
114 u16 intrpt_id;
115#endif
116 u32 rsvd5;
117} __packed;
118
119/* receive descriptor buffer data
120 * phy_addr_reg_{low|high}: physical address of regular buffer
121 * phy_addr_jmb_{low|high}: physical address of jumbo buffer
122 * reg_ring_sz: size of regular buffer
123 * reg_ring_len: no. of entries in regular buffer
124 * jmb_ring_len: no. of entries in jumbo buffer
125 * jmb_ring_sz: size of jumbo buffer
126 */
127struct qlcnic_rds_mbx {
128 u32 phy_addr_reg_low;
129 u32 phy_addr_reg_high;
130 u32 phy_addr_jmb_low;
131 u32 phy_addr_jmb_high;
132#if defined(__LITTLE_ENDIAN)
133 u16 reg_ring_sz;
134 u16 reg_ring_len;
135 u16 jmb_ring_sz;
136 u16 jmb_ring_len;
137#elif defined(__BIG_ENDIAN)
138 u16 reg_ring_len;
139 u16 reg_ring_sz;
140 u16 jmb_ring_len;
141 u16 jmb_ring_sz;
142#endif
143} __packed;
144
145/* host producers for regular and jumbo rings */
146struct __host_producer_mbx {
147 u32 reg_buf;
148 u32 jmb_buf;
149} __packed;
150
151/* Receive context mailbox data outbox registers
152 * @state: state of the context
153 * @vport_id: virtual port id
154 * @context_id: receive context id
155 * @num_pci_func: number of pci functions of the port
156 * @phy_port: physical port id
157 */
158struct qlcnic_rcv_mbx_out {
159#if defined(__LITTLE_ENDIAN)
160 u8 rcv_num;
161 u8 sts_num;
162 u16 ctx_id;
163 u8 state;
164 u8 num_pci_func;
165 u8 phy_port;
166 u8 vport_id;
167#elif defined(__BIG_ENDIAN)
168 u16 ctx_id;
169 u8 sts_num;
170 u8 rcv_num;
171 u8 vport_id;
172 u8 phy_port;
173 u8 num_pci_func;
174 u8 state;
175#endif
176 u32 host_csmr[QLCNIC_MAX_RING_SETS];
177 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
178} __packed;
179
180struct qlcnic_add_rings_mbx_out {
181#if defined(__LITTLE_ENDIAN)
182 u8 rcv_num;
183 u8 sts_num;
184 u16 ctx_id;
185#elif defined(__BIG_ENDIAN)
186 u16 ctx_id;
187 u8 sts_num;
188 u8 rcv_num;
189#endif
190 u32 host_csmr[QLCNIC_MAX_RING_SETS];
191 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
192} __packed;
193
194/* Transmit context mailbox inbox registers
195 * @phys_addr_{low|high}: DMA address of the transmit buffer
196 * @cnsmr_index_{low|high}: host consumer index
197 * @size: legth of transmit buffer ring
198 * @intr_id: interrput id
199 * @src: src of interrupt
200 */
201struct qlcnic_tx_mbx {
202 u32 phys_addr_low;
203 u32 phys_addr_high;
204 u32 cnsmr_index_low;
205 u32 cnsmr_index_high;
206#if defined(__LITTLE_ENDIAN)
207 u16 size;
208 u16 intr_id;
209 u8 src;
210 u8 rsvd[3];
211#elif defined(__BIG_ENDIAN)
212 u16 intr_id;
213 u16 size;
214 u8 rsvd[3];
215 u8 src;
216#endif
217} __packed;
218
219/* Transmit context mailbox outbox registers
220 * @host_prod: host producer index
221 * @ctx_id: transmit context id
222 * @state: state of the transmit context
223 */
224
225struct qlcnic_tx_mbx_out {
226 u32 host_prod;
227#if defined(__LITTLE_ENDIAN)
228 u16 ctx_id;
229 u8 state;
230 u8 rsvd;
231#elif defined(__BIG_ENDIAN)
232 u8 rsvd;
233 u8 state;
234 u16 ctx_id;
235#endif
236} __packed;
237
89struct qlcnic_intrpt_config { 238struct qlcnic_intrpt_config {
90 u8 type; 239 u8 type;
91 u8 enabled; 240 u8 enabled;
@@ -94,8 +243,23 @@ struct qlcnic_intrpt_config {
94}; 243};
95 244
96struct qlcnic_macvlan_mbx { 245struct qlcnic_macvlan_mbx {
97 u8 mac[ETH_ALEN]; 246#if defined(__LITTLE_ENDIAN)
247 u8 mac_addr0;
248 u8 mac_addr1;
249 u8 mac_addr2;
250 u8 mac_addr3;
251 u8 mac_addr4;
252 u8 mac_addr5;
98 u16 vlan; 253 u16 vlan;
254#elif defined(__BIG_ENDIAN)
255 u8 mac_addr3;
256 u8 mac_addr2;
257 u8 mac_addr1;
258 u8 mac_addr0;
259 u16 vlan;
260 u8 mac_addr5;
261 u8 mac_addr4;
262#endif
99}; 263};
100 264
101struct qlc_83xx_fw_info { 265struct qlc_83xx_fw_info {
@@ -153,6 +317,18 @@ struct qlc_83xx_idc {
153 char **name; 317 char **name;
154}; 318};
155 319
320/* Device States */
321enum qlcnic_83xx_states {
322 QLC_83XX_IDC_DEV_UNKNOWN,
323 QLC_83XX_IDC_DEV_COLD,
324 QLC_83XX_IDC_DEV_INIT,
325 QLC_83XX_IDC_DEV_READY,
326 QLC_83XX_IDC_DEV_NEED_RESET,
327 QLC_83XX_IDC_DEV_NEED_QUISCENT,
328 QLC_83XX_IDC_DEV_FAILED,
329 QLC_83XX_IDC_DEV_QUISCENT
330};
331
156#define QLCNIC_MBX_RSP(reg) LSW(reg) 332#define QLCNIC_MBX_RSP(reg) LSW(reg)
157#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF) 333#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
158#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F) 334#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
@@ -205,7 +381,7 @@ struct qlc_83xx_idc {
205#define QLC_83XX_STAT_MAC 1 381#define QLC_83XX_STAT_MAC 1
206#define QLC_83XX_TX_STAT_REGS 14 382#define QLC_83XX_TX_STAT_REGS 14
207#define QLC_83XX_RX_STAT_REGS 40 383#define QLC_83XX_RX_STAT_REGS 40
208#define QLC_83XX_MAC_STAT_REGS 80 384#define QLC_83XX_MAC_STAT_REGS 94
209 385
210#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2))) 386#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
211#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2)) 387#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
@@ -226,6 +402,7 @@ struct qlc_83xx_idc {
226#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) 402#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
227#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF 403#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
228#define QLC_83XX_DEFAULT_MODE 0x0 404#define QLC_83XX_DEFAULT_MODE 0x0
405#define QLC_83XX_SRIOV_MODE 0x1
229#define QLCNIC_BRDTYPE_83XX_10G 0x0083 406#define QLCNIC_BRDTYPE_83XX_10G 0x0083
230 407
231#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010 408#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
@@ -242,8 +419,8 @@ struct qlc_83xx_idc {
242#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca 419#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
243#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000 420#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
244#define QLC_83XX_FLASH_STATUS_READY 0x6 421#define QLC_83XX_FLASH_STATUS_READY 0x6
245#define QLC_83XX_FLASH_BULK_WRITE_MIN 2 422#define QLC_83XX_FLASH_WRITE_MIN 2
246#define QLC_83XX_FLASH_BULK_WRITE_MAX 64 423#define QLC_83XX_FLASH_WRITE_MAX 64
247#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1 424#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
248#define QLC_83XX_ERASE_MODE 1 425#define QLC_83XX_ERASE_MODE 1
249#define QLC_83XX_WRITE_MODE 2 426#define QLC_83XX_WRITE_MODE 2
@@ -336,7 +513,7 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
336int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); 513int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
337int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); 514int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
338int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *); 515int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
339void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16); 516void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
340int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); 517int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
341int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 518int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
342void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int); 519void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
@@ -351,11 +528,14 @@ int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
351int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); 528int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
352int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, 529int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
353 struct qlcnic_host_tx_ring *, int); 530 struct qlcnic_host_tx_ring *, int);
531void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
532void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
533 struct qlcnic_host_tx_ring *);
354int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 534int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
355int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int); 535int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
356void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); 536void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
357int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool); 537int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
358int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); 538int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
359int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *); 539int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
360void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, 540void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
361 struct qlcnic_cmd_args *); 541 struct qlcnic_cmd_args *);
@@ -368,6 +548,7 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
368irqreturn_t qlcnic_83xx_handle_aen(int, void *); 548irqreturn_t qlcnic_83xx_handle_aen(int, void *);
369int qlcnic_83xx_get_port_info(struct qlcnic_adapter *); 549int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
370void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *); 550void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
551void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
371irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); 552irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
372irqreturn_t qlcnic_83xx_intr(int, void *); 553irqreturn_t qlcnic_83xx_intr(int, void *);
373irqreturn_t qlcnic_83xx_tmp_intr(int, void *); 554irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
@@ -377,7 +558,7 @@ void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
377 struct qlcnic_host_sds_ring *); 558 struct qlcnic_host_sds_ring *);
378void qlcnic_83xx_check_vf(struct qlcnic_adapter *, 559void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
379 const struct pci_device_id *); 560 const struct pci_device_id *);
380void qlcnic_83xx_process_aen(struct qlcnic_adapter *); 561void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
381int qlcnic_83xx_get_port_config(struct qlcnic_adapter *); 562int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
382int qlcnic_83xx_set_port_config(struct qlcnic_adapter *); 563int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
383int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8); 564int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
@@ -401,7 +582,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
401int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); 582int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
402int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, 583int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
403 u32, u8 *, int); 584 u32, u8 *, int);
404int qlcnic_83xx_init(struct qlcnic_adapter *); 585int qlcnic_83xx_init(struct qlcnic_adapter *, int);
405int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); 586int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
406int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); 587int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
407void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); 588void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
@@ -434,5 +615,12 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
434int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); 615int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
435int qlcnic_83xx_loopback_test(struct net_device *, u8); 616int qlcnic_83xx_loopback_test(struct net_device *, u8);
436int qlcnic_83xx_interrupt_test(struct net_device *); 617int qlcnic_83xx_interrupt_test(struct net_device *);
618int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
437int qlcnic_83xx_flash_test(struct qlcnic_adapter *); 619int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
620int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
621int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
622u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
623u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
624void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
625void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
438#endif 626#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5c033f268ca5..ab1d8d99cbd5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qlcnic for copyright and licensing details. 5 * See LICENSE.qlcnic for copyright and licensing details.
6 */ 6 */
7 7
8#include "qlcnic_sriov.h"
8#include "qlcnic.h" 9#include "qlcnic.h"
9#include "qlcnic_hw.h" 10#include "qlcnic_hw.h"
10 11
@@ -24,13 +25,24 @@
24#define QLC_83XX_OPCODE_TMPL_END 0x0080 25#define QLC_83XX_OPCODE_TMPL_END 0x0080
25#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 26#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
26 27
28/* EPORT control registers */
29#define QLC_83XX_RESET_CONTROL 0x28084E50
30#define QLC_83XX_RESET_REG 0x28084E60
31#define QLC_83XX_RESET_PORT0 0x28084E70
32#define QLC_83XX_RESET_PORT1 0x28084E80
33#define QLC_83XX_RESET_PORT2 0x28084E90
34#define QLC_83XX_RESET_PORT3 0x28084EA0
35#define QLC_83XX_RESET_SRESHIM 0x28084EB0
36#define QLC_83XX_RESET_EPGSHIM 0x28084EC0
37#define QLC_83XX_RESET_ETHERPCS 0x28084ED0
38
27static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); 39static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
28static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
29static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); 40static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
30static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); 41static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
31 42
32/* Template header */ 43/* Template header */
33struct qlc_83xx_reset_hdr { 44struct qlc_83xx_reset_hdr {
45#if defined(__LITTLE_ENDIAN)
34 u16 version; 46 u16 version;
35 u16 signature; 47 u16 signature;
36 u16 size; 48 u16 size;
@@ -39,14 +51,31 @@ struct qlc_83xx_reset_hdr {
39 u16 checksum; 51 u16 checksum;
40 u16 init_offset; 52 u16 init_offset;
41 u16 start_offset; 53 u16 start_offset;
54#elif defined(__BIG_ENDIAN)
55 u16 signature;
56 u16 version;
57 u16 entries;
58 u16 size;
59 u16 checksum;
60 u16 hdr_size;
61 u16 start_offset;
62 u16 init_offset;
63#endif
42} __packed; 64} __packed;
43 65
44/* Command entry header. */ 66/* Command entry header. */
45struct qlc_83xx_entry_hdr { 67struct qlc_83xx_entry_hdr {
46 u16 cmd; 68#if defined(__LITTLE_ENDIAN)
47 u16 size; 69 u16 cmd;
48 u16 count; 70 u16 size;
49 u16 delay; 71 u16 count;
72 u16 delay;
73#elif defined(__BIG_ENDIAN)
74 u16 size;
75 u16 cmd;
76 u16 delay;
77 u16 count;
78#endif
50} __packed; 79} __packed;
51 80
52/* Generic poll command */ 81/* Generic poll command */
@@ -60,10 +89,17 @@ struct qlc_83xx_rmw {
60 u32 mask; 89 u32 mask;
61 u32 xor_value; 90 u32 xor_value;
62 u32 or_value; 91 u32 or_value;
92#if defined(__LITTLE_ENDIAN)
63 u8 shl; 93 u8 shl;
64 u8 shr; 94 u8 shr;
65 u8 index_a; 95 u8 index_a;
66 u8 rsvd; 96 u8 rsvd;
97#elif defined(__BIG_ENDIAN)
98 u8 rsvd;
99 u8 index_a;
100 u8 shr;
101 u8 shl;
102#endif
67} __packed; 103} __packed;
68 104
69/* Generic command with 2 DWORD */ 105/* Generic command with 2 DWORD */
@@ -90,18 +126,6 @@ static const char *const qlc_83xx_idc_states[] = {
90 "Quiesce" 126 "Quiesce"
91}; 127};
92 128
93/* Device States */
94enum qlcnic_83xx_states {
95 QLC_83XX_IDC_DEV_UNKNOWN,
96 QLC_83XX_IDC_DEV_COLD,
97 QLC_83XX_IDC_DEV_INIT,
98 QLC_83XX_IDC_DEV_READY,
99 QLC_83XX_IDC_DEV_NEED_RESET,
100 QLC_83XX_IDC_DEV_NEED_QUISCENT,
101 QLC_83XX_IDC_DEV_FAILED,
102 QLC_83XX_IDC_DEV_QUISCENT
103};
104
105static int 129static int
106qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter) 130qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
107{ 131{
@@ -137,7 +161,8 @@ static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
137 return -EBUSY; 161 return -EBUSY;
138 } 162 }
139 163
140 val = adapter->portnum & 0xf; 164 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
165 val |= (adapter->portnum & 0xf);
141 val |= mode << 7; 166 val |= mode << 7;
142 if (mode) 167 if (mode)
143 seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; 168 seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
@@ -376,14 +401,18 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
376 struct net_device *netdev = adapter->netdev; 401 struct net_device *netdev = adapter->netdev;
377 402
378 netif_device_detach(netdev); 403 netif_device_detach(netdev);
404
379 /* Disable mailbox interrupt */ 405 /* Disable mailbox interrupt */
380 QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0); 406 qlcnic_83xx_disable_mbx_intr(adapter);
381 qlcnic_down(adapter, netdev); 407 qlcnic_down(adapter, netdev);
382 for (i = 0; i < adapter->ahw->num_msix; i++) { 408 for (i = 0; i < adapter->ahw->num_msix; i++) {
383 adapter->ahw->intr_tbl[i].id = i; 409 adapter->ahw->intr_tbl[i].id = i;
384 adapter->ahw->intr_tbl[i].enabled = 0; 410 adapter->ahw->intr_tbl[i].enabled = 0;
385 adapter->ahw->intr_tbl[i].src = 0; 411 adapter->ahw->intr_tbl[i].src = 0;
386 } 412 }
413
414 if (qlcnic_sriov_pf_check(adapter))
415 qlcnic_sriov_pf_reset(adapter);
387} 416}
388 417
389/** 418/**
@@ -585,9 +614,15 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
585 614
586static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) 615static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
587{ 616{
617 int err;
618
588 /* register for NIC IDC AEN Events */ 619 /* register for NIC IDC AEN Events */
589 qlcnic_83xx_register_nic_idc_func(adapter, 1); 620 qlcnic_83xx_register_nic_idc_func(adapter, 1);
590 621
622 err = qlcnic_sriov_pf_reinit(adapter);
623 if (err)
624 return err;
625
591 qlcnic_83xx_enable_mbx_intrpt(adapter); 626 qlcnic_83xx_enable_mbx_intrpt(adapter);
592 627
593 if (qlcnic_83xx_configure_opmode(adapter)) { 628 if (qlcnic_83xx_configure_opmode(adapter)) {
@@ -1350,6 +1385,19 @@ static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
1350 qlcnic_83xx_unlock_driver(adapter); 1385 qlcnic_83xx_unlock_driver(adapter);
1351} 1386}
1352 1387
1388static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
1389{
1390 QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
1391 QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
1392 QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
1393 QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
1394 QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
1395 QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
1396 QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
1397 QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
1398 QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
1399}
1400
1353static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) 1401static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
1354{ 1402{
1355 u32 heartbeat, peg_status; 1403 u32 heartbeat, peg_status;
@@ -1371,6 +1419,7 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
1371 1419
1372 if (ret) { 1420 if (ret) {
1373 dev_err(&p_dev->pdev->dev, "firmware hang detected\n"); 1421 dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
1422 qlcnic_83xx_take_eport_out_of_reset(p_dev);
1374 qlcnic_83xx_disable_pause_frames(p_dev); 1423 qlcnic_83xx_disable_pause_frames(p_dev);
1375 peg_status = QLC_SHARED_REG_RD32(p_dev, 1424 peg_status = QLC_SHARED_REG_RD32(p_dev,
1376 QLCNIC_PEG_HALT_STATUS1); 1425 QLCNIC_PEG_HALT_STATUS1);
@@ -1893,6 +1942,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
1893 qlcnic_get_func_no(adapter); 1942 qlcnic_get_func_no(adapter);
1894 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); 1943 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
1895 1944
1945 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
1946 op_mode = QLC_83XX_DEFAULT_OPMODE;
1947
1896 if (op_mode == QLC_83XX_DEFAULT_OPMODE) { 1948 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
1897 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 1949 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
1898 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 1950 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
@@ -1922,6 +1974,16 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
1922 ahw->max_mac_filters = nic_info.max_mac_filters; 1974 ahw->max_mac_filters = nic_info.max_mac_filters;
1923 ahw->max_mtu = nic_info.max_mtu; 1975 ahw->max_mtu = nic_info.max_mtu;
1924 1976
1977 /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
1978 * set in case device is SRIOV capable. VNIC and SRIOV are mutually
1979 * exclusive. So in case of sriov capable device load driver in
1980 * default mode
1981 */
1982 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
1983 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
1984 return ahw->nic_mode;
1985 }
1986
1925 if (ahw->capabilities & BIT_23) 1987 if (ahw->capabilities & BIT_23)
1926 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; 1988 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
1927 else 1989 else
@@ -1930,7 +1992,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
1930 return ahw->nic_mode; 1992 return ahw->nic_mode;
1931} 1993}
1932 1994
1933static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 1995int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
1934{ 1996{
1935 int ret; 1997 int ret;
1936 1998
@@ -2008,10 +2070,13 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
2008 } 2070 }
2009} 2071}
2010 2072
2011int qlcnic_83xx_init(struct qlcnic_adapter *adapter) 2073int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2012{ 2074{
2013 struct qlcnic_hardware_context *ahw = adapter->ahw; 2075 struct qlcnic_hardware_context *ahw = adapter->ahw;
2014 2076
2077 if (qlcnic_sriov_vf_check(adapter))
2078 return qlcnic_sriov_vf_init(adapter, pci_using_dac);
2079
2015 if (qlcnic_83xx_check_hw_status(adapter)) 2080 if (qlcnic_83xx_check_hw_status(adapter))
2016 return -EIO; 2081 return -EIO;
2017 2082
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a69097c6b84d..43562c256379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -382,8 +382,7 @@ out_free_rq:
382 return err; 382 return err;
383} 383}
384 384
385static void 385void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
386qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
387{ 386{
388 int err; 387 int err;
389 struct qlcnic_cmd_args cmd; 388 struct qlcnic_cmd_args cmd;
@@ -422,22 +421,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
422 421
423 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 422 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
424 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 423 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
425 &rq_phys_addr, GFP_KERNEL); 424 &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
426 if (!rq_addr) 425 if (!rq_addr)
427 return -ENOMEM; 426 return -ENOMEM;
428 427
429 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 428 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
430 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 429 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
431 &rsp_phys_addr, GFP_KERNEL); 430 &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
432 if (!rsp_addr) { 431 if (!rsp_addr) {
433 err = -ENOMEM; 432 err = -ENOMEM;
434 goto out_free_rq; 433 goto out_free_rq;
435 } 434 }
436 435
437 memset(rq_addr, 0, rq_size);
438 prq = rq_addr; 436 prq = rq_addr;
439 437
440 memset(rsp_addr, 0, rsp_size);
441 prsp = rsp_addr; 438 prsp = rsp_addr;
442 439
443 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); 440 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -486,13 +483,13 @@ out_free_rq:
486 return err; 483 return err;
487} 484}
488 485
489static void 486void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
490qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter, 487 struct qlcnic_host_tx_ring *tx_ring)
491 struct qlcnic_host_tx_ring *tx_ring)
492{ 488{
493 struct qlcnic_cmd_args cmd; 489 struct qlcnic_cmd_args cmd;
494 490
495 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); 491 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
492
496 cmd.req.arg[1] = tx_ring->ctx_id; 493 cmd.req.arg[1] = tx_ring->ctx_id;
497 if (qlcnic_issue_cmd(adapter, &cmd)) 494 if (qlcnic_issue_cmd(adapter, &cmd))
498 dev_err(&adapter->pdev->dev, 495 dev_err(&adapter->pdev->dev,
@@ -532,20 +529,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
532 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), 529 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
533 &tx_ring->hw_cons_phys_addr, 530 &tx_ring->hw_cons_phys_addr,
534 GFP_KERNEL); 531 GFP_KERNEL);
535 532 if (ptr == NULL)
536 if (ptr == NULL) {
537 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
538 return -ENOMEM; 533 return -ENOMEM;
539 } 534
540 tx_ring->hw_consumer = ptr; 535 tx_ring->hw_consumer = ptr;
541 /* cmd desc ring */ 536 /* cmd desc ring */
542 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), 537 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
543 &tx_ring->phys_addr, 538 &tx_ring->phys_addr,
544 GFP_KERNEL); 539 GFP_KERNEL);
545
546 if (addr == NULL) { 540 if (addr == NULL) {
547 dev_err(&pdev->dev,
548 "failed to allocate tx desc ring\n");
549 err = -ENOMEM; 541 err = -ENOMEM;
550 goto err_out_free; 542 goto err_out_free;
551 } 543 }
@@ -556,11 +548,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
556 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 548 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
557 rds_ring = &recv_ctx->rds_rings[ring]; 549 rds_ring = &recv_ctx->rds_rings[ring];
558 addr = dma_alloc_coherent(&adapter->pdev->dev, 550 addr = dma_alloc_coherent(&adapter->pdev->dev,
559 RCV_DESC_RINGSIZE(rds_ring), 551 RCV_DESC_RINGSIZE(rds_ring),
560 &rds_ring->phys_addr, GFP_KERNEL); 552 &rds_ring->phys_addr, GFP_KERNEL);
561 if (addr == NULL) { 553 if (addr == NULL) {
562 dev_err(&pdev->dev,
563 "failed to allocate rds ring [%d]\n", ring);
564 err = -ENOMEM; 554 err = -ENOMEM;
565 goto err_out_free; 555 goto err_out_free;
566 } 556 }
@@ -572,11 +562,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
572 sds_ring = &recv_ctx->sds_rings[ring]; 562 sds_ring = &recv_ctx->sds_rings[ring];
573 563
574 addr = dma_alloc_coherent(&adapter->pdev->dev, 564 addr = dma_alloc_coherent(&adapter->pdev->dev,
575 STATUS_DESC_RINGSIZE(sds_ring), 565 STATUS_DESC_RINGSIZE(sds_ring),
576 &sds_ring->phys_addr, GFP_KERNEL); 566 &sds_ring->phys_addr, GFP_KERNEL);
577 if (addr == NULL) { 567 if (addr == NULL) {
578 dev_err(&pdev->dev,
579 "failed to allocate sds ring [%d]\n", ring);
580 err = -ENOMEM; 568 err = -ENOMEM;
581 goto err_out_free; 569 goto err_out_free;
582 } 570 }
@@ -616,13 +604,12 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
616 &dev->tx_ring[ring], 604 &dev->tx_ring[ring],
617 ring); 605 ring);
618 if (err) { 606 if (err) {
619 qlcnic_fw_cmd_destroy_rx_ctx(dev); 607 qlcnic_fw_cmd_del_rx_ctx(dev);
620 if (ring == 0) 608 if (ring == 0)
621 goto err_out; 609 goto err_out;
622 610
623 for (i = 0; i < ring; i++) 611 for (i = 0; i < ring; i++)
624 qlcnic_fw_cmd_destroy_tx_ctx(dev, 612 qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
625 &dev->tx_ring[i]);
626 613
627 goto err_out; 614 goto err_out;
628 } 615 }
@@ -644,10 +631,10 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
644 int ring; 631 int ring;
645 632
646 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { 633 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
647 qlcnic_fw_cmd_destroy_rx_ctx(adapter); 634 qlcnic_fw_cmd_del_rx_ctx(adapter);
648 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) 635 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
649 qlcnic_fw_cmd_destroy_tx_ctx(adapter, 636 qlcnic_fw_cmd_del_tx_ctx(adapter,
650 &adapter->tx_ring[ring]); 637 &adapter->tx_ring[ring]);
651 638
652 if (qlcnic_83xx_check(adapter) && 639 if (qlcnic_83xx_check(adapter) &&
653 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 640 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
@@ -655,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
655 qlcnic_83xx_config_intrpt(adapter, 0); 642 qlcnic_83xx_config_intrpt(adapter, 0);
656 } 643 }
657 /* Allow dma queues to drain after context reset */ 644 /* Allow dma queues to drain after context reset */
658 mdelay(20); 645 msleep(20);
659 } 646 }
660} 647}
661 648
@@ -753,10 +740,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
753 size_t nic_size = sizeof(struct qlcnic_info_le); 740 size_t nic_size = sizeof(struct qlcnic_info_le);
754 741
755 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 742 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
756 &nic_dma_t, GFP_KERNEL); 743 &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
757 if (!nic_info_addr) 744 if (!nic_info_addr)
758 return -ENOMEM; 745 return -ENOMEM;
759 memset(nic_info_addr, 0, nic_size);
760 746
761 nic_info = nic_info_addr; 747 nic_info = nic_info_addr;
762 748
@@ -804,11 +790,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
804 return err; 790 return err;
805 791
806 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 792 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
807 &nic_dma_t, GFP_KERNEL); 793 &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
808 if (!nic_info_addr) 794 if (!nic_info_addr)
809 return -ENOMEM; 795 return -ENOMEM;
810 796
811 memset(nic_info_addr, 0, nic_size);
812 nic_info = nic_info_addr; 797 nic_info = nic_info_addr;
813 798
814 nic_info->pci_func = cpu_to_le16(nic->pci_func); 799 nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -854,10 +839,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
854 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 839 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
855 840
856 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 841 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
857 &pci_info_dma_t, GFP_KERNEL); 842 &pci_info_dma_t,
843 GFP_KERNEL | __GFP_ZERO);
858 if (!pci_info_addr) 844 if (!pci_info_addr)
859 return -ENOMEM; 845 return -ENOMEM;
860 memset(pci_info_addr, 0, pci_size);
861 846
862 npar = pci_info_addr; 847 npar = pci_info_addr;
863 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); 848 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
@@ -949,12 +934,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
949 } 934 }
950 935
951 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 936 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
952 &stats_dma_t, GFP_KERNEL); 937 &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
953 if (!stats_addr) { 938 if (!stats_addr)
954 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
955 return -ENOMEM; 939 return -ENOMEM;
956 }
957 memset(stats_addr, 0, stats_size);
958 940
959 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; 941 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
960 arg1 |= rx_tx << 15 | stats_size << 16; 942 arg1 |= rx_tx << 15 | stats_size << 16;
@@ -1003,13 +985,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1003 return -ENOMEM; 985 return -ENOMEM;
1004 986
1005 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 987 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1006 &stats_dma_t, GFP_KERNEL); 988 &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
1007 if (!stats_addr) { 989 if (!stats_addr)
1008 dev_err(&adapter->pdev->dev,
1009 "%s: Unable to allocate memory.\n", __func__);
1010 return -ENOMEM; 990 return -ENOMEM;
1011 } 991
1012 memset(stats_addr, 0, stats_size);
1013 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); 992 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
1014 cmd.req.arg[1] = stats_size << 16; 993 cmd.req.arg[1] = stats_size << 16;
1015 cmd.req.arg[2] = MSD(stats_dma_t); 994 cmd.req.arg[2] = MSD(stats_dma_t);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5641f8ec49ab..08efb4635007 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -115,6 +115,13 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
115 "mac_rx_dropped", 115 "mac_rx_dropped",
116 "mac_crc_error", 116 "mac_crc_error",
117 "mac_align_error", 117 "mac_align_error",
118 "eswitch_frames",
119 "eswitch_bytes",
120 "eswitch_multicast_frames",
121 "eswitch_broadcast_frames",
122 "eswitch_unicast_frames",
123 "eswitch_error_free_frames",
124 "eswitch_error_free_bytes",
118}; 125};
119 126
120#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) 127#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
@@ -149,7 +156,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
149 156
150static inline int qlcnic_82xx_statistics(void) 157static inline int qlcnic_82xx_statistics(void)
151{ 158{
152 return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); 159 return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
160 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
153} 161}
154 162
155static inline int qlcnic_83xx_statistics(void) 163static inline int qlcnic_83xx_statistics(void)
@@ -634,7 +642,7 @@ static int qlcnic_set_channels(struct net_device *dev,
634 channel->tx_count != channel->max_tx) 642 channel->tx_count != channel->max_tx)
635 return -EINVAL; 643 return -EINVAL;
636 644
637 err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count); 645 err = qlcnic_validate_max_rss(adapter, channel->rx_count);
638 if (err) 646 if (err)
639 return err; 647 return err;
640 648
@@ -858,9 +866,11 @@ clear_diag_irq:
858 return ret; 866 return ret;
859} 867}
860 868
861#define QLCNIC_ILB_PKT_SIZE 64 869#define QLCNIC_ILB_PKT_SIZE 64
862#define QLCNIC_NUM_ILB_PKT 16 870#define QLCNIC_NUM_ILB_PKT 16
863#define QLCNIC_ILB_MAX_RCV_LOOP 10 871#define QLCNIC_ILB_MAX_RCV_LOOP 10
872#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
873#define QLCNIC_LB_PKT_POLL_COUNT 20
864 874
865static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) 875static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
866{ 876{
@@ -897,9 +907,9 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
897 loop = 0; 907 loop = 0;
898 908
899 do { 909 do {
900 msleep(1); 910 msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
901 qlcnic_process_rcv_ring_diag(sds_ring); 911 qlcnic_process_rcv_ring_diag(sds_ring);
902 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) 912 if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
903 break; 913 break;
904 } while (!adapter->ahw->diag_cnt); 914 } while (!adapter->ahw->diag_cnt);
905 915
@@ -1070,8 +1080,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1070 } 1080 }
1071} 1081}
1072 1082
1073static void 1083static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
1074qlcnic_fill_stats(u64 *data, void *stats, int type)
1075{ 1084{
1076 if (type == QLCNIC_MAC_STATS) { 1085 if (type == QLCNIC_MAC_STATS) {
1077 struct qlcnic_mac_statistics *mac_stats = 1086 struct qlcnic_mac_statistics *mac_stats =
@@ -1120,6 +1129,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type)
1120 *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); 1129 *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
1121 *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); 1130 *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
1122 } 1131 }
1132 return data;
1123} 1133}
1124 1134
1125static void qlcnic_get_ethtool_stats(struct net_device *dev, 1135static void qlcnic_get_ethtool_stats(struct net_device *dev,
@@ -1147,7 +1157,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1147 /* Retrieve MAC statistics from firmware */ 1157 /* Retrieve MAC statistics from firmware */
1148 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); 1158 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
1149 qlcnic_get_mac_stats(adapter, &mac_stats); 1159 qlcnic_get_mac_stats(adapter, &mac_stats);
1150 qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); 1160 data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
1151 } 1161 }
1152 1162
1153 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1163 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -1159,7 +1169,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1159 if (ret) 1169 if (ret)
1160 return; 1170 return;
1161 1171
1162 qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); 1172 data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
1163 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, 1173 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
1164 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 1174 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
1165 if (ret) 1175 if (ret)
@@ -1176,7 +1186,8 @@ static int qlcnic_set_led(struct net_device *dev,
1176 int err = -EIO, active = 1; 1186 int err = -EIO, active = 1;
1177 1187
1178 if (qlcnic_83xx_check(adapter)) 1188 if (qlcnic_83xx_check(adapter))
1179 return -EOPNOTSUPP; 1189 return qlcnic_83xx_set_led(dev, state);
1190
1180 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { 1191 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1181 netdev_warn(dev, "LED test not supported for non " 1192 netdev_warn(dev, "LED test not supported for non "
1182 "privilege function\n"); 1193 "privilege function\n");
@@ -1292,6 +1303,9 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
1292 struct ethtool_coalesce *ethcoal) 1303 struct ethtool_coalesce *ethcoal)
1293{ 1304{
1294 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1305 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1306 struct qlcnic_nic_intr_coalesce *coal;
1307 u32 rx_coalesce_usecs, rx_max_frames;
1308 u32 tx_coalesce_usecs, tx_max_frames;
1295 1309
1296 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 1310 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
1297 return -EINVAL; 1311 return -EINVAL;
@@ -1302,8 +1316,8 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
1302 */ 1316 */
1303 if (ethcoal->rx_coalesce_usecs > 0xffff || 1317 if (ethcoal->rx_coalesce_usecs > 0xffff ||
1304 ethcoal->rx_max_coalesced_frames > 0xffff || 1318 ethcoal->rx_max_coalesced_frames > 0xffff ||
1305 ethcoal->tx_coalesce_usecs || 1319 ethcoal->tx_coalesce_usecs > 0xffff ||
1306 ethcoal->tx_max_coalesced_frames || 1320 ethcoal->tx_max_coalesced_frames > 0xffff ||
1307 ethcoal->rx_coalesce_usecs_irq || 1321 ethcoal->rx_coalesce_usecs_irq ||
1308 ethcoal->rx_max_coalesced_frames_irq || 1322 ethcoal->rx_max_coalesced_frames_irq ||
1309 ethcoal->tx_coalesce_usecs_irq || 1323 ethcoal->tx_coalesce_usecs_irq ||
@@ -1323,18 +1337,55 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
1323 ethcoal->tx_max_coalesced_frames_high) 1337 ethcoal->tx_max_coalesced_frames_high)
1324 return -EINVAL; 1338 return -EINVAL;
1325 1339
1326 if (!ethcoal->rx_coalesce_usecs || 1340 coal = &adapter->ahw->coal;
1327 !ethcoal->rx_max_coalesced_frames) { 1341
1328 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; 1342 if (qlcnic_83xx_check(adapter)) {
1329 adapter->ahw->coal.rx_time_us = 1343 if (!ethcoal->tx_coalesce_usecs ||
1330 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; 1344 !ethcoal->tx_max_coalesced_frames ||
1331 adapter->ahw->coal.rx_packets = 1345 !ethcoal->rx_coalesce_usecs ||
1332 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; 1346 !ethcoal->rx_max_coalesced_frames) {
1347 coal->flag = QLCNIC_INTR_DEFAULT;
1348 coal->type = QLCNIC_INTR_COAL_TYPE_RX;
1349 coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
1350 coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
1351 coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
1352 coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
1353 } else {
1354 tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
1355 tx_max_frames = ethcoal->tx_max_coalesced_frames;
1356 rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
1357 rx_max_frames = ethcoal->rx_max_coalesced_frames;
1358 coal->flag = 0;
1359
1360 if ((coal->rx_time_us == rx_coalesce_usecs) &&
1361 (coal->rx_packets == rx_max_frames)) {
1362 coal->type = QLCNIC_INTR_COAL_TYPE_TX;
1363 coal->tx_time_us = tx_coalesce_usecs;
1364 coal->tx_packets = tx_max_frames;
1365 } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
1366 (coal->tx_packets == tx_max_frames)) {
1367 coal->type = QLCNIC_INTR_COAL_TYPE_RX;
1368 coal->rx_time_us = rx_coalesce_usecs;
1369 coal->rx_packets = rx_max_frames;
1370 } else {
1371 coal->type = QLCNIC_INTR_COAL_TYPE_RX;
1372 coal->rx_time_us = rx_coalesce_usecs;
1373 coal->rx_packets = rx_max_frames;
1374 coal->tx_time_us = tx_coalesce_usecs;
1375 coal->tx_packets = tx_max_frames;
1376 }
1377 }
1333 } else { 1378 } else {
1334 adapter->ahw->coal.flag = 0; 1379 if (!ethcoal->rx_coalesce_usecs ||
1335 adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs; 1380 !ethcoal->rx_max_coalesced_frames) {
1336 adapter->ahw->coal.rx_packets = 1381 coal->flag = QLCNIC_INTR_DEFAULT;
1337 ethcoal->rx_max_coalesced_frames; 1382 coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
1383 coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
1384 } else {
1385 coal->flag = 0;
1386 coal->rx_time_us = ethcoal->rx_coalesce_usecs;
1387 coal->rx_packets = ethcoal->rx_max_coalesced_frames;
1388 }
1338 } 1389 }
1339 1390
1340 qlcnic_config_intr_coalesce(adapter); 1391 qlcnic_config_intr_coalesce(adapter);
@@ -1352,6 +1403,8 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
1352 1403
1353 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; 1404 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
1354 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; 1405 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
1406 ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
1407 ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
1355 1408
1356 return 0; 1409 return 0;
1357} 1410}
@@ -1537,3 +1590,25 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1537 .get_dump_data = qlcnic_get_dump_data, 1590 .get_dump_data = qlcnic_get_dump_data,
1538 .set_dump = qlcnic_set_dump, 1591 .set_dump = qlcnic_set_dump,
1539}; 1592};
1593
1594const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1595 .get_settings = qlcnic_get_settings,
1596 .get_drvinfo = qlcnic_get_drvinfo,
1597 .get_regs_len = qlcnic_get_regs_len,
1598 .get_regs = qlcnic_get_regs,
1599 .get_link = ethtool_op_get_link,
1600 .get_eeprom_len = qlcnic_get_eeprom_len,
1601 .get_eeprom = qlcnic_get_eeprom,
1602 .get_ringparam = qlcnic_get_ringparam,
1603 .set_ringparam = qlcnic_set_ringparam,
1604 .get_channels = qlcnic_get_channels,
1605 .get_pauseparam = qlcnic_get_pauseparam,
1606 .get_wol = qlcnic_get_wol,
1607 .get_strings = qlcnic_get_strings,
1608 .get_ethtool_stats = qlcnic_get_ethtool_stats,
1609 .get_sset_count = qlcnic_get_sset_count,
1610 .get_coalesce = qlcnic_get_intr_coalesce,
1611 .set_coalesce = qlcnic_set_intr_coalesce,
1612 .set_msglevel = qlcnic_set_msglevel,
1613 .get_msglevel = qlcnic_get_msglevel,
1614};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 44197ca1456c..c0f0c0d0a790 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -669,7 +669,7 @@ enum {
669#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 669#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
670#define QLCNIC_CMDPEG_CHECK_DELAY 500 670#define QLCNIC_CMDPEG_CHECK_DELAY 500
671#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 671#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
672#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45 672#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
673 673
674#define QLCNIC_MAX_MC_COUNT 38 674#define QLCNIC_MAX_MC_COUNT 38
675#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 675#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
@@ -714,7 +714,9 @@ enum {
714 QLCNIC_MGMT_FUNC = 0, 714 QLCNIC_MGMT_FUNC = 0,
715 QLCNIC_PRIV_FUNC = 1, 715 QLCNIC_PRIV_FUNC = 1,
716 QLCNIC_NON_PRIV_FUNC = 2, 716 QLCNIC_NON_PRIV_FUNC = 2,
717 QLCNIC_UNKNOWN_FUNC_MODE = 3 717 QLCNIC_SRIOV_PF_FUNC = 3,
718 QLCNIC_SRIOV_VF_FUNC = 4,
719 QLCNIC_UNKNOWN_FUNC_MODE = 5
718}; 720};
719 721
720enum { 722enum {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f89cc7a3fe6c..6a6512ba9f38 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -423,7 +423,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
423} 423}
424 424
425int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 425int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
426 __le16 vlan_id, u8 op) 426 u16 vlan_id, u8 op)
427{ 427{
428 struct qlcnic_nic_req req; 428 struct qlcnic_nic_req req;
429 struct qlcnic_mac_req *mac_req; 429 struct qlcnic_mac_req *mac_req;
@@ -441,7 +441,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
441 memcpy(mac_req->mac_addr, addr, 6); 441 memcpy(mac_req->mac_addr, addr, 6);
442 442
443 vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; 443 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
444 vlan_req->vlan_id = vlan_id; 444 vlan_req->vlan_id = cpu_to_le16(vlan_id);
445 445
446 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 446 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
447} 447}
@@ -468,7 +468,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
468 return err; 468 return err;
469} 469}
470 470
471int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) 471int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
472{ 472{
473 struct list_head *head; 473 struct list_head *head;
474 struct qlcnic_mac_list_s *cur; 474 struct qlcnic_mac_list_s *cur;
@@ -487,7 +487,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
487 memcpy(cur->mac_addr, addr, ETH_ALEN); 487 memcpy(cur->mac_addr, addr, ETH_ALEN);
488 488
489 if (qlcnic_sre_macaddr_change(adapter, 489 if (qlcnic_sre_macaddr_change(adapter,
490 cur->mac_addr, 0, QLCNIC_MAC_ADD)) { 490 cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
491 kfree(cur); 491 kfree(cur);
492 return -EIO; 492 return -EIO;
493 } 493 }
@@ -496,7 +496,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
496 return 0; 496 return 0;
497} 497}
498 498
499void qlcnic_set_multi(struct net_device *netdev) 499void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
500{ 500{
501 struct qlcnic_adapter *adapter = netdev_priv(netdev); 501 struct qlcnic_adapter *adapter = netdev_priv(netdev);
502 struct netdev_hw_addr *ha; 502 struct netdev_hw_addr *ha;
@@ -508,8 +508,9 @@ void qlcnic_set_multi(struct net_device *netdev)
508 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 508 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
509 return; 509 return;
510 510
511 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 511 if (!qlcnic_sriov_vf_check(adapter))
512 qlcnic_nic_add_mac(adapter, bcast_addr); 512 qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
513 qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
513 514
514 if (netdev->flags & IFF_PROMISC) { 515 if (netdev->flags & IFF_PROMISC) {
515 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) 516 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
@@ -523,23 +524,55 @@ void qlcnic_set_multi(struct net_device *netdev)
523 goto send_fw_cmd; 524 goto send_fw_cmd;
524 } 525 }
525 526
526 if (!netdev_mc_empty(netdev)) { 527 if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) {
527 netdev_for_each_mc_addr(ha, netdev) { 528 netdev_for_each_mc_addr(ha, netdev) {
528 qlcnic_nic_add_mac(adapter, ha->addr); 529 qlcnic_nic_add_mac(adapter, ha->addr, vlan);
529 } 530 }
530 } 531 }
531 532
533 if (qlcnic_sriov_vf_check(adapter))
534 qlcnic_vf_add_mc_list(netdev, vlan);
535
532send_fw_cmd: 536send_fw_cmd:
533 if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { 537 if (!qlcnic_sriov_vf_check(adapter)) {
534 qlcnic_alloc_lb_filters_mem(adapter); 538 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
535 adapter->drv_mac_learn = true; 539 !adapter->fdb_mac_learn) {
536 } else { 540 qlcnic_alloc_lb_filters_mem(adapter);
537 adapter->drv_mac_learn = false; 541 adapter->drv_mac_learn = true;
542 } else {
543 adapter->drv_mac_learn = false;
544 }
538 } 545 }
539 546
540 qlcnic_nic_set_promisc(adapter, mode); 547 qlcnic_nic_set_promisc(adapter, mode);
541} 548}
542 549
550void qlcnic_set_multi(struct net_device *netdev)
551{
552 struct qlcnic_adapter *adapter = netdev_priv(netdev);
553 struct netdev_hw_addr *ha;
554 struct qlcnic_mac_list_s *cur;
555
556 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
557 return;
558 if (qlcnic_sriov_vf_check(adapter)) {
559 if (!netdev_mc_empty(netdev)) {
560 netdev_for_each_mc_addr(ha, netdev) {
561 cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
562 GFP_ATOMIC);
563 if (cur == NULL)
564 break;
565 memcpy(cur->mac_addr,
566 ha->addr, ETH_ALEN);
567 list_add_tail(&cur->list, &adapter->vf_mc_list);
568 }
569 }
570 qlcnic_sriov_vf_schedule_multi(adapter->netdev);
571 return;
572 }
573 __qlcnic_set_multi(netdev, 0);
574}
575
543int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 576int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
544{ 577{
545 struct qlcnic_nic_req req; 578 struct qlcnic_nic_req req;
@@ -559,7 +592,7 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
559 (struct cmd_desc_type0 *)&req, 1); 592 (struct cmd_desc_type0 *)&req, 1);
560} 593}
561 594
562void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) 595void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
563{ 596{
564 struct qlcnic_mac_list_s *cur; 597 struct qlcnic_mac_list_s *cur;
565 struct list_head *head = &adapter->mac_list; 598 struct list_head *head = &adapter->mac_list;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 5b8749eda11f..95b1b5732838 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -83,6 +83,8 @@ enum qlcnic_regs {
83#define QLCNIC_CMD_CONFIG_PORT 0x2e 83#define QLCNIC_CMD_CONFIG_PORT 0x2e
84#define QLCNIC_CMD_TEMP_SIZE 0x2f 84#define QLCNIC_CMD_TEMP_SIZE 0x2f
85#define QLCNIC_CMD_GET_TEMP_HDR 0x30 85#define QLCNIC_CMD_GET_TEMP_HDR 0x30
86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
87#define QLCNIC_CMD_CONFIG_VPORT 0x32
86#define QLCNIC_CMD_GET_MAC_STATS 0x37 88#define QLCNIC_CMD_GET_MAC_STATS 0x37
87#define QLCNIC_CMD_SET_DRV_VER 0x38 89#define QLCNIC_CMD_SET_DRV_VER 0x38
88#define QLCNIC_CMD_CONFIGURE_RSS 0x41 90#define QLCNIC_CMD_CONFIGURE_RSS 0x41
@@ -114,6 +116,7 @@ enum qlcnic_regs {
114#define QLCNIC_SET_FAC_DEF_MAC 5 116#define QLCNIC_SET_FAC_DEF_MAC 5
115 117
116#define QLCNIC_MBX_LINK_EVENT 0x8001 118#define QLCNIC_MBX_LINK_EVENT 0x8001
119#define QLCNIC_MBX_BC_EVENT 0x8002
117#define QLCNIC_MBX_COMP_EVENT 0x8100 120#define QLCNIC_MBX_COMP_EVENT 0x8100
118#define QLCNIC_MBX_REQUEST_EVENT 0x8101 121#define QLCNIC_MBX_REQUEST_EVENT 0x8101
119#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 122#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
@@ -156,7 +159,7 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
156int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, 159int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
157 struct net_device *netdev); 160 struct net_device *netdev);
158void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, 161void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
159 u64 *uaddr, __le16 vlan_id); 162 u64 *uaddr, u16 vlan_id);
160void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter); 163void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
161int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int); 164int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
162void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, 165void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
@@ -175,7 +178,10 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
175int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); 178int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
176int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, 179int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
177 struct qlcnic_host_tx_ring *tx_ring, int); 180 struct qlcnic_host_tx_ring *tx_ring, int);
178int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); 181void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
182void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
183 struct qlcnic_host_tx_ring *);
184int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
179int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); 185int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
180int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 186int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
181int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 187int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 5fa847fe388a..d3f8797efcc3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -9,6 +9,7 @@
9#include <linux/if_vlan.h> 9#include <linux/if_vlan.h>
10#include <net/ip.h> 10#include <net/ip.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/checksum.h>
12 13
13#include "qlcnic.h" 14#include "qlcnic.h"
14 15
@@ -146,7 +147,10 @@ static inline u8 qlcnic_mac_hash(u64 mac)
146static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, 147static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
147 u16 handle, u8 ring_id) 148 u16 handle, u8 ring_id)
148{ 149{
149 if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) 150 unsigned short device = adapter->pdev->device;
151
152 if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
153 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
150 return handle | (ring_id << 15); 154 return handle | (ring_id << 15);
151 else 155 else
152 return handle; 156 return handle;
@@ -158,7 +162,7 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
158} 162}
159 163
160void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, 164void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
161 int loopback_pkt, __le16 vlan_id) 165 int loopback_pkt, u16 vlan_id)
162{ 166{
163 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 167 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
164 struct qlcnic_filter *fil, *tmp_fil; 168 struct qlcnic_filter *fil, *tmp_fil;
@@ -236,7 +240,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
236} 240}
237 241
238void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, 242void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
239 __le16 vlan_id) 243 u16 vlan_id)
240{ 244{
241 struct cmd_desc_type0 *hwdesc; 245 struct cmd_desc_type0 *hwdesc;
242 struct qlcnic_nic_req *req; 246 struct qlcnic_nic_req *req;
@@ -261,7 +265,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
261 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); 265 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
262 266
263 vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; 267 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
264 vlan_req->vlan_id = vlan_id; 268 vlan_req->vlan_id = cpu_to_le16(vlan_id);
265 269
266 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); 270 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
267 smp_mb(); 271 smp_mb();
@@ -277,7 +281,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
277 struct net_device *netdev = adapter->netdev; 281 struct net_device *netdev = adapter->netdev;
278 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 282 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
279 u64 src_addr = 0; 283 u64 src_addr = 0;
280 __le16 vlan_id = 0; 284 u16 vlan_id = 0;
281 u8 hindex; 285 u8 hindex;
282 286
283 if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) 287 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
@@ -340,14 +344,14 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
340 flags = FLAGS_VLAN_OOB; 344 flags = FLAGS_VLAN_OOB;
341 vlan_tci = vlan_tx_tag_get(skb); 345 vlan_tci = vlan_tx_tag_get(skb);
342 } 346 }
343 if (unlikely(adapter->pvid)) { 347 if (unlikely(adapter->tx_pvid)) {
344 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) 348 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
345 return -EIO; 349 return -EIO;
346 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) 350 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
347 goto set_flags; 351 goto set_flags;
348 352
349 flags = FLAGS_VLAN_OOB; 353 flags = FLAGS_VLAN_OOB;
350 vlan_tci = adapter->pvid; 354 vlan_tci = adapter->tx_pvid;
351 } 355 }
352set_flags: 356set_flags:
353 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); 357 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
@@ -975,10 +979,10 @@ static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
975 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); 979 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
976 skb_pull(skb, VLAN_HLEN); 980 skb_pull(skb, VLAN_HLEN);
977 } 981 }
978 if (!adapter->pvid) 982 if (!adapter->rx_pvid)
979 return 0; 983 return 0;
980 984
981 if (*vlan_tag == adapter->pvid) { 985 if (*vlan_tag == adapter->rx_pvid) {
982 /* Outer vlan tag. Packet should follow non-vlan path */ 986 /* Outer vlan tag. Packet should follow non-vlan path */
983 *vlan_tag = 0xffff; 987 *vlan_tag = 0xffff;
984 return 0; 988 return 0;
@@ -1024,8 +1028,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1024 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 1028 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1025 t_vid = 0; 1029 t_vid = 0;
1026 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1030 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1027 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, 1031 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1028 cpu_to_le16(t_vid));
1029 } 1032 }
1030 1033
1031 if (length > rds_ring->skb_size) 1034 if (length > rds_ring->skb_size)
@@ -1045,7 +1048,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1045 skb->protocol = eth_type_trans(skb, netdev); 1048 skb->protocol = eth_type_trans(skb, netdev);
1046 1049
1047 if (vid != 0xffff) 1050 if (vid != 0xffff)
1048 __vlan_hwaccel_put_tag(skb, vid); 1051 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1049 1052
1050 napi_gro_receive(&sds_ring->napi, skb); 1053 napi_gro_receive(&sds_ring->napi, skb);
1051 1054
@@ -1102,8 +1105,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1102 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 1105 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1103 t_vid = 0; 1106 t_vid = 0;
1104 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); 1107 is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
1105 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, 1108 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1106 cpu_to_le16(t_vid));
1107 } 1109 }
1108 1110
1109 if (timestamp) 1111 if (timestamp)
@@ -1131,9 +1133,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1131 iph = (struct iphdr *)skb->data; 1133 iph = (struct iphdr *)skb->data;
1132 th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); 1134 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1133 length = (iph->ihl << 2) + (th->doff << 2) + lro_length; 1135 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1136 csum_replace2(&iph->check, iph->tot_len, htons(length));
1134 iph->tot_len = htons(length); 1137 iph->tot_len = htons(length);
1135 iph->check = 0;
1136 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1137 } 1138 }
1138 1139
1139 th->psh = push; 1140 th->psh = push;
@@ -1149,7 +1150,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1149 } 1150 }
1150 1151
1151 if (vid != 0xffff) 1152 if (vid != 0xffff)
1152 __vlan_hwaccel_put_tag(skb, vid); 1153 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1153 netif_receive_skb(skb); 1154 netif_receive_skb(skb);
1154 1155
1155 adapter->stats.lro_pkts++; 1156 adapter->stats.lro_pkts++;
@@ -1496,8 +1497,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1496 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 1497 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1497 t_vid = 0; 1498 t_vid = 0;
1498 is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); 1499 is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
1499 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, 1500 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1500 cpu_to_le16(t_vid));
1501 } 1501 }
1502 1502
1503 if (length > rds_ring->skb_size) 1503 if (length > rds_ring->skb_size)
@@ -1514,7 +1514,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1514 skb->protocol = eth_type_trans(skb, netdev); 1514 skb->protocol = eth_type_trans(skb, netdev);
1515 1515
1516 if (vid != 0xffff) 1516 if (vid != 0xffff)
1517 __vlan_hwaccel_put_tag(skb, vid); 1517 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1518 1518
1519 napi_gro_receive(&sds_ring->napi, skb); 1519 napi_gro_receive(&sds_ring->napi, skb);
1520 1520
@@ -1566,8 +1566,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1566 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 1566 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
1567 t_vid = 0; 1567 t_vid = 0;
1568 is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); 1568 is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
1569 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, 1569 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1570 cpu_to_le16(t_vid));
1571 } 1570 }
1572 if (qlcnic_83xx_is_tstamp(sts_data[1])) 1571 if (qlcnic_83xx_is_tstamp(sts_data[1]))
1573 data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE; 1572 data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
@@ -1594,9 +1593,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1594 iph = (struct iphdr *)skb->data; 1593 iph = (struct iphdr *)skb->data;
1595 th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); 1594 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1596 length = (iph->ihl << 2) + (th->doff << 2) + lro_length; 1595 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1596 csum_replace2(&iph->check, iph->tot_len, htons(length));
1597 iph->tot_len = htons(length); 1597 iph->tot_len = htons(length);
1598 iph->check = 0;
1599 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1600 } 1598 }
1601 1599
1602 th->psh = push; 1600 th->psh = push;
@@ -1612,7 +1610,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1612 } 1610 }
1613 1611
1614 if (vid != 0xffff) 1612 if (vid != 0xffff)
1615 __vlan_hwaccel_put_tag(skb, vid); 1613 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1616 1614
1617 netif_receive_skb(skb); 1615 netif_receive_skb(skb);
1618 1616
@@ -1691,6 +1689,29 @@ skip:
1691 return count; 1689 return count;
1692} 1690}
1693 1691
1692static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1693{
1694 int tx_complete;
1695 int work_done;
1696 struct qlcnic_host_sds_ring *sds_ring;
1697 struct qlcnic_adapter *adapter;
1698 struct qlcnic_host_tx_ring *tx_ring;
1699
1700 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1701 adapter = sds_ring->adapter;
1702 /* tx ring count = 1 */
1703 tx_ring = adapter->tx_ring;
1704
1705 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1706 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1707 if ((work_done < budget) && tx_complete) {
1708 napi_complete(&sds_ring->napi);
1709 qlcnic_83xx_enable_intr(adapter, sds_ring);
1710 }
1711
1712 return work_done;
1713}
1714
1694static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) 1715static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1695{ 1716{
1696 int tx_complete; 1717 int tx_complete;
@@ -1768,7 +1789,8 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
1768 qlcnic_83xx_enable_intr(adapter, sds_ring); 1789 qlcnic_83xx_enable_intr(adapter, sds_ring);
1769 } 1790 }
1770 1791
1771 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1792 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1793 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1772 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1794 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1773 tx_ring = &adapter->tx_ring[ring]; 1795 tx_ring = &adapter->tx_ring[ring];
1774 napi_enable(&tx_ring->napi); 1796 napi_enable(&tx_ring->napi);
@@ -1795,7 +1817,8 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1795 napi_disable(&sds_ring->napi); 1817 napi_disable(&sds_ring->napi);
1796 } 1818 }
1797 1819
1798 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1820 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1821 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1799 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1822 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1800 tx_ring = &adapter->tx_ring[ring]; 1823 tx_ring = &adapter->tx_ring[ring];
1801 qlcnic_83xx_disable_tx_intr(adapter, tx_ring); 1824 qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
@@ -1808,7 +1831,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1808int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, 1831int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1809 struct net_device *netdev) 1832 struct net_device *netdev)
1810{ 1833{
1811 int ring, max_sds_rings; 1834 int ring, max_sds_rings, temp;
1812 struct qlcnic_host_sds_ring *sds_ring; 1835 struct qlcnic_host_sds_ring *sds_ring;
1813 struct qlcnic_host_tx_ring *tx_ring; 1836 struct qlcnic_host_tx_ring *tx_ring;
1814 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1837 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1819,14 +1842,23 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1819 max_sds_rings = adapter->max_sds_rings; 1842 max_sds_rings = adapter->max_sds_rings;
1820 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1843 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1821 sds_ring = &recv_ctx->sds_rings[ring]; 1844 sds_ring = &recv_ctx->sds_rings[ring];
1822 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1845 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1823 netif_napi_add(netdev, &sds_ring->napi, 1846 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1824 qlcnic_83xx_rx_poll, 1847 netif_napi_add(netdev, &sds_ring->napi,
1825 QLCNIC_NETDEV_WEIGHT * 2); 1848 qlcnic_83xx_rx_poll,
1826 else 1849 QLCNIC_NETDEV_WEIGHT * 2);
1850 } else {
1851 temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
1852 netif_napi_add(netdev, &sds_ring->napi,
1853 qlcnic_83xx_msix_sriov_vf_poll,
1854 temp);
1855 }
1856
1857 } else {
1827 netif_napi_add(netdev, &sds_ring->napi, 1858 netif_napi_add(netdev, &sds_ring->napi,
1828 qlcnic_83xx_poll, 1859 qlcnic_83xx_poll,
1829 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1860 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1861 }
1830 } 1862 }
1831 1863
1832 if (qlcnic_alloc_tx_rings(adapter, netdev)) { 1864 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1834,7 +1866,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1834 return -ENOMEM; 1866 return -ENOMEM;
1835 } 1867 }
1836 1868
1837 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1869 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1870 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1838 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1871 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1839 tx_ring = &adapter->tx_ring[ring]; 1872 tx_ring = &adapter->tx_ring[ring];
1840 netif_napi_add(netdev, &tx_ring->napi, 1873 netif_napi_add(netdev, &tx_ring->napi,
@@ -1860,7 +1893,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
1860 1893
1861 qlcnic_free_sds_rings(adapter->recv_ctx); 1894 qlcnic_free_sds_rings(adapter->recv_ctx);
1862 1895
1863 if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { 1896 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1897 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1864 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1898 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1865 tx_ring = &adapter->tx_ring[ring]; 1899 tx_ring = &adapter->tx_ring[ring];
1866 netif_napi_del(&tx_ring->napi); 1900 netif_napi_del(&tx_ring->napi);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28a6d4838364..264d5a4f8153 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -9,6 +9,7 @@
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10 10
11#include "qlcnic.h" 11#include "qlcnic.h"
12#include "qlcnic_sriov.h"
12#include "qlcnic_hw.h" 13#include "qlcnic_hw.h"
13 14
14#include <linux/swab.h> 15#include <linux/swab.h>
@@ -85,8 +86,8 @@ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
85static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 86static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
86static void qlcnic_set_netdev_features(struct qlcnic_adapter *, 87static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
87 struct qlcnic_esw_func_cfg *); 88 struct qlcnic_esw_func_cfg *);
88static int qlcnic_vlan_rx_add(struct net_device *, u16); 89static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
89static int qlcnic_vlan_rx_del(struct net_device *, u16); 90static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
90 91
91#define QLCNIC_IS_TSO_CAPABLE(adapter) \ 92#define QLCNIC_IS_TSO_CAPABLE(adapter) \
92 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) 93 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
@@ -109,6 +110,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
109static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { 110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
110 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), 111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), 112 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
113 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
112 {0,} 114 {0,}
113}; 115};
114 116
@@ -154,25 +156,112 @@ static const u32 qlcnic_reg_tbl[] = {
154}; 156};
155 157
156static const struct qlcnic_board_info qlcnic_boards[] = { 158static const struct qlcnic_board_info qlcnic_boards[] = {
157 {0x1077, 0x8020, 0x1077, 0x203, 159 { PCI_VENDOR_ID_QLOGIC,
158 "8200 Series Single Port 10GbE Converged Network Adapter" 160 PCI_DEVICE_ID_QLOGIC_QLE834X,
159 "(TCP/IP Networking)"}, 161 PCI_VENDOR_ID_QLOGIC,
160 {0x1077, 0x8020, 0x1077, 0x207, 162 0x24e,
161 "8200 Series Dual Port 10GbE Converged Network Adapter" 163 "8300 Series Dual Port 10GbE Converged Network Adapter "
162 "(TCP/IP Networking)"}, 164 "(TCP/IP Networking)" },
163 {0x1077, 0x8020, 0x1077, 0x20b, 165 { PCI_VENDOR_ID_QLOGIC,
164 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"}, 166 PCI_DEVICE_ID_QLOGIC_QLE834X,
165 {0x1077, 0x8020, 0x1077, 0x20c, 167 PCI_VENDOR_ID_QLOGIC,
166 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, 168 0x243,
167 {0x1077, 0x8020, 0x1077, 0x20f, 169 "8300 Series Single Port 10GbE Converged Network Adapter "
168 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 170 "(TCP/IP Networking)" },
169 {0x1077, 0x8020, 0x103c, 0x3733, 171 { PCI_VENDOR_ID_QLOGIC,
170 "NC523SFP 10Gb 2-port Server Adapter"}, 172 PCI_DEVICE_ID_QLOGIC_QLE834X,
171 {0x1077, 0x8020, 0x103c, 0x3346, 173 PCI_VENDOR_ID_QLOGIC,
172 "CN1000Q Dual Port Converged Network Adapter"}, 174 0x24a,
173 {0x1077, 0x8020, 0x1077, 0x210, 175 "8300 Series Dual Port 10GbE Converged Network Adapter "
174 "QME8242-k 10GbE Dual Port Mezzanine Card"}, 176 "(TCP/IP Networking)" },
175 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 177 { PCI_VENDOR_ID_QLOGIC,
178 PCI_DEVICE_ID_QLOGIC_QLE834X,
179 PCI_VENDOR_ID_QLOGIC,
180 0x246,
181 "8300 Series Dual Port 10GbE Converged Network Adapter "
182 "(TCP/IP Networking)" },
183 { PCI_VENDOR_ID_QLOGIC,
184 PCI_DEVICE_ID_QLOGIC_QLE834X,
185 PCI_VENDOR_ID_QLOGIC,
186 0x252,
187 "8300 Series Dual Port 10GbE Converged Network Adapter "
188 "(TCP/IP Networking)" },
189 { PCI_VENDOR_ID_QLOGIC,
190 PCI_DEVICE_ID_QLOGIC_QLE834X,
191 PCI_VENDOR_ID_QLOGIC,
192 0x26e,
193 "8300 Series Dual Port 10GbE Converged Network Adapter "
194 "(TCP/IP Networking)" },
195 { PCI_VENDOR_ID_QLOGIC,
196 PCI_DEVICE_ID_QLOGIC_QLE834X,
197 PCI_VENDOR_ID_QLOGIC,
198 0x260,
199 "8300 Series Dual Port 10GbE Converged Network Adapter "
200 "(TCP/IP Networking)" },
201 { PCI_VENDOR_ID_QLOGIC,
202 PCI_DEVICE_ID_QLOGIC_QLE834X,
203 PCI_VENDOR_ID_QLOGIC,
204 0x266,
205 "8300 Series Single Port 10GbE Converged Network Adapter "
206 "(TCP/IP Networking)" },
207 { PCI_VENDOR_ID_QLOGIC,
208 PCI_DEVICE_ID_QLOGIC_QLE834X,
209 PCI_VENDOR_ID_QLOGIC,
210 0x269,
211 "8300 Series Dual Port 10GbE Converged Network Adapter "
212 "(TCP/IP Networking)" },
213 { PCI_VENDOR_ID_QLOGIC,
214 PCI_DEVICE_ID_QLOGIC_QLE834X,
215 PCI_VENDOR_ID_QLOGIC,
216 0x271,
217 "8300 Series Dual Port 10GbE Converged Network Adapter "
218 "(TCP/IP Networking)" },
219 { PCI_VENDOR_ID_QLOGIC,
220 PCI_DEVICE_ID_QLOGIC_QLE834X,
221 0x0, 0x0, "8300 Series 1/10GbE Controller" },
222 { PCI_VENDOR_ID_QLOGIC,
223 PCI_DEVICE_ID_QLOGIC_QLE824X,
224 PCI_VENDOR_ID_QLOGIC,
225 0x203,
226 "8200 Series Single Port 10GbE Converged Network Adapter"
227 "(TCP/IP Networking)" },
228 { PCI_VENDOR_ID_QLOGIC,
229 PCI_DEVICE_ID_QLOGIC_QLE824X,
230 PCI_VENDOR_ID_QLOGIC,
231 0x207,
232 "8200 Series Dual Port 10GbE Converged Network Adapter"
233 "(TCP/IP Networking)" },
234 { PCI_VENDOR_ID_QLOGIC,
235 PCI_DEVICE_ID_QLOGIC_QLE824X,
236 PCI_VENDOR_ID_QLOGIC,
237 0x20b,
238 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
239 { PCI_VENDOR_ID_QLOGIC,
240 PCI_DEVICE_ID_QLOGIC_QLE824X,
241 PCI_VENDOR_ID_QLOGIC,
242 0x20c,
243 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
244 { PCI_VENDOR_ID_QLOGIC,
245 PCI_DEVICE_ID_QLOGIC_QLE824X,
246 PCI_VENDOR_ID_QLOGIC,
247 0x20f,
248 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
249 { PCI_VENDOR_ID_QLOGIC,
250 PCI_DEVICE_ID_QLOGIC_QLE824X,
251 0x103c, 0x3733,
252 "NC523SFP 10Gb 2-port Server Adapter" },
253 { PCI_VENDOR_ID_QLOGIC,
254 PCI_DEVICE_ID_QLOGIC_QLE824X,
255 0x103c, 0x3346,
256 "CN1000Q Dual Port Converged Network Adapter" },
257 { PCI_VENDOR_ID_QLOGIC,
258 PCI_DEVICE_ID_QLOGIC_QLE824X,
259 PCI_VENDOR_ID_QLOGIC,
260 0x210,
261 "QME8242-k 10GbE Dual Port Mezzanine Card" },
262 { PCI_VENDOR_ID_QLOGIC,
263 PCI_DEVICE_ID_QLOGIC_QLE824X,
264 0x0, 0x0, "cLOM8214 1/10GbE Controller" },
176}; 265};
177 266
178#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) 267#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
@@ -198,8 +287,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
198 recv_ctx->sds_rings = NULL; 287 recv_ctx->sds_rings = NULL;
199} 288}
200 289
201static int 290int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
202qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
203{ 291{
204 u8 mac_addr[ETH_ALEN]; 292 u8 mac_addr[ETH_ALEN];
205 struct net_device *netdev = adapter->netdev; 293 struct net_device *netdev = adapter->netdev;
@@ -225,6 +313,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
225 struct qlcnic_adapter *adapter = netdev_priv(netdev); 313 struct qlcnic_adapter *adapter = netdev_priv(netdev);
226 struct sockaddr *addr = p; 314 struct sockaddr *addr = p;
227 315
316 if (qlcnic_sriov_vf_check(adapter))
317 return -EINVAL;
318
228 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) 319 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
229 return -EOPNOTSUPP; 320 return -EOPNOTSUPP;
230 321
@@ -253,11 +344,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
253 struct qlcnic_adapter *adapter = netdev_priv(netdev); 344 struct qlcnic_adapter *adapter = netdev_priv(netdev);
254 int err = -EOPNOTSUPP; 345 int err = -EOPNOTSUPP;
255 346
256 if (!adapter->fdb_mac_learn) { 347 if (!adapter->fdb_mac_learn)
257 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 348 return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
258 __func__);
259 return err;
260 }
261 349
262 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 350 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
263 if (is_unicast_ether_addr(addr)) 351 if (is_unicast_ether_addr(addr))
@@ -277,11 +365,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
277 struct qlcnic_adapter *adapter = netdev_priv(netdev); 365 struct qlcnic_adapter *adapter = netdev_priv(netdev);
278 int err = 0; 366 int err = 0;
279 367
280 if (!adapter->fdb_mac_learn) { 368 if (!adapter->fdb_mac_learn)
281 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 369 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
282 __func__);
283 return -EOPNOTSUPP;
284 }
285 370
286 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 371 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
287 pr_info("%s: FDB e-switch is not enabled\n", __func__); 372 pr_info("%s: FDB e-switch is not enabled\n", __func__);
@@ -292,7 +377,7 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
292 return err; 377 return err;
293 378
294 if (is_unicast_ether_addr(addr)) 379 if (is_unicast_ether_addr(addr))
295 err = qlcnic_nic_add_mac(adapter, addr); 380 err = qlcnic_nic_add_mac(adapter, addr, 0);
296 else if (is_multicast_ether_addr(addr)) 381 else if (is_multicast_ether_addr(addr))
297 err = dev_mc_add_excl(netdev, addr); 382 err = dev_mc_add_excl(netdev, addr);
298 else 383 else
@@ -306,11 +391,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
306{ 391{
307 struct qlcnic_adapter *adapter = netdev_priv(netdev); 392 struct qlcnic_adapter *adapter = netdev_priv(netdev);
308 393
309 if (!adapter->fdb_mac_learn) { 394 if (!adapter->fdb_mac_learn)
310 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 395 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
311 __func__);
312 return -EOPNOTSUPP;
313 }
314 396
315 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 397 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
316 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 398 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
@@ -346,6 +428,12 @@ static const struct net_device_ops qlcnic_netdev_ops = {
346#ifdef CONFIG_NET_POLL_CONTROLLER 428#ifdef CONFIG_NET_POLL_CONTROLLER
347 .ndo_poll_controller = qlcnic_poll_controller, 429 .ndo_poll_controller = qlcnic_poll_controller,
348#endif 430#endif
431#ifdef CONFIG_QLCNIC_SRIOV
432 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
433 .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
434 .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
435 .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
436#endif
349}; 437};
350 438
351static const struct net_device_ops qlcnic_netdev_failed_ops = { 439static const struct net_device_ops qlcnic_netdev_failed_ops = {
@@ -387,6 +475,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
387 .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag, 475 .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
388 .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx, 476 .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
389 .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx, 477 .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
478 .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
479 .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
390 .setup_link_event = qlcnic_82xx_linkevent_request, 480 .setup_link_event = qlcnic_82xx_linkevent_request,
391 .get_nic_info = qlcnic_82xx_get_nic_info, 481 .get_nic_info = qlcnic_82xx_get_nic_info,
392 .get_pci_info = qlcnic_82xx_get_pci_info, 482 .get_pci_info = qlcnic_82xx_get_pci_info,
@@ -402,13 +492,22 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
402 .config_promisc_mode = qlcnic_82xx_nic_set_promisc, 492 .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
403 .change_l2_filter = qlcnic_82xx_change_filter, 493 .change_l2_filter = qlcnic_82xx_change_filter,
404 .get_board_info = qlcnic_82xx_get_board_info, 494 .get_board_info = qlcnic_82xx_get_board_info,
495 .free_mac_list = qlcnic_82xx_free_mac_list,
405}; 496};
406 497
407int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 498int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
408{ 499{
409 struct pci_dev *pdev = adapter->pdev; 500 struct pci_dev *pdev = adapter->pdev;
410 int err = -1, i; 501 int err = -1, i;
411 int max_tx_rings; 502 int max_tx_rings, tx_vector;
503
504 if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
505 max_tx_rings = 0;
506 tx_vector = 0;
507 } else {
508 max_tx_rings = adapter->max_drv_tx_rings;
509 tx_vector = 1;
510 }
412 511
413 if (!adapter->msix_entries) { 512 if (!adapter->msix_entries) {
414 adapter->msix_entries = kcalloc(num_msix, 513 adapter->msix_entries = kcalloc(num_msix,
@@ -431,7 +530,6 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
431 if (qlcnic_83xx_check(adapter)) { 530 if (qlcnic_83xx_check(adapter)) {
432 adapter->ahw->num_msix = num_msix; 531 adapter->ahw->num_msix = num_msix;
433 /* subtract mail box and tx ring vectors */ 532 /* subtract mail box and tx ring vectors */
434 max_tx_rings = adapter->max_drv_tx_rings;
435 adapter->max_sds_rings = num_msix - 533 adapter->max_sds_rings = num_msix -
436 max_tx_rings - 1; 534 max_tx_rings - 1;
437 } else { 535 } else {
@@ -444,11 +542,11 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
444 "Unable to allocate %d MSI-X interrupt vectors\n", 542 "Unable to allocate %d MSI-X interrupt vectors\n",
445 num_msix); 543 num_msix);
446 if (qlcnic_83xx_check(adapter)) { 544 if (qlcnic_83xx_check(adapter)) {
447 if (err < QLC_83XX_MINIMUM_VECTOR) 545 if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
448 return err; 546 return err;
449 err -= (adapter->max_drv_tx_rings + 1); 547 err -= (max_tx_rings + 1);
450 num_msix = rounddown_pow_of_two(err); 548 num_msix = rounddown_pow_of_two(err);
451 num_msix += (adapter->max_drv_tx_rings + 1); 549 num_msix += (max_tx_rings + 1);
452 } else { 550 } else {
453 num_msix = rounddown_pow_of_two(err); 551 num_msix = rounddown_pow_of_two(err);
454 } 552 }
@@ -542,11 +640,10 @@ void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
542 } 640 }
543} 641}
544 642
545static void 643static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
546qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
547{ 644{
548 if (adapter->ahw->pci_base0 != NULL) 645 if (ahw->pci_base0 != NULL)
549 iounmap(adapter->ahw->pci_base0); 646 iounmap(ahw->pci_base0);
550} 647}
551 648
552static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) 649static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
@@ -721,6 +818,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
721 *bar = QLCNIC_82XX_BAR0_LENGTH; 818 *bar = QLCNIC_82XX_BAR0_LENGTH;
722 break; 819 break;
723 case PCI_DEVICE_ID_QLOGIC_QLE834X: 820 case PCI_DEVICE_ID_QLOGIC_QLE834X:
821 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
724 *bar = QLCNIC_83XX_BAR0_LENGTH; 822 *bar = QLCNIC_83XX_BAR0_LENGTH;
725 break; 823 break;
726 default: 824 default:
@@ -751,7 +849,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
751 return -EIO; 849 return -EIO;
752 } 850 }
753 851
754 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 852 dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
755 853
756 ahw->pci_base0 = mem_ptr0; 854 ahw->pci_base0 = mem_ptr0;
757 ahw->pci_len0 = pci_len0; 855 ahw->pci_len0 = pci_len0;
@@ -891,24 +989,50 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
891 else 989 else
892 adapter->flags |= QLCNIC_TAGGING_ENABLED; 990 adapter->flags |= QLCNIC_TAGGING_ENABLED;
893 991
894 if (esw_cfg->vlan_id) 992 if (esw_cfg->vlan_id) {
895 adapter->pvid = esw_cfg->vlan_id; 993 adapter->rx_pvid = esw_cfg->vlan_id;
896 else 994 adapter->tx_pvid = esw_cfg->vlan_id;
897 adapter->pvid = 0; 995 } else {
996 adapter->rx_pvid = 0;
997 adapter->tx_pvid = 0;
998 }
898} 999}
899 1000
900static int 1001static int
901qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid) 1002qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
902{ 1003{
903 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1004 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1005 int err;
1006
1007 if (qlcnic_sriov_vf_check(adapter)) {
1008 err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
1009 if (err) {
1010 netdev_err(netdev,
1011 "Cannot add VLAN filter for VLAN id %d, err=%d",
1012 vid, err);
1013 return err;
1014 }
1015 }
1016
904 set_bit(vid, adapter->vlans); 1017 set_bit(vid, adapter->vlans);
905 return 0; 1018 return 0;
906} 1019}
907 1020
908static int 1021static int
909qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid) 1022qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
910{ 1023{
911 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1024 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1025 int err;
1026
1027 if (qlcnic_sriov_vf_check(adapter)) {
1028 err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
1029 if (err) {
1030 netdev_err(netdev,
1031 "Cannot delete VLAN filter for VLAN id %d, err=%d",
1032 vid, err);
1033 return err;
1034 }
1035 }
912 1036
913 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); 1037 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
914 clear_bit(vid, adapter->vlans); 1038 clear_bit(vid, adapter->vlans);
@@ -1250,7 +1374,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1250 irq_handler_t handler; 1374 irq_handler_t handler;
1251 struct qlcnic_host_sds_ring *sds_ring; 1375 struct qlcnic_host_sds_ring *sds_ring;
1252 struct qlcnic_host_tx_ring *tx_ring; 1376 struct qlcnic_host_tx_ring *tx_ring;
1253 int err, ring; 1377 int err, ring, num_sds_rings;
1254 1378
1255 unsigned long flags = 0; 1379 unsigned long flags = 0;
1256 struct net_device *netdev = adapter->netdev; 1380 struct net_device *netdev = adapter->netdev;
@@ -1281,10 +1405,20 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1281 if (qlcnic_82xx_check(adapter) || 1405 if (qlcnic_82xx_check(adapter) ||
1282 (qlcnic_83xx_check(adapter) && 1406 (qlcnic_83xx_check(adapter) &&
1283 (adapter->flags & QLCNIC_MSIX_ENABLED))) { 1407 (adapter->flags & QLCNIC_MSIX_ENABLED))) {
1284 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1408 num_sds_rings = adapter->max_sds_rings;
1409 for (ring = 0; ring < num_sds_rings; ring++) {
1285 sds_ring = &recv_ctx->sds_rings[ring]; 1410 sds_ring = &recv_ctx->sds_rings[ring];
1286 snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ, 1411 if (qlcnic_82xx_check(adapter) &&
1287 "%s[%d]", netdev->name, ring); 1412 (ring == (num_sds_rings - 1)))
1413 snprintf(sds_ring->name,
1414 sizeof(sds_ring->name),
1415 "qlcnic-%s[Tx0+Rx%d]",
1416 netdev->name, ring);
1417 else
1418 snprintf(sds_ring->name,
1419 sizeof(sds_ring->name),
1420 "qlcnic-%s[Rx%d]",
1421 netdev->name, ring);
1288 err = request_irq(sds_ring->irq, handler, flags, 1422 err = request_irq(sds_ring->irq, handler, flags,
1289 sds_ring->name, sds_ring); 1423 sds_ring->name, sds_ring);
1290 if (err) 1424 if (err)
@@ -1292,14 +1426,14 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1292 } 1426 }
1293 } 1427 }
1294 if (qlcnic_83xx_check(adapter) && 1428 if (qlcnic_83xx_check(adapter) &&
1295 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 1429 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1430 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1296 handler = qlcnic_msix_tx_intr; 1431 handler = qlcnic_msix_tx_intr;
1297 for (ring = 0; ring < adapter->max_drv_tx_rings; 1432 for (ring = 0; ring < adapter->max_drv_tx_rings;
1298 ring++) { 1433 ring++) {
1299 tx_ring = &adapter->tx_ring[ring]; 1434 tx_ring = &adapter->tx_ring[ring];
1300 snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ, 1435 snprintf(tx_ring->name, sizeof(tx_ring->name),
1301 "%s[%d]", netdev->name, 1436 "qlcnic-%s[Tx%d]", netdev->name, ring);
1302 adapter->max_sds_rings + ring);
1303 err = request_irq(tx_ring->irq, handler, flags, 1437 err = request_irq(tx_ring->irq, handler, flags,
1304 tx_ring->name, tx_ring); 1438 tx_ring->name, tx_ring);
1305 if (err) 1439 if (err)
@@ -1328,7 +1462,8 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1328 free_irq(sds_ring->irq, sds_ring); 1462 free_irq(sds_ring->irq, sds_ring);
1329 } 1463 }
1330 } 1464 }
1331 if (qlcnic_83xx_check(adapter)) { 1465 if (qlcnic_83xx_check(adapter) &&
1466 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1332 for (ring = 0; ring < adapter->max_drv_tx_rings; 1467 for (ring = 0; ring < adapter->max_drv_tx_rings;
1333 ring++) { 1468 ring++) {
1334 tx_ring = &adapter->tx_ring[ring]; 1469 tx_ring = &adapter->tx_ring[ring];
@@ -1418,9 +1553,12 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1418 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) 1553 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1419 return; 1554 return;
1420 1555
1556 if (qlcnic_sriov_vf_check(adapter))
1557 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1421 smp_mb(); 1558 smp_mb();
1422 spin_lock(&adapter->tx_clean_lock); 1559 spin_lock(&adapter->tx_clean_lock);
1423 netif_carrier_off(netdev); 1560 netif_carrier_off(netdev);
1561 adapter->ahw->linkup = 0;
1424 netif_tx_disable(netdev); 1562 netif_tx_disable(netdev);
1425 1563
1426 qlcnic_free_mac_list(adapter); 1564 qlcnic_free_mac_list(adapter);
@@ -1545,7 +1683,9 @@ out:
1545 1683
1546static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) 1684static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1547{ 1685{
1686 struct qlcnic_hardware_context *ahw = adapter->ahw;
1548 int err = 0; 1687 int err = 0;
1688
1549 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), 1689 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1550 GFP_KERNEL); 1690 GFP_KERNEL);
1551 if (!adapter->recv_ctx) { 1691 if (!adapter->recv_ctx) {
@@ -1553,9 +1693,14 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1553 goto err_out; 1693 goto err_out;
1554 } 1694 }
1555 /* Initialize interrupt coalesce parameters */ 1695 /* Initialize interrupt coalesce parameters */
1556 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT; 1696 ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1557 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; 1697 ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
1558 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; 1698 ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
1699 ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
1700 if (qlcnic_83xx_check(adapter)) {
1701 ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
1702 ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
1703 }
1559 /* clear stats */ 1704 /* clear stats */
1560 memset(&adapter->stats, 0, sizeof(adapter->stats)); 1705 memset(&adapter->stats, 0, sizeof(adapter->stats));
1561err_out: 1706err_out:
@@ -1685,7 +1830,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1685 return err; 1830 return err;
1686} 1831}
1687 1832
1688static int 1833int
1689qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 1834qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1690 int pci_using_dac) 1835 int pci_using_dac)
1691{ 1836{
@@ -1701,11 +1846,14 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1701 1846
1702 qlcnic_change_mtu(netdev, netdev->mtu); 1847 qlcnic_change_mtu(netdev, netdev->mtu);
1703 1848
1704 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1849 if (qlcnic_sriov_vf_check(adapter))
1850 SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
1851 else
1852 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1705 1853
1706 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1854 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1707 NETIF_F_IPV6_CSUM | NETIF_F_GRO | 1855 NETIF_F_IPV6_CSUM | NETIF_F_GRO |
1708 NETIF_F_HW_VLAN_RX); 1856 NETIF_F_HW_VLAN_CTAG_RX);
1709 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1857 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1710 NETIF_F_IPV6_CSUM); 1858 NETIF_F_IPV6_CSUM);
1711 1859
@@ -1720,7 +1868,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1720 } 1868 }
1721 1869
1722 if (qlcnic_vlan_tx_check(adapter)) 1870 if (qlcnic_vlan_tx_check(adapter))
1723 netdev->features |= (NETIF_F_HW_VLAN_TX); 1871 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
1872
1873 if (qlcnic_sriov_vf_check(adapter))
1874 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1724 1875
1725 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1876 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1726 netdev->features |= NETIF_F_LRO; 1877 netdev->features |= NETIF_F_LRO;
@@ -1820,6 +1971,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1820 u32 capab2; 1971 u32 capab2;
1821 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 1972 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
1822 1973
1974 if (pdev->is_virtfn)
1975 return -ENODEV;
1976
1823 err = pci_enable_device(pdev); 1977 err = pci_enable_device(pdev);
1824 if (err) 1978 if (err)
1825 return err; 1979 return err;
@@ -1844,12 +1998,18 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1844 if (!ahw) 1998 if (!ahw)
1845 goto err_out_free_res; 1999 goto err_out_free_res;
1846 2000
1847 if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) { 2001 switch (ent->device) {
2002 case PCI_DEVICE_ID_QLOGIC_QLE824X:
1848 ahw->hw_ops = &qlcnic_hw_ops; 2003 ahw->hw_ops = &qlcnic_hw_ops;
1849 ahw->reg_tbl = (u32 *)qlcnic_reg_tbl; 2004 ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
1850 } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { 2005 break;
2006 case PCI_DEVICE_ID_QLOGIC_QLE834X:
1851 qlcnic_83xx_register_map(ahw); 2007 qlcnic_83xx_register_map(ahw);
1852 } else { 2008 break;
2009 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
2010 qlcnic_sriov_vf_register_map(ahw);
2011 break;
2012 default:
1853 goto err_out_free_hw_res; 2013 goto err_out_free_hw_res;
1854 } 2014 }
1855 2015
@@ -1911,11 +2071,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1911 } else if (qlcnic_83xx_check(adapter)) { 2071 } else if (qlcnic_83xx_check(adapter)) {
1912 qlcnic_83xx_check_vf(adapter, ent); 2072 qlcnic_83xx_check_vf(adapter, ent);
1913 adapter->portnum = adapter->ahw->pci_func; 2073 adapter->portnum = adapter->ahw->pci_func;
1914 err = qlcnic_83xx_init(adapter); 2074 err = qlcnic_83xx_init(adapter, pci_using_dac);
1915 if (err) { 2075 if (err) {
1916 dev_err(&pdev->dev, "%s: failed\n", __func__); 2076 dev_err(&pdev->dev, "%s: failed\n", __func__);
1917 goto err_out_free_hw; 2077 goto err_out_free_hw;
1918 } 2078 }
2079 if (qlcnic_sriov_vf_check(adapter))
2080 return 0;
1919 } else { 2081 } else {
1920 dev_err(&pdev->dev, 2082 dev_err(&pdev->dev,
1921 "%s: failed. Please Reboot\n", __func__); 2083 "%s: failed. Please Reboot\n", __func__);
@@ -1932,6 +2094,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1932 module_name(THIS_MODULE), 2094 module_name(THIS_MODULE),
1933 board_name, adapter->ahw->revision_id); 2095 board_name, adapter->ahw->revision_id);
1934 } 2096 }
2097
2098 if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
2099 !!qlcnic_use_msi)
2100 dev_warn(&pdev->dev,
2101 "83xx adapter do not support MSI interrupts\n");
2102
1935 err = qlcnic_setup_intr(adapter, 0); 2103 err = qlcnic_setup_intr(adapter, 0);
1936 if (err) { 2104 if (err) {
1937 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 2105 dev_err(&pdev->dev, "Failed to setup interrupt\n");
@@ -1999,7 +2167,7 @@ err_out_free_netdev:
1999 free_netdev(netdev); 2167 free_netdev(netdev);
2000 2168
2001err_out_iounmap: 2169err_out_iounmap:
2002 qlcnic_cleanup_pci_map(adapter); 2170 qlcnic_cleanup_pci_map(ahw);
2003 2171
2004err_out_free_hw_res: 2172err_out_free_hw_res:
2005 kfree(ahw); 2173 kfree(ahw);
@@ -2024,11 +2192,13 @@ static void qlcnic_remove(struct pci_dev *pdev)
2024 return; 2192 return;
2025 2193
2026 netdev = adapter->netdev; 2194 netdev = adapter->netdev;
2195 qlcnic_sriov_pf_disable(adapter);
2027 2196
2028 qlcnic_cancel_idc_work(adapter); 2197 qlcnic_cancel_idc_work(adapter);
2029 ahw = adapter->ahw; 2198 ahw = adapter->ahw;
2030 2199
2031 unregister_netdev(netdev); 2200 unregister_netdev(netdev);
2201 qlcnic_sriov_cleanup(adapter);
2032 2202
2033 if (qlcnic_83xx_check(adapter)) { 2203 if (qlcnic_83xx_check(adapter)) {
2034 qlcnic_83xx_free_mbx_intr(adapter); 2204 qlcnic_83xx_free_mbx_intr(adapter);
@@ -2054,7 +2224,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
2054 2224
2055 qlcnic_remove_sysfs(adapter); 2225 qlcnic_remove_sysfs(adapter);
2056 2226
2057 qlcnic_cleanup_pci_map(adapter); 2227 qlcnic_cleanup_pci_map(adapter->ahw);
2058 2228
2059 qlcnic_release_firmware(adapter); 2229 qlcnic_release_firmware(adapter);
2060 2230
@@ -2084,6 +2254,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
2084 if (netif_running(netdev)) 2254 if (netif_running(netdev))
2085 qlcnic_down(adapter, netdev); 2255 qlcnic_down(adapter, netdev);
2086 2256
2257 qlcnic_sriov_cleanup(adapter);
2087 if (qlcnic_82xx_check(adapter)) 2258 if (qlcnic_82xx_check(adapter))
2088 qlcnic_clr_all_drv_state(adapter, 0); 2259 qlcnic_clr_all_drv_state(adapter, 0);
2089 2260
@@ -3205,20 +3376,40 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3205 return err; 3376 return err;
3206} 3377}
3207 3378
3208int qlcnic_validate_max_rss(u8 max_hw, u8 val) 3379int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
3380 __u32 val)
3209{ 3381{
3382 struct net_device *netdev = adapter->netdev;
3383 u8 max_hw = adapter->ahw->max_rx_ques;
3210 u32 max_allowed; 3384 u32 max_allowed;
3211 3385
3212 if (max_hw > QLC_MAX_SDS_RINGS) { 3386 if (val > QLC_MAX_SDS_RINGS) {
3213 max_hw = QLC_MAX_SDS_RINGS; 3387 netdev_err(netdev, "RSS value should not be higher than %u\n",
3214 pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS); 3388 QLC_MAX_SDS_RINGS);
3389 return -EINVAL;
3215 } 3390 }
3216 3391
3217 max_allowed = rounddown_pow_of_two(min_t(int, max_hw, 3392 max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
3218 num_online_cpus())); 3393 num_online_cpus()));
3219 if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) { 3394 if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
3220 pr_info("rss_ring valid range [2 - %x] in powers of 2\n", 3395 if (!is_power_of_2(val))
3221 max_allowed); 3396 netdev_err(netdev, "RSS value should be a power of 2\n");
3397
3398 if (val < 2)
3399 netdev_err(netdev, "RSS value should not be lower than 2\n");
3400
3401 if (val > max_hw)
3402 netdev_err(netdev,
3403 "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
3404 max_hw);
3405
3406 if (val > num_online_cpus())
3407 netdev_err(netdev,
3408 "RSS value should not be higher than[%u], number of online CPUs in the system\n",
3409 num_online_cpus());
3410
3411 netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
3412
3222 return -EINVAL; 3413 return -EINVAL;
3223 } 3414 }
3224 return 0; 3415 return 0;
@@ -3238,8 +3429,10 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
3238 3429
3239 qlcnic_detach(adapter); 3430 qlcnic_detach(adapter);
3240 3431
3241 if (qlcnic_83xx_check(adapter)) 3432 if (qlcnic_83xx_check(adapter)) {
3242 qlcnic_83xx_free_mbx_intr(adapter); 3433 qlcnic_83xx_free_mbx_intr(adapter);
3434 qlcnic_83xx_enable_mbx_poll(adapter);
3435 }
3243 3436
3244 qlcnic_teardown_intr(adapter); 3437 qlcnic_teardown_intr(adapter);
3245 err = qlcnic_setup_intr(adapter, data); 3438 err = qlcnic_setup_intr(adapter, data);
@@ -3253,6 +3446,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
3253 /* register for NIC IDC AEN Events */ 3446 /* register for NIC IDC AEN Events */
3254 qlcnic_83xx_register_nic_idc_func(adapter, 1); 3447 qlcnic_83xx_register_nic_idc_func(adapter, 1);
3255 err = qlcnic_83xx_setup_mbx_intr(adapter); 3448 err = qlcnic_83xx_setup_mbx_intr(adapter);
3449 qlcnic_83xx_disable_mbx_poll(adapter);
3256 if (err) { 3450 if (err) {
3257 dev_err(&adapter->pdev->dev, 3451 dev_err(&adapter->pdev->dev,
3258 "failed to setup mbx interrupt\n"); 3452 "failed to setup mbx interrupt\n");
@@ -3318,7 +3512,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
3318 3512
3319 rcu_read_lock(); 3513 rcu_read_lock();
3320 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { 3514 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
3321 dev = __vlan_find_dev_deep(netdev, vid); 3515 dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
3322 if (!dev) 3516 if (!dev)
3323 continue; 3517 continue;
3324 qlcnic_config_indev_addr(adapter, dev, event); 3518 qlcnic_config_indev_addr(adapter, dev, event);
@@ -3432,7 +3626,10 @@ static struct pci_driver qlcnic_driver = {
3432 .resume = qlcnic_resume, 3626 .resume = qlcnic_resume,
3433#endif 3627#endif
3434 .shutdown = qlcnic_shutdown, 3628 .shutdown = qlcnic_shutdown,
3435 .err_handler = &qlcnic_err_handler 3629 .err_handler = &qlcnic_err_handler,
3630#ifdef CONFIG_QLCNIC_SRIOV
3631 .sriov_configure = qlcnic_pci_sriov_configure,
3632#endif
3436 3633
3437}; 3634};
3438 3635
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index abbd22c814a6..4b9bab18ebd9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
810 810
811 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, 811 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
812 &tmp_addr_t, GFP_KERNEL); 812 &tmp_addr_t, GFP_KERNEL);
813 if (!tmp_addr) { 813 if (!tmp_addr)
814 dev_err(&adapter->pdev->dev,
815 "Can't get memory for FW dump template\n");
816 return -ENOMEM; 814 return -ENOMEM;
817 }
818 815
819 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { 816 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
820 err = -ENOMEM; 817 err = -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 000000000000..d85fbb57c25b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,263 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef _QLCNIC_83XX_SRIOV_H_
9#define _QLCNIC_83XX_SRIOV_H_
10
11#include "qlcnic.h"
12#include <linux/types.h>
13#include <linux/pci.h>
14
15extern const u32 qlcnic_83xx_reg_tbl[];
16extern const u32 qlcnic_83xx_ext_reg_tbl[];
17
18struct qlcnic_bc_payload {
19 u64 payload[126];
20};
21
22struct qlcnic_bc_hdr {
23#if defined(__LITTLE_ENDIAN)
24 u8 version;
25 u8 msg_type:4;
26 u8 rsvd1:3;
27 u8 op_type:1;
28 u8 num_cmds;
29 u8 num_frags;
30 u8 frag_num;
31 u8 cmd_op;
32 u16 seq_id;
33 u64 rsvd3;
34#elif defined(__BIG_ENDIAN)
35 u8 num_frags;
36 u8 num_cmds;
37 u8 op_type:1;
38 u8 rsvd1:3;
39 u8 msg_type:4;
40 u8 version;
41 u16 seq_id;
42 u8 cmd_op;
43 u8 frag_num;
44 u64 rsvd3;
45#endif
46};
47
48enum qlcnic_bc_commands {
49 QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
50 QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
51 QLCNIC_BC_CMD_GET_ACL = 0x2,
52 QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
53};
54
55#define QLC_BC_CMD 1
56
57struct qlcnic_trans_list {
58 /* Lock for manipulating list */
59 spinlock_t lock;
60 struct list_head wait_list;
61 int count;
62};
63
64enum qlcnic_trans_state {
65 QLC_INIT = 0,
66 QLC_WAIT_FOR_CHANNEL_FREE,
67 QLC_WAIT_FOR_RESP,
68 QLC_ABORT,
69 QLC_END,
70};
71
72struct qlcnic_bc_trans {
73 u8 func_id;
74 u8 active;
75 u8 curr_rsp_frag;
76 u8 curr_req_frag;
77 u16 cmd_id;
78 u16 req_pay_size;
79 u16 rsp_pay_size;
80 u32 trans_id;
81 enum qlcnic_trans_state trans_state;
82 struct list_head list;
83 struct qlcnic_bc_hdr *req_hdr;
84 struct qlcnic_bc_hdr *rsp_hdr;
85 struct qlcnic_bc_payload *req_pay;
86 struct qlcnic_bc_payload *rsp_pay;
87 struct completion resp_cmpl;
88 struct qlcnic_vf_info *vf;
89};
90
91enum qlcnic_vf_state {
92 QLC_BC_VF_SEND = 0,
93 QLC_BC_VF_RECV,
94 QLC_BC_VF_CHANNEL,
95 QLC_BC_VF_STATE,
96 QLC_BC_VF_FLR,
97 QLC_BC_VF_SOFT_FLR,
98};
99
100enum qlcnic_vlan_mode {
101 QLC_NO_VLAN_MODE = 0,
102 QLC_PVID_MODE,
103 QLC_GUEST_VLAN_MODE,
104};
105
106struct qlcnic_resources {
107 u16 num_tx_mac_filters;
108 u16 num_rx_ucast_mac_filters;
109 u16 num_rx_mcast_mac_filters;
110
111 u16 num_txvlan_keys;
112
113 u16 num_rx_queues;
114 u16 num_tx_queues;
115
116 u16 num_rx_buf_rings;
117 u16 num_rx_status_rings;
118
119 u16 num_destip;
120 u32 num_lro_flows_supported;
121 u16 max_local_ipv6_addrs;
122 u16 max_remote_ipv6_addrs;
123};
124
125struct qlcnic_vport {
126 u16 handle;
127 u16 max_tx_bw;
128 u16 min_tx_bw;
129 u8 vlan_mode;
130 u16 vlan;
131 u8 qos;
132 u8 mac[6];
133};
134
135struct qlcnic_vf_info {
136 u8 pci_func;
137 u16 rx_ctx_id;
138 u16 tx_ctx_id;
139 unsigned long state;
140 struct completion ch_free_cmpl;
141 struct work_struct trans_work;
142 struct work_struct flr_work;
143 /* It synchronizes commands sent from VF */
144 struct mutex send_cmd_lock;
145 struct qlcnic_bc_trans *send_cmd;
146 struct qlcnic_bc_trans *flr_trans;
147 struct qlcnic_trans_list rcv_act;
148 struct qlcnic_trans_list rcv_pend;
149 struct qlcnic_adapter *adapter;
150 struct qlcnic_vport *vp;
151};
152
153struct qlcnic_async_work_list {
154 struct list_head list;
155 struct work_struct work;
156 void *ptr;
157};
158
159struct qlcnic_back_channel {
160 u16 trans_counter;
161 struct workqueue_struct *bc_trans_wq;
162 struct workqueue_struct *bc_async_wq;
163 struct workqueue_struct *bc_flr_wq;
164 struct list_head async_list;
165};
166
167struct qlcnic_sriov {
168 u16 vp_handle;
169 u8 num_vfs;
170 u8 any_vlan;
171 u8 vlan_mode;
172 u16 num_allowed_vlans;
173 u16 *allowed_vlans;
174 u16 vlan;
175 struct qlcnic_resources ff_max;
176 struct qlcnic_back_channel bc;
177 struct qlcnic_vf_info *vf_info;
178};
179
180int qlcnic_sriov_init(struct qlcnic_adapter *, int);
181void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
182void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
183void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
184int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
185void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
186int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
187int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
188void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
189int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
190void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
191void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
192int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
193 struct qlcnic_bc_trans *);
194int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
195 struct qlcnic_info *, u16);
196int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
197
198static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
199{
200 return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
201}
202
203#ifdef CONFIG_QLCNIC_SRIOV
204void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
205 struct qlcnic_bc_trans *,
206 struct qlcnic_cmd_args *);
207void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
208void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
209int qlcnic_pci_sriov_configure(struct pci_dev *, int);
210void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
211void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
212void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
213void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
214void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
215void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
216void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
217void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
218bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
219 struct qlcnic_bc_trans *,
220 struct qlcnic_vf_info *);
221void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
222int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
223int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
224int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
225int qlcnic_sriov_get_vf_config(struct net_device *, int ,
226 struct ifla_vf_info *);
227int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
228#else
229static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
230static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
231static inline void
232qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
233 u32 *int_id) {}
234static inline void
235qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
236 u32 *int_id) {}
237static inline void
238qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
239 u32 *int_id) {}
240static inline void
241qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
242 u32 *int_id) {}
243static inline void
244qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
245{}
246static inline void
247qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
248{}
249static inline void
250qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
251{}
252static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
253 struct qlcnic_vf_info *vf) {}
254static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
255 struct qlcnic_bc_trans *trans,
256 struct qlcnic_vf_info *vf)
257{ return false; }
258static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
259static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
260{ return 0; }
261#endif
262
263#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 000000000000..44d547d78b84
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,1954 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic_sriov.h"
9#include "qlcnic.h"
10#include "qlcnic_83xx_hw.h"
11#include <linux/types.h>
12
13#define QLC_BC_COMMAND 0
14#define QLC_BC_RESPONSE 1
15
16#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
18
19#define QLC_BC_MSG 0
20#define QLC_BC_CFREE 1
21#define QLC_BC_FLR 2
22#define QLC_BC_HDR_SZ 16
23#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
24
25#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
26#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
27
28#define QLC_83XX_VF_RESET_FAIL_THRESH 8
29#define QLC_BC_CMD_MAX_RETRY_CNT 5
30
31static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
32static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
33static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
34static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
36static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
37 struct qlcnic_cmd_args *);
38
/* Hardware ops used when this 83xx function runs as an SR-IOV VF.
 * Mostly the stock 83xx handlers; the differences are that mailbox
 * commands are funneled through the VF back-channel path
 * (qlcnic_sriov_vf_mbx_op) and MAC list teardown uses the VF-specific
 * variant (qlcnic_sriov_vf_free_mac_list).
 */
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_vf_mbx_op,	/* VF-specific */
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,	/* VF-specific */
};
71
/* NIC template ops for a VF: stock handlers except idc work cancellation,
 * which must also stop the VF's firmware-poll work
 * (qlcnic_sriov_vf_cancel_fw_work).
 */
static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,	/* VF-specific */
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};
81
/* Back-channel command metadata: {command opcode, #request args, #response
 * args}.  Consumed by qlcnic_sriov_alloc_bc_mbx_args() to size the u32
 * argument arrays.
 */
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};
88
89static inline bool qlcnic_sriov_bc_msg_check(u32 val)
90{
91 return (val & (1 << QLC_BC_MSG)) ? true : false;
92}
93
94static inline bool qlcnic_sriov_channel_free_check(u32 val)
95{
96 return (val & (1 << QLC_BC_CFREE)) ? true : false;
97}
98
99static inline bool qlcnic_sriov_flr_check(u32 val)
100{
101 return (val & (1 << QLC_BC_FLR)) ? true : false;
102}
103
104static inline u8 qlcnic_sriov_target_func_id(u32 val)
105{
106 return (val >> 4) & 0xff;
107}
108
109static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
110{
111 struct pci_dev *dev = adapter->pdev;
112 int pos;
113 u16 stride, offset;
114
115 if (qlcnic_sriov_vf_check(adapter))
116 return 0;
117
118 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
119 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
120 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
121
122 return (dev->devfn + offset + stride * vf_id) & 0xff;
123}
124
125int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
126{
127 struct qlcnic_sriov *sriov;
128 struct qlcnic_back_channel *bc;
129 struct workqueue_struct *wq;
130 struct qlcnic_vport *vp;
131 struct qlcnic_vf_info *vf;
132 int err, i;
133
134 if (!qlcnic_sriov_enable_check(adapter))
135 return -EIO;
136
137 sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
138 if (!sriov)
139 return -ENOMEM;
140
141 adapter->ahw->sriov = sriov;
142 sriov->num_vfs = num_vfs;
143 bc = &sriov->bc;
144 sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
145 num_vfs, GFP_KERNEL);
146 if (!sriov->vf_info) {
147 err = -ENOMEM;
148 goto qlcnic_free_sriov;
149 }
150
151 wq = create_singlethread_workqueue("bc-trans");
152 if (wq == NULL) {
153 err = -ENOMEM;
154 dev_err(&adapter->pdev->dev,
155 "Cannot create bc-trans workqueue\n");
156 goto qlcnic_free_vf_info;
157 }
158
159 bc->bc_trans_wq = wq;
160
161 wq = create_singlethread_workqueue("async");
162 if (wq == NULL) {
163 err = -ENOMEM;
164 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
165 goto qlcnic_destroy_trans_wq;
166 }
167
168 bc->bc_async_wq = wq;
169 INIT_LIST_HEAD(&bc->async_list);
170
171 for (i = 0; i < num_vfs; i++) {
172 vf = &sriov->vf_info[i];
173 vf->adapter = adapter;
174 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
175 mutex_init(&vf->send_cmd_lock);
176 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
177 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
178 spin_lock_init(&vf->rcv_act.lock);
179 spin_lock_init(&vf->rcv_pend.lock);
180 init_completion(&vf->ch_free_cmpl);
181
182 if (qlcnic_sriov_pf_check(adapter)) {
183 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
184 if (!vp) {
185 err = -ENOMEM;
186 goto qlcnic_destroy_async_wq;
187 }
188 sriov->vf_info[i].vp = vp;
189 vp->max_tx_bw = MAX_BW;
190 random_ether_addr(vp->mac);
191 dev_info(&adapter->pdev->dev,
192 "MAC Address %pM is configured for VF %d\n",
193 vp->mac, i);
194 }
195 }
196
197 return 0;
198
199qlcnic_destroy_async_wq:
200 destroy_workqueue(bc->bc_async_wq);
201
202qlcnic_destroy_trans_wq:
203 destroy_workqueue(bc->bc_trans_wq);
204
205qlcnic_free_vf_info:
206 kfree(sriov->vf_info);
207
208qlcnic_free_sriov:
209 kfree(adapter->ahw->sriov);
210 return err;
211}
212
/* Drain a transaction wait list: pop every pending transaction, release
 * the mailbox argument arrays that alias its request/response payloads,
 * and free the transaction itself.  Runs under the list's spinlock with
 * interrupts disabled.
 */
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		/* The cmd arg arrays are the same buffers as the trans
		 * payloads (see qlcnic_sriov_prepare_bc_hdr); free them
		 * via the stock mbx-args helper.
		 */
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}
234
/* Tear down everything qlcnic_sriov_init() built, in reverse-ish order:
 * async list + workqueue first, then each VF's pending/active transaction
 * lists (with its trans work flushed in between), the trans workqueue,
 * the vports, and finally the vf_info array and the sriov container.
 * No-op if SR-IOV was never enabled.
 */
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		/* Drain pending list before cancelling the worker, then
		 * drain whatever the worker left on the active list.
		 */
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}
263
/* VF-side teardown: tell the PF the channel is going away, disable the
 * back-channel interrupt, then free the common SR-IOV state.
 */
static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}
270
/* Dispatch SR-IOV teardown to the PF or VF variant depending on which
 * role this function plays.
 */
void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
279
/* Post one back-channel fragment (bc header @hdr followed by @size u32s of
 * payload @pay) to firmware through the 83xx mailbox registers, then poll
 * for completion, servicing any async events (AENs) that arrive meanwhile.
 * On the PF, @pci_func selects the destination VF.  Returns
 * QLCNIC_RCODE_SUCCESS, a firmware error code, or QLCNIC_RCODE_TIMEOUT.
 * Runs with the mailbox spinlock held (irqsave) for the whole exchange.
 */
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	unsigned long flags;
	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
	u16 opcode;
	u8 mbx_err_code;
	int i, j;

	opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
		dev_info(&adapter->pdev->dev,
			 "Mailbox cmd attempted, 0x%x\n", opcode);
		dev_info(&adapter->pdev->dev, "Mailbox detached\n");
		/* NOTE(review): returns 0 (success) even though nothing was
		 * posted -- confirm callers tolerate this on a detached mbx.
		 */
		return 0;
	}

	spin_lock_irqsave(&ahw->mbx_lock, flags);

	/* Non-zero control register means the host still owns the mailbox,
	 * i.e. a previous command has not completed.
	 */
	mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
	if (mbx_val) {
		QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
		spin_unlock_irqrestore(&ahw->mbx_lock, flags);
		return QLCNIC_RCODE_TIMEOUT;
	}
	/* Fill in mailbox registers */
	val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);

	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
	mbx_cmd = 0x1 | (1 << 4);

	/* PF encodes the destination VF function in reg 1. */
	if (qlcnic_sriov_pf_check(adapter))
		mbx_cmd |= (pci_func << 5);

	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
	/* Copy bc header, then payload, into consecutive mailbox regs. */
	for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
			i++, j++) {
		writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
	}
	for (j = 0; j < size; j++, i++)
		writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));

	/* Signal FW about the impending command */
	QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);

	/* Waiting for the mailbox cmd to complete and while waiting here
	 * some AEN might arrive. If more than 5 seconds expire we can
	 * assume something is wrong.
	 */
poll:
	rsp = qlcnic_83xx_mbx_poll(adapter);
	if (rsp != QLCNIC_RCODE_TIMEOUT) {
		/* Get the FW response data */
		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
		if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
			/* An AEN, not our response: handle it and re-poll
			 * if our command is still pending.
			 */
			__qlcnic_83xx_process_aen(adapter);
			mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
			if (mbx_val)
				goto poll;
		}
		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
		opcode = QLCNIC_MBX_RSP(fw_data);

		switch (mbx_err_code) {
		case QLCNIC_MBX_RSP_OK:
		case QLCNIC_MBX_PORT_RSP_OK:
			rsp = QLCNIC_RCODE_SUCCESS;
			break;
		default:
			/* MAC/VLAN config failures have a refined rcode. */
			if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
				rsp = qlcnic_83xx_mac_rcode(adapter);
				if (!rsp)
					goto out;
			}
			dev_err(&adapter->pdev->dev,
				"MBX command 0x%x failed with err:0x%x\n",
				opcode, mbx_err_code);
			rsp = mbx_err_code;
			break;
		}
		goto out;
	}

	dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
		QLCNIC_MBX_RSP(mbx_cmd));
	rsp = QLCNIC_RCODE_TIMEOUT;
out:
	/* clear fw mbx control register */
	QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
	return rsp;
}
376
377static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
378{
379 adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
380 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
381 adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
382 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
383 adapter->num_txd = MAX_CMD_DESCRIPTORS;
384 adapter->max_rds_rings = MAX_RDS_RINGS;
385}
386
387int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
388 struct qlcnic_info *npar_info, u16 vport_id)
389{
390 struct device *dev = &adapter->pdev->dev;
391 struct qlcnic_cmd_args cmd;
392 int err;
393 u32 status;
394
395 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
396 if (err)
397 return err;
398
399 cmd.req.arg[1] = vport_id << 16 | 0x1;
400 err = qlcnic_issue_cmd(adapter, &cmd);
401 if (err) {
402 dev_err(&adapter->pdev->dev,
403 "Failed to get vport info, err=%d\n", err);
404 qlcnic_free_mbx_args(&cmd);
405 return err;
406 }
407
408 status = cmd.rsp.arg[2] & 0xffff;
409 if (status & BIT_0)
410 npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
411 if (status & BIT_1)
412 npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
413 if (status & BIT_2)
414 npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
415 if (status & BIT_3)
416 npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
417 if (status & BIT_4)
418 npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
419 if (status & BIT_5)
420 npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
421 if (status & BIT_6)
422 npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
423 if (status & BIT_7)
424 npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
425 if (status & BIT_8)
426 npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
427 if (status & BIT_9)
428 npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
429
430 npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
431 npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
432 npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
433 npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
434
435 dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
436 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
437 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
438 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
439 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
440 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
441 npar_info->min_tx_bw, npar_info->max_tx_bw,
442 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
443 npar_info->max_rx_mcast_mac_filters,
444 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
445 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
446 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
447 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
448 npar_info->max_remote_ipv6_addrs);
449
450 qlcnic_free_mbx_args(&cmd);
451 return err;
452}
453
454static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
455 struct qlcnic_cmd_args *cmd)
456{
457 adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
458 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
459 return 0;
460}
461
462static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
463 struct qlcnic_cmd_args *cmd)
464{
465 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
466 int i, num_vlans;
467 u16 *vlans;
468
469 if (sriov->allowed_vlans)
470 return 0;
471
472 sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
473 if (!sriov->any_vlan)
474 return 0;
475
476 sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
477 num_vlans = sriov->num_allowed_vlans;
478 sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
479 if (!sriov->allowed_vlans)
480 return -ENOMEM;
481
482 vlans = (u16 *)&cmd->rsp.arg[3];
483 for (i = 0; i < num_vlans; i++)
484 sriov->allowed_vlans[i] = vlans[i];
485
486 return 0;
487}
488
/* Fetch this VF's access-control settings from the PF over the back
 * channel and apply the advertised VLAN mode (guest-VLAN or PVID).
 * Returns 0 on success or a negative errno / firmware error code.
 */
static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		/* Lower two bits of rsp word 1 select the VLAN mode. */
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
518
/* VF driver bring-up: read vport limits and NIC info from firmware, fetch
 * the ACL from the PF, query port info, then program ring sizes and cache
 * the per-function attributes on the hardware context.  Returns 0 or a
 * negative errno.
 */
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_info nic_info;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		return err;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	/* Cache per-function attributes reported by firmware. */
	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}
552
/* Full VF setup sequence: interrupts, mailbox interrupt, SR-IOV state for
 * one (own) function, back-channel interrupt, CHANNEL_INIT handshake with
 * the PF, driver init, and finally netdev registration plus the periodic
 * device-state poll.  Each failure unwinds everything done so far via the
 * goto chain.  Returns 0 or a negative errno.
 */
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "83xx adapter do not support MSI interrupts\n");

	err = qlcnic_setup_intr(adapter, 1);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	/* A VF tracks exactly one function's worth of SR-IOV state. */
	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}
616
/* Poll the IDC device-state register (every 20 ms) until firmware reports
 * READY, giving up after QLC_BC_CMD_MAX_RETRY_CNT attempts.  Uses
 * adapter->fw_fail_cnt as the attempt counter (zeroed by the caller,
 * qlcnic_sriov_vf_init).  Returns 0 when ready, -EIO on timeout.
 */
static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}
630
/* Entry point for probing a VF: seed the idc/mailbox state, wait for the
 * device to become READY, run the full VF setup, and read the MAC address
 * (non-fatal on failure).  Returns 0 or a negative errno.
 */
int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	spin_lock_init(&ahw->mbx_lock);
	set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;	/* consumed by check_dev_ready() */
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	/* Best effort: a missing MAC is only warned about. */
	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}
660
661void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
662{
663 struct qlcnic_hardware_context *ahw = adapter->ahw;
664
665 ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
666 dev_info(&adapter->pdev->dev,
667 "HAL Version: %d Non Privileged SRIOV function\n",
668 ahw->fw_hal_version);
669 adapter->nic_ops = &qlcnic_sriov_vf_ops;
670 set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
671 return;
672}
673
/* Install the VF hardware-ops table and the 83xx register maps on the
 * hardware context.
 */
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
}
680
681static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
682{
683 u32 pay_size;
684
685 pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
686
687 if (pay_size)
688 pay_size = QLC_BC_PAYLOAD_SZ;
689 else
690 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
691
692 return pay_size;
693}
694
695int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
696{
697 struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
698 u8 i;
699
700 if (qlcnic_sriov_vf_check(adapter))
701 return 0;
702
703 for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
704 if (vf_info[i].pci_func == pci_func)
705 return i;
706 }
707
708 return -EINVAL;
709}
710
711static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
712{
713 *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
714 if (!*trans)
715 return -ENOMEM;
716
717 init_completion(&(*trans)->resp_cmpl);
718 return 0;
719}
720
721static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
722 u32 size)
723{
724 *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
725 if (!*hdr)
726 return -ENOMEM;
727
728 return 0;
729}
730
731static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
732{
733 const struct qlcnic_mailbox_metadata *mbx_tbl;
734 int i, size;
735
736 mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
737 size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
738
739 for (i = 0; i < size; i++) {
740 if (type == mbx_tbl[i].cmd) {
741 mbx->op_type = QLC_BC_CMD;
742 mbx->req.num = mbx_tbl[i].in_args;
743 mbx->rsp.num = mbx_tbl[i].out_args;
744 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
745 GFP_ATOMIC);
746 if (!mbx->req.arg)
747 return -ENOMEM;
748 mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
749 GFP_ATOMIC);
750 if (!mbx->rsp.arg) {
751 kfree(mbx->req.arg);
752 mbx->req.arg = NULL;
753 return -ENOMEM;
754 }
755 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
756 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
757 mbx->req.arg[0] = (type | (mbx->req.num << 16) |
758 (3 << 29));
759 return 0;
760 }
761 }
762 return -EINVAL;
763}
764
/* Prepare the fragment headers for a back-channel exchange.
 *
 * For a COMMAND the cmd's arg arrays become the transaction's payloads and
 * header arrays are allocated for both directions, sized by how many
 * QLC_BC_PAYLOAD_SZ fragments each payload needs.  For a RESPONSE the
 * mapping runs the other way: the already-received transaction payloads
 * are exposed through the cmd's arg arrays.  Every per-fragment header is
 * then stamped with the same command/sequence metadata.
 *
 * Returns 0 or -ENOMEM.  NOTE(review): on the second allocation failing,
 * req_hdr from the first allocation is not freed here -- presumably the
 * caller's cleanup (qlcnic_sriov_cleanup_transaction) handles it; confirm.
 */
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		/* Outgoing command: cmd args become the payloads. */
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		/* Stamp the request headers below. */
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		/* Response to a received command: expose trans payloads
		 * through the cmd args.
		 */
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;	/* 1-based on the wire */
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}
826
/* Free a back-channel transaction and its fragment-header arrays.
 * NULL-safe; the payloads are owned by the cmd args and freed elsewhere.
 */
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}
835
/* Detach a finished transaction from the VF's tracking state.
 * For a RESPONSE: unlink it from the active-receive list under the list
 * lock and report (1) whether more transactions remain queued.
 * For a COMMAND: spin (with sleeps) until the SEND bit can be taken, then
 * drop the VF's send_cmd reference.  Returns 0 or 1 (see above).
 */
static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		/* Serialize against the interrupt-side response handler,
		 * which also takes QLC_BC_VF_SEND.
		 */
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}
860
/* Queue @func on the back-channel transaction workqueue for @vf, unless
 * the VF is mid-FLR or the adapter needs a firmware reset (in which case
 * the work is silently dropped).
 */
static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	INIT_WORK(&vf->trans_work, func);
	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}
872
873static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
874{
875 struct completion *cmpl = &trans->resp_cmpl;
876
877 if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
878 trans->trans_state = QLC_END;
879 else
880 trans->trans_state = QLC_ABORT;
881
882 return;
883}
884
885static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
886 u8 type)
887{
888 if (type == QLC_BC_RESPONSE) {
889 trans->curr_rsp_frag++;
890 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
891 trans->trans_state = QLC_INIT;
892 else
893 trans->trans_state = QLC_END;
894 } else {
895 trans->curr_req_frag++;
896 if (trans->curr_req_frag < trans->req_hdr->num_frags)
897 trans->trans_state = QLC_INIT;
898 else
899 trans->trans_state = QLC_WAIT_FOR_RESP;
900 }
901}
902
/* Wait for the peer to signal "channel free" (completion posted by the
 * bc interrupt path).  On timeout the transaction is aborted; otherwise
 * the channel-busy bit is released and the next fragment is scheduled.
 */
static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}
917
918static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
919 u32 *hdr, u32 *pay, u32 size)
920{
921 struct qlcnic_hardware_context *ahw = adapter->ahw;
922 u32 fw_mbx;
923 u8 i, max = 2, hdr_size, j;
924
925 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
926 max = (size / sizeof(u32)) + hdr_size;
927
928 fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
929 for (i = 2, j = 0; j < hdr_size; i++, j++)
930 *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
931 for (; j < max; i++, j++)
932 *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
933}
934
935static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
936{
937 int ret = -EBUSY;
938 u32 timeout = 10000;
939
940 do {
941 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
942 ret = 0;
943 break;
944 }
945 mdelay(1);
946 } while (--timeout);
947
948 return ret;
949}
950
/* Claim the back channel and post the current fragment of @trans (request
 * or response, per @type) via the mailbox.  Fragment payload size is in
 * u32 units when handed to qlcnic_sriov_post_bc_msg.  Returns -EBUSY if
 * the channel cannot be claimed, else the post's return code.
 */
static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size, hdr_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		/* Pointer arithmetic on the hdr/payload element types
		 * selects the current fragment.
		 */
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}
982
/* Drive the back-channel transaction state machine until it finishes:
 * QLC_INIT posts the current fragment, QLC_WAIT_FOR_CHANNEL_FREE waits
 * for the peer to drain it (and advances to the next fragment),
 * QLC_WAIT_FOR_RESP blocks for the response, and QLC_END/QLC_ABORT exit
 * the loop.  An FLR or pending firmware reset forces an abort.  Returns
 * 0 on QLC_END, -EIO otherwise.
 */
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			/* Release the channel we may still be holding. */
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}
1022
/* Send a back-channel command transaction to PCI function @pci_func.
 * Refuses to send unless the channel is established (QLC_BC_VF_STATE),
 * except a VF may always send CHANNEL_INIT.  The VF's send_cmd_lock
 * serializes outgoing commands; the transaction is cleared from the send
 * slot before returning.  Returns 0 or -EIO.
 */
static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		/* VF bootstrap exception: CHANNEL_INIT opens the channel. */
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}
1051
1052static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1053 struct qlcnic_bc_trans *trans,
1054 struct qlcnic_cmd_args *cmd)
1055{
1056#ifdef CONFIG_QLCNIC_SRIOV
1057 if (qlcnic_sriov_pf_check(adapter)) {
1058 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1059 return;
1060 }
1061#endif
1062 cmd->rsp.arg[0] |= (0x9 << 25);
1063 return;
1064}
1065
/* Workqueue handler: take the oldest transaction from the VF's active
 * list, build and send its response, then free it.  If more transactions
 * remain queued, reschedule this worker.  Bails early on pending firmware
 * reset or FLR.
 */
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter  = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	/* NOTE(review): rcv_act.wait_list is read here without taking
	 * rcv_act.lock and without an emptiness check; presumably safe
	 * because this worker only runs while count >= 1 -- confirm.
	 */
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);
	adapter = vf->adapter;

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	/* req == 1 when more transactions are still queued. */
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}
1101
/* Consume one response fragment for the VF's outstanding send.
 *
 * QLC_BC_VF_SEND serializes access to vf->send_cmd: if another context
 * owns the bit this event is dropped. Stale responses (no transaction
 * pending, or sequence-id mismatch) are ignored. After the final
 * fragment has been pulled from the mailbox the waiting sender is
 * woken via resp_cmpl.
 */
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	/* More fragments to come? Keep waiting. */
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}
1133
1134int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1135 struct qlcnic_vf_info *vf,
1136 struct qlcnic_bc_trans *trans)
1137{
1138 struct qlcnic_trans_list *t_list = &vf->rcv_act;
1139
1140 t_list->count++;
1141 list_add_tail(&trans->list, &t_list->wait_list);
1142 if (t_list->count == 1)
1143 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1144 qlcnic_sriov_process_bc_cmd);
1145 return 0;
1146}
1147
1148static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1149 struct qlcnic_vf_info *vf,
1150 struct qlcnic_bc_trans *trans)
1151{
1152 struct qlcnic_trans_list *t_list = &vf->rcv_act;
1153
1154 spin_lock(&t_list->lock);
1155
1156 __qlcnic_sriov_add_act_list(sriov, vf, trans);
1157
1158 spin_unlock(&t_list->lock);
1159 return 0;
1160}
1161
/* Handle a non-first fragment of a multi-fragment command.
 *
 * Looks up the partially-received transaction on the VF's pending list
 * by sequence id, pulls the new fragment from the mailbox, and — once
 * all fragments have arrived — moves the transaction to the active
 * list for processing. Unknown sequence ids are silently dropped.
 */
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		/* Last fragment: remove from pending under the lock and
		 * mark for activation outside it.
		 */
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	/* On queueing failure the transaction is freed here */
	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);

	return;
}
1205
/* Entry point for a back-channel command event.
 *
 * First fragments allocate a new transaction, pull the payload from
 * the mailbox, and either queue it for processing (single-fragment
 * command complete) or park it on the pending list to await the rest.
 * Later fragments are routed to qlcnic_sriov_handle_pending_trans().
 */
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	/* Before the channel is up, only CHANNEL_INIT is accepted */
	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	/* Soft-FLR path takes over ownership of the transaction */
	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		/* More fragments expected: park on the pending list */
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}
1278
1279static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1280 struct qlcnic_vf_info *vf)
1281{
1282 struct qlcnic_bc_hdr hdr;
1283 u32 *ptr = (u32 *)&hdr;
1284 u8 msg_type, i;
1285
1286 for (i = 2; i < 6; i++)
1287 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1288 msg_type = hdr.msg_type;
1289
1290 switch (msg_type) {
1291 case QLC_BC_COMMAND:
1292 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1293 break;
1294 case QLC_BC_RESPONSE:
1295 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1296 break;
1297 }
1298}
1299
1300static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1301 struct qlcnic_vf_info *vf)
1302{
1303 struct qlcnic_adapter *adapter = vf->adapter;
1304
1305 if (qlcnic_sriov_pf_check(adapter))
1306 qlcnic_sriov_pf_handle_flr(sriov, vf);
1307 else
1308 dev_err(&adapter->pdev->dev,
1309 "Invalid event to VF. VF should not get FLR event\n");
1310}
1311
/* Decode a back-channel interrupt event and fan it out.
 *
 * The event word identifies the peer function; the matching VF slot is
 * looked up and the event is classified as channel-free completion,
 * FLR, or a mailbox message. FLR handling terminates the event.
 */
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}
1340
1341int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1342{
1343 struct qlcnic_cmd_args cmd;
1344 int err;
1345
1346 if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1347 return 0;
1348
1349 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1350 return -ENOMEM;
1351
1352 if (enable)
1353 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1354
1355 err = qlcnic_83xx_mbx_op(adapter, &cmd);
1356
1357 if (err != QLCNIC_RCODE_SUCCESS) {
1358 dev_err(&adapter->pdev->dev,
1359 "Failed to %s bc events, err=%d\n",
1360 (enable ? "enable" : "disable"), err);
1361 }
1362
1363 qlcnic_free_mbx_args(&cmd);
1364 return err;
1365}
1366
1367static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1368 struct qlcnic_bc_trans *trans)
1369{
1370 u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1371 u32 state;
1372
1373 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1374 if (state == QLC_83XX_IDC_DEV_READY) {
1375 msleep(20);
1376 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1377 trans->trans_state = QLC_INIT;
1378 if (++adapter->fw_fail_cnt > max)
1379 return -EIO;
1380 else
1381 return 0;
1382 }
1383
1384 return -EIO;
1385}
1386
/* VF-side mailbox operation: wrap @cmd in a back-channel transaction,
 * send it to the PF, and translate the response status.
 *
 * CHANNEL_INIT commands that time out are retried (the PF may be slow
 * to respond after an adapter reset). A final timeout marks the
 * mailbox not-ready and requests a firmware reset.
 */
static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		return rsp;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After adapter reset PF driver may take some time to
		 * respond to VF's request. Retry request till maximum retries.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		/* Propagate the FW error code; force it non-zero so the
		 * caller still sees a failure.
		 */
		rsp = mbx_err_code;
		if (!rsp)
			rsp = 1;
		dev_err(dev,
			"MBX command 0x%x failed with err:0x%x for VF %d\n",
			opcode, mbx_err_code, func);
	}

err_out:
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);
	return rsp;
}
1457
1458int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1459{
1460 struct qlcnic_cmd_args cmd;
1461 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1462 int ret;
1463
1464 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1465 return -ENOMEM;
1466
1467 ret = qlcnic_issue_cmd(adapter, &cmd);
1468 if (ret) {
1469 dev_err(&adapter->pdev->dev,
1470 "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1471 ret);
1472 goto out;
1473 }
1474
1475 cmd_op = (cmd.rsp.arg[0] & 0xff);
1476 if (cmd.rsp.arg[0] >> 25 == 2)
1477 return 2;
1478 if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1479 set_bit(QLC_BC_VF_STATE, &vf->state);
1480 else
1481 clear_bit(QLC_BC_VF_STATE, &vf->state);
1482
1483out:
1484 qlcnic_free_mbx_args(&cmd);
1485 return ret;
1486}
1487
/* Drain the deferred VF multicast list and program each address into
 * the firmware with the given VLAN.
 *
 * Entries are first moved to a private list under the netdev address
 * lock so qlcnic_nic_add_mac() (which may sleep) runs unlocked.
 * NOTE(review): list_move() inserts at the head of tmp_list, so the
 * addresses are programmed in reverse arrival order — presumably
 * harmless, but confirm if ordering ever matters.
 */
void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_mac_list_s *cur;
	struct list_head *head, tmp_list;

	INIT_LIST_HEAD(&tmp_list);
	head = &adapter->vf_mc_list;
	netif_addr_lock_bh(netdev);

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		list_move(&cur->list, &tmp_list);
	}

	netif_addr_unlock_bh(netdev);

	while (!list_empty(&tmp_list)) {
		cur = list_entry((&tmp_list)->next,
				 struct qlcnic_mac_list_s, list);
		qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
		list_del(&cur->list);
		kfree(cur);
	}
}
1513
1514void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1515{
1516 struct list_head *head = &bc->async_list;
1517 struct qlcnic_async_work_list *entry;
1518
1519 while (!list_empty(head)) {
1520 entry = list_entry(head->next, struct qlcnic_async_work_list,
1521 list);
1522 cancel_work_sync(&entry->work);
1523 list_del(&entry->list);
1524 kfree(entry);
1525 }
1526}
1527
1528static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1529{
1530 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1531 u16 vlan;
1532
1533 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1534 return;
1535
1536 vlan = adapter->ahw->sriov->vlan;
1537 __qlcnic_set_multi(netdev, vlan);
1538}
1539
1540static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1541{
1542 struct qlcnic_async_work_list *entry;
1543 struct net_device *netdev;
1544
1545 entry = container_of(work, struct qlcnic_async_work_list, work);
1546 netdev = (struct net_device *)entry->ptr;
1547
1548 qlcnic_sriov_vf_set_multi(netdev);
1549 return;
1550}
1551
1552static struct qlcnic_async_work_list *
1553qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1554{
1555 struct list_head *node;
1556 struct qlcnic_async_work_list *entry = NULL;
1557 u8 empty = 0;
1558
1559 list_for_each(node, &bc->async_list) {
1560 entry = list_entry(node, struct qlcnic_async_work_list, list);
1561 if (!work_pending(&entry->work)) {
1562 empty = 1;
1563 break;
1564 }
1565 }
1566
1567 if (!empty) {
1568 entry = kzalloc(sizeof(struct qlcnic_async_work_list),
1569 GFP_ATOMIC);
1570 if (entry == NULL)
1571 return NULL;
1572 list_add_tail(&entry->list, &bc->async_list);
1573 }
1574
1575 return entry;
1576}
1577
1578static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1579 work_func_t func, void *data)
1580{
1581 struct qlcnic_async_work_list *entry = NULL;
1582
1583 entry = qlcnic_sriov_get_free_node_async_work(bc);
1584 if (!entry)
1585 return;
1586
1587 entry->ptr = data;
1588 INIT_WORK(&entry->work, func);
1589 queue_work(bc->bc_async_wq, &entry->work);
1590}
1591
1592void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1593{
1594
1595 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1596 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1597
1598 if (adapter->need_fw_reset)
1599 return;
1600
1601 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1602 netdev);
1603}
1604
/* Bring the VF driver back up after a firmware reset: re-arm the
 * mailbox, re-enable back-channel interrupts, re-establish the
 * channel with the PF, and re-init driver state.
 *
 * On failure, already-acquired resources are released in reverse
 * order via the goto chain. Returns 0 or a negative errno.
 */
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
	qlcnic_83xx_enable_mbx_intrpt(adapter);

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}
1633
1634static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1635{
1636 struct net_device *netdev = adapter->netdev;
1637
1638 if (netif_running(netdev)) {
1639 if (!qlcnic_up(adapter, netdev))
1640 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1641 }
1642
1643 netif_device_attach(netdev);
1644}
1645
/* Quiesce the VF for a reset: mask the mailbox interrupt, detach and
 * stop the netdev, and clear the MSI-X interrupt table entries (last
 * vector is reserved for the mailbox, hence num_msix - 1).
 */
static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
	struct net_device *netdev = adapter->netdev;
	u8 i, max_ints = ahw->num_msix - 1;

	/* Interrupts must be masked before tearing down the datapath */
	qlcnic_83xx_disable_mbx_intr(adapter);
	netif_device_detach(netdev);
	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	for (i = 0; i < max_ints; i++) {
		intr_tbl[i].id = i;
		intr_tbl[i].enabled = 0;
		intr_tbl[i].src = 0;
	}
	ahw->reset_context = 0;
}
1665
/* IDC READY transition handler: if we arrived here from NEED_RESET or
 * INIT, the firmware has just been reset — reinitialize the VF driver
 * and re-attach the interface. Failures are logged along with the
 * current IDC state; always returns 0 so polling continues.
 */
static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
			qlcnic_sriov_vf_attach(adapter);
			adapter->fw_fail_cnt = 0;
			dev_info(dev,
				 "%s: Reinitalization of VF 0x%x done after FW reset\n",
				 __func__, func);
		} else {
			dev_err(dev,
				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
				__func__, func);
			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
			dev_info(dev, "Current state 0x%x after FW reset\n",
				 state);
		}
	}

	return 0;
}
1694
/* Handle a context-reset request while the device stays READY.
 *
 * Policy, driven by reset_ctx_cnt:
 *   < 3             — defer: request a FW reset and wait to see if the
 *                     firmware is actually hung.
 *   > fail threshold — give up: detach the VF and return -EIO.
 *   otherwise        — detach, reinit the driver, and re-attach.
 */
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &idc->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if number of resets exceed the threshold.
	 * If it exceeds the threshold just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &idc->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		/* Refresh TX watchdog timestamp and clear failure counters */
		adapter->netdev->trans_start = jiffies;
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}
1753
/* IDC READY state: either we just transitioned into READY (handle a
 * completed FW reset) or we were already READY and a context reset was
 * requested. Always clears the RESETTING flag before returning.
 */
static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int ret = 0;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
	else if (ahw->reset_context)
		ret = qlcnic_sriov_vf_handle_context_reset(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return ret;
}
1767
/* IDC FAILED state: detach the VF (if it was previously READY), mark
 * the module unloaded so polling stops, and report -EIO.
 */
static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
		qlcnic_sriov_vf_detach(adapter);

	/* Stop the dev-state poll loop (see qlcnic_sriov_vf_poll_dev_state) */
	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return -EIO;
}
1780
1781static int
1782qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1783{
1784 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1785
1786 dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1787 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1788 set_bit(__QLCNIC_RESETTING, &adapter->state);
1789 adapter->tx_timeo_cnt = 0;
1790 adapter->reset_ctx_cnt = 0;
1791 clear_bit(QLC_83XX_MBX_READY, &idc->status);
1792 qlcnic_sriov_vf_detach(adapter);
1793 }
1794
1795 return 0;
1796}
1797
1798static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1799{
1800 struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1801 u8 func = adapter->ahw->pci_func;
1802
1803 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1804 dev_err(&adapter->pdev->dev,
1805 "Firmware hang detected by VF 0x%x\n", func);
1806 set_bit(__QLCNIC_RESETTING, &adapter->state);
1807 adapter->tx_timeo_cnt = 0;
1808 adapter->reset_ctx_cnt = 0;
1809 clear_bit(QLC_83XX_MBX_READY, &idc->status);
1810 qlcnic_sriov_vf_detach(adapter);
1811 }
1812 return 0;
1813}
1814
/* Catch-all for unrecognized IDC states: log and keep polling. */
static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
	return 0;
}
1820
/* Delayed-work poll loop: read the current IDC device state, dispatch
 * to the matching state handler, and reschedule itself after
 * idc->delay as long as the handler succeeded and the module is still
 * loaded. QUISCENT requires no action while waiting.
 */
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
	struct qlcnic_adapter *adapter;
	struct qlc_83xx_idc *idc;
	int ret = 0;

	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
	idc = &adapter->ahw->idc;
	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);

	switch (idc->curr_state) {
	case QLC_83XX_IDC_DEV_READY:
		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_RESET:
	case QLC_83XX_IDC_DEV_INIT:
		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_FAILED:
		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_QUISCENT:
		break;
	default:
		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
	}

	idc->prev_state = idc->curr_state;
	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
				     idc->delay);
}
1856
/* Stop the dev-state poll loop: wait for any in-flight reset handling
 * to finish (RESETTING acts as a mutex here), clear MODULE_LOADED so
 * the work does not reschedule itself, then cancel it synchronously.
 */
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}
1866
1867static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
1868 u16 vid, u8 enable)
1869{
1870 u16 vlan = sriov->vlan;
1871 u8 allowed = 0;
1872 int i;
1873
1874 if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1875 return -EINVAL;
1876
1877 if (enable) {
1878 if (vlan)
1879 return -EINVAL;
1880
1881 if (sriov->any_vlan) {
1882 for (i = 0; i < sriov->num_allowed_vlans; i++) {
1883 if (sriov->allowed_vlans[i] == vid)
1884 allowed = 1;
1885 }
1886
1887 if (!allowed)
1888 return -EINVAL;
1889 }
1890 } else {
1891 if (!vlan || vlan != vid)
1892 return -EINVAL;
1893 }
1894
1895 return 0;
1896}
1897
/* Configure (or remove) the VF's guest VLAN via the back channel.
 *
 * vid 0 is a no-op. On success the cached MAC filter list is flushed,
 * the new VLAN is recorded, and the multicast list is re-programmed
 * with it. Returns 0 or a negative errno.
 */
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret;

	if (vid == 0)
		return 0;

	ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	/* arg[1]: bit 0 = enable/disable, bits 16-31 = VLAN id */
	cmd.req.arg[1] = (enable & 1) | vid << 16;

	/* Flush pending async multicast work before reconfiguring */
	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		qlcnic_free_mac_list(adapter);

		if (enable)
			sriov->vlan = vid;
		else
			sriov->vlan = 0;

		qlcnic_sriov_vf_set_multi(adapter->netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}
1938
1939static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
1940{
1941 struct list_head *head = &adapter->mac_list;
1942 struct qlcnic_mac_list_s *cur;
1943 u16 vlan;
1944
1945 vlan = adapter->ahw->sriov->vlan;
1946
1947 while (!list_empty(head)) {
1948 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
1949 qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
1950 vlan, QLCNIC_MAC_DEL);
1951 list_del(&cur->list);
1952 kfree(cur);
1953 }
1954}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 000000000000..c81be2da119b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,1780 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic_sriov.h"
9#include "qlcnic.h"
10#include <linux/types.h>
11
/* Each VF is allowed at most this many unicast MAC filters */
#define QLCNIC_SRIOV_VF_MAX_MAC 1
/* Per-VF TX rate-limit bounds — presumably Mbps; TODO confirm against
 * the firmware interface specification.
 */
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999

static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);

/* PF-side handler for one back-channel command from a VF */
struct qlcnic_sriov_cmd_handler {
	int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
};

/* Maps a firmware command code to its PF-side handler */
struct qlcnic_sriov_fw_cmd_handler {
	u32 cmd;
	int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
};
26
/* Program a vport's resource limits into the firmware via
 * SET_NIC_INFO. Each mailbox argument packs two 16-bit fields
 * (low | high << 16) per the FW register layout.
 * Returns 0 or a negative errno.
 */
static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
					  struct qlcnic_info *npar_info,
					  u16 vport_id)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
		return -ENOMEM;

	/* bit 0 = "set" operation; upper half carries the vport id */
	cmd.req.arg[1] = (vport_id << 16) | 0x1;
	cmd.req.arg[2] = npar_info->bit_offsets;
	cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
	cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
	cmd.req.arg[4] = npar_info->max_tx_mac_filters;
	cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
	cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
			 (npar_info->max_rx_ip_addr << 16);
	cmd.req.arg[6] = npar_info->max_rx_lro_flow |
			 (npar_info->max_rx_status_rings << 16);
	cmd.req.arg[7] = npar_info->max_rx_buf_rings |
			 (npar_info->max_rx_ques << 16);
	cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
	cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
	cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to set vport info, err=%d\n", err);

	qlcnic_free_mbx_args(&cmd);
	return err;
}
61
62static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
63 struct qlcnic_info *info, u16 func)
64{
65 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
66 struct qlcnic_resources *res = &sriov->ff_max;
67 u32 temp, num_vf_macs, num_vfs, max;
68 int ret = -EIO, vpid, id;
69 struct qlcnic_vport *vp;
70
71 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
72 if (vpid < 0)
73 return -EINVAL;
74
75 num_vfs = sriov->num_vfs;
76 max = num_vfs + 1;
77 info->bit_offsets = 0xffff;
78 info->max_tx_ques = res->num_tx_queues / max;
79 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
80 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
81
82 if (adapter->ahw->pci_func == func) {
83 temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
84 info->max_rx_ucast_mac_filters = temp;
85 temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
86 info->max_tx_mac_filters = temp;
87 info->min_tx_bw = 0;
88 info->max_tx_bw = MAX_BW;
89 } else {
90 id = qlcnic_sriov_func_to_index(adapter, func);
91 if (id < 0)
92 return id;
93 vp = sriov->vf_info[id].vp;
94 info->min_tx_bw = vp->min_tx_bw;
95 info->max_tx_bw = vp->max_tx_bw;
96 info->max_rx_ucast_mac_filters = num_vf_macs;
97 info->max_tx_mac_filters = num_vf_macs;
98 }
99
100 info->max_rx_ip_addr = res->num_destip / max;
101 info->max_rx_status_rings = res->num_rx_status_rings / max;
102 info->max_rx_buf_rings = res->num_rx_buf_rings / max;
103 info->max_rx_ques = res->num_rx_queues / max;
104 info->max_rx_lro_flow = res->num_lro_flows_supported / max;
105 info->max_tx_vlan_keys = res->num_txvlan_keys;
106 info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
107 info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
108
109 ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
110 if (ret)
111 return ret;
112
113 return 0;
114}
115
116static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
117 struct qlcnic_info *info)
118{
119 struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
120
121 ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
122 ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
123 ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
124 ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
125 ff_max->num_rx_queues = info->max_rx_ques;
126 ff_max->num_tx_queues = info->max_tx_ques;
127 ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
128 ff_max->num_destip = info->max_rx_ip_addr;
129 ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
130 ff_max->num_rx_status_rings = info->max_rx_status_rings;
131 ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
132 ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
133}
134
/* Query the firmware (GET_NIC_INFO, mode 2) for PF-level fan-out
 * maximums, unpack the 16-bit fields from the response registers,
 * cache them as the ff_max pool, and log a summary.
 * Returns 0 or a negative errno.
 */
static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
				    struct qlcnic_info *npar_info)
{
	int err;
	struct qlcnic_cmd_args cmd;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
		return -ENOMEM;

	/* 0x2 selects the PF-info flavor of GET_NIC_INFO */
	cmd.req.arg[1] = 0x2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get PF info, err=%d\n", err);
		goto out;
	}

	npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
	npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
	npar_info->max_vports = MSW(cmd.rsp.arg[2]);
	npar_info->max_tx_ques =  LSW(cmd.rsp.arg[3]);
	npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
	npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
	npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
	npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
	npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
	npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
	npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
	npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
	npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);

	qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
	dev_info(&adapter->pdev->dev,
		 "\n\ttotal_pf: %d,\n"
		 "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
		 npar_info->total_pf, npar_info->total_rss_engines,
		 npar_info->max_vports, npar_info->max_tx_ques,
		 npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

out:
	qlcnic_free_mbx_args(&cmd);
	return err;
}
191
192static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
193 u8 func)
194{
195 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
196 struct qlcnic_vport *vp;
197 int index;
198
199 if (adapter->ahw->pci_func == func) {
200 sriov->vp_handle = 0;
201 } else {
202 index = qlcnic_sriov_func_to_index(adapter, func);
203 if (index < 0)
204 return;
205 vp = sriov->vf_info[index].vp;
206 vp->handle = 0;
207 }
208}
209
210static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
211 u16 vport_handle, u8 func)
212{
213 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
214 struct qlcnic_vport *vp;
215 int index;
216
217 if (adapter->ahw->pci_func == func) {
218 sriov->vp_handle = vport_handle;
219 } else {
220 index = qlcnic_sriov_func_to_index(adapter, func);
221 if (index < 0)
222 return;
223 vp = sriov->vf_info[index].vp;
224 vp->handle = vport_handle;
225 }
226}
227
228static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
229 u8 func)
230{
231 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
232 struct qlcnic_vf_info *vf_info;
233 int index;
234
235 if (adapter->ahw->pci_func == func) {
236 return sriov->vp_handle;
237 } else {
238 index = qlcnic_sriov_func_to_index(adapter, func);
239 if (index >= 0) {
240 vf_info = &sriov->vf_info[index];
241 return vf_info->vp->handle;
242 }
243 }
244
245 return -EINVAL;
246}
247
/* Create (@flag != 0) or destroy (@flag == 0) the firmware vport backing
 * pci function @func.  On create, the handle the firmware returns is
 * cached via qlcnic_sriov_pf_set_vport_handle(); on destroy the cached
 * handle is cleared.  Returns 0 on success or a negative errno.
 */
static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
					u8 flag, u16 func)
{
	struct qlcnic_cmd_args cmd;
	int ret;
	int vpid;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
		return -ENOMEM;

	if (flag) {
		/* Create: pci func in bits 15:8, op bit (bit 0) clear. */
		cmd.req.arg[3] = func << 8;
	} else {
		/* Destroy: vport handle in bits 23:8, op bit set. */
		vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
		if (vpid < 0) {
			ret = -EINVAL;
			goto out;
		}
		cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
	}

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed %s vport, err %d for func 0x%x\n",
			(flag ? "enable" : "disable"), ret, func);
		goto out;
	}

	if (flag) {
		/* Firmware returns the new vport handle in rsp.arg[2]. */
		vpid = cmd.rsp.arg[2] & 0xffff;
		qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
	} else {
		qlcnic_sriov_pf_reset_vport_handle(adapter, func);
	}

out:
	qlcnic_free_mbx_args(&cmd);
	return ret;
}
288
/* Turn VLAN filtering on/off through the SET_NIC_INFO mailbox command.
 * Sub-op 0x4 in arg[1] selects VLAN-filter configuration; BIT_16
 * enables filtering.  Returns 0 or a negative errno.
 */
static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
					      u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = 0x4;
	if (enable)
		cmd.req.arg[1] |= BIT_16;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to configure VLAN filtering, err=%d\n", err);

	qlcnic_free_mbx_args(&cmd);
	return err;
}
311
312static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
313 u8 func, u8 enable)
314{
315 struct qlcnic_cmd_args cmd;
316 int err = -EIO;
317
318 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
319 return -ENOMEM;
320
321 cmd.req.arg[0] |= (3 << 29);
322 cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
323 if (enable)
324 cmd.req.arg[1] |= BIT_0;
325
326 err = qlcnic_issue_cmd(adapter, &cmd);
327
328 if (err != QLCNIC_RCODE_SUCCESS) {
329 dev_err(&adapter->pdev->dev,
330 "Failed to enable sriov eswitch%d\n", err);
331 err = -EIO;
332 }
333
334 qlcnic_free_mbx_args(&cmd);
335 return err;
336}
337
338static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
339{
340 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
341 struct qlcnic_back_channel *bc = &sriov->bc;
342 int i;
343
344 for (i = 0; i < sriov->num_vfs; i++)
345 cancel_work_sync(&sriov->vf_info[i].flr_work);
346
347 destroy_workqueue(bc->bc_flr_wq);
348}
349
350static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
351{
352 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
353 struct workqueue_struct *wq;
354
355 wq = create_singlethread_workqueue("qlcnic-flr");
356 if (wq == NULL) {
357 dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
358 return -ENOMEM;
359 }
360
361 bc->bc_flr_wq = wq;
362 return 0;
363}
364
/* Tear down PF-side SR-IOV state in the reverse order of setup: FLR
 * queue, back-channel interrupt, PF vport, eSwitch, VLAN filtering,
 * then the common SR-IOV cleanup.  Finally drop back to management
 * function mode.  No-op if SR-IOV was never enabled.
 */
void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
{
	u8 func = adapter->ahw->pci_func;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_pf_del_flr_queue(adapter);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	qlcnic_sriov_pf_config_vport(adapter, 0, func);
	qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
	qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
	adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
	clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
}
381
382void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
383{
384 if (!qlcnic_sriov_pf_check(adapter))
385 return;
386
387 if (!qlcnic_sriov_enable_check(adapter))
388 return;
389
390 pci_disable_sriov(adapter->pdev);
391 netdev_info(adapter->netdev,
392 "SR-IOV is disabled successfully on port %d\n",
393 adapter->portnum);
394}
395
/* Disable SR-IOV and return the adapter to its default operating mode.
 * The netdev is quiesced around the mode switch and brought back up
 * afterwards if it was running.  Returns 0 or -EIO.
 */
static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_sriov_pf_disable(adapter);

	qlcnic_sriov_pf_cleanup(adapter);

	/* After disabling SR-IOV, re-initialize the driver in default
	 * mode and configure the opmode based on the function's op_mode.
	 */
	if (qlcnic_83xx_configure_opmode(adapter))
		return -EIO;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);

	return 0;
}
418
/* Bring up PF-side SR-IOV: VLAN filtering, eSwitch, the PF's own vport,
 * resource limits, and the back-channel interrupt.  Each failure path
 * unwinds everything set up before it via the goto chain below.
 * Returns 0 on success or a negative errno.
 */
static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info, pf_info, vp_info;
	int err;
	u8 func = ahw->pci_func;

	if (!qlcnic_sriov_enable_check(adapter))
		return 0;

	err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
	if (err)
		goto disable_vlan_filtering;

	err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
	if (err)
		goto disable_eswitch;

	err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
	if (err)
		goto delete_vport;

	err = qlcnic_get_nic_info(adapter, &nic_info, func);
	if (err)
		goto delete_vport;

	err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
	if (err)
		goto delete_vport;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto delete_vport;

	/* Cache the hardware parameters reported for this function. */
	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->capabilities = nic_info.capabilities;
	ahw->nic_mode = QLC_83XX_SRIOV_MODE;
	return err;

delete_vport:
	qlcnic_sriov_pf_config_vport(adapter, 0, func);

disable_eswitch:
	qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);

disable_vlan_filtering:
	qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);

	return err;
}
475
476static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
477{
478 int err;
479
480 if (!qlcnic_sriov_enable_check(adapter))
481 return 0;
482
483 err = pci_enable_sriov(adapter->pdev, num_vfs);
484 if (err)
485 qlcnic_sriov_pf_cleanup(adapter);
486
487 return err;
488}
489
/* Core SR-IOV enable sequence: mark the adapter as SR-IOV PF, allocate
 * the common SR-IOV state, create the FLR workqueue, initialize the PF
 * side, then enable the VFs.  Each failure unwinds the earlier steps
 * via the goto chain.  Returns 0 on success or a negative errno.
 */
static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
				     int num_vfs)
{
	int err = 0;

	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
	adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;

	err = qlcnic_sriov_init(adapter, num_vfs);
	if (err)
		goto clear_op_mode;

	err = qlcnic_sriov_pf_create_flr_queue(adapter);
	if (err)
		goto sriov_cleanup;

	err = qlcnic_sriov_pf_init(adapter);
	if (err)
		goto del_flr_queue;

	/* On failure qlcnic_sriov_pf_enable() already performed full
	 * cleanup, so no unwinding is needed here.
	 */
	err = qlcnic_sriov_pf_enable(adapter, num_vfs);
	return err;

del_flr_queue:
	qlcnic_sriov_pf_del_flr_queue(adapter);

sriov_cleanup:
	__qlcnic_sriov_cleanup(adapter);

clear_op_mode:
	clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
	adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
	return err;
}
524
/* Enable SR-IOV with @num_vfs VFs.  Requires MSI-X; the netdev is
 * quiesced across the mode change.  Returns the number of VFs enabled
 * on success, or -EIO on failure.
 */
static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
		netdev_err(netdev,
			   "SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
		return -EIO;
	}

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
	if (err) {
		netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
			    adapter->portnum);

		err = -EIO;
		/* NOTE(review): if re-configuring the opmode also fails we
		 * return without bringing the netdev back up — presumably
		 * intentional since the adapter is in an unusable state.
		 */
		if (qlcnic_83xx_configure_opmode(adapter))
			goto error;
	} else {
		netdev_info(netdev,
			    "SR-IOV is enabled successfully on port %d\n",
			    adapter->portnum);
		/* Return number of vfs enabled */
		err = num_vfs;
	}
	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);

error:
	return err;
}
560
561int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
562{
563 struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
564 int err;
565
566 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
567 return -EBUSY;
568
569 if (num_vfs == 0)
570 err = qlcnic_pci_sriov_disable(adapter);
571 else
572 err = qlcnic_pci_sriov_enable(adapter, num_vfs);
573
574 clear_bit(__QLCNIC_RESETTING, &adapter->state);
575 return err;
576}
577
/* Program the VF's ACL through SET_NIC_INFO.  Sub-op 0x3 with the pci
 * func in bits 31:16 of arg[1]; in PVID mode the port VLAN id is also
 * pushed down (BIT_6 in arg[2], vlan in arg[3]).  Returns 0 or a
 * negative errno.
 */
static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_vport *vp;
	int err, id;

	id = qlcnic_sriov_func_to_index(adapter, func);
	if (id < 0)
		return id;

	vp = adapter->ahw->sriov->vf_info[id].vp;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = 0x3 | func << 16;
	if (vp->vlan_mode == QLC_PVID_MODE) {
		cmd.req.arg[2] |= BIT_6;
		cmd.req.arg[3] |= vp->vlan << 8;
	}

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
			err);

	qlcnic_free_mbx_args(&cmd);
	return err;
}
607
608static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
609 u16 func)
610{
611 struct qlcnic_info defvp_info;
612 int err;
613
614 err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
615 if (err)
616 return -EIO;
617
618 err = qlcnic_sriov_set_vf_acl(adapter, func);
619 if (err)
620 return err;
621
622 return 0;
623}
624
/* Handle a VF back-channel CHANNEL_INIT / CHANNEL_TERM request.  INIT
 * creates the VF's vport and applies its vport config (rolling back the
 * vport on config failure); TERM destroys the vport.  The response
 * status is encoded in bits 26:25 of rsp.arg[0]: 1 = success, 2 = fail.
 */
static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
					   struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	int err;
	u16 func = vf->pci_func;

	/* Echo the command op back with the "response" marker (bit 16). */
	cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
	cmd->rsp.arg[0] |= (1 << 16);

	if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
		err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
		if (!err) {
			err = qlcnic_sriov_set_vf_vport_info(adapter, func);
			if (err)
				qlcnic_sriov_pf_config_vport(adapter, 0, func);
		}
	} else {
		err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
	}

	if (err)
		goto err_out;

	cmd->rsp.arg[0] |= (1 << 25);

	/* Track whether the VF channel is currently established. */
	if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
		set_bit(QLC_BC_VF_STATE, &vf->state);
	else
		clear_bit(QLC_BC_VF_STATE, &vf->state);

	return err;

err_out:
	cmd->rsp.arg[0] |= (2 << 25);
	return err;
}
663
/* Add or delete the VF's default MAC (optionally VLAN-tagged) in the
 * firmware CAM via CONFIG_MAC_VLAN, addressed to the VF's vport.  When
 * @vlan is non-zero the op is promoted to its VLAN-aware equivalent.
 * Returns 0 or a negative errno.
 */
static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
				       struct qlcnic_vport *vp,
				       u16 func, u16 vlan, u8 op)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_macvlan_mbx mv;
	u8 *addr;
	int err;
	u32 *buf;
	int vpid;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
		return -ENOMEM;

	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
	if (vpid < 0) {
		err = -EINVAL;
		goto out;
	}

	/* A VLAN-tagged entry requires the MAC+VLAN variant of the op. */
	if (vlan)
		op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
		      QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);

	/* arg[1]: op code, mode bits, vport handle; BIT_31 = handle valid. */
	cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
	cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;

	addr = vp->mac;
	mv.vlan = vlan;
	mv.mac_addr0 = addr[0];
	mv.mac_addr1 = addr[1];
	mv.mac_addr2 = addr[2];
	mv.mac_addr3 = addr[3];
	mv.mac_addr4 = addr[4];
	mv.mac_addr5 = addr[5];
	buf = &cmd.req.arg[2];
	memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));

	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err)
		dev_err(&adapter->pdev->dev,
			"MAC-VLAN %s to CAM failed, err=%d.\n",
			((op == 1) ? "add " : "delete "), err);

out:
	qlcnic_free_mbx_args(&cmd);
	return err;
}
713
714static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
715{
716 if ((cmd->req.arg[0] >> 29) != 0x3)
717 return -EINVAL;
718
719 return 0;
720}
721
/* Handle a VF's CREATE_RX_CTX request: validate it, stamp it with the
 * VF's vport handle and forward to firmware.  On success the returned
 * context id is recorded and the VF's default MAC is installed in the
 * CAM (tagged with the VF's VLAN if any).
 */
static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
					     struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = tran->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_rcv_mbx_out *mbx_out;
	int err;
	u16 vlan;

	err = qlcnic_sriov_validate_create_rx_ctx(cmd);
	if (err) {
		/* 0x6 in bits 28:25 = invalid-request status. */
		cmd->rsp.arg[0] |= (0x6 << 25);
		return err;
	}

	cmd->req.arg[6] = vf->vp->handle;
	err = qlcnic_issue_cmd(adapter, cmd);

	vlan = vf->vp->vlan;
	if (!err) {
		mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
		vf->rx_ctx_id = mbx_out->ctx_id;
		qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
					    vlan, QLCNIC_MAC_ADD);
	} else {
		vf->rx_ctx_id = 0;
	}

	return err;
}
752
/* Handle a VF's MAC_ADDRESS request locally (never forwarded to
 * firmware).  Set requests are rejected (status 2 in bits 26:25);
 * GET_CURRENT_MAC returns the VF's configured MAC packed big-endian
 * across rsp.arg[2] (bytes 0-1) and rsp.arg[1] (bytes 2-5).
 */
static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
					   struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u8 type, *mac;

	type = cmd->req.arg[1];
	switch (type) {
	case QLCNIC_SET_STATION_MAC:
	case QLCNIC_SET_FAC_DEF_MAC:
		/* VFs may not change their MAC address. */
		cmd->rsp.arg[0] = (2 << 25);
		break;
	case QLCNIC_GET_CURRENT_MAC:
		cmd->rsp.arg[0] = (1 << 25);
		mac = vf->vp->mac;
		cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
		cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
				  ((mac[3]) << 16 & 0xff0000) |
				  ((mac[2]) << 24 & 0xff000000);
	}

	return 0;
}
776
777static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
778{
779 if ((cmd->req.arg[0] >> 29) != 0x3)
780 return -EINVAL;
781
782 return 0;
783}
784
/* Handle a VF's CREATE_TX_CTX request: validate, stamp the vport
 * handle into bits 31:16 of arg[5], forward to firmware, and record
 * the returned Tx context id on success.
 */
static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
					     struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_tx_mbx_out *mbx_out;
	int err;

	err = qlcnic_sriov_validate_create_tx_ctx(cmd);
	if (err) {
		/* 0x6 in bits 28:25 = invalid-request status. */
		cmd->rsp.arg[0] |= (0x6 << 25);
		return err;
	}

	cmd->req.arg[5] |= vf->vp->handle << 16;
	err = qlcnic_issue_cmd(adapter, cmd);
	if (!err) {
		mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
		vf->tx_ctx_id = mbx_out->ctx_id;
	} else {
		vf->tx_ctx_id = 0;
	}

	return err;
}
810
811static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
812 struct qlcnic_cmd_args *cmd)
813{
814 if ((cmd->req.arg[0] >> 29) != 0x3)
815 return -EINVAL;
816
817 if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
818 return -EINVAL;
819
820 return 0;
821}
822
/* Handle a VF's DESTROY_RX_CTX request: validate, remove the VF's
 * default MAC from the CAM, stamp the vport handle into arg[1] and
 * forward to firmware.  The cached Rx context id is cleared on success.
 */
static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	int err;
	u16 vlan;

	err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
	if (err) {
		/* 0x6 in bits 28:25 = invalid-request status. */
		cmd->rsp.arg[0] |= (0x6 << 25);
		return err;
	}

	vlan = vf->vp->vlan;
	qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
				    vlan, QLCNIC_MAC_DEL);
	cmd->req.arg[1] |= vf->vp->handle << 16;
	err = qlcnic_issue_cmd(adapter, cmd);

	if (!err)
		vf->rx_ctx_id = 0;

	return err;
}
848
849static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
850 struct qlcnic_cmd_args *cmd)
851{
852 if ((cmd->req.arg[0] >> 29) != 0x3)
853 return -EINVAL;
854
855 if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
856 return -EINVAL;
857
858 return 0;
859}
860
/* Handle a VF's DESTROY_TX_CTX request: validate, stamp the vport
 * handle into arg[1] and forward to firmware.  The cached Tx context
 * id is cleared on success.
 */
static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	int err;

	err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
	if (err) {
		/* 0x6 in bits 28:25 = invalid-request status. */
		cmd->rsp.arg[0] |= (0x6 << 25);
		return err;
	}

	cmd->req.arg[1] |= vf->vp->handle << 16;
	err = qlcnic_issue_cmd(adapter, cmd);

	if (!err)
		vf->tx_ctx_id = 0;

	return err;
}
882
883static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
884 struct qlcnic_cmd_args *cmd)
885{
886 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
887 return -EINVAL;
888
889 return 0;
890}
891
892static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
893 struct qlcnic_cmd_args *cmd)
894{
895 struct qlcnic_vf_info *vf = trans->vf;
896 struct qlcnic_adapter *adapter = vf->adapter;
897 int err;
898
899 err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
900 if (err) {
901 cmd->rsp.arg[0] |= (0x6 << 25);
902 return err;
903 }
904
905 err = qlcnic_issue_cmd(adapter, cmd);
906 return err;
907}
908
909static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
910 struct qlcnic_cmd_args *cmd)
911{
912 struct qlcnic_vf_info *vf = trans->vf;
913 struct qlcnic_adapter *adapter = vf->adapter;
914 int err = -EIO;
915 u8 op;
916
917 op = cmd->req.arg[1] & 0xff;
918
919 cmd->req.arg[1] |= vf->vp->handle << 16;
920 cmd->req.arg[1] |= BIT_31;
921
922 err = qlcnic_issue_cmd(adapter, cmd);
923 return err;
924}
925
926static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
927 struct qlcnic_cmd_args *cmd)
928{
929 if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
930 return -EINVAL;
931
932 if (!(cmd->req.arg[1] & BIT_16))
933 return -EINVAL;
934
935 if ((cmd->req.arg[1] & 0xff) != 0x1)
936 return -EINVAL;
937
938 return 0;
939}
940
941static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
942 struct qlcnic_cmd_args *cmd)
943{
944 struct qlcnic_vf_info *vf = trans->vf;
945 struct qlcnic_adapter *adapter = vf->adapter;
946 int err;
947
948 err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
949 if (err)
950 cmd->rsp.arg[0] |= (0x6 << 25);
951 else
952 err = qlcnic_issue_cmd(adapter, cmd);
953
954 return err;
955}
956
957static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
958 struct qlcnic_vf_info *vf,
959 struct qlcnic_cmd_args *cmd)
960{
961 if (cmd->req.arg[1] != vf->rx_ctx_id)
962 return -EINVAL;
963
964 if (cmd->req.arg[2] > adapter->ahw->max_mtu)
965 return -EINVAL;
966
967 return 0;
968}
969
970static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
971 struct qlcnic_cmd_args *cmd)
972{
973 struct qlcnic_vf_info *vf = trans->vf;
974 struct qlcnic_adapter *adapter = vf->adapter;
975 int err;
976
977 err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
978 if (err)
979 cmd->rsp.arg[0] |= (0x6 << 25);
980 else
981 err = qlcnic_issue_cmd(adapter, cmd);
982
983 return err;
984}
985
/* Validate a VF's GET_NIC_INFO request.  If BIT_31 is set the VF asked
 * about an explicit function, which must be its own; otherwise the
 * request is rewritten in place to address the VF's vport handle.
 * NOTE(review): despite the "validate" name this mutates the request.
 */
static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
					      struct qlcnic_cmd_args *cmd)
{
	if (cmd->req.arg[1] & BIT_31) {
		if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
			return -EINVAL;
	} else {
		cmd->req.arg[1] |= vf->vp->handle << 16;
	}

	return 0;
}
998
999static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
1000 struct qlcnic_cmd_args *cmd)
1001{
1002 struct qlcnic_vf_info *vf = trans->vf;
1003 struct qlcnic_adapter *adapter = vf->adapter;
1004 int err;
1005
1006 err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
1007 if (err) {
1008 cmd->rsp.arg[0] |= (0x6 << 25);
1009 return err;
1010 }
1011
1012 err = qlcnic_issue_cmd(adapter, cmd);
1013 return err;
1014}
1015
1016static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
1017 struct qlcnic_cmd_args *cmd)
1018{
1019 if (cmd->req.arg[1] != vf->rx_ctx_id)
1020 return -EINVAL;
1021
1022 return 0;
1023}
1024
1025static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
1026 struct qlcnic_cmd_args *cmd)
1027{
1028 struct qlcnic_vf_info *vf = trans->vf;
1029 struct qlcnic_adapter *adapter = vf->adapter;
1030 int err;
1031
1032 err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
1033 if (err)
1034 cmd->rsp.arg[0] |= (0x6 << 25);
1035 else
1036 err = qlcnic_issue_cmd(adapter, cmd);
1037
1038 return err;
1039}
1040
/* Validate a VF's interrupt-coalescing request: it must name the VF's
 * Rx context, and its packet/time thresholds must stay within the
 * limits the PF has configured (pkts no higher, time no lower).
 */
static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
	u16 ctx_id, pkts, time;

	ctx_id = cmd->req.arg[1] >> 16;
	pkts = cmd->req.arg[2] & 0xffff;
	time = cmd->req.arg[2] >> 16;

	if (ctx_id != vf->rx_ctx_id)
		return -EINVAL;
	if (pkts > coal->rx_packets)
		return -EINVAL;
	if (time < coal->rx_time_us)
		return -EINVAL;

	return 0;
}
1061
1062static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
1063 struct qlcnic_cmd_args *cmd)
1064{
1065 struct qlcnic_vf_info *vf = tran->vf;
1066 struct qlcnic_adapter *adapter = vf->adapter;
1067 int err;
1068
1069 err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
1070 if (err) {
1071 cmd->rsp.arg[0] |= (0x6 << 25);
1072 return err;
1073 }
1074
1075 err = qlcnic_issue_cmd(adapter, cmd);
1076 return err;
1077}
1078
/* Validate and rewrite a VF's CONFIG_MAC_VLAN request in place: stamp
 * the vport handle (BIT_31 = handle valid), reject attempts to program
 * a MAC other than the VF's own, and in PVID mode force the entry onto
 * the port VLAN by promoting the op to its VLAN-aware variant.
 * NOTE(review): despite the "validate" name this mutates the request.
 */
static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
					     struct qlcnic_vf_info *vf,
					     struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_macvlan_mbx *macvlan;
	struct qlcnic_vport *vp = vf->vp;
	u8 op, new_op;

	if (!(cmd->req.arg[1] & BIT_8))
		return -EINVAL;

	cmd->req.arg[1] |= (vf->vp->handle << 16);
	cmd->req.arg[1] |= BIT_31;

	macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
	if (!(macvlan->mac_addr0 & BIT_0)) {
		dev_err(&adapter->pdev->dev,
			"MAC address change is not allowed from VF %d",
			vf->pci_func);
		return -EINVAL;
	}

	if (vp->vlan_mode == QLC_PVID_MODE) {
		/* Replace the op (low 3 bits) with its VLAN-aware variant
		 * and insert the port VLAN id into arg[3].
		 */
		op = cmd->req.arg[1] & 0x7;
		cmd->req.arg[1] &= ~0x7;
		new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
			 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
		cmd->req.arg[3] |= vp->vlan << 16;
		cmd->req.arg[1] |= new_op;
	}

	return 0;
}
1112
1113static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
1114 struct qlcnic_cmd_args *cmd)
1115{
1116 struct qlcnic_vf_info *vf = trans->vf;
1117 struct qlcnic_adapter *adapter = vf->adapter;
1118 int err;
1119
1120 err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
1121 if (err) {
1122 cmd->rsp.arg[0] |= (0x6 << 25);
1123 return err;
1124 }
1125
1126 err = qlcnic_issue_cmd(adapter, cmd);
1127 return err;
1128}
1129
1130static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
1131 struct qlcnic_cmd_args *cmd)
1132{
1133 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
1134 return -EINVAL;
1135
1136 if (!(cmd->req.arg[1] & BIT_8))
1137 return -EINVAL;
1138
1139 return 0;
1140}
1141
1142static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
1143 struct qlcnic_cmd_args *cmd)
1144{
1145 struct qlcnic_vf_info *vf = trans->vf;
1146 struct qlcnic_adapter *adapter = vf->adapter;
1147 int err;
1148
1149 err = qlcnic_sriov_validate_linkevent(vf, cmd);
1150 if (err) {
1151 cmd->rsp.arg[0] |= (0x6 << 25);
1152 return err;
1153 }
1154
1155 err = qlcnic_issue_cmd(adapter, cmd);
1156 return err;
1157}
1158
1159static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
1160 struct qlcnic_cmd_args *cmd)
1161{
1162 struct qlcnic_vf_info *vf = trans->vf;
1163 struct qlcnic_adapter *adapter = vf->adapter;
1164 int err;
1165
1166 cmd->req.arg[1] |= vf->vp->handle << 16;
1167 cmd->req.arg[1] |= BIT_31;
1168 err = qlcnic_issue_cmd(adapter, cmd);
1169 return err;
1170}
1171
/* Answer a VF's GET_ACL back-channel request locally: echo the command
 * op with a success status (1 in bits 26:25), then report the VF's
 * VLAN mode — guest-VLAN mode allows one VLAN (count in rsp.arg[2]);
 * PVID mode reports the fixed port VLAN id in rsp.arg[1].
 */
static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct qlcnic_vport *vp = vf->vp;
	u8 cmd_op, mode = vp->vlan_mode;

	cmd_op = trans->req_hdr->cmd_op;
	cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;

	switch (mode) {
	case QLC_GUEST_VLAN_MODE:
		cmd->rsp.arg[1] = mode | 1 << 8;
		cmd->rsp.arg[2] = 1 << 16;
		break;
	case QLC_PVID_MODE:
		cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
		break;
	}

	return 0;
}
1194
/* Remove the VF's guest VLAN.  If the VF has an active Rx context, its
 * VLAN-tagged default MAC entry is replaced in the CAM by an untagged
 * one; otherwise only the cached vlan value is cleared.  Returns
 * -EINVAL if no guest VLAN is set.
 */
static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
					  struct qlcnic_vf_info *vf)

{
	struct qlcnic_vport *vp = vf->vp;

	if (!vp->vlan)
		return -EINVAL;

	if (!vf->rx_ctx_id) {
		vp->vlan = 0;
		return 0;
	}

	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
				    vp->vlan, QLCNIC_MAC_DEL);
	vp->vlan = 0;
	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
				    0, QLCNIC_MAC_ADD);
	return 0;
}
1216
/* Set the VF's guest VLAN from bits 31:16 of the request's arg[1].
 * Only one guest VLAN is allowed (-EIO if already set).  With an
 * active Rx context the untagged default-MAC CAM entry is swapped for
 * a tagged one; on failure the untagged entry is restored and the
 * cached vlan cleared.
 */
static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
					  struct qlcnic_vf_info *vf,
					  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vport *vp = vf->vp;
	int err = -EIO;

	if (vp->vlan)
		return err;

	if (!vf->rx_ctx_id) {
		vp->vlan = cmd->req.arg[1] >> 16;
		return 0;
	}

	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
					  0, QLCNIC_MAC_DEL);
	if (err)
		return err;

	vp->vlan = cmd->req.arg[1] >> 16;
	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
					  vp->vlan, QLCNIC_MAC_ADD);

	if (err) {
		/* Roll back: restore the untagged CAM entry. */
		qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
					    0, QLCNIC_MAC_ADD);
		vp->vlan = 0;
	}

	return err;
}
1249
/* Handle a VF's CFG_GUEST_VLAN back-channel request.  Only permitted
 * in guest-VLAN mode; bit 0..3 of arg[1] selects add (non-zero) or
 * delete.  Status is reported in bits 26:25 of rsp.arg[0]
 * (1 = success, 2 = fail).
 */
static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
					      struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_vf_info *vf = tran->vf;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_vport *vp = vf->vp;
	int err = -EIO;
	u8 op;

	if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
		cmd->rsp.arg[0] |= 2 << 25;
		return err;
	}

	op = cmd->req.arg[1] & 0xf;

	if (op)
		err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
	else
		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);

	cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
	return err;
}
1274
/* Firmware commands the PF forwards to firmware on a VF's behalf
 * without any validation or translation.
 */
static const int qlcnic_pf_passthru_supp_cmds[] = {
	QLCNIC_CMD_GET_STATISTICS,
	QLCNIC_CMD_GET_PORT_CONFIG,
	QLCNIC_CMD_GET_LINK_STATUS,
};
1280
/* Back-channel command handlers, indexed directly by cmd_op. */
static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
	[QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
	[QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
	[QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
	[QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
};
1287
/* Firmware command handlers that validate/rewrite a VF request before
 * it reaches firmware; matched by linear search on the cmd field.
 */
static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
	{QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
	{QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
	{QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
	{QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
	{QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
	{QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
	{QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
	{QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
	{QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
	{QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
	{QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
	{QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
	{QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
	{QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
};
1305
/* Dispatch a VF back-channel transaction.  Back-channel ops index the
 * bc handler table directly; firmware ops are matched against the
 * fw-handler table, then against the pass-through list.  Unrecognized
 * commands get status 0x9 (unsupported) in bits 28:25 of rsp.arg[0].
 */
void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans,
				    struct qlcnic_cmd_args *cmd)
{
	u8 size, cmd_op;

	cmd_op = trans->req_hdr->cmd_op;

	if (trans->req_hdr->op_type == QLC_BC_CMD) {
		size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
		if (cmd_op < size) {
			qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
			return;
		}
	} else {
		int i;
		size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
		for (i = 0; i < size; i++) {
			if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
				qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
				return;
			}
		}

		size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
		for (i = 0; i < size; i++) {
			if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
				qlcnic_issue_cmd(adapter, cmd);
				return;
			}
		}
	}

	cmd->rsp.arg[0] |= (0x9 << 25);
}
1341
1342void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
1343 u32 *int_id)
1344{
1345 u16 vpid;
1346
1347 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1348 adapter->ahw->pci_func);
1349 *int_id |= vpid;
1350}
1351
1352void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
1353 u32 *int_id)
1354{
1355 u16 vpid;
1356
1357 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1358 adapter->ahw->pci_func);
1359 *int_id |= vpid << 16;
1360}
1361
1362void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
1363 u32 *int_id)
1364{
1365 int vpid;
1366
1367 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1368 adapter->ahw->pci_func);
1369 *int_id |= vpid << 16;
1370}
1371
1372void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
1373 u32 *int_id)
1374{
1375 u16 vpid;
1376
1377 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1378 adapter->ahw->pci_func);
1379 *int_id |= vpid << 16;
1380}
1381
1382void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
1383 u32 *int_id)
1384{
1385 u16 vpid;
1386
1387 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1388 adapter->ahw->pci_func);
1389 *int_id |= (vpid << 16) | BIT_31;
1390}
1391
1392void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
1393 u32 *int_id)
1394{
1395 u16 vpid;
1396
1397 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1398 adapter->ahw->pci_func);
1399 *int_id |= (vpid << 16) | BIT_31;
1400}
1401
1402void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
1403 u32 *int_id)
1404{
1405 u16 vpid;
1406
1407 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1408 adapter->ahw->pci_func);
1409 *int_id |= (vpid << 16) | BIT_31;
1410}
1411
1412static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
1413 struct qlcnic_vf_info *vf)
1414{
1415 struct qlcnic_cmd_args cmd;
1416 int vpid;
1417
1418 if (!vf->rx_ctx_id)
1419 return;
1420
1421 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
1422 return;
1423
1424 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
1425 if (vpid >= 0) {
1426 cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
1427 if (qlcnic_issue_cmd(adapter, &cmd))
1428 dev_err(&adapter->pdev->dev,
1429 "Failed to delete Tx ctx in firmware for func 0x%x\n",
1430 vf->pci_func);
1431 else
1432 vf->rx_ctx_id = 0;
1433 }
1434
1435 qlcnic_free_mbx_args(&cmd);
1436}
1437
/* Destroy a VF's Tx context in firmware on the VF's behalf (used
 * during FLR when the VF cannot clean up itself).  No-op if the VF has
 * no Tx context or its vport handle cannot be resolved.
 */
static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
				    struct qlcnic_vf_info *vf)
{
	struct qlcnic_cmd_args cmd;
	int vpid;

	if (!vf->tx_ctx_id)
		return;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
		return;

	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
	if (vpid >= 0) {
		/* ctx id in the low 16 bits, vport handle in the high 16. */
		cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
		if (qlcnic_issue_cmd(adapter, &cmd))
			dev_err(&adapter->pdev->dev,
				"Failed to delete Tx ctx in firmware for func 0x%x\n",
				vf->pci_func);
		else
			vf->tx_ctx_id = 0;
	}

	qlcnic_free_mbx_args(&cmd);
}
1463
1464static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
1465 struct qlcnic_vf_info *vf,
1466 struct qlcnic_bc_trans *trans)
1467{
1468 struct qlcnic_trans_list *t_list = &vf->rcv_act;
1469 unsigned long flag;
1470
1471 spin_lock_irqsave(&t_list->lock, flag);
1472
1473 __qlcnic_sriov_add_act_list(sriov, vf, trans);
1474
1475 spin_unlock_irqrestore(&t_list->lock, flag);
1476 return 0;
1477}
1478
/* Perform the actual FLR (function level reset) cleanup for one VF:
 * flush pending/active backchannel transactions, tear down firmware
 * contexts on a software FLR, and release the VF's vport.
 *
 * The statement order is significant: lists are drained and in-flight
 * work cancelled before firmware teardown and flag clearing.
 */
static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	/* Drop queued transactions and wait for in-flight trans work. */
	qlcnic_sriov_cleanup_list(&vf->rcv_pend);
	cancel_work_sync(&vf->trans_work);
	qlcnic_sriov_cleanup_list(&vf->rcv_act);

	/* Soft FLR: hardware did not reset the function, so the VF's
	 * Tx/Rx firmware contexts must be deleted explicitly.
	 */
	if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
		qlcnic_sriov_del_tx_ctx(adapter, vf);
		qlcnic_sriov_del_rx_ctx(adapter, vf);
	}

	/* Disable (state 0) the vport for this PCI function. */
	qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);

	clear_bit(QLC_BC_VF_FLR, &vf->state);
	if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
		/* Requeue the transaction that triggered the soft FLR
		 * (saved in vf->flr_trans by the soft-FLR handler) so it
		 * is processed now that cleanup is done.
		 */
		qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
						  vf->flr_trans);
		clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
		vf->flr_trans = NULL;
	}
}
1502
1503static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
1504{
1505 struct qlcnic_vf_info *vf;
1506
1507 vf = container_of(work, struct qlcnic_vf_info, flr_work);
1508 __qlcnic_sriov_process_flr(vf);
1509 return;
1510}
1511
1512static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
1513 struct qlcnic_vf_info *vf,
1514 work_func_t func)
1515{
1516 if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
1517 return;
1518
1519 INIT_WORK(&vf->flr_work, func);
1520 queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
1521}
1522
1523static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
1524 struct qlcnic_bc_trans *trans,
1525 struct qlcnic_vf_info *vf)
1526{
1527 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1528
1529 set_bit(QLC_BC_VF_FLR, &vf->state);
1530 clear_bit(QLC_BC_VF_STATE, &vf->state);
1531 set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
1532 vf->flr_trans = trans;
1533 qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
1534 netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
1535 vf->pci_func);
1536}
1537
1538bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
1539 struct qlcnic_bc_trans *trans,
1540 struct qlcnic_vf_info *vf)
1541{
1542 struct qlcnic_bc_hdr *hdr = trans->req_hdr;
1543
1544 if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1545 (hdr->op_type == QLC_BC_CMD) &&
1546 test_bit(QLC_BC_VF_STATE, &vf->state)) {
1547 qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
1548 return true;
1549 }
1550
1551 return false;
1552}
1553
1554void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
1555 struct qlcnic_vf_info *vf)
1556{
1557 struct net_device *dev = vf->adapter->netdev;
1558
1559 if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
1560 clear_bit(QLC_BC_VF_FLR, &vf->state);
1561 return;
1562 }
1563
1564 if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
1565 netdev_info(dev, "FLR for PCI func %d in progress\n",
1566 vf->pci_func);
1567 return;
1568 }
1569
1570 qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
1571 netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
1572}
1573
/* Reset SR-IOV PF state: synchronously run FLR cleanup for every VF,
 * release the PF's own vport handle, and re-arm mailbox interrupts.
 * Called with the context ids already stale, hence they are zeroed
 * before __qlcnic_sriov_process_flr() so no firmware delete is issued.
 */
void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_sriov *sriov = ahw->sriov;
	struct qlcnic_vf_info *vf;
	u16 num_vfs = sriov->num_vfs;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		/* Contexts are gone with the reset; clearing the ids makes
		 * the del_tx/rx_ctx calls inside process_flr no-ops.
		 */
		vf->rx_ctx_id = 0;
		vf->tx_ctx_id = 0;
		/* Run the FLR cleanup inline rather than via the workqueue. */
		cancel_work_sync(&vf->flr_work);
		__qlcnic_sriov_process_flr(vf);
		clear_bit(QLC_BC_VF_STATE, &vf->state);
	}

	qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
	/* Re-enable mailbox interrupts for all MSI-X vectors but one. */
	QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
}
1594
1595int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
1596{
1597 struct qlcnic_hardware_context *ahw = adapter->ahw;
1598 int err;
1599
1600 if (!qlcnic_sriov_enable_check(adapter))
1601 return 0;
1602
1603 ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
1604
1605 err = qlcnic_sriov_pf_init(adapter);
1606 if (err)
1607 return err;
1608
1609 dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
1610 __func__, ahw->op_mode);
1611 return err;
1612}
1613
1614int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1615{
1616 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1617 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1618 int i, num_vfs = sriov->num_vfs;
1619 struct qlcnic_vf_info *vf_info;
1620 u8 *curr_mac;
1621
1622 if (!qlcnic_sriov_pf_check(adapter))
1623 return -EOPNOTSUPP;
1624
1625 if (!is_valid_ether_addr(mac) || vf >= num_vfs)
1626 return -EINVAL;
1627
1628 if (!compare_ether_addr(adapter->mac_addr, mac)) {
1629 netdev_err(netdev, "MAC address is already in use by the PF\n");
1630 return -EINVAL;
1631 }
1632
1633 for (i = 0; i < num_vfs; i++) {
1634 vf_info = &sriov->vf_info[i];
1635 if (!compare_ether_addr(vf_info->vp->mac, mac)) {
1636 netdev_err(netdev,
1637 "MAC address is already in use by VF %d\n",
1638 i);
1639 return -EINVAL;
1640 }
1641 }
1642
1643 vf_info = &sriov->vf_info[vf];
1644 curr_mac = vf_info->vp->mac;
1645
1646 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1647 netdev_err(netdev,
1648 "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
1649 vf);
1650 return -EOPNOTSUPP;
1651 }
1652
1653 memcpy(curr_mac, mac, netdev->addr_len);
1654 netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
1655 mac, vf);
1656 return 0;
1657}
1658
/* ndo_set_vf_tx_rate handler: set the Tx bandwidth cap for VF @vf in
 * Mbps. If the VF driver is loaded, the new limit is pushed to firmware
 * immediately; otherwise it is only cached in the vport and applied on
 * the next VF init.
 */
int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf_info;
	struct qlcnic_info nic_info;
	struct qlcnic_vport *vp;
	u16 vpid;

	if (!qlcnic_sriov_pf_check(adapter))
		return -EOPNOTSUPP;

	if (vf >= sriov->num_vfs)
		return -EINVAL;

	/* NOTE(review): the bounds use literals 100/10000 while the
	 * message prints QLC_VF_MIN_TX_RATE/QLC_VF_MAX_TX_RATE — confirm
	 * the macros match these values and use them here.
	 */
	if (tx_rate >= 10000 || tx_rate < 100) {
		netdev_err(netdev,
			   "Invalid Tx rate, allowed range is [%d - %d]",
			   QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
		return -EINVAL;
	}

	/* NOTE(review): dead code — tx_rate == 0 already failed the
	 * range check above, so the "0 means unlimited" convention never
	 * takes effect here.
	 */
	if (tx_rate == 0)
		tx_rate = 10000;

	vf_info = &sriov->vf_info[vf];
	vp = vf_info->vp;
	vpid = vp->handle;

	/* VF driver loaded: update the live vport config in firmware. */
	if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
		if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
			return -EIO;

		/* Firmware bandwidth unit is 100 Mbps. */
		nic_info.max_tx_bw = tx_rate / 100;
		nic_info.bit_offsets = BIT_0;

		if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
			return -EIO;
	}

	vp->max_tx_bw = tx_rate / 100;
	netdev_info(netdev,
		    "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
		    tx_rate, vp->max_tx_bw, vf);
	return 0;
}
1705
1706int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
1707 u16 vlan, u8 qos)
1708{
1709 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1710 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1711 struct qlcnic_vf_info *vf_info;
1712 struct qlcnic_vport *vp;
1713
1714 if (!qlcnic_sriov_pf_check(adapter))
1715 return -EOPNOTSUPP;
1716
1717 if (vf >= sriov->num_vfs || qos > 7)
1718 return -EINVAL;
1719
1720 if (vlan > MAX_VLAN_ID) {
1721 netdev_err(netdev,
1722 "Invalid VLAN ID, allowed range is [0 - %d]\n",
1723 MAX_VLAN_ID);
1724 return -EINVAL;
1725 }
1726
1727 vf_info = &sriov->vf_info[vf];
1728 vp = vf_info->vp;
1729 if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
1730 netdev_err(netdev,
1731 "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
1732 vf);
1733 return -EOPNOTSUPP;
1734 }
1735
1736 switch (vlan) {
1737 case 4095:
1738 vp->vlan_mode = QLC_GUEST_VLAN_MODE;
1739 break;
1740 case 0:
1741 vp->vlan_mode = QLC_NO_VLAN_MODE;
1742 vp->vlan = 0;
1743 vp->qos = 0;
1744 break;
1745 default:
1746 vp->vlan_mode = QLC_PVID_MODE;
1747 vp->vlan = vlan;
1748 vp->qos = qos;
1749 }
1750
1751 netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
1752 vlan, qos, vf);
1753 return 0;
1754}
1755
1756int qlcnic_sriov_get_vf_config(struct net_device *netdev,
1757 int vf, struct ifla_vf_info *ivi)
1758{
1759 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1760 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1761 struct qlcnic_vport *vp;
1762
1763 if (!qlcnic_sriov_pf_check(adapter))
1764 return -EOPNOTSUPP;
1765
1766 if (vf >= sriov->num_vfs)
1767 return -EINVAL;
1768
1769 vp = sriov->vf_info[vf].vp;
1770 memcpy(&ivi->mac, vp->mac, ETH_ALEN);
1771 ivi->vlan = vp->vlan;
1772 ivi->qos = vp->qos;
1773 if (vp->max_tx_bw == MAX_BW)
1774 ivi->tx_rate = 0;
1775 else
1776 ivi->tx_rate = vp->max_tx_bw * 100;
1777
1778 ivi->vf = vf;
1779 return 0;
1780}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 5ef328af61d0..4e22e794a186 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -21,8 +21,6 @@
21#include <linux/aer.h> 21#include <linux/aer.h>
22#include <linux/log2.h> 22#include <linux/log2.h>
23 23
24#include <linux/sysfs.h>
25
26#define QLC_STATUS_UNSUPPORTED_CMD -2 24#define QLC_STATUS_UNSUPPORTED_CMD -2
27 25
28int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) 26int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
886 return size; 884 return size;
887} 885}
888 886
887static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
888 struct kobject *kobj,
889 struct bin_attribute *attr,
890 char *buf, loff_t offset,
891 size_t size)
892{
893 unsigned char *p_read_buf;
894 int ret, count;
895 struct device *dev = container_of(kobj, struct device, kobj);
896 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
897
898 if (!size)
899 return QL_STATUS_INVALID_PARAM;
900 if (!buf)
901 return QL_STATUS_INVALID_PARAM;
902
903 count = size / sizeof(u32);
904
905 if (size % sizeof(u32))
906 count++;
907
908 p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
909 if (!p_read_buf)
910 return -ENOMEM;
911 if (qlcnic_83xx_lock_flash(adapter) != 0) {
912 kfree(p_read_buf);
913 return -EIO;
914 }
915
916 ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
917 count);
918
919 if (ret) {
920 qlcnic_83xx_unlock_flash(adapter);
921 kfree(p_read_buf);
922 return ret;
923 }
924
925 qlcnic_83xx_unlock_flash(adapter);
926 memcpy(buf, p_read_buf, size);
927 kfree(p_read_buf);
928
929 return size;
930}
931
932static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
933 char *buf, loff_t offset,
934 size_t size)
935{
936 int i, ret, count;
937 unsigned char *p_cache, *p_src;
938
939 p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
940 if (!p_cache)
941 return -ENOMEM;
942
943 memcpy(p_cache, buf, size);
944 p_src = p_cache;
945 count = size / sizeof(u32);
946
947 if (qlcnic_83xx_lock_flash(adapter) != 0) {
948 kfree(p_cache);
949 return -EIO;
950 }
951
952 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
953 ret = qlcnic_83xx_enable_flash_write(adapter);
954 if (ret) {
955 kfree(p_cache);
956 qlcnic_83xx_unlock_flash(adapter);
957 return -EIO;
958 }
959 }
960
961 for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
962 ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
963 (u32 *)p_src,
964 QLC_83XX_FLASH_WRITE_MAX);
965
966 if (ret) {
967 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
968 ret = qlcnic_83xx_disable_flash_write(adapter);
969 if (ret) {
970 kfree(p_cache);
971 qlcnic_83xx_unlock_flash(adapter);
972 return -EIO;
973 }
974 }
975
976 kfree(p_cache);
977 qlcnic_83xx_unlock_flash(adapter);
978 return -EIO;
979 }
980
981 p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
982 offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
983 }
984
985 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
986 ret = qlcnic_83xx_disable_flash_write(adapter);
987 if (ret) {
988 kfree(p_cache);
989 qlcnic_83xx_unlock_flash(adapter);
990 return -EIO;
991 }
992 }
993
994 kfree(p_cache);
995 qlcnic_83xx_unlock_flash(adapter);
996
997 return 0;
998}
999
1000static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
1001 char *buf, loff_t offset, size_t size)
1002{
1003 int i, ret, count;
1004 unsigned char *p_cache, *p_src;
1005
1006 p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
1007 if (!p_cache)
1008 return -ENOMEM;
1009
1010 memcpy(p_cache, buf, size);
1011 p_src = p_cache;
1012 count = size / sizeof(u32);
1013
1014 if (qlcnic_83xx_lock_flash(adapter) != 0) {
1015 kfree(p_cache);
1016 return -EIO;
1017 }
1018
1019 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1020 ret = qlcnic_83xx_enable_flash_write(adapter);
1021 if (ret) {
1022 kfree(p_cache);
1023 qlcnic_83xx_unlock_flash(adapter);
1024 return -EIO;
1025 }
1026 }
1027
1028 for (i = 0; i < count; i++) {
1029 ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
1030 if (ret) {
1031 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1032 ret = qlcnic_83xx_disable_flash_write(adapter);
1033 if (ret) {
1034 kfree(p_cache);
1035 qlcnic_83xx_unlock_flash(adapter);
1036 return -EIO;
1037 }
1038 }
1039 kfree(p_cache);
1040 qlcnic_83xx_unlock_flash(adapter);
1041 return -EIO;
1042 }
1043
1044 p_src = p_src + sizeof(u32);
1045 offset = offset + sizeof(u32);
1046 }
1047
1048 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1049 ret = qlcnic_83xx_disable_flash_write(adapter);
1050 if (ret) {
1051 kfree(p_cache);
1052 qlcnic_83xx_unlock_flash(adapter);
1053 return -EIO;
1054 }
1055 }
1056
1057 kfree(p_cache);
1058 qlcnic_83xx_unlock_flash(adapter);
1059
1060 return 0;
1061}
1062
1063static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
1064 struct kobject *kobj,
1065 struct bin_attribute *attr,
1066 char *buf, loff_t offset,
1067 size_t size)
1068{
1069 int ret;
1070 static int flash_mode;
1071 unsigned long data;
1072 struct device *dev = container_of(kobj, struct device, kobj);
1073 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1074
1075 if (!buf)
1076 return QL_STATUS_INVALID_PARAM;
1077
1078 ret = kstrtoul(buf, 16, &data);
1079
1080 switch (data) {
1081 case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
1082 flash_mode = QLC_83XX_ERASE_MODE;
1083 ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
1084 if (ret) {
1085 dev_err(&adapter->pdev->dev,
1086 "%s failed at %d\n", __func__, __LINE__);
1087 return -EIO;
1088 }
1089 break;
1090
1091 case QLC_83XX_FLASH_BULK_WRITE_CMD:
1092 flash_mode = QLC_83XX_BULK_WRITE_MODE;
1093 break;
1094
1095 case QLC_83XX_FLASH_WRITE_CMD:
1096 flash_mode = QLC_83XX_WRITE_MODE;
1097 break;
1098 default:
1099 if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
1100 ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
1101 offset, size);
1102 if (ret) {
1103 dev_err(&adapter->pdev->dev,
1104 "%s failed at %d\n",
1105 __func__, __LINE__);
1106 return -EIO;
1107 }
1108 }
1109
1110 if (flash_mode == QLC_83XX_WRITE_MODE) {
1111 ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
1112 offset, size);
1113 if (ret) {
1114 dev_err(&adapter->pdev->dev,
1115 "%s failed at %d\n", __func__,
1116 __LINE__);
1117 return -EIO;
1118 }
1119 }
1120 }
1121
1122 return size;
1123}
1124
889static struct device_attribute dev_attr_bridged_mode = { 1125static struct device_attribute dev_attr_bridged_mode = {
890 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, 1126 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
891 .show = qlcnic_show_bridged_mode, 1127 .show = qlcnic_show_bridged_mode,
@@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = {
960 .write = qlcnic_sysfs_write_pm_config, 1196 .write = qlcnic_sysfs_write_pm_config,
961}; 1197};
962 1198
1199static struct bin_attribute bin_attr_flash = {
1200 .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
1201 .size = 0,
1202 .read = qlcnic_83xx_sysfs_flash_read_handler,
1203 .write = qlcnic_83xx_sysfs_flash_write_handler,
1204};
1205
963void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) 1206void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
964{ 1207{
965 struct device *dev = &adapter->pdev->dev; 1208 struct device *dev = &adapter->pdev->dev;
@@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
1048 1291
1049void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) 1292void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
1050{ 1293{
1294 struct device *dev = &adapter->pdev->dev;
1295
1051 qlcnic_create_diag_entries(adapter); 1296 qlcnic_create_diag_entries(adapter);
1297
1298 if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
1299 dev_info(dev, "failed to create flash sysfs entry\n");
1052} 1300}
1053 1301
1054void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) 1302void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
1055{ 1303{
1304 struct device *dev = &adapter->pdev->dev;
1305
1056 qlcnic_remove_diag_entries(adapter); 1306 qlcnic_remove_diag_entries(adapter);
1307 sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
1057} 1308}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 8033555e53c2..87463bc701a6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -409,7 +409,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
409 (qdev-> 409 (qdev->
410 func << CAM_OUT_FUNC_SHIFT) | 410 func << CAM_OUT_FUNC_SHIFT) |
411 (0 << CAM_OUT_CQ_ID_SHIFT)); 411 (0 << CAM_OUT_CQ_ID_SHIFT));
412 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) 412 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
413 cam_output |= CAM_OUT_RV; 413 cam_output |= CAM_OUT_RV;
414 /* route to NIC core */ 414 /* route to NIC core */
415 ql_write32(qdev, MAC_ADDR_DATA, cam_output); 415 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1211 netdev_alloc_skb(qdev->ndev, 1211 netdev_alloc_skb(qdev->ndev,
1212 SMALL_BUFFER_SIZE); 1212 SMALL_BUFFER_SIZE);
1213 if (sbq_desc->p.skb == NULL) { 1213 if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216 rx_ring->sbq_clean_idx = clean_idx; 1214 rx_ring->sbq_clean_idx = clean_idx;
1217 return; 1215 return;
1218 } 1216 }
@@ -1508,7 +1506,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1508 skb->ip_summed = CHECKSUM_UNNECESSARY; 1506 skb->ip_summed = CHECKSUM_UNNECESSARY;
1509 skb_record_rx_queue(skb, rx_ring->cq_id); 1507 skb_record_rx_queue(skb, rx_ring->cq_id);
1510 if (vlan_id != 0xffff) 1508 if (vlan_id != 0xffff)
1511 __vlan_hwaccel_put_tag(skb, vlan_id); 1509 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1512 napi_gro_frags(napi); 1510 napi_gro_frags(napi);
1513} 1511}
1514 1512
@@ -1527,8 +1525,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1527 1525
1528 skb = netdev_alloc_skb(ndev, length); 1526 skb = netdev_alloc_skb(ndev, length);
1529 if (!skb) { 1527 if (!skb) {
1530 netif_err(qdev, drv, qdev->ndev,
1531 "Couldn't get an skb, need to unwind!.\n");
1532 rx_ring->rx_dropped++; 1528 rx_ring->rx_dropped++;
1533 put_page(lbq_desc->p.pg_chunk.page); 1529 put_page(lbq_desc->p.pg_chunk.page);
1534 return; 1530 return;
@@ -1592,7 +1588,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1592 1588
1593 skb_record_rx_queue(skb, rx_ring->cq_id); 1589 skb_record_rx_queue(skb, rx_ring->cq_id);
1594 if (vlan_id != 0xffff) 1590 if (vlan_id != 0xffff)
1595 __vlan_hwaccel_put_tag(skb, vlan_id); 1591 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1596 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 1592 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1597 napi_gro_receive(napi, skb); 1593 napi_gro_receive(napi, skb);
1598 else 1594 else
@@ -1619,8 +1615,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1619 /* Allocate new_skb and copy */ 1615 /* Allocate new_skb and copy */
1620 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); 1616 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1621 if (new_skb == NULL) { 1617 if (new_skb == NULL) {
1622 netif_err(qdev, probe, qdev->ndev,
1623 "No skb available, drop the packet.\n");
1624 rx_ring->rx_dropped++; 1618 rx_ring->rx_dropped++;
1625 return; 1619 return;
1626 } 1620 }
@@ -1697,7 +1691,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1697 1691
1698 skb_record_rx_queue(skb, rx_ring->cq_id); 1692 skb_record_rx_queue(skb, rx_ring->cq_id);
1699 if (vlan_id != 0xffff) 1693 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id); 1694 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 1695 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb); 1696 napi_gro_receive(&rx_ring->napi, skb);
1703 else 1697 else
@@ -2009,7 +2003,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
2009 rx_ring->rx_bytes += skb->len; 2003 rx_ring->rx_bytes += skb->len;
2010 skb_record_rx_queue(skb, rx_ring->cq_id); 2004 skb_record_rx_queue(skb, rx_ring->cq_id);
2011 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) 2005 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2012 __vlan_hwaccel_put_tag(skb, vlan_id); 2006 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2013 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 2007 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2014 napi_gro_receive(&rx_ring->napi, skb); 2008 napi_gro_receive(&rx_ring->napi, skb);
2015 else 2009 else
@@ -2307,7 +2301,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2307{ 2301{
2308 struct ql_adapter *qdev = netdev_priv(ndev); 2302 struct ql_adapter *qdev = netdev_priv(ndev);
2309 2303
2310 if (features & NETIF_F_HW_VLAN_RX) { 2304 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2311 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | 2305 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2312 NIC_RCV_CFG_VLAN_MATCH_AND_NON); 2306 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2313 } else { 2307 } else {
@@ -2322,10 +2316,10 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
2322 * Since there is no support for separate rx/tx vlan accel 2316 * Since there is no support for separate rx/tx vlan accel
2323 * enable/disable make sure tx flag is always in same state as rx. 2317 * enable/disable make sure tx flag is always in same state as rx.
2324 */ 2318 */
2325 if (features & NETIF_F_HW_VLAN_RX) 2319 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2326 features |= NETIF_F_HW_VLAN_TX; 2320 features |= NETIF_F_HW_VLAN_CTAG_TX;
2327 else 2321 else
2328 features &= ~NETIF_F_HW_VLAN_TX; 2322 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2329 2323
2330 return features; 2324 return features;
2331} 2325}
@@ -2335,7 +2329,7 @@ static int qlge_set_features(struct net_device *ndev,
2335{ 2329{
2336 netdev_features_t changed = ndev->features ^ features; 2330 netdev_features_t changed = ndev->features ^ features;
2337 2331
2338 if (changed & NETIF_F_HW_VLAN_RX) 2332 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2339 qlge_vlan_mode(ndev, features); 2333 qlge_vlan_mode(ndev, features);
2340 2334
2341 return 0; 2335 return 0;
@@ -2354,7 +2348,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2354 return err; 2348 return err;
2355} 2349}
2356 2350
2357static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2351static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2358{ 2352{
2359 struct ql_adapter *qdev = netdev_priv(ndev); 2353 struct ql_adapter *qdev = netdev_priv(ndev);
2360 int status; 2354 int status;
@@ -2385,7 +2379,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2385 return err; 2379 return err;
2386} 2380}
2387 2381
2388static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2382static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2389{ 2383{
2390 struct ql_adapter *qdev = netdev_priv(ndev); 2384 struct ql_adapter *qdev = netdev_priv(ndev);
2391 int status; 2385 int status;
@@ -4693,9 +4687,9 @@ static int qlge_probe(struct pci_dev *pdev,
4693 SET_NETDEV_DEV(ndev, &pdev->dev); 4687 SET_NETDEV_DEV(ndev, &pdev->dev);
4694 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 4688 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4695 NETIF_F_TSO | NETIF_F_TSO_ECN | 4689 NETIF_F_TSO | NETIF_F_TSO_ECN |
4696 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; 4690 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
4697 ndev->features = ndev->hw_features | 4691 ndev->features = ndev->hw_features |
4698 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 4692 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4699 ndev->vlan_features = ndev->hw_features; 4693 ndev->vlan_features = ndev->hw_features;
4700 4694
4701 if (test_bit(QL_DMA64, &qdev->flags)) 4695 if (test_bit(QL_DMA64, &qdev->flags))
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5b4103db70f5..e9dc84943cfc 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
224 break; 224 break;
225 } 225 }
226 226
227 if (limit < 0)
228 return -ETIMEDOUT;
229
227 return ioread16(ioaddr + MMRD); 230 return ioread16(ioaddr + MMRD);
228} 231}
229 232
230/* Write a word data from PHY Chip */ 233/* Write a word data from PHY Chip */
231static void r6040_phy_write(void __iomem *ioaddr, 234static int r6040_phy_write(void __iomem *ioaddr,
232 int phy_addr, int reg, u16 val) 235 int phy_addr, int reg, u16 val)
233{ 236{
234 int limit = MAC_DEF_TIMEOUT; 237 int limit = MAC_DEF_TIMEOUT;
@@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr,
243 if (!(cmd & MDIO_WRITE)) 246 if (!(cmd & MDIO_WRITE))
244 break; 247 break;
245 } 248 }
249
250 return (limit < 0) ? -ETIMEDOUT : 0;
246} 251}
247 252
248static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) 253static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
@@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
261 struct r6040_private *lp = netdev_priv(dev); 266 struct r6040_private *lp = netdev_priv(dev);
262 void __iomem *ioaddr = lp->base; 267 void __iomem *ioaddr = lp->base;
263 268
264 r6040_phy_write(ioaddr, phy_addr, reg, value); 269 return r6040_phy_write(ioaddr, phy_addr, reg, value);
265
266 return 0;
267} 270}
268 271
269static int r6040_mdiobus_reset(struct mii_bus *bus) 272static int r6040_mdiobus_reset(struct mii_bus *bus)
@@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
347 do { 350 do {
348 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); 351 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
349 if (!skb) { 352 if (!skb) {
350 netdev_err(dev, "failed to alloc skb for rx\n");
351 rc = -ENOMEM; 353 rc = -ENOMEM;
352 goto err_exit; 354 goto err_exit;
353 } 355 }
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b62a32484f6a..7d1fb9ad1296 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
431 cp->dev->stats.rx_bytes += skb->len; 431 cp->dev->stats.rx_bytes += skb->len;
432 432
433 if (opts2 & RxVlanTagged) 433 if (opts2 & RxVlanTagged)
434 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); 434 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
435 435
436 napi_gro_receive(&cp->napi, skb); 436 napi_gro_receive(&cp->napi, skb);
437} 437}
@@ -1438,7 +1438,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features)
1438 else 1438 else
1439 cp->cpcmd &= ~RxChkSum; 1439 cp->cpcmd &= ~RxChkSum;
1440 1440
1441 if (features & NETIF_F_HW_VLAN_RX) 1441 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1442 cp->cpcmd |= RxVlanOn; 1442 cp->cpcmd |= RxVlanOn;
1443 else 1443 else
1444 cp->cpcmd &= ~RxVlanOn; 1444 cp->cpcmd &= ~RxVlanOn;
@@ -1955,14 +1955,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1955 dev->ethtool_ops = &cp_ethtool_ops; 1955 dev->ethtool_ops = &cp_ethtool_ops;
1956 dev->watchdog_timeo = TX_TIMEOUT; 1956 dev->watchdog_timeo = TX_TIMEOUT;
1957 1957
1958 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1958 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1959 1959
1960 if (pci_using_dac) 1960 if (pci_using_dac)
1961 dev->features |= NETIF_F_HIGHDMA; 1961 dev->features |= NETIF_F_HIGHDMA;
1962 1962
1963 /* disabled by default until verified */ 1963 /* disabled by default until verified */
1964 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1964 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1965 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1965 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1966 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1966 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1967 NETIF_F_HIGHDMA; 1967 NETIF_F_HIGHDMA;
1968 1968
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1276ac71353a..3ccedeb8aba0 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2041,8 +2041,6 @@ keep_pkt:
2041 2041
2042 netif_receive_skb (skb); 2042 netif_receive_skb (skb);
2043 } else { 2043 } else {
2044 if (net_ratelimit())
2045 netdev_warn(dev, "Memory squeeze, dropping packet\n");
2046 dev->stats.rx_dropped++; 2044 dev->stats.rx_dropped++;
2047 } 2045 }
2048 received++; 2046 received++;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9f2d416de750..d77d60ea8202 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev)
782 782
783 skb = netdev_alloc_skb(dev, pkt_len + 2); 783 skb = netdev_alloc_skb(dev, pkt_len + 2);
784 if (skb == NULL) { 784 if (skb == NULL) {
785 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
786 dev->name);
787 dev->stats.rx_dropped++; 785 dev->stats.rx_dropped++;
788 goto done; 786 goto done;
789 } 787 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4ecbe64a758d..79c520b64fdd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -47,7 +47,9 @@
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" 47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" 48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" 49#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw" 50#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
51#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
52#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
51 53
52#ifdef RTL8169_DEBUG 54#ifdef RTL8169_DEBUG
53#define assert(expr) \ 55#define assert(expr) \
@@ -140,6 +142,8 @@ enum mac_version {
140 RTL_GIGA_MAC_VER_39, 142 RTL_GIGA_MAC_VER_39,
141 RTL_GIGA_MAC_VER_40, 143 RTL_GIGA_MAC_VER_40,
142 RTL_GIGA_MAC_VER_41, 144 RTL_GIGA_MAC_VER_41,
145 RTL_GIGA_MAC_VER_42,
146 RTL_GIGA_MAC_VER_43,
143 RTL_GIGA_MAC_NONE = 0xff, 147 RTL_GIGA_MAC_NONE = 0xff,
144}; 148};
145 149
@@ -262,10 +266,16 @@ static const struct {
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, 266 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
263 JUMBO_1K, true), 267 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_40] = 268 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1, 269 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2,
266 JUMBO_9K, false), 270 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_41] = 271 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), 272 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_42] =
274 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3,
275 JUMBO_9K, false),
276 [RTL_GIGA_MAC_VER_43] =
277 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
278 JUMBO_1K, true),
269}; 279};
270#undef _R 280#undef _R
271 281
@@ -329,6 +339,7 @@ enum rtl_registers {
329#define RXCFG_FIFO_SHIFT 13 339#define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */ 340 /* No threshold before first PCI xfer */
331#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) 341#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
342#define RX_EARLY_OFF (1 << 11)
332#define RXCFG_DMA_SHIFT 8 343#define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */ 344 /* Unlimited maximum PCI burst. */
334#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) 345#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
@@ -513,6 +524,7 @@ enum rtl_register_content {
513 PMEnable = (1 << 0), /* Power Management Enable */ 524 PMEnable = (1 << 0), /* Power Management Enable */
514 525
515 /* Config2 register p. 25 */ 526 /* Config2 register p. 25 */
527 ClkReqEn = (1 << 7), /* Clock Request Enable */
516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ 528 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
517 PCI_Clock_66MHz = 0x01, 529 PCI_Clock_66MHz = 0x01,
518 PCI_Clock_33MHz = 0x00, 530 PCI_Clock_33MHz = 0x00,
@@ -533,6 +545,7 @@ enum rtl_register_content {
533 Spi_en = (1 << 3), 545 Spi_en = (1 << 3),
534 LanWake = (1 << 1), /* LanWake enable/disable */ 546 LanWake = (1 << 1), /* LanWake enable/disable */
535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 547 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
548 ASPM_en = (1 << 0), /* ASPM enable */
536 549
537 /* TBICSR p.28 */ 550 /* TBICSR p.28 */
538 TBIReset = 0x80000000, 551 TBIReset = 0x80000000,
@@ -814,7 +827,9 @@ MODULE_FIRMWARE(FIRMWARE_8168F_2);
814MODULE_FIRMWARE(FIRMWARE_8402_1); 827MODULE_FIRMWARE(FIRMWARE_8402_1);
815MODULE_FIRMWARE(FIRMWARE_8411_1); 828MODULE_FIRMWARE(FIRMWARE_8411_1);
816MODULE_FIRMWARE(FIRMWARE_8106E_1); 829MODULE_FIRMWARE(FIRMWARE_8106E_1);
817MODULE_FIRMWARE(FIRMWARE_8168G_1); 830MODULE_FIRMWARE(FIRMWARE_8106E_2);
831MODULE_FIRMWARE(FIRMWARE_8168G_2);
832MODULE_FIRMWARE(FIRMWARE_8168G_3);
818 833
819static void rtl_lock_work(struct rtl8169_private *tp) 834static void rtl_lock_work(struct rtl8169_private *tp)
820{ 835{
@@ -1024,14 +1039,6 @@ static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1024 (RTL_R32(GPHY_OCP) & 0xffff) : ~0; 1039 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1025} 1040}
1026 1041
1027static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1028{
1029 int val;
1030
1031 val = r8168_phy_ocp_read(tp, reg);
1032 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1033}
1034
1035static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) 1042static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1036{ 1043{
1037 void __iomem *ioaddr = tp->mmio_addr; 1044 void __iomem *ioaddr = tp->mmio_addr;
@@ -1077,6 +1084,21 @@ static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1077 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); 1084 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1078} 1085}
1079 1086
1087static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
1088{
1089 if (reg == 0x1f) {
1090 tp->ocp_base = value << 4;
1091 return;
1092 }
1093
1094 r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
1095}
1096
1097static int mac_mcu_read(struct rtl8169_private *tp, int reg)
1098{
1099 return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
1100}
1101
1080DECLARE_RTL_COND(rtl_phyar_cond) 1102DECLARE_RTL_COND(rtl_phyar_cond)
1081{ 1103{
1082 void __iomem *ioaddr = tp->mmio_addr; 1104 void __iomem *ioaddr = tp->mmio_addr;
@@ -1771,16 +1793,17 @@ static void __rtl8169_set_features(struct net_device *dev,
1771 netdev_features_t changed = features ^ dev->features; 1793 netdev_features_t changed = features ^ dev->features;
1772 void __iomem *ioaddr = tp->mmio_addr; 1794 void __iomem *ioaddr = tp->mmio_addr;
1773 1795
1774 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX))) 1796 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
1797 NETIF_F_HW_VLAN_CTAG_RX)))
1775 return; 1798 return;
1776 1799
1777 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) { 1800 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
1778 if (features & NETIF_F_RXCSUM) 1801 if (features & NETIF_F_RXCSUM)
1779 tp->cp_cmd |= RxChkSum; 1802 tp->cp_cmd |= RxChkSum;
1780 else 1803 else
1781 tp->cp_cmd &= ~RxChkSum; 1804 tp->cp_cmd &= ~RxChkSum;
1782 1805
1783 if (dev->features & NETIF_F_HW_VLAN_RX) 1806 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
1784 tp->cp_cmd |= RxVlan; 1807 tp->cp_cmd |= RxVlan;
1785 else 1808 else
1786 tp->cp_cmd &= ~RxVlan; 1809 tp->cp_cmd &= ~RxVlan;
@@ -1820,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1820 u32 opts2 = le32_to_cpu(desc->opts2); 1843 u32 opts2 = le32_to_cpu(desc->opts2);
1821 1844
1822 if (opts2 & RxVlanTag) 1845 if (opts2 & RxVlanTag)
1823 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); 1846 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1824} 1847}
1825 1848
1826static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 1849static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2028,6 +2051,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2028 int mac_version; 2051 int mac_version;
2029 } mac_info[] = { 2052 } mac_info[] = {
2030 /* 8168G family. */ 2053 /* 8168G family. */
2054 { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
2031 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, 2055 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2032 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, 2056 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2033 2057
@@ -2116,6 +2140,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2116 netif_notice(tp, probe, dev, 2140 netif_notice(tp, probe, dev,
2117 "unknown MAC, using family default\n"); 2141 "unknown MAC, using family default\n");
2118 tp->mac_version = default_version; 2142 tp->mac_version = default_version;
2143 } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
2144 tp->mac_version = tp->mii.supports_gmii ?
2145 RTL_GIGA_MAC_VER_42 :
2146 RTL_GIGA_MAC_VER_43;
2119 } 2147 }
2120} 2148}
2121 2149
@@ -2142,9 +2170,7 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
2142#define PHY_DATA_OR 0x10000000 2170#define PHY_DATA_OR 0x10000000
2143#define PHY_DATA_AND 0x20000000 2171#define PHY_DATA_AND 0x20000000
2144#define PHY_BJMPN 0x30000000 2172#define PHY_BJMPN 0x30000000
2145#define PHY_READ_EFUSE 0x40000000 2173#define PHY_MDIO_CHG 0x40000000
2146#define PHY_READ_MAC_BYTE 0x50000000
2147#define PHY_WRITE_MAC_BYTE 0x60000000
2148#define PHY_CLEAR_READCOUNT 0x70000000 2174#define PHY_CLEAR_READCOUNT 0x70000000
2149#define PHY_WRITE 0x80000000 2175#define PHY_WRITE 0x80000000
2150#define PHY_READCOUNT_EQ_SKIP 0x90000000 2176#define PHY_READCOUNT_EQ_SKIP 0x90000000
@@ -2153,7 +2179,6 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
2153#define PHY_WRITE_PREVIOUS 0xc0000000 2179#define PHY_WRITE_PREVIOUS 0xc0000000
2154#define PHY_SKIPN 0xd0000000 2180#define PHY_SKIPN 0xd0000000
2155#define PHY_DELAY_MS 0xe0000000 2181#define PHY_DELAY_MS 0xe0000000
2156#define PHY_WRITE_ERI_WORD 0xf0000000
2157 2182
2158struct fw_info { 2183struct fw_info {
2159 u32 magic; 2184 u32 magic;
@@ -2230,7 +2255,7 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2230 case PHY_READ: 2255 case PHY_READ:
2231 case PHY_DATA_OR: 2256 case PHY_DATA_OR:
2232 case PHY_DATA_AND: 2257 case PHY_DATA_AND:
2233 case PHY_READ_EFUSE: 2258 case PHY_MDIO_CHG:
2234 case PHY_CLEAR_READCOUNT: 2259 case PHY_CLEAR_READCOUNT:
2235 case PHY_WRITE: 2260 case PHY_WRITE:
2236 case PHY_WRITE_PREVIOUS: 2261 case PHY_WRITE_PREVIOUS:
@@ -2261,9 +2286,6 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2261 } 2286 }
2262 break; 2287 break;
2263 2288
2264 case PHY_READ_MAC_BYTE:
2265 case PHY_WRITE_MAC_BYTE:
2266 case PHY_WRITE_ERI_WORD:
2267 default: 2289 default:
2268 netif_err(tp, ifup, tp->dev, 2290 netif_err(tp, ifup, tp->dev,
2269 "Invalid action 0x%08x\n", action); 2291 "Invalid action 0x%08x\n", action);
@@ -2294,10 +2316,13 @@ out:
2294static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) 2316static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2295{ 2317{
2296 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; 2318 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2319 struct mdio_ops org, *ops = &tp->mdio_ops;
2297 u32 predata, count; 2320 u32 predata, count;
2298 size_t index; 2321 size_t index;
2299 2322
2300 predata = count = 0; 2323 predata = count = 0;
2324 org.write = ops->write;
2325 org.read = ops->read;
2301 2326
2302 for (index = 0; index < pa->size; ) { 2327 for (index = 0; index < pa->size; ) {
2303 u32 action = le32_to_cpu(pa->code[index]); 2328 u32 action = le32_to_cpu(pa->code[index]);
@@ -2324,8 +2349,15 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2324 case PHY_BJMPN: 2349 case PHY_BJMPN:
2325 index -= regno; 2350 index -= regno;
2326 break; 2351 break;
2327 case PHY_READ_EFUSE: 2352 case PHY_MDIO_CHG:
2328 predata = rtl8168d_efuse_read(tp, regno); 2353 if (data == 0) {
2354 ops->write = org.write;
2355 ops->read = org.read;
2356 } else if (data == 1) {
2357 ops->write = mac_mcu_write;
2358 ops->read = mac_mcu_read;
2359 }
2360
2329 index++; 2361 index++;
2330 break; 2362 break;
2331 case PHY_CLEAR_READCOUNT: 2363 case PHY_CLEAR_READCOUNT:
@@ -2361,13 +2393,13 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2361 index++; 2393 index++;
2362 break; 2394 break;
2363 2395
2364 case PHY_READ_MAC_BYTE:
2365 case PHY_WRITE_MAC_BYTE:
2366 case PHY_WRITE_ERI_WORD:
2367 default: 2396 default:
2368 BUG(); 2397 BUG();
2369 } 2398 }
2370 } 2399 }
2400
2401 ops->write = org.write;
2402 ops->read = org.read;
2371} 2403}
2372 2404
2373static void rtl_release_firmware(struct rtl8169_private *tp) 2405static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -3368,51 +3400,68 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3368 3400
3369static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) 3401static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3370{ 3402{
3371 static const u16 mac_ocp_patch[] = { 3403 rtl_apply_firmware(tp);
3372 0xe008, 0xe01b, 0xe01d, 0xe01f,
3373 0xe021, 0xe023, 0xe025, 0xe027,
3374 0x49d2, 0xf10d, 0x766c, 0x49e2,
3375 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3376
3377 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3378 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3379 0xbe00, 0xb416, 0x0076, 0xe86c,
3380 0xc602, 0xbe00, 0x0000, 0xc602,
3381
3382 0xbe00, 0x0000, 0xc602, 0xbe00,
3383 0x0000, 0xc602, 0xbe00, 0x0000,
3384 0xc602, 0xbe00, 0x0000, 0xc602,
3385 0xbe00, 0x0000, 0xc602, 0xbe00,
3386
3387 0x0000, 0x0000, 0x0000, 0x0000
3388 };
3389 u32 i;
3390 3404
3391 /* Patch code for GPHY reset */ 3405 rtl_writephy(tp, 0x1f, 0x0a46);
3392 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++) 3406 if (rtl_readphy(tp, 0x10) & 0x0100) {
3393 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]); 3407 rtl_writephy(tp, 0x1f, 0x0bcc);
3394 r8168_mac_ocp_write(tp, 0xfc26, 0x8000); 3408 rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
3395 r8168_mac_ocp_write(tp, 0xfc28, 0x0075); 3409 } else {
3410 rtl_writephy(tp, 0x1f, 0x0bcc);
3411 rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
3412 }
3396 3413
3397 rtl_apply_firmware(tp); 3414 rtl_writephy(tp, 0x1f, 0x0a46);
3415 if (rtl_readphy(tp, 0x13) & 0x0100) {
3416 rtl_writephy(tp, 0x1f, 0x0c41);
3417 rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
3418 } else {
3419 rtl_writephy(tp, 0x1f, 0x0c41);
3420 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
3421 }
3398 3422
3399 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100) 3423 /* Enable PHY auto speed down */
3400 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000); 3424 rtl_writephy(tp, 0x1f, 0x0a44);
3401 else 3425 rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
3402 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000); 3426
3427 rtl_writephy(tp, 0x1f, 0x0bcc);
3428 rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
3429 rtl_writephy(tp, 0x1f, 0x0a44);
3430 rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
3431 rtl_writephy(tp, 0x1f, 0x0a43);
3432 rtl_writephy(tp, 0x13, 0x8084);
3433 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
3434 rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
3435
3436 /* EEE auto-fallback function */
3437 rtl_writephy(tp, 0x1f, 0x0a4b);
3438 rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
3439
3440 /* Enable UC LPF tune function */
3441 rtl_writephy(tp, 0x1f, 0x0a43);
3442 rtl_writephy(tp, 0x13, 0x8012);
3443 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3403 3444
3404 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100) 3445 rtl_writephy(tp, 0x1f, 0x0c42);
3405 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000); 3446 rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
3406 else
3407 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3408 3447
3409 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000); 3448 /* Improve SWR Efficiency */
3410 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000); 3449 rtl_writephy(tp, 0x1f, 0x0bcd);
3450 rtl_writephy(tp, 0x14, 0x5065);
3451 rtl_writephy(tp, 0x14, 0xd065);
3452 rtl_writephy(tp, 0x1f, 0x0bc8);
3453 rtl_writephy(tp, 0x11, 0x5655);
3454 rtl_writephy(tp, 0x1f, 0x0bcd);
3455 rtl_writephy(tp, 0x14, 0x1065);
3456 rtl_writephy(tp, 0x14, 0x9065);
3457 rtl_writephy(tp, 0x14, 0x1065);
3411 3458
3412 r8168_phy_ocp_write(tp, 0xa436, 0x8012); 3459 rtl_writephy(tp, 0x1f, 0x0000);
3413 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000); 3460}
3414 3461
3415 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000); 3462static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
3463{
3464 rtl_apply_firmware(tp);
3416} 3465}
3417 3466
3418static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 3467static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@ -3600,6 +3649,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
3600 case RTL_GIGA_MAC_VER_40: 3649 case RTL_GIGA_MAC_VER_40:
3601 rtl8168g_1_hw_phy_config(tp); 3650 rtl8168g_1_hw_phy_config(tp);
3602 break; 3651 break;
3652 case RTL_GIGA_MAC_VER_42:
3653 case RTL_GIGA_MAC_VER_43:
3654 rtl8168g_2_hw_phy_config(tp);
3655 break;
3603 3656
3604 case RTL_GIGA_MAC_VER_41: 3657 case RTL_GIGA_MAC_VER_41:
3605 default: 3658 default:
@@ -3808,6 +3861,8 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3808 break; 3861 break;
3809 case RTL_GIGA_MAC_VER_40: 3862 case RTL_GIGA_MAC_VER_40:
3810 case RTL_GIGA_MAC_VER_41: 3863 case RTL_GIGA_MAC_VER_41:
3864 case RTL_GIGA_MAC_VER_42:
3865 case RTL_GIGA_MAC_VER_43:
3811 ops->write = r8168g_mdio_write; 3866 ops->write = r8168g_mdio_write;
3812 ops->read = r8168g_mdio_read; 3867 ops->read = r8168g_mdio_read;
3813 break; 3868 break;
@@ -3859,6 +3914,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3859 case RTL_GIGA_MAC_VER_39: 3914 case RTL_GIGA_MAC_VER_39:
3860 case RTL_GIGA_MAC_VER_40: 3915 case RTL_GIGA_MAC_VER_40:
3861 case RTL_GIGA_MAC_VER_41: 3916 case RTL_GIGA_MAC_VER_41:
3917 case RTL_GIGA_MAC_VER_42:
3918 case RTL_GIGA_MAC_VER_43:
3862 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3919 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3863 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3920 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3864 break; 3921 break;
@@ -3966,6 +4023,8 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
3966 switch (tp->mac_version) { 4023 switch (tp->mac_version) {
3967 case RTL_GIGA_MAC_VER_32: 4024 case RTL_GIGA_MAC_VER_32:
3968 case RTL_GIGA_MAC_VER_33: 4025 case RTL_GIGA_MAC_VER_33:
4026 case RTL_GIGA_MAC_VER_40:
4027 case RTL_GIGA_MAC_VER_41:
3969 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); 4028 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3970 break; 4029 break;
3971 4030
@@ -4027,6 +4086,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4027 case RTL_GIGA_MAC_VER_33: 4086 case RTL_GIGA_MAC_VER_33:
4028 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 4087 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4029 break; 4088 break;
4089 case RTL_GIGA_MAC_VER_40:
4090 case RTL_GIGA_MAC_VER_41:
4091 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
4092 0xfc000000, ERIAR_EXGMAC);
4093 break;
4030 } 4094 }
4031} 4095}
4032 4096
@@ -4044,6 +4108,11 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
4044 case RTL_GIGA_MAC_VER_33: 4108 case RTL_GIGA_MAC_VER_33:
4045 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 4109 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4046 break; 4110 break;
4111 case RTL_GIGA_MAC_VER_40:
4112 case RTL_GIGA_MAC_VER_41:
4113 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
4114 0x00000000, ERIAR_EXGMAC);
4115 break;
4047 } 4116 }
4048 4117
4049 r8168_phy_power_up(tp); 4118 r8168_phy_power_up(tp);
@@ -4080,6 +4149,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4080 case RTL_GIGA_MAC_VER_30: 4149 case RTL_GIGA_MAC_VER_30:
4081 case RTL_GIGA_MAC_VER_37: 4150 case RTL_GIGA_MAC_VER_37:
4082 case RTL_GIGA_MAC_VER_39: 4151 case RTL_GIGA_MAC_VER_39:
4152 case RTL_GIGA_MAC_VER_43:
4083 ops->down = r810x_pll_power_down; 4153 ops->down = r810x_pll_power_down;
4084 ops->up = r810x_pll_power_up; 4154 ops->up = r810x_pll_power_up;
4085 break; 4155 break;
@@ -4107,6 +4177,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4107 case RTL_GIGA_MAC_VER_38: 4177 case RTL_GIGA_MAC_VER_38:
4108 case RTL_GIGA_MAC_VER_40: 4178 case RTL_GIGA_MAC_VER_40:
4109 case RTL_GIGA_MAC_VER_41: 4179 case RTL_GIGA_MAC_VER_41:
4180 case RTL_GIGA_MAC_VER_42:
4110 ops->down = r8168_pll_power_down; 4181 ops->down = r8168_pll_power_down;
4111 ops->up = r8168_pll_power_up; 4182 ops->up = r8168_pll_power_up;
4112 break; 4183 break;
@@ -4149,6 +4220,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4149 case RTL_GIGA_MAC_VER_34: 4220 case RTL_GIGA_MAC_VER_34:
4150 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4221 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4151 break; 4222 break;
4223 case RTL_GIGA_MAC_VER_40:
4224 case RTL_GIGA_MAC_VER_41:
4225 case RTL_GIGA_MAC_VER_42:
4226 case RTL_GIGA_MAC_VER_43:
4227 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4228 break;
4152 default: 4229 default:
4153 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); 4230 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4154 break; 4231 break;
@@ -4305,6 +4382,8 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4305 */ 4382 */
4306 case RTL_GIGA_MAC_VER_40: 4383 case RTL_GIGA_MAC_VER_40:
4307 case RTL_GIGA_MAC_VER_41: 4384 case RTL_GIGA_MAC_VER_41:
4385 case RTL_GIGA_MAC_VER_42:
4386 case RTL_GIGA_MAC_VER_43:
4308 default: 4387 default:
4309 ops->disable = NULL; 4388 ops->disable = NULL;
4310 ops->enable = NULL; 4389 ops->enable = NULL;
@@ -4412,6 +4491,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4412 tp->mac_version == RTL_GIGA_MAC_VER_37 || 4491 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4413 tp->mac_version == RTL_GIGA_MAC_VER_40 || 4492 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4414 tp->mac_version == RTL_GIGA_MAC_VER_41 || 4493 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4494 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
4495 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
4415 tp->mac_version == RTL_GIGA_MAC_VER_38) { 4496 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4416 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4497 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4417 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); 4498 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@@ -5127,6 +5208,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5127 void __iomem *ioaddr = tp->mmio_addr; 5208 void __iomem *ioaddr = tp->mmio_addr;
5128 struct pci_dev *pdev = tp->pci_dev; 5209 struct pci_dev *pdev = tp->pci_dev;
5129 5210
5211 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5212
5130 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); 5213 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5131 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); 5214 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5132 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); 5215 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5138,6 +5221,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5138 5221
5139 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5222 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5140 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5223 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5224 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
5141 5225
5142 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5226 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5143 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); 5227 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
@@ -5149,7 +5233,26 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5149 /* Adjust EEE LED frequency */ 5233 /* Adjust EEE LED frequency */
5150 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 5234 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5151 5235
5152 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC); 5236 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5237 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5238}
5239
5240static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
5241{
5242 void __iomem *ioaddr = tp->mmio_addr;
5243 static const struct ephy_info e_info_8168g_2[] = {
5244 { 0x00, 0x0000, 0x0008 },
5245 { 0x0c, 0x3df0, 0x0200 },
5246 { 0x19, 0xffff, 0xfc00 },
5247 { 0x1e, 0xffff, 0x20eb }
5248 };
5249
5250 rtl_hw_start_8168g_1(tp);
5251
5252 /* disable aspm and clock request before access ephy */
5253 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5254 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5255 rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
5153} 5256}
5154 5257
5155static void rtl_hw_start_8168(struct net_device *dev) 5258static void rtl_hw_start_8168(struct net_device *dev)
@@ -5177,10 +5280,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
5177 5280
5178 rtl_set_rx_tx_desc_registers(tp, ioaddr); 5281 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5179 5282
5180 rtl_set_rx_mode(dev); 5283 rtl_set_rx_tx_config_registers(tp);
5181
5182 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5183 (InterFrameGap << TxInterFrameGapShift));
5184 5284
5185 RTL_R8(IntrMask); 5285 RTL_R8(IntrMask);
5186 5286
@@ -5257,6 +5357,9 @@ static void rtl_hw_start_8168(struct net_device *dev)
5257 case RTL_GIGA_MAC_VER_41: 5357 case RTL_GIGA_MAC_VER_41:
5258 rtl_hw_start_8168g_1(tp); 5358 rtl_hw_start_8168g_1(tp);
5259 break; 5359 break;
5360 case RTL_GIGA_MAC_VER_42:
5361 rtl_hw_start_8168g_2(tp);
5362 break;
5260 5363
5261 default: 5364 default:
5262 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 5365 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
@@ -5264,9 +5367,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
5264 break; 5367 break;
5265 } 5368 }
5266 5369
5370 RTL_W8(Cfg9346, Cfg9346_Lock);
5371
5267 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5372 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5268 5373
5269 RTL_W8(Cfg9346, Cfg9346_Lock); 5374 rtl_set_rx_mode(dev);
5270 5375
5271 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 5376 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5272} 5377}
@@ -5424,6 +5529,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
5424 5529
5425 RTL_W8(Cfg9346, Cfg9346_Unlock); 5530 RTL_W8(Cfg9346, Cfg9346_Unlock);
5426 5531
5532 RTL_W8(MaxTxPacketSize, TxPacketMax);
5533
5534 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5535
5536 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5537 RTL_W16(CPlusCmd, tp->cp_cmd);
5538
5539 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5540
5541 rtl_set_rx_tx_config_registers(tp);
5542
5427 switch (tp->mac_version) { 5543 switch (tp->mac_version) {
5428 case RTL_GIGA_MAC_VER_07: 5544 case RTL_GIGA_MAC_VER_07:
5429 rtl_hw_start_8102e_1(tp); 5545 rtl_hw_start_8102e_1(tp);
@@ -5451,28 +5567,21 @@ static void rtl_hw_start_8101(struct net_device *dev)
5451 case RTL_GIGA_MAC_VER_39: 5567 case RTL_GIGA_MAC_VER_39:
5452 rtl_hw_start_8106(tp); 5568 rtl_hw_start_8106(tp);
5453 break; 5569 break;
5570 case RTL_GIGA_MAC_VER_43:
5571 rtl_hw_start_8168g_2(tp);
5572 break;
5454 } 5573 }
5455 5574
5456 RTL_W8(Cfg9346, Cfg9346_Lock); 5575 RTL_W8(Cfg9346, Cfg9346_Lock);
5457 5576
5458 RTL_W8(MaxTxPacketSize, TxPacketMax);
5459
5460 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5461
5462 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5463 RTL_W16(CPlusCmd, tp->cp_cmd);
5464
5465 RTL_W16(IntrMitigate, 0x0000); 5577 RTL_W16(IntrMitigate, 0x0000);
5466 5578
5467 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5468
5469 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5579 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5470 rtl_set_rx_tx_config_registers(tp);
5471
5472 RTL_R8(IntrMask);
5473 5580
5474 rtl_set_rx_mode(dev); 5581 rtl_set_rx_mode(dev);
5475 5582
5583 RTL_R8(IntrMask);
5584
5476 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 5585 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5477} 5586}
5478 5587
@@ -5787,6 +5896,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5787 goto err_stop_0; 5896 goto err_stop_0;
5788 } 5897 }
5789 5898
5899 /* 8168evl does not automatically pad to minimum length. */
5900 if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
5901 skb->len < ETH_ZLEN)) {
5902 if (skb_padto(skb, ETH_ZLEN))
5903 goto err_update_stats;
5904 skb_put(skb, ETH_ZLEN - skb->len);
5905 }
5906
5790 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) 5907 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5791 goto err_stop_0; 5908 goto err_stop_0;
5792 5909
@@ -5858,6 +5975,7 @@ err_dma_1:
5858 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); 5975 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5859err_dma_0: 5976err_dma_0:
5860 dev_kfree_skb(skb); 5977 dev_kfree_skb(skb);
5978err_update_stats:
5861 dev->stats.tx_dropped++; 5979 dev->stats.tx_dropped++;
5862 return NETDEV_TX_OK; 5980 return NETDEV_TX_OK;
5863 5981
@@ -6744,6 +6862,8 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
6744 switch (tp->mac_version) { 6862 switch (tp->mac_version) {
6745 case RTL_GIGA_MAC_VER_40: 6863 case RTL_GIGA_MAC_VER_40:
6746 case RTL_GIGA_MAC_VER_41: 6864 case RTL_GIGA_MAC_VER_41:
6865 case RTL_GIGA_MAC_VER_42:
6866 case RTL_GIGA_MAC_VER_43:
6747 rtl_hw_init_8168g(tp); 6867 rtl_hw_init_8168g(tp);
6748 break; 6868 break;
6749 6869
@@ -6926,16 +7046,17 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6926 /* don't enable SG, IP_CSUM and TSO by default - it might not work 7046 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6927 * properly for all devices */ 7047 * properly for all devices */
6928 dev->features |= NETIF_F_RXCSUM | 7048 dev->features |= NETIF_F_RXCSUM |
6929 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7049 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6930 7050
6931 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 7051 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6932 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7052 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
7053 NETIF_F_HW_VLAN_CTAG_RX;
6933 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 7054 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6934 NETIF_F_HIGHDMA; 7055 NETIF_F_HIGHDMA;
6935 7056
6936 if (tp->mac_version == RTL_GIGA_MAC_VER_05) 7057 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6937 /* 8110SCd requires hardware Rx VLAN - disallow toggling */ 7058 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6938 dev->hw_features &= ~NETIF_F_HW_VLAN_RX; 7059 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
6939 7060
6940 dev->hw_features |= NETIF_F_RXALL; 7061 dev->hw_features |= NETIF_F_RXALL;
6941 dev->hw_features |= NETIF_F_RXFCS; 7062 dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 24c2305d7948..bed9841d728c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -8,7 +8,8 @@ config SH_ETH
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779) 11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \
12 ARCH_R8A7778 || ARCH_R8A7779)
12 select CRC32 13 select CRC32
13 select NET_CORE 14 select NET_CORE
14 select MII 15 select MII
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6ed333fe5c04..33dc6f2418f2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2,7 +2,8 @@
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2012 Renesas Solutions Corp. 5 * Copyright (C) 2008-2013 Renesas Solutions Corp.
6 * Copyright (C) 2013 Cogent Embedded, Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -49,6 +50,269 @@
49 NETIF_MSG_RX_ERR| \ 50 NETIF_MSG_RX_ERR| \
50 NETIF_MSG_TX_ERR) 51 NETIF_MSG_TX_ERR)
51 52
53static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
54 [EDSR] = 0x0000,
55 [EDMR] = 0x0400,
56 [EDTRR] = 0x0408,
57 [EDRRR] = 0x0410,
58 [EESR] = 0x0428,
59 [EESIPR] = 0x0430,
60 [TDLAR] = 0x0010,
61 [TDFAR] = 0x0014,
62 [TDFXR] = 0x0018,
63 [TDFFR] = 0x001c,
64 [RDLAR] = 0x0030,
65 [RDFAR] = 0x0034,
66 [RDFXR] = 0x0038,
67 [RDFFR] = 0x003c,
68 [TRSCER] = 0x0438,
69 [RMFCR] = 0x0440,
70 [TFTR] = 0x0448,
71 [FDR] = 0x0450,
72 [RMCR] = 0x0458,
73 [RPADIR] = 0x0460,
74 [FCFTR] = 0x0468,
75 [CSMR] = 0x04E4,
76
77 [ECMR] = 0x0500,
78 [ECSR] = 0x0510,
79 [ECSIPR] = 0x0518,
80 [PIR] = 0x0520,
81 [PSR] = 0x0528,
82 [PIPR] = 0x052c,
83 [RFLR] = 0x0508,
84 [APR] = 0x0554,
85 [MPR] = 0x0558,
86 [PFTCR] = 0x055c,
87 [PFRCR] = 0x0560,
88 [TPAUSER] = 0x0564,
89 [GECMR] = 0x05b0,
90 [BCULR] = 0x05b4,
91 [MAHR] = 0x05c0,
92 [MALR] = 0x05c8,
93 [TROCR] = 0x0700,
94 [CDCR] = 0x0708,
95 [LCCR] = 0x0710,
96 [CEFCR] = 0x0740,
97 [FRECR] = 0x0748,
98 [TSFRCR] = 0x0750,
99 [TLFRCR] = 0x0758,
100 [RFCR] = 0x0760,
101 [CERCR] = 0x0768,
102 [CEECR] = 0x0770,
103 [MAFCR] = 0x0778,
104 [RMII_MII] = 0x0790,
105
106 [ARSTR] = 0x0000,
107 [TSU_CTRST] = 0x0004,
108 [TSU_FWEN0] = 0x0010,
109 [TSU_FWEN1] = 0x0014,
110 [TSU_FCM] = 0x0018,
111 [TSU_BSYSL0] = 0x0020,
112 [TSU_BSYSL1] = 0x0024,
113 [TSU_PRISL0] = 0x0028,
114 [TSU_PRISL1] = 0x002c,
115 [TSU_FWSL0] = 0x0030,
116 [TSU_FWSL1] = 0x0034,
117 [TSU_FWSLC] = 0x0038,
118 [TSU_QTAG0] = 0x0040,
119 [TSU_QTAG1] = 0x0044,
120 [TSU_FWSR] = 0x0050,
121 [TSU_FWINMK] = 0x0054,
122 [TSU_ADQT0] = 0x0048,
123 [TSU_ADQT1] = 0x004c,
124 [TSU_VTAG0] = 0x0058,
125 [TSU_VTAG1] = 0x005c,
126 [TSU_ADSBSY] = 0x0060,
127 [TSU_TEN] = 0x0064,
128 [TSU_POST1] = 0x0070,
129 [TSU_POST2] = 0x0074,
130 [TSU_POST3] = 0x0078,
131 [TSU_POST4] = 0x007c,
132 [TSU_ADRH0] = 0x0100,
133 [TSU_ADRL0] = 0x0104,
134 [TSU_ADRH31] = 0x01f8,
135 [TSU_ADRL31] = 0x01fc,
136
137 [TXNLCR0] = 0x0080,
138 [TXALCR0] = 0x0084,
139 [RXNLCR0] = 0x0088,
140 [RXALCR0] = 0x008c,
141 [FWNLCR0] = 0x0090,
142 [FWALCR0] = 0x0094,
143 [TXNLCR1] = 0x00a0,
144 [TXALCR1] = 0x00a0,
145 [RXNLCR1] = 0x00a8,
146 [RXALCR1] = 0x00ac,
147 [FWNLCR1] = 0x00b0,
148 [FWALCR1] = 0x00b4,
149};
150
151static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
152 [ECMR] = 0x0300,
153 [RFLR] = 0x0308,
154 [ECSR] = 0x0310,
155 [ECSIPR] = 0x0318,
156 [PIR] = 0x0320,
157 [PSR] = 0x0328,
158 [RDMLR] = 0x0340,
159 [IPGR] = 0x0350,
160 [APR] = 0x0354,
161 [MPR] = 0x0358,
162 [RFCF] = 0x0360,
163 [TPAUSER] = 0x0364,
164 [TPAUSECR] = 0x0368,
165 [MAHR] = 0x03c0,
166 [MALR] = 0x03c8,
167 [TROCR] = 0x03d0,
168 [CDCR] = 0x03d4,
169 [LCCR] = 0x03d8,
170 [CNDCR] = 0x03dc,
171 [CEFCR] = 0x03e4,
172 [FRECR] = 0x03e8,
173 [TSFRCR] = 0x03ec,
174 [TLFRCR] = 0x03f0,
175 [RFCR] = 0x03f4,
176 [MAFCR] = 0x03f8,
177
178 [EDMR] = 0x0200,
179 [EDTRR] = 0x0208,
180 [EDRRR] = 0x0210,
181 [TDLAR] = 0x0218,
182 [RDLAR] = 0x0220,
183 [EESR] = 0x0228,
184 [EESIPR] = 0x0230,
185 [TRSCER] = 0x0238,
186 [RMFCR] = 0x0240,
187 [TFTR] = 0x0248,
188 [FDR] = 0x0250,
189 [RMCR] = 0x0258,
190 [TFUCR] = 0x0264,
191 [RFOCR] = 0x0268,
192 [FCFTR] = 0x0270,
193 [TRIMD] = 0x027c,
194};
195
196static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
197 [ECMR] = 0x0100,
198 [RFLR] = 0x0108,
199 [ECSR] = 0x0110,
200 [ECSIPR] = 0x0118,
201 [PIR] = 0x0120,
202 [PSR] = 0x0128,
203 [RDMLR] = 0x0140,
204 [IPGR] = 0x0150,
205 [APR] = 0x0154,
206 [MPR] = 0x0158,
207 [TPAUSER] = 0x0164,
208 [RFCF] = 0x0160,
209 [TPAUSECR] = 0x0168,
210 [BCFRR] = 0x016c,
211 [MAHR] = 0x01c0,
212 [MALR] = 0x01c8,
213 [TROCR] = 0x01d0,
214 [CDCR] = 0x01d4,
215 [LCCR] = 0x01d8,
216 [CNDCR] = 0x01dc,
217 [CEFCR] = 0x01e4,
218 [FRECR] = 0x01e8,
219 [TSFRCR] = 0x01ec,
220 [TLFRCR] = 0x01f0,
221 [RFCR] = 0x01f4,
222 [MAFCR] = 0x01f8,
223 [RTRATE] = 0x01fc,
224
225 [EDMR] = 0x0000,
226 [EDTRR] = 0x0008,
227 [EDRRR] = 0x0010,
228 [TDLAR] = 0x0018,
229 [RDLAR] = 0x0020,
230 [EESR] = 0x0028,
231 [EESIPR] = 0x0030,
232 [TRSCER] = 0x0038,
233 [RMFCR] = 0x0040,
234 [TFTR] = 0x0048,
235 [FDR] = 0x0050,
236 [RMCR] = 0x0058,
237 [TFUCR] = 0x0064,
238 [RFOCR] = 0x0068,
239 [FCFTR] = 0x0070,
240 [RPADIR] = 0x0078,
241 [TRIMD] = 0x007c,
242 [RBWAR] = 0x00c8,
243 [RDFAR] = 0x00cc,
244 [TBRAR] = 0x00d4,
245 [TDFAR] = 0x00d8,
246};
247
248static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
249 [ECMR] = 0x0160,
250 [ECSR] = 0x0164,
251 [ECSIPR] = 0x0168,
252 [PIR] = 0x016c,
253 [MAHR] = 0x0170,
254 [MALR] = 0x0174,
255 [RFLR] = 0x0178,
256 [PSR] = 0x017c,
257 [TROCR] = 0x0180,
258 [CDCR] = 0x0184,
259 [LCCR] = 0x0188,
260 [CNDCR] = 0x018c,
261 [CEFCR] = 0x0194,
262 [FRECR] = 0x0198,
263 [TSFRCR] = 0x019c,
264 [TLFRCR] = 0x01a0,
265 [RFCR] = 0x01a4,
266 [MAFCR] = 0x01a8,
267 [IPGR] = 0x01b4,
268 [APR] = 0x01b8,
269 [MPR] = 0x01bc,
270 [TPAUSER] = 0x01c4,
271 [BCFR] = 0x01cc,
272
273 [ARSTR] = 0x0000,
274 [TSU_CTRST] = 0x0004,
275 [TSU_FWEN0] = 0x0010,
276 [TSU_FWEN1] = 0x0014,
277 [TSU_FCM] = 0x0018,
278 [TSU_BSYSL0] = 0x0020,
279 [TSU_BSYSL1] = 0x0024,
280 [TSU_PRISL0] = 0x0028,
281 [TSU_PRISL1] = 0x002c,
282 [TSU_FWSL0] = 0x0030,
283 [TSU_FWSL1] = 0x0034,
284 [TSU_FWSLC] = 0x0038,
285 [TSU_QTAGM0] = 0x0040,
286 [TSU_QTAGM1] = 0x0044,
287 [TSU_ADQT0] = 0x0048,
288 [TSU_ADQT1] = 0x004c,
289 [TSU_FWSR] = 0x0050,
290 [TSU_FWINMK] = 0x0054,
291 [TSU_ADSBSY] = 0x0060,
292 [TSU_TEN] = 0x0064,
293 [TSU_POST1] = 0x0070,
294 [TSU_POST2] = 0x0074,
295 [TSU_POST3] = 0x0078,
296 [TSU_POST4] = 0x007c,
297
298 [TXNLCR0] = 0x0080,
299 [TXALCR0] = 0x0084,
300 [RXNLCR0] = 0x0088,
301 [RXALCR0] = 0x008c,
302 [FWNLCR0] = 0x0090,
303 [FWALCR0] = 0x0094,
304 [TXNLCR1] = 0x00a0,
305 [TXALCR1] = 0x00a0,
306 [RXNLCR1] = 0x00a8,
307 [RXALCR1] = 0x00ac,
308 [FWNLCR1] = 0x00b0,
309 [FWALCR1] = 0x00b4,
310
311 [TSU_ADRH0] = 0x0100,
312 [TSU_ADRL0] = 0x0104,
313 [TSU_ADRL31] = 0x01fc,
314};
315
52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ 316#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 317 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54 defined(CONFIG_ARCH_R8A7740) 318 defined(CONFIG_ARCH_R8A7740)
@@ -78,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
78#endif 342#endif
79 343
80/* There is CPU dependent code */ 344/* There is CPU dependent code */
81#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) 345#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
82#define SH_ETH_RESET_DEFAULT 1 346#define SH_ETH_RESET_DEFAULT 1
83static void sh_eth_set_duplex(struct net_device *ndev) 347static void sh_eth_set_duplex(struct net_device *ndev)
84{ 348{
@@ -93,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev)
93static void sh_eth_set_rate(struct net_device *ndev) 357static void sh_eth_set_rate(struct net_device *ndev)
94{ 358{
95 struct sh_eth_private *mdp = netdev_priv(ndev); 359 struct sh_eth_private *mdp = netdev_priv(ndev);
96 unsigned int bits = ECMR_RTM;
97 360
98#if defined(CONFIG_ARCH_R8A7779) 361 switch (mdp->speed) {
99 bits |= ECMR_ELB; 362 case 10: /* 10BASE */
100#endif 363 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
364 break;
365 case 100:/* 100BASE */
366 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
367 break;
368 default:
369 break;
370 }
371}
372
373/* R8A7778/9 */
374static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
375 .set_duplex = sh_eth_set_duplex,
376 .set_rate = sh_eth_set_rate,
377
378 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
379 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
380 .eesipr_value = 0x01ff009f,
381
382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
384 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
385 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
386
387 .apr = 1,
388 .mpr = 1,
389 .tpauser = 1,
390 .hw_swap = 1,
391};
392#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
393#define SH_ETH_RESET_DEFAULT 1
394static void sh_eth_set_duplex(struct net_device *ndev)
395{
396 struct sh_eth_private *mdp = netdev_priv(ndev);
397
398 if (mdp->duplex) /* Full */
399 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
400 else /* Half */
401 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
402}
403
404static void sh_eth_set_rate(struct net_device *ndev)
405{
406 struct sh_eth_private *mdp = netdev_priv(ndev);
101 407
102 switch (mdp->speed) { 408 switch (mdp->speed) {
103 case 10: /* 10BASE */ 409 case 10: /* 10BASE */
104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); 410 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
105 break; 411 break;
106 case 100:/* 100BASE */ 412 case 100:/* 100BASE */
107 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); 413 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
108 break; 414 break;
109 default: 415 default:
110 break; 416 break;
@@ -592,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
592 cnt--; 898 cnt--;
593 } 899 }
594 if (cnt < 0) { 900 if (cnt < 0) {
595 printk(KERN_ERR "Device reset fail\n"); 901 pr_err("Device reset fail\n");
596 ret = -ETIMEDOUT; 902 ret = -ETIMEDOUT;
597 } 903 }
598 return ret; 904 return ret;
@@ -908,11 +1214,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
908 /* Allocate all Rx descriptors. */ 1214 /* Allocate all Rx descriptors. */
909 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1215 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
910 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1216 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
911 GFP_KERNEL); 1217 GFP_KERNEL);
912
913 if (!mdp->rx_ring) { 1218 if (!mdp->rx_ring) {
914 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
915 rx_ringsize);
916 ret = -ENOMEM; 1219 ret = -ENOMEM;
917 goto desc_ring_free; 1220 goto desc_ring_free;
918 } 1221 }
@@ -922,10 +1225,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
922 /* Allocate all Tx descriptors. */ 1225 /* Allocate all Tx descriptors. */
923 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1226 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
924 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1227 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
925 GFP_KERNEL); 1228 GFP_KERNEL);
926 if (!mdp->tx_ring) { 1229 if (!mdp->tx_ring) {
927 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
928 tx_ringsize);
929 ret = -ENOMEM; 1230 ret = -ENOMEM;
930 goto desc_ring_free; 1231 goto desc_ring_free;
931 } 1232 }
@@ -2147,7 +2448,8 @@ static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2147 return TSU_VTAG1; 2448 return TSU_VTAG1;
2148} 2449}
2149 2450
2150static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2451static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2452 __be16 proto, u16 vid)
2151{ 2453{
2152 struct sh_eth_private *mdp = netdev_priv(ndev); 2454 struct sh_eth_private *mdp = netdev_priv(ndev);
2153 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2455 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@@ -2177,7 +2479,8 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2177 return 0; 2479 return 0;
2178} 2480}
2179 2481
2180static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2482static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2483 __be16 proto, u16 vid)
2181{ 2484{
2182 struct sh_eth_private *mdp = netdev_priv(ndev); 2485 struct sh_eth_private *mdp = netdev_priv(ndev);
2183 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2486 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@@ -2228,7 +2531,6 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2228/* MDIO bus release function */ 2531/* MDIO bus release function */
2229static int sh_mdio_release(struct net_device *ndev) 2532static int sh_mdio_release(struct net_device *ndev)
2230{ 2533{
2231 struct sh_eth_private *mdp = netdev_priv(ndev);
2232 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2534 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2233 2535
2234 /* unregister mdio bus */ 2536 /* unregister mdio bus */
@@ -2237,15 +2539,9 @@ static int sh_mdio_release(struct net_device *ndev)
2237 /* remove mdio bus info from net_device */ 2539 /* remove mdio bus info from net_device */
2238 dev_set_drvdata(&ndev->dev, NULL); 2540 dev_set_drvdata(&ndev->dev, NULL);
2239 2541
2240 /* free interrupts memory */
2241 kfree(bus->irq);
2242
2243 /* free bitbang info */ 2542 /* free bitbang info */
2244 free_mdio_bitbang(bus); 2543 free_mdio_bitbang(bus);
2245 2544
2246 /* free bitbang memory */
2247 kfree(mdp->bitbang);
2248
2249 return 0; 2545 return 0;
2250} 2546}
2251 2547
@@ -2258,7 +2554,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2258 struct sh_eth_private *mdp = netdev_priv(ndev); 2554 struct sh_eth_private *mdp = netdev_priv(ndev);
2259 2555
2260 /* create bit control struct for PHY */ 2556 /* create bit control struct for PHY */
2261 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); 2557 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2558 GFP_KERNEL);
2262 if (!bitbang) { 2559 if (!bitbang) {
2263 ret = -ENOMEM; 2560 ret = -ENOMEM;
2264 goto out; 2561 goto out;
@@ -2267,18 +2564,17 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2267 /* bitbang init */ 2564 /* bitbang init */
2268 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2565 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2269 bitbang->set_gate = pd->set_mdio_gate; 2566 bitbang->set_gate = pd->set_mdio_gate;
2270 bitbang->mdi_msk = 0x08; 2567 bitbang->mdi_msk = PIR_MDI;
2271 bitbang->mdo_msk = 0x04; 2568 bitbang->mdo_msk = PIR_MDO;
2272 bitbang->mmd_msk = 0x02;/* MMD */ 2569 bitbang->mmd_msk = PIR_MMD;
2273 bitbang->mdc_msk = 0x01; 2570 bitbang->mdc_msk = PIR_MDC;
2274 bitbang->ctrl.ops = &bb_ops; 2571 bitbang->ctrl.ops = &bb_ops;
2275 2572
2276 /* MII controller setting */ 2573 /* MII controller setting */
2277 mdp->bitbang = bitbang;
2278 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2574 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2279 if (!mdp->mii_bus) { 2575 if (!mdp->mii_bus) {
2280 ret = -ENOMEM; 2576 ret = -ENOMEM;
2281 goto out_free_bitbang; 2577 goto out;
2282 } 2578 }
2283 2579
2284 /* Hook up MII support for ethtool */ 2580 /* Hook up MII support for ethtool */
@@ -2288,7 +2584,9 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2288 mdp->pdev->name, id); 2584 mdp->pdev->name, id);
2289 2585
2290 /* PHY IRQ */ 2586 /* PHY IRQ */
2291 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 2587 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2588 sizeof(int) * PHY_MAX_ADDR,
2589 GFP_KERNEL);
2292 if (!mdp->mii_bus->irq) { 2590 if (!mdp->mii_bus->irq) {
2293 ret = -ENOMEM; 2591 ret = -ENOMEM;
2294 goto out_free_bus; 2592 goto out_free_bus;
@@ -2300,21 +2598,15 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2300 /* register mdio bus */ 2598 /* register mdio bus */
2301 ret = mdiobus_register(mdp->mii_bus); 2599 ret = mdiobus_register(mdp->mii_bus);
2302 if (ret) 2600 if (ret)
2303 goto out_free_irq; 2601 goto out_free_bus;
2304 2602
2305 dev_set_drvdata(&ndev->dev, mdp->mii_bus); 2603 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2306 2604
2307 return 0; 2605 return 0;
2308 2606
2309out_free_irq:
2310 kfree(mdp->mii_bus->irq);
2311
2312out_free_bus: 2607out_free_bus:
2313 free_mdio_bitbang(mdp->mii_bus); 2608 free_mdio_bitbang(mdp->mii_bus);
2314 2609
2315out_free_bitbang:
2316 kfree(bitbang);
2317
2318out: 2610out:
2319 return ret; 2611 return ret;
2320} 2612}
@@ -2327,6 +2619,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2327 case SH_ETH_REG_GIGABIT: 2619 case SH_ETH_REG_GIGABIT:
2328 reg_offset = sh_eth_offset_gigabit; 2620 reg_offset = sh_eth_offset_gigabit;
2329 break; 2621 break;
2622 case SH_ETH_REG_FAST_RCAR:
2623 reg_offset = sh_eth_offset_fast_rcar;
2624 break;
2330 case SH_ETH_REG_FAST_SH4: 2625 case SH_ETH_REG_FAST_SH4:
2331 reg_offset = sh_eth_offset_fast_sh4; 2626 reg_offset = sh_eth_offset_fast_sh4;
2332 break; 2627 break;
@@ -2334,7 +2629,7 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2334 reg_offset = sh_eth_offset_fast_sh3_sh2; 2629 reg_offset = sh_eth_offset_fast_sh3_sh2;
2335 break; 2630 break;
2336 default: 2631 default:
2337 printk(KERN_ERR "Unknown register type (%d)\n", register_type); 2632 pr_err("Unknown register type (%d)\n", register_type);
2338 break; 2633 break;
2339 } 2634 }
2340 2635
@@ -2364,7 +2659,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2364 struct resource *res; 2659 struct resource *res;
2365 struct net_device *ndev = NULL; 2660 struct net_device *ndev = NULL;
2366 struct sh_eth_private *mdp = NULL; 2661 struct sh_eth_private *mdp = NULL;
2367 struct sh_eth_plat_data *pd; 2662 struct sh_eth_plat_data *pd = pdev->dev.platform_data;
2368 2663
2369 /* get base addr */ 2664 /* get base addr */
2370 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2665 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2402,10 +2697,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2402 mdp = netdev_priv(ndev); 2697 mdp = netdev_priv(ndev);
2403 mdp->num_tx_ring = TX_RING_SIZE; 2698 mdp->num_tx_ring = TX_RING_SIZE;
2404 mdp->num_rx_ring = RX_RING_SIZE; 2699 mdp->num_rx_ring = RX_RING_SIZE;
2405 mdp->addr = ioremap(res->start, resource_size(res)); 2700 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2406 if (mdp->addr == NULL) { 2701 if (IS_ERR(mdp->addr)) {
2407 ret = -ENOMEM; 2702 ret = PTR_ERR(mdp->addr);
2408 dev_err(&pdev->dev, "ioremap failed.\n");
2409 goto out_release; 2703 goto out_release;
2410 } 2704 }
2411 2705
@@ -2414,7 +2708,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2414 pm_runtime_enable(&pdev->dev); 2708 pm_runtime_enable(&pdev->dev);
2415 pm_runtime_resume(&pdev->dev); 2709 pm_runtime_resume(&pdev->dev);
2416 2710
2417 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
2418 /* get PHY ID */ 2711 /* get PHY ID */
2419 mdp->phy_id = pd->phy; 2712 mdp->phy_id = pd->phy;
2420 mdp->phy_interface = pd->phy_interface; 2713 mdp->phy_interface = pd->phy_interface;
@@ -2442,6 +2735,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2442 2735
2443 /* read and set MAC address */ 2736 /* read and set MAC address */
2444 read_mac_address(ndev, pd->mac_addr); 2737 read_mac_address(ndev, pd->mac_addr);
2738 if (!is_valid_ether_addr(ndev->dev_addr)) {
2739 dev_warn(&pdev->dev,
2740 "no valid MAC address supplied, using a random one.\n");
2741 eth_hw_addr_random(ndev);
2742 }
2445 2743
2446 /* ioremap the TSU registers */ 2744 /* ioremap the TSU registers */
2447 if (mdp->cd->tsu) { 2745 if (mdp->cd->tsu) {
@@ -2452,15 +2750,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2452 ret = -ENODEV; 2750 ret = -ENODEV;
2453 goto out_release; 2751 goto out_release;
2454 } 2752 }
2455 mdp->tsu_addr = ioremap(rtsu->start, 2753 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2456 resource_size(rtsu)); 2754 if (IS_ERR(mdp->tsu_addr)) {
2457 if (mdp->tsu_addr == NULL) { 2755 ret = PTR_ERR(mdp->tsu_addr);
2458 ret = -ENOMEM;
2459 dev_err(&pdev->dev, "TSU ioremap failed.\n");
2460 goto out_release; 2756 goto out_release;
2461 } 2757 }
2462 mdp->port = devno % 2; 2758 mdp->port = devno % 2;
2463 ndev->features = NETIF_F_HW_VLAN_FILTER; 2759 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2464 } 2760 }
2465 2761
2466 /* initialize first or needed device */ 2762 /* initialize first or needed device */
@@ -2497,10 +2793,6 @@ out_unregister:
2497 2793
2498out_release: 2794out_release:
2499 /* net_dev free */ 2795 /* net_dev free */
2500 if (mdp && mdp->addr)
2501 iounmap(mdp->addr);
2502 if (mdp && mdp->tsu_addr)
2503 iounmap(mdp->tsu_addr);
2504 if (ndev) 2796 if (ndev)
2505 free_netdev(ndev); 2797 free_netdev(ndev);
2506 2798
@@ -2511,14 +2803,10 @@ out:
2511static int sh_eth_drv_remove(struct platform_device *pdev) 2803static int sh_eth_drv_remove(struct platform_device *pdev)
2512{ 2804{
2513 struct net_device *ndev = platform_get_drvdata(pdev); 2805 struct net_device *ndev = platform_get_drvdata(pdev);
2514 struct sh_eth_private *mdp = netdev_priv(ndev);
2515 2806
2516 if (mdp->cd->tsu)
2517 iounmap(mdp->tsu_addr);
2518 sh_mdio_release(ndev); 2807 sh_mdio_release(ndev);
2519 unregister_netdev(ndev); 2808 unregister_netdev(ndev);
2520 pm_runtime_disable(&pdev->dev); 2809 pm_runtime_disable(&pdev->dev);
2521 iounmap(mdp->addr);
2522 free_netdev(ndev); 2810 free_netdev(ndev);
2523 platform_set_drvdata(pdev, NULL); 2811 platform_set_drvdata(pdev, NULL);
2524 2812
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 828be4515008..1ddc9f235bcb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -156,225 +156,6 @@ enum {
156 SH_ETH_MAX_REGISTER_OFFSET, 156 SH_ETH_MAX_REGISTER_OFFSET,
157}; 157};
158 158
159static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
160 [EDSR] = 0x0000,
161 [EDMR] = 0x0400,
162 [EDTRR] = 0x0408,
163 [EDRRR] = 0x0410,
164 [EESR] = 0x0428,
165 [EESIPR] = 0x0430,
166 [TDLAR] = 0x0010,
167 [TDFAR] = 0x0014,
168 [TDFXR] = 0x0018,
169 [TDFFR] = 0x001c,
170 [RDLAR] = 0x0030,
171 [RDFAR] = 0x0034,
172 [RDFXR] = 0x0038,
173 [RDFFR] = 0x003c,
174 [TRSCER] = 0x0438,
175 [RMFCR] = 0x0440,
176 [TFTR] = 0x0448,
177 [FDR] = 0x0450,
178 [RMCR] = 0x0458,
179 [RPADIR] = 0x0460,
180 [FCFTR] = 0x0468,
181 [CSMR] = 0x04E4,
182
183 [ECMR] = 0x0500,
184 [ECSR] = 0x0510,
185 [ECSIPR] = 0x0518,
186 [PIR] = 0x0520,
187 [PSR] = 0x0528,
188 [PIPR] = 0x052c,
189 [RFLR] = 0x0508,
190 [APR] = 0x0554,
191 [MPR] = 0x0558,
192 [PFTCR] = 0x055c,
193 [PFRCR] = 0x0560,
194 [TPAUSER] = 0x0564,
195 [GECMR] = 0x05b0,
196 [BCULR] = 0x05b4,
197 [MAHR] = 0x05c0,
198 [MALR] = 0x05c8,
199 [TROCR] = 0x0700,
200 [CDCR] = 0x0708,
201 [LCCR] = 0x0710,
202 [CEFCR] = 0x0740,
203 [FRECR] = 0x0748,
204 [TSFRCR] = 0x0750,
205 [TLFRCR] = 0x0758,
206 [RFCR] = 0x0760,
207 [CERCR] = 0x0768,
208 [CEECR] = 0x0770,
209 [MAFCR] = 0x0778,
210 [RMII_MII] = 0x0790,
211
212 [ARSTR] = 0x0000,
213 [TSU_CTRST] = 0x0004,
214 [TSU_FWEN0] = 0x0010,
215 [TSU_FWEN1] = 0x0014,
216 [TSU_FCM] = 0x0018,
217 [TSU_BSYSL0] = 0x0020,
218 [TSU_BSYSL1] = 0x0024,
219 [TSU_PRISL0] = 0x0028,
220 [TSU_PRISL1] = 0x002c,
221 [TSU_FWSL0] = 0x0030,
222 [TSU_FWSL1] = 0x0034,
223 [TSU_FWSLC] = 0x0038,
224 [TSU_QTAG0] = 0x0040,
225 [TSU_QTAG1] = 0x0044,
226 [TSU_FWSR] = 0x0050,
227 [TSU_FWINMK] = 0x0054,
228 [TSU_ADQT0] = 0x0048,
229 [TSU_ADQT1] = 0x004c,
230 [TSU_VTAG0] = 0x0058,
231 [TSU_VTAG1] = 0x005c,
232 [TSU_ADSBSY] = 0x0060,
233 [TSU_TEN] = 0x0064,
234 [TSU_POST1] = 0x0070,
235 [TSU_POST2] = 0x0074,
236 [TSU_POST3] = 0x0078,
237 [TSU_POST4] = 0x007c,
238 [TSU_ADRH0] = 0x0100,
239 [TSU_ADRL0] = 0x0104,
240 [TSU_ADRH31] = 0x01f8,
241 [TSU_ADRL31] = 0x01fc,
242
243 [TXNLCR0] = 0x0080,
244 [TXALCR0] = 0x0084,
245 [RXNLCR0] = 0x0088,
246 [RXALCR0] = 0x008c,
247 [FWNLCR0] = 0x0090,
248 [FWALCR0] = 0x0094,
249 [TXNLCR1] = 0x00a0,
250 [TXALCR1] = 0x00a0,
251 [RXNLCR1] = 0x00a8,
252 [RXALCR1] = 0x00ac,
253 [FWNLCR1] = 0x00b0,
254 [FWALCR1] = 0x00b4,
255};
256
257static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
258 [ECMR] = 0x0100,
259 [RFLR] = 0x0108,
260 [ECSR] = 0x0110,
261 [ECSIPR] = 0x0118,
262 [PIR] = 0x0120,
263 [PSR] = 0x0128,
264 [RDMLR] = 0x0140,
265 [IPGR] = 0x0150,
266 [APR] = 0x0154,
267 [MPR] = 0x0158,
268 [TPAUSER] = 0x0164,
269 [RFCF] = 0x0160,
270 [TPAUSECR] = 0x0168,
271 [BCFRR] = 0x016c,
272 [MAHR] = 0x01c0,
273 [MALR] = 0x01c8,
274 [TROCR] = 0x01d0,
275 [CDCR] = 0x01d4,
276 [LCCR] = 0x01d8,
277 [CNDCR] = 0x01dc,
278 [CEFCR] = 0x01e4,
279 [FRECR] = 0x01e8,
280 [TSFRCR] = 0x01ec,
281 [TLFRCR] = 0x01f0,
282 [RFCR] = 0x01f4,
283 [MAFCR] = 0x01f8,
284 [RTRATE] = 0x01fc,
285
286 [EDMR] = 0x0000,
287 [EDTRR] = 0x0008,
288 [EDRRR] = 0x0010,
289 [TDLAR] = 0x0018,
290 [RDLAR] = 0x0020,
291 [EESR] = 0x0028,
292 [EESIPR] = 0x0030,
293 [TRSCER] = 0x0038,
294 [RMFCR] = 0x0040,
295 [TFTR] = 0x0048,
296 [FDR] = 0x0050,
297 [RMCR] = 0x0058,
298 [TFUCR] = 0x0064,
299 [RFOCR] = 0x0068,
300 [FCFTR] = 0x0070,
301 [RPADIR] = 0x0078,
302 [TRIMD] = 0x007c,
303 [RBWAR] = 0x00c8,
304 [RDFAR] = 0x00cc,
305 [TBRAR] = 0x00d4,
306 [TDFAR] = 0x00d8,
307};
308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [ECMR] = 0x0160,
311 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168,
313 [PIR] = 0x016c,
314 [MAHR] = 0x0170,
315 [MALR] = 0x0174,
316 [RFLR] = 0x0178,
317 [PSR] = 0x017c,
318 [TROCR] = 0x0180,
319 [CDCR] = 0x0184,
320 [LCCR] = 0x0188,
321 [CNDCR] = 0x018c,
322 [CEFCR] = 0x0194,
323 [FRECR] = 0x0198,
324 [TSFRCR] = 0x019c,
325 [TLFRCR] = 0x01a0,
326 [RFCR] = 0x01a4,
327 [MAFCR] = 0x01a8,
328 [IPGR] = 0x01b4,
329 [APR] = 0x01b8,
330 [MPR] = 0x01bc,
331 [TPAUSER] = 0x01c4,
332 [BCFR] = 0x01cc,
333
334 [ARSTR] = 0x0000,
335 [TSU_CTRST] = 0x0004,
336 [TSU_FWEN0] = 0x0010,
337 [TSU_FWEN1] = 0x0014,
338 [TSU_FCM] = 0x0018,
339 [TSU_BSYSL0] = 0x0020,
340 [TSU_BSYSL1] = 0x0024,
341 [TSU_PRISL0] = 0x0028,
342 [TSU_PRISL1] = 0x002c,
343 [TSU_FWSL0] = 0x0030,
344 [TSU_FWSL1] = 0x0034,
345 [TSU_FWSLC] = 0x0038,
346 [TSU_QTAGM0] = 0x0040,
347 [TSU_QTAGM1] = 0x0044,
348 [TSU_ADQT0] = 0x0048,
349 [TSU_ADQT1] = 0x004c,
350 [TSU_FWSR] = 0x0050,
351 [TSU_FWINMK] = 0x0054,
352 [TSU_ADSBSY] = 0x0060,
353 [TSU_TEN] = 0x0064,
354 [TSU_POST1] = 0x0070,
355 [TSU_POST2] = 0x0074,
356 [TSU_POST3] = 0x0078,
357 [TSU_POST4] = 0x007c,
358
359 [TXNLCR0] = 0x0080,
360 [TXALCR0] = 0x0084,
361 [RXNLCR0] = 0x0088,
362 [RXALCR0] = 0x008c,
363 [FWNLCR0] = 0x0090,
364 [FWALCR0] = 0x0094,
365 [TXNLCR1] = 0x00a0,
366 [TXALCR1] = 0x00a0,
367 [RXNLCR1] = 0x00a8,
368 [RXALCR1] = 0x00ac,
369 [FWNLCR1] = 0x00b0,
370 [FWALCR1] = 0x00b4,
371
372 [TSU_ADRH0] = 0x0100,
373 [TSU_ADRL0] = 0x0104,
374 [TSU_ADRL31] = 0x01fc,
375
376};
377
378/* Driver's parameters */ 159/* Driver's parameters */
379#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 160#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
380#define SH4_SKB_RX_ALIGN 32 161#define SH4_SKB_RX_ALIGN 32
@@ -705,7 +486,6 @@ struct sh_eth_private {
705 const u16 *reg_offset; 486 const u16 *reg_offset;
706 void __iomem *addr; 487 void __iomem *addr;
707 void __iomem *tsu_addr; 488 void __iomem *tsu_addr;
708 struct bb_info *bitbang;
709 u32 num_rx_ring; 489 u32 num_rx_ring;
710 u32 num_tx_ring; 490 u32 num_tx_ring;
711 dma_addr_t rx_desc_dma; 491 dma_addr_t rx_desc_dma;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 21683e2b1ff4..b6739afeaca1 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -998,6 +998,7 @@ static int s6gmac_probe(struct platform_device *pdev)
998 mb = mdiobus_alloc(); 998 mb = mdiobus_alloc();
999 if (!mb) { 999 if (!mb) {
1000 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); 1000 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
1001 res = -ENOMEM;
1001 goto errmii; 1002 goto errmii;
1002 } 1003 }
1003 mb->name = "s6gmac_mii"; 1004 mb->name = "s6gmac_mii";
@@ -1053,20 +1054,7 @@ static struct platform_driver s6gmac_driver = {
1053 }, 1054 },
1054}; 1055};
1055 1056
1056static int __init s6gmac_init(void) 1057module_platform_driver(s6gmac_driver);
1057{
1058 printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
1059 return platform_driver_register(&s6gmac_driver);
1060}
1061
1062
1063static void __exit s6gmac_exit(void)
1064{
1065 platform_driver_unregister(&s6gmac_driver);
1066}
1067
1068module_init(s6gmac_init);
1069module_exit(s6gmac_exit);
1070 1058
1071MODULE_LICENSE("GPL"); 1059MODULE_LICENSE("GPL");
1072MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); 1060MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 3aca57853ed4..bdac936a68bc 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
651 skb->protocol = eth_type_trans(skb, dev); 651 skb->protocol = eth_type_trans(skb, dev);
652 netif_rx(skb); 652 netif_rx(skb);
653 received ++; 653 received ++;
654 } else 654 } else {
655 goto dropping; 655 ether3_outw(next_ptr >> 8, REG_RECVEND);
656 dev->stats.rx_dropped++;
657 goto done;
658 }
656 } else { 659 } else {
657 struct net_device_stats *stats = &dev->stats; 660 struct net_device_stats *stats = &dev->stats;
658 ether3_outw(next_ptr >> 8, REG_RECVEND); 661 ether3_outw(next_ptr >> 8, REG_RECVEND);
@@ -679,21 +682,6 @@ done:
679 } 682 }
680 683
681 return maxcnt; 684 return maxcnt;
682
683dropping:{
684 static unsigned long last_warned;
685
686 ether3_outw(next_ptr >> 8, REG_RECVEND);
687 /*
688 * Don't print this message too many times...
689 */
690 if (time_after(jiffies, last_warned + 10 * HZ)) {
691 last_warned = jiffies;
692 printk("%s: memory squeeze, dropping packet.\n", dev->name);
693 }
694 dev->stats.rx_dropped++;
695 goto done;
696 }
697} 685}
698 686
699/* 687/*
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0fde9ca28269..0ad5694b41f8 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -381,8 +381,6 @@ memory_squeeze:
381 dev->stats.rx_packets++; 381 dev->stats.rx_packets++;
382 dev->stats.rx_bytes += len; 382 dev->stats.rx_bytes += len;
383 } else { 383 } else {
384 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
385 dev->name);
386 dev->stats.rx_dropped++; 384 dev->stats.rx_dropped++;
387 } 385 }
388 } else { 386 } else {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0bc00991d310..01b99206139a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h> 24#include <linux/cpu_rmap.h>
25#include <linux/aer.h>
25#include "net_driver.h" 26#include "net_driver.h"
26#include "efx.h" 27#include "efx.h"
27#include "nic.h" 28#include "nic.h"
@@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = {
71 72
72const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 73const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
73const char *const efx_reset_type_names[] = { 74const char *const efx_reset_type_names[] = {
74 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 75 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
75 [RESET_TYPE_ALL] = "ALL", 76 [RESET_TYPE_ALL] = "ALL",
76 [RESET_TYPE_WORLD] = "WORLD", 77 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
77 [RESET_TYPE_DISABLE] = "DISABLE", 78 [RESET_TYPE_WORLD] = "WORLD",
78 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 79 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
79 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 80 [RESET_TYPE_DISABLE] = "DISABLE",
80 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 81 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
81 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", 82 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
82 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", 83 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
83 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 84 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
84 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 85 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
86 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
87 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
85}; 88};
86 89
87#define EFX_MAX_MTU (9 * 1024)
88
89/* Reset workqueue. If any NIC has a hardware failure then a reset will be 90/* Reset workqueue. If any NIC has a hardware failure then a reset will be
90 * queued onto this work queue. This is not a per-nic work queue, because 91 * queued onto this work queue. This is not a per-nic work queue, because
91 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 92 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels,
117static int napi_weight = 64; 118static int napi_weight = 64;
118 119
119/* This is the time (in jiffies) between invocations of the hardware 120/* This is the time (in jiffies) between invocations of the hardware
120 * monitor. On Falcon-based NICs, this will: 121 * monitor.
122 * On Falcon-based NICs, this will:
121 * - Check the on-board hardware monitor; 123 * - Check the on-board hardware monitor;
122 * - Poll the link state and reconfigure the hardware as necessary. 124 * - Poll the link state and reconfigure the hardware as necessary.
125 * On Siena-based NICs for power systems with EEH support, this will give EEH a
126 * chance to start.
123 */ 127 */
124static unsigned int efx_monitor_interval = 1 * HZ; 128static unsigned int efx_monitor_interval = 1 * HZ;
125 129
@@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx);
203#define EFX_ASSERT_RESET_SERIALISED(efx) \ 207#define EFX_ASSERT_RESET_SERIALISED(efx) \
204 do { \ 208 do { \
205 if ((efx->state == STATE_READY) || \ 209 if ((efx->state == STATE_READY) || \
210 (efx->state == STATE_RECOVERY) || \
206 (efx->state == STATE_DISABLED)) \ 211 (efx->state == STATE_DISABLED)) \
207 ASSERT_RTNL(); \ 212 ASSERT_RTNL(); \
208 } while (0) 213 } while (0)
209 214
210static int efx_check_disabled(struct efx_nic *efx) 215static int efx_check_disabled(struct efx_nic *efx)
211{ 216{
212 if (efx->state == STATE_DISABLED) { 217 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
213 netif_err(efx, drv, efx->net_dev, 218 netif_err(efx, drv, efx->net_dev,
214 "device is disabled due to earlier errors\n"); 219 "device is disabled due to earlier errors\n");
215 return -EIO; 220 return -EIO;
@@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
242 struct efx_rx_queue *rx_queue = 247 struct efx_rx_queue *rx_queue =
243 efx_channel_get_rx_queue(channel); 248 efx_channel_get_rx_queue(channel);
244 249
245 /* Deliver last RX packet. */ 250 efx_rx_flush_packet(channel);
246 if (channel->rx_pkt) { 251 if (rx_queue->enabled)
247 __efx_rx_packet(channel, channel->rx_pkt);
248 channel->rx_pkt = NULL;
249 }
250 if (rx_queue->enabled) {
251 efx_rx_strategy(channel);
252 efx_fast_push_rx_descriptors(rx_queue); 252 efx_fast_push_rx_descriptors(rx_queue);
253 }
254 } 253 }
255 254
256 return spent; 255 return spent;
@@ -625,20 +624,51 @@ fail:
625 */ 624 */
626static void efx_start_datapath(struct efx_nic *efx) 625static void efx_start_datapath(struct efx_nic *efx)
627{ 626{
627 bool old_rx_scatter = efx->rx_scatter;
628 struct efx_tx_queue *tx_queue; 628 struct efx_tx_queue *tx_queue;
629 struct efx_rx_queue *rx_queue; 629 struct efx_rx_queue *rx_queue;
630 struct efx_channel *channel; 630 struct efx_channel *channel;
631 size_t rx_buf_len;
631 632
632 /* Calculate the rx buffer allocation parameters required to 633 /* Calculate the rx buffer allocation parameters required to
633 * support the current MTU, including padding for header 634 * support the current MTU, including padding for header
634 * alignment and overruns. 635 * alignment and overruns.
635 */ 636 */
636 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 637 efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
637 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
638 efx->type->rx_buffer_hash_size + 639 efx->type->rx_buffer_padding);
639 efx->type->rx_buffer_padding); 640 rx_buf_len = (sizeof(struct efx_rx_page_state) +
640 efx->rx_buffer_order = get_order(efx->rx_buffer_len + 641 EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
641 sizeof(struct efx_rx_page_state)); 642 if (rx_buf_len <= PAGE_SIZE) {
643 efx->rx_scatter = false;
644 efx->rx_buffer_order = 0;
645 } else if (efx->type->can_rx_scatter) {
646 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
647 EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
648 PAGE_SIZE / 2);
649 efx->rx_scatter = true;
650 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
651 efx->rx_buffer_order = 0;
652 } else {
653 efx->rx_scatter = false;
654 efx->rx_buffer_order = get_order(rx_buf_len);
655 }
656
657 efx_rx_config_page_split(efx);
658 if (efx->rx_buffer_order)
659 netif_dbg(efx, drv, efx->net_dev,
660 "RX buf len=%u; page order=%u batch=%u\n",
661 efx->rx_dma_len, efx->rx_buffer_order,
662 efx->rx_pages_per_batch);
663 else
664 netif_dbg(efx, drv, efx->net_dev,
665 "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
666 efx->rx_dma_len, efx->rx_page_buf_step,
667 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
668
669 /* RX filters also have scatter-enabled flags */
670 if (efx->rx_scatter != old_rx_scatter)
671 efx_filter_update_rx_scatter(efx);
642 672
643 /* We must keep at least one descriptor in a TX ring empty. 673 /* We must keep at least one descriptor in a TX ring empty.
644 * We could avoid this when the queue size does not exactly 674 * We could avoid this when the queue size does not exactly
@@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx)
655 efx_for_each_channel_tx_queue(tx_queue, channel) 685 efx_for_each_channel_tx_queue(tx_queue, channel)
656 efx_init_tx_queue(tx_queue); 686 efx_init_tx_queue(tx_queue);
657 687
658 /* The rx buffer allocation strategy is MTU dependent */
659 efx_rx_strategy(channel);
660
661 efx_for_each_channel_rx_queue(rx_queue, channel) { 688 efx_for_each_channel_rx_queue(rx_queue, channel) {
662 efx_init_rx_queue(rx_queue); 689 efx_init_rx_queue(rx_queue);
663 efx_nic_generate_fill_event(rx_queue); 690 efx_nic_generate_fill_event(rx_queue);
664 } 691 }
665 692
666 WARN_ON(channel->rx_pkt != NULL); 693 WARN_ON(channel->rx_pkt_n_frags);
667 efx_rx_strategy(channel);
668 } 694 }
669 695
670 if (netif_device_present(efx->net_dev)) 696 if (netif_device_present(efx->net_dev))
@@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
683 BUG_ON(efx->port_enabled); 709 BUG_ON(efx->port_enabled);
684 710
685 /* Only perform flush if dma is enabled */ 711 /* Only perform flush if dma is enabled */
686 if (dev->is_busmaster) { 712 if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
687 rc = efx_nic_flush_queues(efx); 713 rc = efx_nic_flush_queues(efx);
688 714
689 if (rc && EFX_WORKAROUND_7803(efx)) { 715 if (rc && EFX_WORKAROUND_7803(efx)) {
@@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx)
1596 efx_start_port(efx); 1622 efx_start_port(efx);
1597 efx_start_datapath(efx); 1623 efx_start_datapath(efx);
1598 1624
1599 /* Start the hardware monitor if there is one. Otherwise (we're link 1625 /* Start the hardware monitor if there is one */
1600 * event driven), we have to poll the PHY because after an event queue 1626 if (efx->type->monitor != NULL)
1601 * flush, we could have a missed a link state change */
1602 if (efx->type->monitor != NULL) {
1603 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1627 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1604 efx_monitor_interval); 1628 efx_monitor_interval);
1605 } else { 1629
1630 /* If link state detection is normally event-driven, we have
1631 * to poll now because we could have missed a change
1632 */
1633 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1606 mutex_lock(&efx->mac_lock); 1634 mutex_lock(&efx->mac_lock);
1607 if (efx->phy_op->poll(efx)) 1635 if (efx->phy_op->poll(efx))
1608 efx_link_status_changed(efx); 1636 efx_link_status_changed(efx);
@@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
2309 2337
2310out: 2338out:
2311 /* Leave device stopped if necessary */ 2339 /* Leave device stopped if necessary */
2312 disabled = rc || method == RESET_TYPE_DISABLE; 2340 disabled = rc ||
2341 method == RESET_TYPE_DISABLE ||
2342 method == RESET_TYPE_RECOVER_OR_DISABLE;
2313 rc2 = efx_reset_up(efx, method, !disabled); 2343 rc2 = efx_reset_up(efx, method, !disabled);
2314 if (rc2) { 2344 if (rc2) {
2315 disabled = true; 2345 disabled = true;
@@ -2328,13 +2358,48 @@ out:
2328 return rc; 2358 return rc;
2329} 2359}
2330 2360
2361/* Try recovery mechanisms.
2362 * For now only EEH is supported.
2363 * Returns 0 if the recovery mechanisms are unsuccessful.
2364 * Returns a non-zero value otherwise.
2365 */
2366static int efx_try_recovery(struct efx_nic *efx)
2367{
2368#ifdef CONFIG_EEH
2369 /* A PCI error can occur and not be seen by EEH because nothing
2370 * happens on the PCI bus. In this case the driver may fail and
2371 * schedule a 'recover or reset', leading to this recovery handler.
2372 * Manually call the eeh failure check function.
2373 */
2374 struct eeh_dev *eehdev =
2375 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
2376
2377 if (eeh_dev_check_failure(eehdev)) {
2378 /* The EEH mechanisms will handle the error and reset the
2379 * device if necessary.
2380 */
2381 return 1;
2382 }
2383#endif
2384 return 0;
2385}
2386
2331/* The worker thread exists so that code that cannot sleep can 2387/* The worker thread exists so that code that cannot sleep can
2332 * schedule a reset for later. 2388 * schedule a reset for later.
2333 */ 2389 */
2334static void efx_reset_work(struct work_struct *data) 2390static void efx_reset_work(struct work_struct *data)
2335{ 2391{
2336 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2392 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2337 unsigned long pending = ACCESS_ONCE(efx->reset_pending); 2393 unsigned long pending;
2394 enum reset_type method;
2395
2396 pending = ACCESS_ONCE(efx->reset_pending);
2397 method = fls(pending) - 1;
2398
2399 if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2400 method == RESET_TYPE_RECOVER_OR_ALL) &&
2401 efx_try_recovery(efx))
2402 return;
2338 2403
2339 if (!pending) 2404 if (!pending)
2340 return; 2405 return;
@@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data)
2346 * it cannot change again. 2411 * it cannot change again.
2347 */ 2412 */
2348 if (efx->state == STATE_READY) 2413 if (efx->state == STATE_READY)
2349 (void)efx_reset(efx, fls(pending) - 1); 2414 (void)efx_reset(efx, method);
2350 2415
2351 rtnl_unlock(); 2416 rtnl_unlock();
2352} 2417}
@@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2355{ 2420{
2356 enum reset_type method; 2421 enum reset_type method;
2357 2422
2423 if (efx->state == STATE_RECOVERY) {
2424 netif_dbg(efx, drv, efx->net_dev,
2425 "recovering: skip scheduling %s reset\n",
2426 RESET_TYPE(type));
2427 return;
2428 }
2429
2358 switch (type) { 2430 switch (type) {
2359 case RESET_TYPE_INVISIBLE: 2431 case RESET_TYPE_INVISIBLE:
2360 case RESET_TYPE_ALL: 2432 case RESET_TYPE_ALL:
2433 case RESET_TYPE_RECOVER_OR_ALL:
2361 case RESET_TYPE_WORLD: 2434 case RESET_TYPE_WORLD:
2362 case RESET_TYPE_DISABLE: 2435 case RESET_TYPE_DISABLE:
2436 case RESET_TYPE_RECOVER_OR_DISABLE:
2363 method = type; 2437 method = type;
2364 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2438 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2365 RESET_TYPE(method)); 2439 RESET_TYPE(method));
@@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2569 efx_fini_struct(efx); 2643 efx_fini_struct(efx);
2570 pci_set_drvdata(pci_dev, NULL); 2644 pci_set_drvdata(pci_dev, NULL);
2571 free_netdev(efx->net_dev); 2645 free_netdev(efx->net_dev);
2646
2647 pci_disable_pcie_error_reporting(pci_dev);
2572}; 2648};
2573 2649
2574/* NIC VPD information 2650/* NIC VPD information
@@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
2741 netif_warn(efx, probe, efx->net_dev, 2817 netif_warn(efx, probe, efx->net_dev,
2742 "failed to create MTDs (%d)\n", rc); 2818 "failed to create MTDs (%d)\n", rc);
2743 2819
2820 rc = pci_enable_pcie_error_reporting(pci_dev);
2821 if (rc && rc != -EINVAL)
2822 netif_warn(efx, probe, efx->net_dev,
2823 "pci_enable_pcie_error_reporting failed (%d)\n", rc);
2824
2744 return 0; 2825 return 0;
2745 2826
2746 fail4: 2827 fail4:
@@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = {
2865 .restore = efx_pm_resume, 2946 .restore = efx_pm_resume,
2866}; 2947};
2867 2948
2949/* A PCI error affecting this device was detected.
2950 * At this point MMIO and DMA may be disabled.
2951 * Stop the software path and request a slot reset.
2952 */
2953static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
2954 enum pci_channel_state state)
2955{
2956 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2957 struct efx_nic *efx = pci_get_drvdata(pdev);
2958
2959 if (state == pci_channel_io_perm_failure)
2960 return PCI_ERS_RESULT_DISCONNECT;
2961
2962 rtnl_lock();
2963
2964 if (efx->state != STATE_DISABLED) {
2965 efx->state = STATE_RECOVERY;
2966 efx->reset_pending = 0;
2967
2968 efx_device_detach_sync(efx);
2969
2970 efx_stop_all(efx);
2971 efx_stop_interrupts(efx, false);
2972
2973 status = PCI_ERS_RESULT_NEED_RESET;
2974 } else {
2975 /* If the interface is disabled we don't want to do anything
2976 * with it.
2977 */
2978 status = PCI_ERS_RESULT_RECOVERED;
2979 }
2980
2981 rtnl_unlock();
2982
2983 pci_disable_device(pdev);
2984
2985 return status;
2986}
2987
2988/* Fake a successfull reset, which will be performed later in efx_io_resume. */
2989static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
2990{
2991 struct efx_nic *efx = pci_get_drvdata(pdev);
2992 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2993 int rc;
2994
2995 if (pci_enable_device(pdev)) {
2996 netif_err(efx, hw, efx->net_dev,
2997 "Cannot re-enable PCI device after reset.\n");
2998 status = PCI_ERS_RESULT_DISCONNECT;
2999 }
3000
3001 rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3002 if (rc) {
3003 netif_err(efx, hw, efx->net_dev,
3004 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3005 /* Non-fatal error. Continue. */
3006 }
3007
3008 return status;
3009}
3010
3011/* Perform the actual reset and resume I/O operations. */
3012static void efx_io_resume(struct pci_dev *pdev)
3013{
3014 struct efx_nic *efx = pci_get_drvdata(pdev);
3015 int rc;
3016
3017 rtnl_lock();
3018
3019 if (efx->state == STATE_DISABLED)
3020 goto out;
3021
3022 rc = efx_reset(efx, RESET_TYPE_ALL);
3023 if (rc) {
3024 netif_err(efx, hw, efx->net_dev,
3025 "efx_reset failed after PCI error (%d)\n", rc);
3026 } else {
3027 efx->state = STATE_READY;
3028 netif_dbg(efx, hw, efx->net_dev,
3029 "Done resetting and resuming IO after PCI error.\n");
3030 }
3031
3032out:
3033 rtnl_unlock();
3034}
3035
3036/* For simplicity and reliability, we always require a slot reset and try to
3037 * reset the hardware when a pci error affecting the device is detected.
3038 * We leave both the link_reset and mmio_enabled callback unimplemented:
3039 * with our request for slot reset the mmio_enabled callback will never be
3040 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3041 */
3042static struct pci_error_handlers efx_err_handlers = {
3043 .error_detected = efx_io_error_detected,
3044 .slot_reset = efx_io_slot_reset,
3045 .resume = efx_io_resume,
3046};
3047
2868static struct pci_driver efx_pci_driver = { 3048static struct pci_driver efx_pci_driver = {
2869 .name = KBUILD_MODNAME, 3049 .name = KBUILD_MODNAME,
2870 .id_table = efx_pci_table, 3050 .id_table = efx_pci_table,
2871 .probe = efx_pci_probe, 3051 .probe = efx_pci_probe,
2872 .remove = efx_pci_remove, 3052 .remove = efx_pci_remove,
2873 .driver.pm = &efx_pm_ops, 3053 .driver.pm = &efx_pm_ops,
3054 .err_handler = &efx_err_handlers,
2874}; 3055};
2875 3056
2876/************************************************************************** 3057/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d2f790df6dcb..8372da239b43 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); 33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
34 34
35/* RX */ 35/* RX */
36extern void efx_rx_config_page_split(struct efx_nic *efx);
36extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 37extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
37extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); 38extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
38extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); 39extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
39extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 40extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
40extern void efx_rx_strategy(struct efx_channel *channel);
41extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 41extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
42extern void efx_rx_slow_fill(unsigned long context); 42extern void efx_rx_slow_fill(unsigned long context);
43extern void __efx_rx_packet(struct efx_channel *channel, 43extern void __efx_rx_packet(struct efx_channel *channel);
44 struct efx_rx_buffer *rx_buf); 44extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
45extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 45 unsigned int index, unsigned int n_frags,
46 unsigned int len, u16 flags); 46 unsigned int len, u16 flags);
47static inline void efx_rx_flush_packet(struct efx_channel *channel)
48{
49 if (channel->rx_pkt_n_frags)
50 __efx_rx_packet(channel);
51}
47extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 52extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
48 53
49#define EFX_MAX_DMAQ_SIZE 4096UL 54#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
67extern int efx_probe_filters(struct efx_nic *efx); 72extern int efx_probe_filters(struct efx_nic *efx);
68extern void efx_restore_filters(struct efx_nic *efx); 73extern void efx_restore_filters(struct efx_nic *efx);
69extern void efx_remove_filters(struct efx_nic *efx); 74extern void efx_remove_filters(struct efx_nic *efx);
75extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
70extern s32 efx_filter_insert_filter(struct efx_nic *efx, 76extern s32 efx_filter_insert_filter(struct efx_nic *efx,
71 struct efx_filter_spec *spec, 77 struct efx_filter_spec *spec,
72 bool replace); 78 bool replace);
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 182dbe2cc6e4..ab8fb5889e55 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -137,8 +137,12 @@ enum efx_loopback_mode {
137 * Reset methods are numbered in order of increasing scope. 137 * Reset methods are numbered in order of increasing scope.
138 * 138 *
139 * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) 139 * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
140 * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
141 * if unsuccessful.
140 * @RESET_TYPE_ALL: Reset datapath, MAC and PHY 142 * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
141 * @RESET_TYPE_WORLD: Reset as much as possible 143 * @RESET_TYPE_WORLD: Reset as much as possible
144 * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
145 * unsuccessful.
142 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled 146 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
143 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
144 * @RESET_TYPE_INT_ERROR: reset due to internal error 148 * @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,9 +154,11 @@ enum efx_loopback_mode {
150 */ 154 */
151enum reset_type { 155enum reset_type {
152 RESET_TYPE_INVISIBLE = 0, 156 RESET_TYPE_INVISIBLE = 0,
153 RESET_TYPE_ALL = 1, 157 RESET_TYPE_RECOVER_OR_ALL = 1,
154 RESET_TYPE_WORLD = 2, 158 RESET_TYPE_ALL = 2,
155 RESET_TYPE_DISABLE = 3, 159 RESET_TYPE_WORLD = 3,
160 RESET_TYPE_RECOVER_OR_DISABLE = 4,
161 RESET_TYPE_DISABLE = 5,
156 RESET_TYPE_MAX_METHOD, 162 RESET_TYPE_MAX_METHOD,
157 RESET_TYPE_TX_WATCHDOG, 163 RESET_TYPE_TX_WATCHDOG,
158 RESET_TYPE_INT_ERROR, 164 RESET_TYPE_INT_ERROR,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8e61cd06f66a..6e768175e7e0 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
157}; 158};
158 159
159/* Number of ethtool statistics */ 160/* Number of ethtool statistics */
@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
978 rule->m_ext.data[1])) 979 rule->m_ext.data[1]))
979 return -EINVAL; 980 return -EINVAL;
980 981
981 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 982 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
983 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
982 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
983 0xfff : rule->ring_cookie); 985 0xfff : rule->ring_cookie);
984 986
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 49bcd196e10d..4486102fa9b3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
1546 1546
1547static void falcon_init_rx_cfg(struct efx_nic *efx) 1547static void falcon_init_rx_cfg(struct efx_nic *efx)
1548{ 1548{
1549 /* Prior to Siena the RX DMA engine will split each frame at
1550 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
1551 * be so large that that never happens. */
1552 const unsigned huge_buf_size = (3 * 4096) >> 5;
1553 /* RX control FIFO thresholds (32 entries) */ 1549 /* RX control FIFO thresholds (32 entries) */
1554 const unsigned ctrl_xon_thr = 20; 1550 const unsigned ctrl_xon_thr = 20;
1555 const unsigned ctrl_xoff_thr = 25; 1551 const unsigned ctrl_xoff_thr = 25;
@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1557 1553
1558 efx_reado(efx, &reg, FR_AZ_RX_CFG); 1554 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1559 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { 1555 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1560 /* Data FIFO size is 5.5K */ 1556 /* Data FIFO size is 5.5K. The RX DMA engine only
1557 * supports scattering for user-mode queues, but will
1558 * split DMA writes at intervals of RX_USR_BUF_SIZE
1559 * (32-byte units) even for kernel-mode queues. We
1560 * set it to be so large that that never happens.
1561 */
1561 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); 1562 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1562 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, 1563 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1563 huge_buf_size); 1564 (3 * 4096) >> 5);
1564 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); 1565 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
1565 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); 1566 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
1566 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); 1567 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1569 /* Data FIFO size is 80K; register fields moved */ 1570 /* Data FIFO size is 80K; register fields moved */
1570 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); 1571 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1571 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, 1572 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1572 huge_buf_size); 1573 EFX_RX_USR_BUF_SIZE >> 5);
1573 /* Send XON and XOFF at ~3 * max MTU away from empty/full */ 1574 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
1574 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); 1575 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
1575 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); 1576 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
1815 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, 1816 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
1816 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 1817 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1817 .rx_buffer_padding = 0x24, 1818 .rx_buffer_padding = 0x24,
1819 .can_rx_scatter = false,
1818 .max_interrupt_mode = EFX_INT_MODE_MSI, 1820 .max_interrupt_mode = EFX_INT_MODE_MSI,
1819 .phys_addr_channels = 4, 1821 .phys_addr_channels = 4,
1820 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, 1822 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1865 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 1867 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
1866 .rx_buffer_hash_size = 0x10, 1868 .rx_buffer_hash_size = 0x10,
1867 .rx_buffer_padding = 0, 1869 .rx_buffer_padding = 0,
1870 .can_rx_scatter = true,
1868 .max_interrupt_mode = EFX_INT_MODE_MSIX, 1871 .max_interrupt_mode = EFX_INT_MODE_MSIX,
1869 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 1872 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
1870 * interrupt handler only supports 32 1873 * interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 8af42cd1feda..2397f0e8d3eb 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -66,6 +66,10 @@ struct efx_filter_state {
66#endif 66#endif
67}; 67};
68 68
69static void efx_filter_table_clear_entry(struct efx_nic *efx,
70 struct efx_filter_table *table,
71 unsigned int filter_idx);
72
69/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 73/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
70 * key derived from the n-tuple. The initial LFSR state is 0xffff. */ 74 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
71static u16 efx_filter_hash(u32 key) 75static u16 efx_filter_hash(u32 key)
@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
168 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 172 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
169 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & 173 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
170 EFX_FILTER_FLAG_RX_RSS)); 174 EFX_FILTER_FLAG_RX_RSS));
175
176 /* There is a single bit to enable RX scatter for all
177 * unmatched packets. Only set it if scatter is
178 * enabled in both filter specs.
179 */
180 EFX_SET_OWORD_FIELD(
181 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
182 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
183 table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
184 EFX_FILTER_FLAG_RX_SCATTER));
185 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
186 /* We don't expose 'default' filters because unmatched
187 * packets always go to the queue number found in the
188 * RSS table. But we still need to set the RX scatter
189 * bit here.
190 */
191 EFX_SET_OWORD_FIELD(
192 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
193 efx->rx_scatter);
171 } 194 }
172 195
173 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 196 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
409 struct efx_filter_state *state = efx->filter_state; 432 struct efx_filter_state *state = efx->filter_state;
410 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF]; 433 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
411 struct efx_filter_spec *spec = &table->spec[filter_idx]; 434 struct efx_filter_spec *spec = &table->spec[filter_idx];
435 enum efx_filter_flags flags = 0;
436
437 /* If there's only one channel then disable RSS for non VF
438 * traffic, thereby allowing VFs to use RSS when the PF can't.
439 */
440 if (efx->n_rx_channels > 1)
441 flags |= EFX_FILTER_FLAG_RX_RSS;
412 442
413 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, 443 if (efx->rx_scatter)
414 EFX_FILTER_FLAG_RX_RSS, 0); 444 flags |= EFX_FILTER_FLAG_RX_SCATTER;
445
446 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
415 spec->type = EFX_FILTER_UC_DEF + filter_idx; 447 spec->type = EFX_FILTER_UC_DEF + filter_idx;
416 table->used_bitmap[0] |= 1 << filter_idx; 448 table->used_bitmap[0] |= 1 << filter_idx;
417} 449}
@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
463 break; 495 break;
464 } 496 }
465 497
466 case EFX_FILTER_TABLE_RX_DEF:
467 /* One filter spec per type */
468 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
469 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
470 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
471 return spec->type - EFX_FILTER_UC_DEF;
472
473 case EFX_FILTER_TABLE_RX_MAC: { 498 case EFX_FILTER_TABLE_RX_MAC: {
474 bool is_wild = spec->type == EFX_FILTER_MAC_WILD; 499 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
475 EFX_POPULATE_OWORD_7( 500 EFX_POPULATE_OWORD_7(
@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
520 return true; 545 return true;
521} 546}
522 547
523static int efx_filter_search(struct efx_filter_table *table,
524 struct efx_filter_spec *spec, u32 key,
525 bool for_insert, unsigned int *depth_required)
526{
527 unsigned hash, incr, filter_idx, depth, depth_max;
528
529 hash = efx_filter_hash(key);
530 incr = efx_filter_increment(key);
531
532 filter_idx = hash & (table->size - 1);
533 depth = 1;
534 depth_max = (for_insert ?
535 (spec->priority <= EFX_FILTER_PRI_HINT ?
536 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
537 table->search_depth[spec->type]);
538
539 for (;;) {
540 /* Return success if entry is used and matches this spec
541 * or entry is unused and we are trying to insert.
542 */
543 if (test_bit(filter_idx, table->used_bitmap) ?
544 efx_filter_equal(spec, &table->spec[filter_idx]) :
545 for_insert) {
546 *depth_required = depth;
547 return filter_idx;
548 }
549
550 /* Return failure if we reached the maximum search depth */
551 if (depth == depth_max)
552 return for_insert ? -EBUSY : -ENOENT;
553
554 filter_idx = (filter_idx + incr) & (table->size - 1);
555 ++depth;
556 }
557}
558
559/* 548/*
560 * Construct/deconstruct external filter IDs. At least the RX filter 549 * Construct/deconstruct external filter IDs. At least the RX filter
561 * IDs must be ordered by matching priority, for RX NFC semantics. 550 * IDs must be ordered by matching priority, for RX NFC semantics.
@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
650 * efx_filter_insert_filter - add or replace a filter 639 * efx_filter_insert_filter - add or replace a filter
651 * @efx: NIC in which to insert the filter 640 * @efx: NIC in which to insert the filter
652 * @spec: Specification for the filter 641 * @spec: Specification for the filter
653 * @replace: Flag for whether the specified filter may replace a filter 642 * @replace_equal: Flag for whether the specified filter may replace an
654 * with an identical match expression and equal or lower priority 643 * existing filter with equal priority
655 * 644 *
656 * On success, return the filter ID. 645 * On success, return the filter ID.
657 * On failure, return a negative error code. 646 * On failure, return a negative error code.
647 *
648 * If an existing filter has equal match values to the new filter
649 * spec, then the new filter might replace it, depending on the
650 * relative priorities. If the existing filter has lower priority, or
651 * if @replace_equal is set and it has equal priority, then it is
652 * replaced. Otherwise the function fails, returning -%EPERM if
653 * the existing filter has higher priority or -%EEXIST if it has
654 * equal priority.
658 */ 655 */
659s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, 656s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
660 bool replace) 657 bool replace_equal)
661{ 658{
662 struct efx_filter_state *state = efx->filter_state; 659 struct efx_filter_state *state = efx->filter_state;
663 struct efx_filter_table *table = efx_filter_spec_table(state, spec); 660 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
664 struct efx_filter_spec *saved_spec;
665 efx_oword_t filter; 661 efx_oword_t filter;
666 unsigned int filter_idx, depth = 0; 662 int rep_index, ins_index;
667 u32 key; 663 unsigned int depth = 0;
668 int rc; 664 int rc;
669 665
670 if (!table || table->size == 0) 666 if (!table || table->size == 0)
671 return -EINVAL; 667 return -EINVAL;
672 668
673 key = efx_filter_build(&filter, spec);
674
675 netif_vdbg(efx, hw, efx->net_dev, 669 netif_vdbg(efx, hw, efx->net_dev,
676 "%s: type %d search_depth=%d", __func__, spec->type, 670 "%s: type %d search_depth=%d", __func__, spec->type,
677 table->search_depth[spec->type]); 671 table->search_depth[spec->type]);
678 672
679 spin_lock_bh(&state->lock); 673 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
674 /* One filter spec per type */
675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
679 ins_index = rep_index;
680 680
681 rc = efx_filter_search(table, spec, key, true, &depth); 681 spin_lock_bh(&state->lock);
682 if (rc < 0) 682 } else {
683 goto out; 683 /* Search concurrently for
684 filter_idx = rc; 684 * (1) a filter to be replaced (rep_index): any filter
685 BUG_ON(filter_idx >= table->size); 685 * with the same match values, up to the current
686 saved_spec = &table->spec[filter_idx]; 686 * search depth for this type, and
687 687 * (2) the insertion point (ins_index): (1) or any
688 if (test_bit(filter_idx, table->used_bitmap)) { 688 * free slot before it or up to the maximum search
689 /* Should we replace the existing filter? */ 689 * depth for this priority
690 if (!replace) { 690 * We fail if we cannot find (2).
691 *
692 * We can stop once either
693 * (a) we find (1), in which case we have definitely
694 * found (2) as well; or
695 * (b) we have searched exhaustively for (1), and have
696 * either found (2) or searched exhaustively for it
697 */
698 u32 key = efx_filter_build(&filter, spec);
699 unsigned int hash = efx_filter_hash(key);
700 unsigned int incr = efx_filter_increment(key);
701 unsigned int max_rep_depth = table->search_depth[spec->type];
702 unsigned int max_ins_depth =
703 spec->priority <= EFX_FILTER_PRI_HINT ?
704 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
705 unsigned int i = hash & (table->size - 1);
706
707 ins_index = -1;
708 depth = 1;
709
710 spin_lock_bh(&state->lock);
711
712 for (;;) {
713 if (!test_bit(i, table->used_bitmap)) {
714 if (ins_index < 0)
715 ins_index = i;
716 } else if (efx_filter_equal(spec, &table->spec[i])) {
717 /* Case (a) */
718 if (ins_index < 0)
719 ins_index = i;
720 rep_index = i;
721 break;
722 }
723
724 if (depth >= max_rep_depth &&
725 (ins_index >= 0 || depth >= max_ins_depth)) {
726 /* Case (b) */
727 if (ins_index < 0) {
728 rc = -EBUSY;
729 goto out;
730 }
731 rep_index = -1;
732 break;
733 }
734
735 i = (i + incr) & (table->size - 1);
736 ++depth;
737 }
738 }
739
740 /* If we found a filter to be replaced, check whether we
741 * should do so
742 */
743 if (rep_index >= 0) {
744 struct efx_filter_spec *saved_spec = &table->spec[rep_index];
745
746 if (spec->priority == saved_spec->priority && !replace_equal) {
691 rc = -EEXIST; 747 rc = -EEXIST;
692 goto out; 748 goto out;
693 } 749 }
@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
695 rc = -EPERM; 751 rc = -EPERM;
696 goto out; 752 goto out;
697 } 753 }
698 } else { 754 }
699 __set_bit(filter_idx, table->used_bitmap); 755
756 /* Insert the filter */
757 if (ins_index != rep_index) {
758 __set_bit(ins_index, table->used_bitmap);
700 ++table->used; 759 ++table->used;
701 } 760 }
702 *saved_spec = *spec; 761 table->spec[ins_index] = *spec;
703 762
704 if (table->id == EFX_FILTER_TABLE_RX_DEF) { 763 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
705 efx_filter_push_rx_config(efx); 764 efx_filter_push_rx_config(efx);
@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
713 } 772 }
714 773
715 efx_writeo(efx, &filter, 774 efx_writeo(efx, &filter,
716 table->offset + table->step * filter_idx); 775 table->offset + table->step * ins_index);
776
777 /* If we were able to replace a filter by inserting
778 * at a lower depth, clear the replaced filter
779 */
780 if (ins_index != rep_index && rep_index >= 0)
781 efx_filter_table_clear_entry(efx, table, rep_index);
717 } 782 }
718 783
719 netif_vdbg(efx, hw, efx->net_dev, 784 netif_vdbg(efx, hw, efx->net_dev,
720 "%s: filter type %d index %d rxq %u set", 785 "%s: filter type %d index %d rxq %u set",
721 __func__, spec->type, filter_idx, spec->dmaq_id); 786 __func__, spec->type, ins_index, spec->dmaq_id);
722 rc = efx_filter_make_id(spec, filter_idx); 787 rc = efx_filter_make_id(spec, ins_index);
723 788
724out: 789out:
725 spin_unlock_bh(&state->lock); 790 spin_unlock_bh(&state->lock);
@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
1060 kfree(state); 1125 kfree(state);
1061} 1126}
1062 1127
1128/* Update scatter enable flags for filters pointing to our own RX queues */
1129void efx_filter_update_rx_scatter(struct efx_nic *efx)
1130{
1131 struct efx_filter_state *state = efx->filter_state;
1132 enum efx_filter_table_id table_id;
1133 struct efx_filter_table *table;
1134 efx_oword_t filter;
1135 unsigned int filter_idx;
1136
1137 spin_lock_bh(&state->lock);
1138
1139 for (table_id = EFX_FILTER_TABLE_RX_IP;
1140 table_id <= EFX_FILTER_TABLE_RX_DEF;
1141 table_id++) {
1142 table = &state->table[table_id];
1143
1144 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1145 if (!test_bit(filter_idx, table->used_bitmap) ||
1146 table->spec[filter_idx].dmaq_id >=
1147 efx->n_rx_channels)
1148 continue;
1149
1150 if (efx->rx_scatter)
1151 table->spec[filter_idx].flags |=
1152 EFX_FILTER_FLAG_RX_SCATTER;
1153 else
1154 table->spec[filter_idx].flags &=
1155 ~EFX_FILTER_FLAG_RX_SCATTER;
1156
1157 if (table_id == EFX_FILTER_TABLE_RX_DEF)
1158 /* Pushed by efx_filter_push_rx_config() */
1159 continue;
1160
1161 efx_filter_build(&filter, &table->spec[filter_idx]);
1162 efx_writeo(efx, &filter,
1163 table->offset + table->step * filter_idx);
1164 }
1165 }
1166
1167 efx_filter_push_rx_config(efx);
1168
1169 spin_unlock_bh(&state->lock);
1170}
1171
1063#ifdef CONFIG_RFS_ACCEL 1172#ifdef CONFIG_RFS_ACCEL
1064 1173
1065int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 1174int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 0095ce95150b..97dd8f18c001 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -667,7 +667,7 @@ fail:
667int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 667int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
668 u16 *fw_subtype_list, u32 *capabilities) 668 u16 *fw_subtype_list, u32 *capabilities)
669{ 669{
670 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; 670 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
671 size_t outlen, offset, i; 671 size_t outlen, offset, i;
672 int port_num = efx_port_num(efx); 672 int port_num = efx_port_num(efx);
673 int rc; 673 int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9d426d0457bd..c5c9747861ba 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -553,6 +553,7 @@
553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ 553#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */ 554#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ 555#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
556#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
556 557
557/* MC_CMD_PTP_IN_DISABLE msgrequest */ 558/* MC_CMD_PTP_IN_DISABLE msgrequest */
558#define MC_CMD_PTP_IN_DISABLE_LEN 8 559#define MC_CMD_PTP_IN_DISABLE_LEN 8
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0a90abd2421b..9bd433a095c5 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -69,6 +69,12 @@
69#define EFX_TXQ_TYPES 4 69#define EFX_TXQ_TYPES 4
70#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) 70#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
71 71
72/* Maximum possible MTU the driver supports */
73#define EFX_MAX_MTU (9 * 1024)
74
75/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
76#define EFX_RX_USR_BUF_SIZE 1824
77
72/* Forward declare Precision Time Protocol (PTP) support structure. */ 78/* Forward declare Precision Time Protocol (PTP) support structure. */
73struct efx_ptp_data; 79struct efx_ptp_data;
74 80
@@ -206,25 +212,23 @@ struct efx_tx_queue {
206/** 212/**
207 * struct efx_rx_buffer - An Efx RX data buffer 213 * struct efx_rx_buffer - An Efx RX data buffer
208 * @dma_addr: DMA base address of the buffer 214 * @dma_addr: DMA base address of the buffer
209 * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE). 215 * @page: The associated page buffer.
210 * Will be %NULL if the buffer slot is currently free.
211 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
212 * Will be %NULL if the buffer slot is currently free. 216 * Will be %NULL if the buffer slot is currently free.
213 * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. 217 * @page_offset: If pending: offset in @page of DMA base address.
214 * @len: Buffer length, in bytes. 218 * If completed: offset in @page of Ethernet header.
215 * @flags: Flags for buffer and packet state. 219 * @len: If pending: length for DMA descriptor.
220 * If completed: received length, excluding hash prefix.
221 * @flags: Flags for buffer and packet state. These are only set on the
222 * first buffer of a scattered packet.
216 */ 223 */
217struct efx_rx_buffer { 224struct efx_rx_buffer {
218 dma_addr_t dma_addr; 225 dma_addr_t dma_addr;
219 union { 226 struct page *page;
220 struct sk_buff *skb;
221 struct page *page;
222 } u;
223 u16 page_offset; 227 u16 page_offset;
224 u16 len; 228 u16 len;
225 u16 flags; 229 u16 flags;
226}; 230};
227#define EFX_RX_BUF_PAGE 0x0001 231#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
228#define EFX_RX_PKT_CSUMMED 0x0002 232#define EFX_RX_PKT_CSUMMED 0x0002
229#define EFX_RX_PKT_DISCARD 0x0004 233#define EFX_RX_PKT_DISCARD 0x0004
230 234
@@ -260,14 +264,23 @@ struct efx_rx_page_state {
260 * @added_count: Number of buffers added to the receive queue. 264 * @added_count: Number of buffers added to the receive queue.
261 * @notified_count: Number of buffers given to NIC (<= @added_count). 265 * @notified_count: Number of buffers given to NIC (<= @added_count).
262 * @removed_count: Number of buffers removed from the receive queue. 266 * @removed_count: Number of buffers removed from the receive queue.
267 * @scatter_n: Number of buffers used by current packet
268 * @page_ring: The ring to store DMA mapped pages for reuse.
269 * @page_add: Counter to calculate the write pointer for the recycle ring.
270 * @page_remove: Counter to calculate the read pointer for the recycle ring.
271 * @page_recycle_count: The number of pages that have been recycled.
272 * @page_recycle_failed: The number of pages that couldn't be recycled because
273 * the kernel still held a reference to them.
274 * @page_recycle_full: The number of pages that were released because the
275 * recycle ring was full.
276 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
263 * @max_fill: RX descriptor maximum fill level (<= ring size) 277 * @max_fill: RX descriptor maximum fill level (<= ring size)
264 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 278 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
265 * (<= @max_fill) 279 * (<= @max_fill)
266 * @min_fill: RX descriptor minimum non-zero fill level. 280 * @min_fill: RX descriptor minimum non-zero fill level.
267 * This records the minimum fill level observed when a ring 281 * This records the minimum fill level observed when a ring
268 * refill was triggered. 282 * refill was triggered.
269 * @alloc_page_count: RX allocation strategy counter. 283 * @recycle_count: RX buffer recycle counter.
270 * @alloc_skb_count: RX allocation strategy counter.
271 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 284 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
272 */ 285 */
273struct efx_rx_queue { 286struct efx_rx_queue {
@@ -279,15 +292,22 @@ struct efx_rx_queue {
279 bool enabled; 292 bool enabled;
280 bool flush_pending; 293 bool flush_pending;
281 294
282 int added_count; 295 unsigned int added_count;
283 int notified_count; 296 unsigned int notified_count;
284 int removed_count; 297 unsigned int removed_count;
298 unsigned int scatter_n;
299 struct page **page_ring;
300 unsigned int page_add;
301 unsigned int page_remove;
302 unsigned int page_recycle_count;
303 unsigned int page_recycle_failed;
304 unsigned int page_recycle_full;
305 unsigned int page_ptr_mask;
285 unsigned int max_fill; 306 unsigned int max_fill;
286 unsigned int fast_fill_trigger; 307 unsigned int fast_fill_trigger;
287 unsigned int min_fill; 308 unsigned int min_fill;
288 unsigned int min_overfill; 309 unsigned int min_overfill;
289 unsigned int alloc_page_count; 310 unsigned int recycle_count;
290 unsigned int alloc_skb_count;
291 struct timer_list slow_fill; 311 struct timer_list slow_fill;
292 unsigned int slow_fill_count; 312 unsigned int slow_fill_count;
293}; 313};
@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
336 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel 356 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
337 * @irq_count: Number of IRQs since last adaptive moderation decision 357 * @irq_count: Number of IRQs since last adaptive moderation decision
338 * @irq_mod_score: IRQ moderation score 358 * @irq_mod_score: IRQ moderation score
339 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
340 * and diagnostic counters
341 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
342 * descriptors
343 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 359 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
344 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 360 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
345 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 361 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
347 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 363 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
348 * @n_rx_overlength: Count of RX_OVERLENGTH errors 364 * @n_rx_overlength: Count of RX_OVERLENGTH errors
349 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 365 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
366 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
367 * lack of descriptors
368 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
369 * __efx_rx_packet(), or zero if there is none
370 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
371 * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
350 * @rx_queue: RX queue for this channel 372 * @rx_queue: RX queue for this channel
351 * @tx_queue: TX queues for this channel 373 * @tx_queue: TX queues for this channel
352 */ 374 */
@@ -371,9 +393,6 @@ struct efx_channel {
371 unsigned int rfs_filters_added; 393 unsigned int rfs_filters_added;
372#endif 394#endif
373 395
374 int rx_alloc_level;
375 int rx_alloc_push_pages;
376
377 unsigned n_rx_tobe_disc; 396 unsigned n_rx_tobe_disc;
378 unsigned n_rx_ip_hdr_chksum_err; 397 unsigned n_rx_ip_hdr_chksum_err;
379 unsigned n_rx_tcp_udp_chksum_err; 398 unsigned n_rx_tcp_udp_chksum_err;
@@ -381,11 +400,10 @@ struct efx_channel {
381 unsigned n_rx_frm_trunc; 400 unsigned n_rx_frm_trunc;
382 unsigned n_rx_overlength; 401 unsigned n_rx_overlength;
383 unsigned n_skbuff_leaks; 402 unsigned n_skbuff_leaks;
403 unsigned int n_rx_nodesc_trunc;
384 404
385 /* Used to pipeline received packets in order to optimise memory 405 unsigned int rx_pkt_n_frags;
386 * access with prefetches. 406 unsigned int rx_pkt_index;
387 */
388 struct efx_rx_buffer *rx_pkt;
389 407
390 struct efx_rx_queue rx_queue; 408 struct efx_rx_queue rx_queue;
391 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 409 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -410,7 +428,7 @@ struct efx_channel_type {
410 void (*post_remove)(struct efx_channel *); 428 void (*post_remove)(struct efx_channel *);
411 void (*get_name)(struct efx_channel *, char *buf, size_t len); 429 void (*get_name)(struct efx_channel *, char *buf, size_t len);
412 struct efx_channel *(*copy)(const struct efx_channel *); 430 struct efx_channel *(*copy)(const struct efx_channel *);
413 void (*receive_skb)(struct efx_channel *, struct sk_buff *); 431 bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
414 bool keep_eventq; 432 bool keep_eventq;
415}; 433};
416 434
@@ -446,6 +464,7 @@ enum nic_state {
446 STATE_UNINIT = 0, /* device being probed/removed or is frozen */ 464 STATE_UNINIT = 0, /* device being probed/removed or is frozen */
447 STATE_READY = 1, /* hardware ready and netdev registered */ 465 STATE_READY = 1, /* hardware ready and netdev registered */
448 STATE_DISABLED = 2, /* device disabled due to hardware errors */ 466 STATE_DISABLED = 2, /* device disabled due to hardware errors */
467 STATE_RECOVERY = 3, /* device recovering from PCI error */
449}; 468};
450 469
451/* 470/*
@@ -684,10 +703,13 @@ struct vfdi_status;
684 * @n_channels: Number of channels in use 703 * @n_channels: Number of channels in use
685 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 704 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
686 * @n_tx_channels: Number of channels used for TX 705 * @n_tx_channels: Number of channels used for TX
687 * @rx_buffer_len: RX buffer length 706 * @rx_dma_len: Current maximum RX DMA length
688 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 707 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
708 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
709 * for use in sk_buff::truesize
689 * @rx_hash_key: Toeplitz hash key for RSS 710 * @rx_hash_key: Toeplitz hash key for RSS
690 * @rx_indir_table: Indirection table for RSS 711 * @rx_indir_table: Indirection table for RSS
712 * @rx_scatter: Scatter mode enabled for receives
691 * @int_error_count: Number of internal errors seen recently 713 * @int_error_count: Number of internal errors seen recently
692 * @int_error_expire: Time at which error count will be expired 714 * @int_error_expire: Time at which error count will be expired
693 * @irq_status: Interrupt status buffer 715 * @irq_status: Interrupt status buffer
@@ -800,10 +822,15 @@ struct efx_nic {
800 unsigned rss_spread; 822 unsigned rss_spread;
801 unsigned tx_channel_offset; 823 unsigned tx_channel_offset;
802 unsigned n_tx_channels; 824 unsigned n_tx_channels;
803 unsigned int rx_buffer_len; 825 unsigned int rx_dma_len;
804 unsigned int rx_buffer_order; 826 unsigned int rx_buffer_order;
827 unsigned int rx_buffer_truesize;
828 unsigned int rx_page_buf_step;
829 unsigned int rx_bufs_per_page;
830 unsigned int rx_pages_per_batch;
805 u8 rx_hash_key[40]; 831 u8 rx_hash_key[40];
806 u32 rx_indir_table[128]; 832 u32 rx_indir_table[128];
833 bool rx_scatter;
807 834
808 unsigned int_error_count; 835 unsigned int_error_count;
809 unsigned long int_error_expire; 836 unsigned long int_error_expire;
@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
934 * @evq_ptr_tbl_base: Event queue pointer table base address 961 * @evq_ptr_tbl_base: Event queue pointer table base address
935 * @evq_rptr_tbl_base: Event queue read-pointer table base address 962 * @evq_rptr_tbl_base: Event queue read-pointer table base address
936 * @max_dma_mask: Maximum possible DMA mask 963 * @max_dma_mask: Maximum possible DMA mask
937 * @rx_buffer_hash_size: Size of hash at start of RX buffer 964 * @rx_buffer_hash_size: Size of hash at start of RX packet
938 * @rx_buffer_padding: Size of padding at end of RX buffer 965 * @rx_buffer_padding: Size of padding at end of RX packet
966 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
939 * @max_interrupt_mode: Highest capability interrupt mode supported 967 * @max_interrupt_mode: Highest capability interrupt mode supported
940 * from &enum efx_init_mode. 968 * from &enum efx_init_mode.
941 * @phys_addr_channels: Number of channels with physically addressed 969 * @phys_addr_channels: Number of channels with physically addressed
@@ -983,6 +1011,7 @@ struct efx_nic_type {
983 u64 max_dma_mask; 1011 u64 max_dma_mask;
984 unsigned int rx_buffer_hash_size; 1012 unsigned int rx_buffer_hash_size;
985 unsigned int rx_buffer_padding; 1013 unsigned int rx_buffer_padding;
1014 bool can_rx_scatter;
986 unsigned int max_interrupt_mode; 1015 unsigned int max_interrupt_mode;
987 unsigned int phys_addr_channels; 1016 unsigned int phys_addr_channels;
988 unsigned int timer_period_max; 1017 unsigned int timer_period_max;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3cb..b0503cd8c2a0 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
305 unsigned int len) 305 unsigned int len)
306{ 306{
307 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 307 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
308 &buffer->dma_addr, GFP_ATOMIC); 308 &buffer->dma_addr,
309 GFP_ATOMIC | __GFP_ZERO);
309 if (!buffer->addr) 310 if (!buffer->addr)
310 return -ENOMEM; 311 return -ENOMEM;
311 buffer->len = len; 312 buffer->len = len;
312 memset(buffer->addr, 0, len);
313 return 0; 313 return 0;
314} 314}
315 315
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
592 struct efx_nic *efx = rx_queue->efx; 592 struct efx_nic *efx = rx_queue->efx;
593 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 593 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
594 bool iscsi_digest_en = is_b0; 594 bool iscsi_digest_en = is_b0;
595 bool jumbo_en;
596
597 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
598 * DMA to continue after a PCIe page boundary (and scattering
599 * is not possible). In Falcon B0 and Siena, it enables
600 * scatter.
601 */
602 jumbo_en = !is_b0 || efx->rx_scatter;
595 603
596 netif_dbg(efx, hw, efx->net_dev, 604 netif_dbg(efx, hw, efx->net_dev,
597 "RX queue %d ring in special buffers %d-%d\n", 605 "RX queue %d ring in special buffers %d-%d\n",
598 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 606 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
599 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 607 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
600 608
609 rx_queue->scatter_n = 0;
610
601 /* Pin RX descriptor ring */ 611 /* Pin RX descriptor ring */
602 efx_init_special_buffer(efx, &rx_queue->rxd); 612 efx_init_special_buffer(efx, &rx_queue->rxd);
603 613
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
614 FRF_AZ_RX_DESCQ_SIZE, 624 FRF_AZ_RX_DESCQ_SIZE,
615 __ffs(rx_queue->rxd.entries), 625 __ffs(rx_queue->rxd.entries),
616 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 626 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
617 /* For >=B0 this is scatter so disable */ 627 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
618 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
619 FRF_AZ_RX_DESCQ_EN, 1); 628 FRF_AZ_RX_DESCQ_EN, 1);
620 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 629 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
621 efx_rx_queue_index(rx_queue)); 630 efx_rx_queue_index(rx_queue));
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
969 EFX_RX_PKT_DISCARD : 0; 978 EFX_RX_PKT_DISCARD : 0;
970} 979}
971 980
972/* Handle receive events that are not in-order. */ 981/* Handle receive events that are not in-order. Return true if this
973static void 982 * can be handled as a partial packet discard, false if it's more
983 * serious.
984 */
985static bool
974efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 986efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
975{ 987{
988 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
976 struct efx_nic *efx = rx_queue->efx; 989 struct efx_nic *efx = rx_queue->efx;
977 unsigned expected, dropped; 990 unsigned expected, dropped;
978 991
992 if (rx_queue->scatter_n &&
993 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
994 rx_queue->ptr_mask)) {
995 ++channel->n_rx_nodesc_trunc;
996 return true;
997 }
998
979 expected = rx_queue->removed_count & rx_queue->ptr_mask; 999 expected = rx_queue->removed_count & rx_queue->ptr_mask;
980 dropped = (index - expected) & rx_queue->ptr_mask; 1000 dropped = (index - expected) & rx_queue->ptr_mask;
981 netif_info(efx, rx_err, efx->net_dev, 1001 netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
984 1004
985 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 1005 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
986 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 1006 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1007 return false;
987} 1008}
988 1009
989/* Handle a packet received event 1010/* Handle a packet received event
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
999 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 1020 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
1000 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 1021 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1001 unsigned expected_ptr; 1022 unsigned expected_ptr;
1002 bool rx_ev_pkt_ok; 1023 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
1003 u16 flags; 1024 u16 flags;
1004 struct efx_rx_queue *rx_queue; 1025 struct efx_rx_queue *rx_queue;
1005 struct efx_nic *efx = channel->efx; 1026 struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
1007 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 1028 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1008 return; 1029 return;
1009 1030
1010 /* Basic packet information */ 1031 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
1011 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 1032 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
1012 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1013 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1014 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
1015 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
1016 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 1033 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1017 channel->channel); 1034 channel->channel);
1018 1035
1019 rx_queue = efx_channel_get_rx_queue(channel); 1036 rx_queue = efx_channel_get_rx_queue(channel);
1020 1037
1021 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 1038 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1022 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 1039 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1023 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 1040 rx_queue->ptr_mask);
1024 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 1041
1042 /* Check for partial drops and other errors */
1043 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1044 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1045 if (rx_ev_desc_ptr != expected_ptr &&
1046 !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1047 return;
1048
1049 /* Discard all pending fragments */
1050 if (rx_queue->scatter_n) {
1051 efx_rx_packet(
1052 rx_queue,
1053 rx_queue->removed_count & rx_queue->ptr_mask,
1054 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
1055 rx_queue->removed_count += rx_queue->scatter_n;
1056 rx_queue->scatter_n = 0;
1057 }
1058
1059 /* Return if there is no new fragment */
1060 if (rx_ev_desc_ptr != expected_ptr)
1061 return;
1062
1063 /* Discard new fragment if not SOP */
1064 if (!rx_ev_sop) {
1065 efx_rx_packet(
1066 rx_queue,
1067 rx_queue->removed_count & rx_queue->ptr_mask,
1068 1, 0, EFX_RX_PKT_DISCARD);
1069 ++rx_queue->removed_count;
1070 return;
1071 }
1072 }
1073
1074 ++rx_queue->scatter_n;
1075 if (rx_ev_cont)
1076 return;
1077
1078 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1079 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1080 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1025 1081
1026 if (likely(rx_ev_pkt_ok)) { 1082 if (likely(rx_ev_pkt_ok)) {
1027 /* If packet is marked as OK and packet type is TCP/IP or 1083 /* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
1049 channel->irq_mod_score += 2; 1105 channel->irq_mod_score += 2;
1050 1106
1051 /* Handle received packet */ 1107 /* Handle received packet */
1052 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); 1108 efx_rx_packet(rx_queue,
1109 rx_queue->removed_count & rx_queue->ptr_mask,
1110 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1111 rx_queue->removed_count += rx_queue->scatter_n;
1112 rx_queue->scatter_n = 0;
1053} 1113}
1054 1114
1055/* If this flush done event corresponds to a &struct efx_tx_queue, then 1115/* If this flush done event corresponds to a &struct efx_tx_queue, then
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3f93624fc273..07f6baa15c0c 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -99,6 +99,9 @@
99#define PTP_V2_VERSION_LENGTH 1 99#define PTP_V2_VERSION_LENGTH 1
100#define PTP_V2_VERSION_OFFSET 29 100#define PTP_V2_VERSION_OFFSET 29
101 101
102#define PTP_V2_UUID_LENGTH 8
103#define PTP_V2_UUID_OFFSET 48
104
 102/* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2), 105/* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2),
103 * the MC only captures the last six bytes of the clock identity. These values 106 * the MC only captures the last six bytes of the clock identity. These values
104 * reflect those, not the ones used in the standard. The standard permits 107 * reflect those, not the ones used in the standard. The standard permits
@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
429 unsigned number_readings = (response_length / 432 unsigned number_readings = (response_length /
430 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); 433 MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
431 unsigned i; 434 unsigned i;
432 unsigned min;
433 unsigned min_set = 0;
434 unsigned total; 435 unsigned total;
435 unsigned ngood = 0; 436 unsigned ngood = 0;
436 unsigned last_good = 0; 437 unsigned last_good = 0;
437 struct efx_ptp_data *ptp = efx->ptp_data; 438 struct efx_ptp_data *ptp = efx->ptp_data;
438 bool min_valid = false;
439 u32 last_sec; 439 u32 last_sec;
440 u32 start_sec; 440 u32 start_sec;
441 struct timespec delta; 441 struct timespec delta;
@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
443 if (number_readings == 0) 443 if (number_readings == 0)
444 return -EAGAIN; 444 return -EAGAIN;
445 445
446 /* Find minimum value in this set of results, discarding clearly 446 /* Read the set of results and increment stats for any results that
 447 * erroneous results. 447 * appear to be erroneous.
448 */ 448 */
449 for (i = 0; i < number_readings; i++) { 449 for (i = 0; i < number_readings; i++) {
450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); 450 efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; 451 synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
452 if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
453 if (min_valid) {
454 if (ptp->timeset[i].window < min_set)
455 min_set = ptp->timeset[i].window;
456 } else {
457 min_valid = true;
458 min_set = ptp->timeset[i].window;
459 }
460 }
461 }
462
463 if (min_valid) {
464 if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
465 min = ptp->base_sync_ns;
466 else
467 min = min_set;
468 } else {
469 min = SYNCHRONISATION_GRANULARITY_NS;
470 } 452 }
471 453
472 /* Discard excessively long synchronise durations. The MC times 454 /* Find the last good host-MC synchronization result. The MC times
473 * when it finishes reading the host time so the corrected window 455 * when it finishes reading the host time so the corrected window time
474 * time should be fairly constant for a given platform. 456 * should be fairly constant for a given platform.
475 */ 457 */
476 total = 0; 458 total = 0;
477 for (i = 0; i < number_readings; i++) 459 for (i = 0; i < number_readings; i++)
@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
489 471
490 if (ngood == 0) { 472 if (ngood == 0) {
491 netif_warn(efx, drv, efx->net_dev, 473 netif_warn(efx, drv, efx->net_dev,
492 "PTP no suitable synchronisations %dns %dns\n", 474 "PTP no suitable synchronisations %dns\n",
493 ptp->base_sync_ns, min_set); 475 ptp->base_sync_ns);
494 return -EAGAIN; 476 return -EAGAIN;
495 } 477 }
496 478
@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
1006 * the receive timestamp from the MC - this will probably occur after the 988 * the receive timestamp from the MC - this will probably occur after the
1007 * packet arrival because of the processing in the MC. 989 * packet arrival because of the processing in the MC.
1008 */ 990 */
1009static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) 991static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1010{ 992{
1011 struct efx_nic *efx = channel->efx; 993 struct efx_nic *efx = channel->efx;
1012 struct efx_ptp_data *ptp = efx->ptp_data; 994 struct efx_ptp_data *ptp = efx->ptp_data;
1013 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; 995 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
1014 u8 *data; 996 u8 *match_data_012, *match_data_345;
1015 unsigned int version; 997 unsigned int version;
1016 998
1017 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); 999 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
1018 1000
1019 /* Correct version? */ 1001 /* Correct version? */
1020 if (ptp->mode == MC_CMD_PTP_MODE_V1) { 1002 if (ptp->mode == MC_CMD_PTP_MODE_V1) {
1021 if (skb->len < PTP_V1_MIN_LENGTH) { 1003 if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
1022 netif_receive_skb(skb); 1004 return false;
1023 return;
1024 } 1005 }
1025 version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); 1006 version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
1026 if (version != PTP_VERSION_V1) { 1007 if (version != PTP_VERSION_V1) {
1027 netif_receive_skb(skb); 1008 return false;
1028 return;
1029 } 1009 }
1010
1011 /* PTP V1 uses all six bytes of the UUID to match the packet
1012 * to the timestamp
1013 */
1014 match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
1015 match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
1030 } else { 1016 } else {
1031 if (skb->len < PTP_V2_MIN_LENGTH) { 1017 if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
1032 netif_receive_skb(skb); 1018 return false;
1033 return;
1034 } 1019 }
1035 version = skb->data[PTP_V2_VERSION_OFFSET]; 1020 version = skb->data[PTP_V2_VERSION_OFFSET];
1036
1037 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
1038 BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
1039 BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
1040 BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
1041 BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
1042
1043 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { 1021 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
1044 netif_receive_skb(skb); 1022 return false;
1045 return; 1023 }
1024
1025 /* The original V2 implementation uses bytes 2-7 of
1026 * the UUID to match the packet to the timestamp. This
1027 * discards two of the bytes of the MAC address used
1028 * to create the UUID (SF bug 33070). The PTP V2
1029 * enhanced mode fixes this issue and uses bytes 0-2
1030 * and byte 5-7 of the UUID.
1031 */
1032 match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
1033 if (ptp->mode == MC_CMD_PTP_MODE_V2) {
1034 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
1035 } else {
1036 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
1037 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
1046 } 1038 }
1047 } 1039 }
1048 1040
@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1056 timestamps = skb_hwtstamps(skb); 1048 timestamps = skb_hwtstamps(skb);
1057 memset(timestamps, 0, sizeof(*timestamps)); 1049 memset(timestamps, 0, sizeof(*timestamps));
1058 1050
1051 /* We expect the sequence number to be in the same position in
1052 * the packet for PTP V1 and V2
1053 */
1054 BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
1055 BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
1056
1059 /* Extract UUID/Sequence information */ 1057 /* Extract UUID/Sequence information */
1060 data = skb->data + PTP_V1_UUID_OFFSET; 1058 match->words[0] = (match_data_012[0] |
1061 match->words[0] = (data[0] | 1059 (match_data_012[1] << 8) |
1062 (data[1] << 8) | 1060 (match_data_012[2] << 16) |
1063 (data[2] << 16) | 1061 (match_data_345[0] << 24));
1064 (data[3] << 24)); 1062 match->words[1] = (match_data_345[1] |
1065 match->words[1] = (data[4] | 1063 (match_data_345[2] << 8) |
1066 (data[5] << 8) |
1067 (skb->data[PTP_V1_SEQUENCE_OFFSET + 1064 (skb->data[PTP_V1_SEQUENCE_OFFSET +
1068 PTP_V1_SEQUENCE_LENGTH - 1] << 1065 PTP_V1_SEQUENCE_LENGTH - 1] <<
1069 16)); 1066 16));
@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1073 1070
1074 skb_queue_tail(&ptp->rxq, skb); 1071 skb_queue_tail(&ptp->rxq, skb);
1075 queue_work(ptp->workwq, &ptp->work); 1072 queue_work(ptp->workwq, &ptp->work);
1073
1074 return true;
1076} 1075}
1077 1076
1078/* Transmit a PTP packet. This has to be transmitted by the MC 1077/* Transmit a PTP packet. This has to be transmitted by the MC
@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
1167 * timestamped 1166 * timestamped
1168 */ 1167 */
1169 init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 1168 init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
1170 new_mode = MC_CMD_PTP_MODE_V2; 1169 new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
1171 enable_wanted = true; 1170 enable_wanted = true;
1172 break; 1171 break;
1173 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1172 case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
1186 if (init->tx_type != HWTSTAMP_TX_OFF) 1185 if (init->tx_type != HWTSTAMP_TX_OFF)
1187 enable_wanted = true; 1186 enable_wanted = true;
1188 1187
1188 /* Old versions of the firmware do not support the improved
1189 * UUID filtering option (SF bug 33070). If the firmware does
1190 * not accept the enhanced mode, fall back to the standard PTP
1191 * v2 UUID filtering.
1192 */
1189 rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); 1193 rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
1194 if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
1195 rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
1190 if (rc != 0) 1196 if (rc != 0)
1191 return rc; 1197 return rc;
1192 1198
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c8..e73e30bac10e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -16,6 +16,7 @@
16#include <linux/udp.h> 16#include <linux/udp.h>
17#include <linux/prefetch.h> 17#include <linux/prefetch.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/iommu.h>
19#include <net/ip.h> 20#include <net/ip.h>
20#include <net/checksum.h> 21#include <net/checksum.h>
21#include "net_driver.h" 22#include "net_driver.h"
@@ -24,85 +25,39 @@
24#include "selftest.h" 25#include "selftest.h"
25#include "workarounds.h" 26#include "workarounds.h"
26 27
27/* Number of RX descriptors pushed at once. */ 28/* Preferred number of descriptors to fill at once */
28#define EFX_RX_BATCH 8 29#define EFX_RX_PREFERRED_BATCH 8U
29 30
30/* Maximum size of a buffer sharing a page */ 31/* Number of RX buffers to recycle pages for. When creating the RX page recycle
31#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) 32 * ring, this number is divided by the number of buffers per page to calculate
33 * the number of pages to store in the RX page recycle ring.
34 */
35#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
36#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
32 37
33/* Size of buffer allocated for skb header area. */ 38/* Size of buffer allocated for skb header area. */
34#define EFX_SKB_HEADERS 64u 39#define EFX_SKB_HEADERS 64u
35 40
36/*
37 * rx_alloc_method - RX buffer allocation method
38 *
39 * This driver supports two methods for allocating and using RX buffers:
40 * each RX buffer may be backed by an skb or by an order-n page.
41 *
42 * When GRO is in use then the second method has a lower overhead,
43 * since we don't have to allocate then free skbs on reassembled frames.
44 *
45 * Values:
46 * - RX_ALLOC_METHOD_AUTO = 0
47 * - RX_ALLOC_METHOD_SKB = 1
48 * - RX_ALLOC_METHOD_PAGE = 2
49 *
50 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
51 * controlled by the parameters below.
52 *
53 * - Since pushing and popping descriptors are separated by the rx_queue
54 * size, so the watermarks should be ~rxd_size.
55 * - The performance win by using page-based allocation for GRO is less
56 * than the performance hit of using page-based allocation of non-GRO,
57 * so the watermarks should reflect this.
58 *
59 * Per channel we maintain a single variable, updated by each channel:
60 *
61 * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
62 * RX_ALLOC_FACTOR_SKB)
63 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
64 * limits the hysteresis), and update the allocation strategy:
65 *
66 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
67 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
68 */
69static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
70
71#define RX_ALLOC_LEVEL_GRO 0x2000
72#define RX_ALLOC_LEVEL_MAX 0x3000
73#define RX_ALLOC_FACTOR_GRO 1
74#define RX_ALLOC_FACTOR_SKB (-2)
75
76/* This is the percentage fill level below which new RX descriptors 41/* This is the percentage fill level below which new RX descriptors
77 * will be added to the RX descriptor ring. 42 * will be added to the RX descriptor ring.
78 */ 43 */
79static unsigned int rx_refill_threshold; 44static unsigned int rx_refill_threshold;
80 45
46/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
47#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
48 EFX_RX_USR_BUF_SIZE)
49
81/* 50/*
82 * RX maximum head room required. 51 * RX maximum head room required.
83 * 52 *
84 * This must be at least 1 to prevent overflow and at least 2 to allow 53 * This must be at least 1 to prevent overflow, plus one packet-worth
85 * pipelined receives. 54 * to allow pipelined receives.
86 */ 55 */
87#define EFX_RXD_HEAD_ROOM 2 56#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
88 57
89/* Offset of ethernet header within page */ 58static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
90static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
91 struct efx_rx_buffer *buf)
92{ 59{
93 return buf->page_offset + efx->type->rx_buffer_hash_size; 60 return page_address(buf->page) + buf->page_offset;
94}
95static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
96{
97 return PAGE_SIZE << efx->rx_buffer_order;
98}
99
100static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
101{
102 if (buf->flags & EFX_RX_BUF_PAGE)
103 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
104 else
105 return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
106} 61}
107 62
108static inline u32 efx_rx_buf_hash(const u8 *eh) 63static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
119#endif 74#endif
120} 75}
121 76
122/** 77static inline struct efx_rx_buffer *
123 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers 78efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
124 * 79{
125 * @rx_queue: Efx RX queue 80 if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
126 * 81 return efx_rx_buffer(rx_queue, 0);
127 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a 82 else
128 * struct efx_rx_buffer for each one. Return a negative error code or 0 83 return rx_buf + 1;
129 * on success. May fail having only inserted fewer than EFX_RX_BATCH 84}
130 * buffers. 85
131 */ 86static inline void efx_sync_rx_buffer(struct efx_nic *efx,
132static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) 87 struct efx_rx_buffer *rx_buf,
88 unsigned int len)
89{
90 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
91 DMA_FROM_DEVICE);
92}
93
94void efx_rx_config_page_split(struct efx_nic *efx)
95{
96 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
97 L1_CACHE_BYTES);
98 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
99 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
100 efx->rx_page_buf_step);
101 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
102 efx->rx_bufs_per_page;
103 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
104 efx->rx_bufs_per_page);
105}
106
107/* Check the RX page recycle ring for a page that can be reused. */
108static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
133{ 109{
134 struct efx_nic *efx = rx_queue->efx; 110 struct efx_nic *efx = rx_queue->efx;
135 struct net_device *net_dev = efx->net_dev; 111 struct page *page;
136 struct efx_rx_buffer *rx_buf; 112 struct efx_rx_page_state *state;
137 struct sk_buff *skb; 113 unsigned index;
138 int skb_len = efx->rx_buffer_len;
139 unsigned index, count;
140 114
141 for (count = 0; count < EFX_RX_BATCH; ++count) { 115 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
142 index = rx_queue->added_count & rx_queue->ptr_mask; 116 page = rx_queue->page_ring[index];
143 rx_buf = efx_rx_buffer(rx_queue, index); 117 if (page == NULL)
144 118 return NULL;
145 rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len); 119
146 if (unlikely(!skb)) 120 rx_queue->page_ring[index] = NULL;
147 return -ENOMEM; 121 /* page_remove cannot exceed page_add. */
148 122 if (rx_queue->page_remove != rx_queue->page_add)
149 /* Adjust the SKB for padding */ 123 ++rx_queue->page_remove;
150 skb_reserve(skb, NET_IP_ALIGN);
151 rx_buf->len = skb_len - NET_IP_ALIGN;
152 rx_buf->flags = 0;
153
154 rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
155 skb->data, rx_buf->len,
156 DMA_FROM_DEVICE);
157 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
158 rx_buf->dma_addr))) {
159 dev_kfree_skb_any(skb);
160 rx_buf->u.skb = NULL;
161 return -EIO;
162 }
163 124
164 ++rx_queue->added_count; 125 /* If page_count is 1 then we hold the only reference to this page. */
165 ++rx_queue->alloc_skb_count; 126 if (page_count(page) == 1) {
127 ++rx_queue->page_recycle_count;
128 return page;
129 } else {
130 state = page_address(page);
131 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
132 PAGE_SIZE << efx->rx_buffer_order,
133 DMA_FROM_DEVICE);
134 put_page(page);
135 ++rx_queue->page_recycle_failed;
166 } 136 }
167 137
168 return 0; 138 return NULL;
169} 139}
170 140
171/** 141/**
172 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers 142 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
173 * 143 *
174 * @rx_queue: Efx RX queue 144 * @rx_queue: Efx RX queue
175 * 145 *
176 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA, 146 * This allocates a batch of pages, maps them for DMA, and populates
177 * and populates struct efx_rx_buffers for each one. Return a negative error 147 * struct efx_rx_buffers for each one. Return a negative error code or
178 * code or 0 on success. If a single page can be split between two buffers, 148 * 0 on success. If a single page can be used for multiple buffers,
179 * then the page will either be inserted fully, or not at at all. 149 * then the page will either be inserted fully, or not at all.
180 */ 150 */
181static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) 151static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
182{ 152{
183 struct efx_nic *efx = rx_queue->efx; 153 struct efx_nic *efx = rx_queue->efx;
184 struct efx_rx_buffer *rx_buf; 154 struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
188 dma_addr_t dma_addr; 158 dma_addr_t dma_addr;
189 unsigned index, count; 159 unsigned index, count;
190 160
191 /* We can split a page between two buffers */ 161 count = 0;
192 BUILD_BUG_ON(EFX_RX_BATCH & 1); 162 do {
193 163 page = efx_reuse_page(rx_queue);
194 for (count = 0; count < EFX_RX_BATCH; ++count) { 164 if (page == NULL) {
195 page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, 165 page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
196 efx->rx_buffer_order); 166 efx->rx_buffer_order);
197 if (unlikely(page == NULL)) 167 if (unlikely(page == NULL))
198 return -ENOMEM; 168 return -ENOMEM;
199 dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0, 169 dma_addr =
200 efx_rx_buf_size(efx), 170 dma_map_page(&efx->pci_dev->dev, page, 0,
201 DMA_FROM_DEVICE); 171 PAGE_SIZE << efx->rx_buffer_order,
202 if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) { 172 DMA_FROM_DEVICE);
203 __free_pages(page, efx->rx_buffer_order); 173 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
204 return -EIO; 174 dma_addr))) {
175 __free_pages(page, efx->rx_buffer_order);
176 return -EIO;
177 }
178 state = page_address(page);
179 state->dma_addr = dma_addr;
180 } else {
181 state = page_address(page);
182 dma_addr = state->dma_addr;
205 } 183 }
206 state = page_address(page);
207 state->refcnt = 0;
208 state->dma_addr = dma_addr;
209 184
210 dma_addr += sizeof(struct efx_rx_page_state); 185 dma_addr += sizeof(struct efx_rx_page_state);
211 page_offset = sizeof(struct efx_rx_page_state); 186 page_offset = sizeof(struct efx_rx_page_state);
212 187
213 split: 188 do {
214 index = rx_queue->added_count & rx_queue->ptr_mask; 189 index = rx_queue->added_count & rx_queue->ptr_mask;
215 rx_buf = efx_rx_buffer(rx_queue, index); 190 rx_buf = efx_rx_buffer(rx_queue, index);
216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 191 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->u.page = page; 192 rx_buf->page = page;
218 rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; 193 rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 194 rx_buf->len = efx->rx_dma_len;
220 rx_buf->flags = EFX_RX_BUF_PAGE; 195 rx_buf->flags = 0;
221 ++rx_queue->added_count; 196 ++rx_queue->added_count;
222 ++rx_queue->alloc_page_count;
223 ++state->refcnt;
224
225 if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
226 /* Use the second half of the page */
227 get_page(page); 197 get_page(page);
228 dma_addr += (PAGE_SIZE >> 1); 198 dma_addr += efx->rx_page_buf_step;
229 page_offset += (PAGE_SIZE >> 1); 199 page_offset += efx->rx_page_buf_step;
230 ++count; 200 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
231 goto split; 201
232 } 202 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
233 } 203 } while (++count < efx->rx_pages_per_batch);
234 204
235 return 0; 205 return 0;
236} 206}
237 207
208/* Unmap a DMA-mapped page. This function is only called for the final RX
209 * buffer in a page.
210 */
238static void efx_unmap_rx_buffer(struct efx_nic *efx, 211static void efx_unmap_rx_buffer(struct efx_nic *efx,
239 struct efx_rx_buffer *rx_buf, 212 struct efx_rx_buffer *rx_buf)
240 unsigned int used_len)
241{ 213{
242 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 214 struct page *page = rx_buf->page;
243 struct efx_rx_page_state *state; 215
244 216 if (page) {
245 state = page_address(rx_buf->u.page); 217 struct efx_rx_page_state *state = page_address(page);
246 if (--state->refcnt == 0) { 218 dma_unmap_page(&efx->pci_dev->dev,
247 dma_unmap_page(&efx->pci_dev->dev, 219 state->dma_addr,
248 state->dma_addr, 220 PAGE_SIZE << efx->rx_buffer_order,
249 efx_rx_buf_size(efx), 221 DMA_FROM_DEVICE);
250 DMA_FROM_DEVICE);
251 } else if (used_len) {
252 dma_sync_single_for_cpu(&efx->pci_dev->dev,
253 rx_buf->dma_addr, used_len,
254 DMA_FROM_DEVICE);
255 }
256 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
257 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
258 rx_buf->len, DMA_FROM_DEVICE);
259 } 222 }
260} 223}
261 224
262static void efx_free_rx_buffer(struct efx_nic *efx, 225static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
263 struct efx_rx_buffer *rx_buf)
264{ 226{
265 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 227 if (rx_buf->page) {
266 __free_pages(rx_buf->u.page, efx->rx_buffer_order); 228 put_page(rx_buf->page);
267 rx_buf->u.page = NULL; 229 rx_buf->page = NULL;
268 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
269 dev_kfree_skb_any(rx_buf->u.skb);
270 rx_buf->u.skb = NULL;
271 } 230 }
272} 231}
273 232
274static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 233/* Attempt to recycle the page if there is an RX recycle ring; the page can
275 struct efx_rx_buffer *rx_buf) 234 * only be added if this is the final RX buffer, to prevent pages being used in
235 * the descriptor ring and appearing in the recycle ring simultaneously.
236 */
237static void efx_recycle_rx_page(struct efx_channel *channel,
238 struct efx_rx_buffer *rx_buf)
276{ 239{
277 efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); 240 struct page *page = rx_buf->page;
278 efx_free_rx_buffer(rx_queue->efx, rx_buf); 241 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
279} 242 struct efx_nic *efx = rx_queue->efx;
243 unsigned index;
280 244
281/* Attempt to resurrect the other receive buffer that used to share this page, 245 /* Only recycle the page after processing the final buffer. */
282 * which had previously been passed up to the kernel and freed. */ 246 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
283static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
284 struct efx_rx_buffer *rx_buf)
285{
286 struct efx_rx_page_state *state = page_address(rx_buf->u.page);
287 struct efx_rx_buffer *new_buf;
288 unsigned fill_level, index;
289
290 /* +1 because efx_rx_packet() incremented removed_count. +1 because
291 * we'd like to insert an additional descriptor whilst leaving
292 * EFX_RXD_HEAD_ROOM for the non-recycle path */
293 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
294 if (unlikely(fill_level > rx_queue->max_fill)) {
295 /* We could place "state" on a list, and drain the list in
296 * efx_fast_push_rx_descriptors(). For now, this will do. */
297 return; 247 return;
298 }
299 248
300 ++state->refcnt; 249 index = rx_queue->page_add & rx_queue->page_ptr_mask;
301 get_page(rx_buf->u.page); 250 if (rx_queue->page_ring[index] == NULL) {
251 unsigned read_index = rx_queue->page_remove &
252 rx_queue->page_ptr_mask;
302 253
303 index = rx_queue->added_count & rx_queue->ptr_mask; 254 /* The next slot in the recycle ring is available, but
304 new_buf = efx_rx_buffer(rx_queue, index); 255 * increment page_remove if the read pointer currently
305 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 256 * points here.
306 new_buf->u.page = rx_buf->u.page; 257 */
307 new_buf->len = rx_buf->len; 258 if (read_index == index)
308 new_buf->flags = EFX_RX_BUF_PAGE; 259 ++rx_queue->page_remove;
309 ++rx_queue->added_count; 260 rx_queue->page_ring[index] = page;
261 ++rx_queue->page_add;
262 return;
263 }
264 ++rx_queue->page_recycle_full;
265 efx_unmap_rx_buffer(efx, rx_buf);
266 put_page(rx_buf->page);
310} 267}
311 268
312/* Recycle the given rx buffer directly back into the rx_queue. There is 269static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
313 * always room to add this buffer, because we've just popped a buffer. */ 270 struct efx_rx_buffer *rx_buf)
314static void efx_recycle_rx_buffer(struct efx_channel *channel,
315 struct efx_rx_buffer *rx_buf)
316{ 271{
317 struct efx_nic *efx = channel->efx; 272 /* Release the page reference we hold for the buffer. */
318 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 273 if (rx_buf->page)
319 struct efx_rx_buffer *new_buf; 274 put_page(rx_buf->page);
320 unsigned index; 275
321 276 /* If this is the last buffer in a page, unmap and free it. */
322 rx_buf->flags &= EFX_RX_BUF_PAGE; 277 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
323 278 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
324 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && 279 efx_free_rx_buffer(rx_buf);
325 efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 280 }
326 page_count(rx_buf->u.page) == 1) 281 rx_buf->page = NULL;
327 efx_resurrect_rx_buffer(rx_queue, rx_buf); 282}
328 283
329 index = rx_queue->added_count & rx_queue->ptr_mask; 284/* Recycle the pages that are used by buffers that have just been received. */
330 new_buf = efx_rx_buffer(rx_queue, index); 285static void efx_recycle_rx_buffers(struct efx_channel *channel,
286 struct efx_rx_buffer *rx_buf,
287 unsigned int n_frags)
288{
289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
331 290
332 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 291 do {
333 rx_buf->u.page = NULL; 292 efx_recycle_rx_page(channel, rx_buf);
334 ++rx_queue->added_count; 293 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
294 } while (--n_frags);
335} 295}
336 296
337/** 297/**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
348 */ 308 */
349void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) 309void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
350{ 310{
351 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 311 struct efx_nic *efx = rx_queue->efx;
352 unsigned fill_level; 312 unsigned int fill_level, batch_size;
353 int space, rc = 0; 313 int space, rc = 0;
354 314
355 /* Calculate current fill level, and exit if we don't need to fill */ 315 /* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
364 rx_queue->min_fill = fill_level; 324 rx_queue->min_fill = fill_level;
365 } 325 }
366 326
327 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
367 space = rx_queue->max_fill - fill_level; 328 space = rx_queue->max_fill - fill_level;
368 EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH); 329 EFX_BUG_ON_PARANOID(space < batch_size);
369 330
370 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 331 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
371 "RX queue %d fast-filling descriptor ring from" 332 "RX queue %d fast-filling descriptor ring from"
372 " level %d to level %d using %s allocation\n", 333 " level %d to level %d\n",
373 efx_rx_queue_index(rx_queue), fill_level, 334 efx_rx_queue_index(rx_queue), fill_level,
374 rx_queue->max_fill, 335 rx_queue->max_fill);
375 channel->rx_alloc_push_pages ? "page" : "skb"); 336
376 337
377 do { 338 do {
378 if (channel->rx_alloc_push_pages) 339 rc = efx_init_rx_buffers(rx_queue);
379 rc = efx_init_rx_buffers_page(rx_queue);
380 else
381 rc = efx_init_rx_buffers_skb(rx_queue);
382 if (unlikely(rc)) { 340 if (unlikely(rc)) {
383 /* Ensure that we don't leave the rx queue empty */ 341 /* Ensure that we don't leave the rx queue empty */
384 if (rx_queue->added_count == rx_queue->removed_count) 342 if (rx_queue->added_count == rx_queue->removed_count)
385 efx_schedule_slow_fill(rx_queue); 343 efx_schedule_slow_fill(rx_queue);
386 goto out; 344 goto out;
387 } 345 }
388 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); 346 } while ((space -= batch_size) >= batch_size);
389 347
390 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 348 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
391 "RX queue %d fast-filled descriptor ring " 349 "RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
408 366
409static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 367static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
410 struct efx_rx_buffer *rx_buf, 368 struct efx_rx_buffer *rx_buf,
411 int len, bool *leak_packet) 369 int len)
412{ 370{
413 struct efx_nic *efx = rx_queue->efx; 371 struct efx_nic *efx = rx_queue->efx;
414 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 372 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
428 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 386 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
429 efx_rx_queue_index(rx_queue), len, max_len, 387 efx_rx_queue_index(rx_queue), len, max_len,
430 efx->type->rx_buffer_padding); 388 efx->type->rx_buffer_padding);
431 /* If this buffer was skb-allocated, then the meta
432 * data at the end of the skb will be trashed. So
433 * we have no choice but to leak the fragment.
434 */
435 *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
436 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 389 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
437 } else { 390 } else {
438 if (net_ratelimit()) 391 if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
448/* Pass a received packet up through GRO. GRO can handle pages 401/* Pass a received packet up through GRO. GRO can handle pages
449 * regardless of checksum state and skbs with a good checksum. 402 * regardless of checksum state and skbs with a good checksum.
450 */ 403 */
451static void efx_rx_packet_gro(struct efx_channel *channel, 404static void
452 struct efx_rx_buffer *rx_buf, 405efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
453 const u8 *eh) 406 unsigned int n_frags, u8 *eh)
454{ 407{
455 struct napi_struct *napi = &channel->napi_str; 408 struct napi_struct *napi = &channel->napi_str;
456 gro_result_t gro_result; 409 gro_result_t gro_result;
410 struct efx_nic *efx = channel->efx;
411 struct sk_buff *skb;
457 412
458 if (rx_buf->flags & EFX_RX_BUF_PAGE) { 413 skb = napi_get_frags(napi);
459 struct efx_nic *efx = channel->efx; 414 if (unlikely(!skb)) {
460 struct page *page = rx_buf->u.page; 415 while (n_frags--) {
461 struct sk_buff *skb; 416 put_page(rx_buf->page);
417 rx_buf->page = NULL;
418 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
419 }
420 return;
421 }
462 422
463 rx_buf->u.page = NULL; 423 if (efx->net_dev->features & NETIF_F_RXHASH)
424 skb->rxhash = efx_rx_buf_hash(eh);
425 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
426 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
427
428 for (;;) {
429 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
430 rx_buf->page, rx_buf->page_offset,
431 rx_buf->len);
432 rx_buf->page = NULL;
433 skb->len += rx_buf->len;
434 if (skb_shinfo(skb)->nr_frags == n_frags)
435 break;
436
437 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
438 }
464 439
465 skb = napi_get_frags(napi); 440 skb->data_len = skb->len;
466 if (!skb) { 441 skb->truesize += n_frags * efx->rx_buffer_truesize;
467 put_page(page); 442
468 return; 443 skb_record_rx_queue(skb, channel->rx_queue.core_index);
469 } 444
445 gro_result = napi_gro_frags(napi);
446 if (gro_result != GRO_DROP)
447 channel->irq_mod_score += 2;
448}
470 449
471 if (efx->net_dev->features & NETIF_F_RXHASH) 450/* Allocate and construct an SKB around page fragments */
472 skb->rxhash = efx_rx_buf_hash(eh); 451static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
452 struct efx_rx_buffer *rx_buf,
453 unsigned int n_frags,
454 u8 *eh, int hdr_len)
455{
456 struct efx_nic *efx = channel->efx;
457 struct sk_buff *skb;
473 458
474 skb_fill_page_desc(skb, 0, page, 459 /* Allocate an SKB to store the headers */
475 efx_rx_buf_offset(efx, rx_buf), rx_buf->len); 460 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
461 if (unlikely(skb == NULL))
462 return NULL;
476 463
477 skb->len = rx_buf->len; 464 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
478 skb->data_len = rx_buf->len;
479 skb->truesize += rx_buf->len;
480 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
481 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
482 465
483 skb_record_rx_queue(skb, channel->rx_queue.core_index); 466 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
467 memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
484 468
485 gro_result = napi_gro_frags(napi); 469 /* Append the remaining page(s) onto the frag list */
486 } else { 470 if (rx_buf->len > hdr_len) {
487 struct sk_buff *skb = rx_buf->u.skb; 471 rx_buf->page_offset += hdr_len;
472 rx_buf->len -= hdr_len;
488 473
489 EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED)); 474 for (;;) {
490 rx_buf->u.skb = NULL; 475 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
491 skb->ip_summed = CHECKSUM_UNNECESSARY; 476 rx_buf->page, rx_buf->page_offset,
477 rx_buf->len);
478 rx_buf->page = NULL;
479 skb->len += rx_buf->len;
480 skb->data_len += rx_buf->len;
481 if (skb_shinfo(skb)->nr_frags == n_frags)
482 break;
492 483
493 gro_result = napi_gro_receive(napi, skb); 484 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
485 }
486 } else {
487 __free_pages(rx_buf->page, efx->rx_buffer_order);
488 rx_buf->page = NULL;
489 n_frags = 0;
494 } 490 }
495 491
496 if (gro_result == GRO_NORMAL) { 492 skb->truesize += n_frags * efx->rx_buffer_truesize;
497 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 493
498 } else if (gro_result != GRO_DROP) { 494 /* Move past the ethernet header */
499 channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO; 495 skb->protocol = eth_type_trans(skb, efx->net_dev);
500 channel->irq_mod_score += 2; 496
501 } 497 return skb;
502} 498}
503 499
504void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 500void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
505 unsigned int len, u16 flags) 501 unsigned int n_frags, unsigned int len, u16 flags)
506{ 502{
507 struct efx_nic *efx = rx_queue->efx; 503 struct efx_nic *efx = rx_queue->efx;
508 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 504 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
509 struct efx_rx_buffer *rx_buf; 505 struct efx_rx_buffer *rx_buf;
510 bool leak_packet = false;
511 506
512 rx_buf = efx_rx_buffer(rx_queue, index); 507 rx_buf = efx_rx_buffer(rx_queue, index);
513 rx_buf->flags |= flags; 508 rx_buf->flags |= flags;
514 509
515 /* This allows the refill path to post another buffer. 510 /* Validate the number of fragments and completed length */
516 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 511 if (n_frags == 1) {
517 * isn't overwritten yet. 512 efx_rx_packet__check_len(rx_queue, rx_buf, len);
518 */ 513 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
519 rx_queue->removed_count++; 514 unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
520 515 unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
521 /* Validate the length encoded in the event vs the descriptor pushed */ 516 unlikely(!efx->rx_scatter)) {
522 efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet); 517 /* If this isn't an explicit discard request, either
518 * the hardware or the driver is broken.
519 */
520 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
521 rx_buf->flags |= EFX_RX_PKT_DISCARD;
522 }
523 523
524 netif_vdbg(efx, rx_status, efx->net_dev, 524 netif_vdbg(efx, rx_status, efx->net_dev,
525 "RX queue %d received id %x at %llx+%x %s%s\n", 525 "RX queue %d received ids %x-%x len %d %s%s\n",
526 efx_rx_queue_index(rx_queue), index, 526 efx_rx_queue_index(rx_queue), index,
527 (unsigned long long)rx_buf->dma_addr, len, 527 (index + n_frags - 1) & rx_queue->ptr_mask, len,
528 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", 528 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
529 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); 529 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
530 530
531 /* Discard packet, if instructed to do so */ 531 /* Discard packet, if instructed to do so. Process the
532 * previous receive first.
533 */
532 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { 534 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
533 if (unlikely(leak_packet)) 535 efx_rx_flush_packet(channel);
534 channel->n_skbuff_leaks++; 536 put_page(rx_buf->page);
535 else 537 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
536 efx_recycle_rx_buffer(channel, rx_buf); 538 return;
537
538 /* Don't hold off the previous receive */
539 rx_buf = NULL;
540 goto out;
541 } 539 }
542 540
543 /* Release and/or sync DMA mapping - assumes all RX buffers 541 if (n_frags == 1)
544 * consumed in-order per RX queue 542 rx_buf->len = len;
543
544 /* Release and/or sync the DMA mapping - assumes all RX buffers
545 * consumed in-order per RX queue.
545 */ 546 */
546 efx_unmap_rx_buffer(efx, rx_buf, len); 547 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
547 548
548 /* Prefetch nice and early so data will (hopefully) be in cache by 549 /* Prefetch nice and early so data will (hopefully) be in cache by
549 * the time we look at it. 550 * the time we look at it.
550 */ 551 */
551 prefetch(efx_rx_buf_eh(efx, rx_buf)); 552 prefetch(efx_rx_buf_va(rx_buf));
553
554 rx_buf->page_offset += efx->type->rx_buffer_hash_size;
555 rx_buf->len -= efx->type->rx_buffer_hash_size;
556
557 if (n_frags > 1) {
558 /* Release/sync DMA mapping for additional fragments.
559 * Fix length for last fragment.
560 */
561 unsigned int tail_frags = n_frags - 1;
562
563 for (;;) {
564 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
565 if (--tail_frags == 0)
566 break;
567 efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
568 }
569 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
570 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
571 }
572
573 /* All fragments have been DMA-synced, so recycle buffers and pages. */
574 rx_buf = efx_rx_buffer(rx_queue, index);
575 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
552 576
553 /* Pipeline receives so that we give time for packet headers to be 577 /* Pipeline receives so that we give time for packet headers to be
554 * prefetched into cache. 578 * prefetched into cache.
555 */ 579 */
556 rx_buf->len = len - efx->type->rx_buffer_hash_size; 580 efx_rx_flush_packet(channel);
557out: 581 channel->rx_pkt_n_frags = n_frags;
558 if (channel->rx_pkt) 582 channel->rx_pkt_index = index;
559 __efx_rx_packet(channel, channel->rx_pkt);
560 channel->rx_pkt = rx_buf;
561} 583}
562 584
563static void efx_rx_deliver(struct efx_channel *channel, 585static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
564 struct efx_rx_buffer *rx_buf) 586 struct efx_rx_buffer *rx_buf,
587 unsigned int n_frags)
565{ 588{
566 struct sk_buff *skb; 589 struct sk_buff *skb;
590 u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
567 591
568 /* We now own the SKB */ 592 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
569 skb = rx_buf->u.skb; 593 if (unlikely(skb == NULL)) {
570 rx_buf->u.skb = NULL; 594 efx_free_rx_buffer(rx_buf);
595 return;
596 }
597 skb_record_rx_queue(skb, channel->rx_queue.core_index);
571 598
572 /* Set the SKB flags */ 599 /* Set the SKB flags */
573 skb_checksum_none_assert(skb); 600 skb_checksum_none_assert(skb);
574 601
575 /* Record the rx_queue */
576 skb_record_rx_queue(skb, channel->rx_queue.core_index);
577
578 /* Pass the packet up */
579 if (channel->type->receive_skb) 602 if (channel->type->receive_skb)
580 channel->type->receive_skb(channel, skb); 603 if (channel->type->receive_skb(channel, skb))
581 else 604 return;
582 netif_receive_skb(skb);
583 605
584 /* Update allocation strategy method */ 606 /* Pass the packet up */
585 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 607 netif_receive_skb(skb);
586} 608}
587 609
588/* Handle a received packet. Second half: Touches packet payload. */ 610/* Handle a received packet. Second half: Touches packet payload. */
589void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) 611void __efx_rx_packet(struct efx_channel *channel)
590{ 612{
591 struct efx_nic *efx = channel->efx; 613 struct efx_nic *efx = channel->efx;
592 u8 *eh = efx_rx_buf_eh(efx, rx_buf); 614 struct efx_rx_buffer *rx_buf =
615 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
616 u8 *eh = efx_rx_buf_va(rx_buf);
593 617
594 /* If we're in loopback test, then pass the packet directly to the 618 /* If we're in loopback test, then pass the packet directly to the
595 * loopback layer, and free the rx_buf here 619 * loopback layer, and free the rx_buf here
596 */ 620 */
597 if (unlikely(efx->loopback_selftest)) { 621 if (unlikely(efx->loopback_selftest)) {
598 efx_loopback_rx_packet(efx, eh, rx_buf->len); 622 efx_loopback_rx_packet(efx, eh, rx_buf->len);
599 efx_free_rx_buffer(efx, rx_buf); 623 efx_free_rx_buffer(rx_buf);
600 return; 624 goto out;
601 }
602
603 if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
604 struct sk_buff *skb = rx_buf->u.skb;
605
606 prefetch(skb_shinfo(skb));
607
608 skb_reserve(skb, efx->type->rx_buffer_hash_size);
609 skb_put(skb, rx_buf->len);
610
611 if (efx->net_dev->features & NETIF_F_RXHASH)
612 skb->rxhash = efx_rx_buf_hash(eh);
613
614 /* Move past the ethernet header. rx_buf->data still points
615 * at the ethernet header */
616 skb->protocol = eth_type_trans(skb, efx->net_dev);
617
618 skb_record_rx_queue(skb, channel->rx_queue.core_index);
619 } 625 }
620 626
621 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 627 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
622 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 628 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
623 629
624 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) && 630 if (!channel->type->receive_skb)
625 !channel->type->receive_skb) 631 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
626 efx_rx_packet_gro(channel, rx_buf, eh);
627 else 632 else
628 efx_rx_deliver(channel, rx_buf); 633 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
629} 634out:
630 635 channel->rx_pkt_n_frags = 0;
631void efx_rx_strategy(struct efx_channel *channel)
632{
633 enum efx_rx_alloc_method method = rx_alloc_method;
634
635 if (channel->type->receive_skb) {
636 channel->rx_alloc_push_pages = false;
637 return;
638 }
639
640 /* Only makes sense to use page based allocation if GRO is enabled */
641 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
642 method = RX_ALLOC_METHOD_SKB;
643 } else if (method == RX_ALLOC_METHOD_AUTO) {
644 /* Constrain the rx_alloc_level */
645 if (channel->rx_alloc_level < 0)
646 channel->rx_alloc_level = 0;
647 else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
648 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
649
650 /* Decide on the allocation method */
651 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
652 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
653 }
654
655 /* Push the option */
656 channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
657} 636}
658 637
659int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 638int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
683 kfree(rx_queue->buffer); 662 kfree(rx_queue->buffer);
684 rx_queue->buffer = NULL; 663 rx_queue->buffer = NULL;
685 } 664 }
665
686 return rc; 666 return rc;
687} 667}
688 668
669static void efx_init_rx_recycle_ring(struct efx_nic *efx,
670 struct efx_rx_queue *rx_queue)
671{
672 unsigned int bufs_in_recycle_ring, page_ring_size;
673
674 /* Set the RX recycle ring size */
675#ifdef CONFIG_PPC64
676 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
677#else
678 if (efx->pci_dev->dev.iommu_group)
679 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
680 else
681 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
682#endif /* CONFIG_PPC64 */
683
684 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
685 efx->rx_bufs_per_page);
686 rx_queue->page_ring = kcalloc(page_ring_size,
687 sizeof(*rx_queue->page_ring), GFP_KERNEL);
688 rx_queue->page_ptr_mask = page_ring_size - 1;
689}
690
689void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 691void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
690{ 692{
691 struct efx_nic *efx = rx_queue->efx; 693 struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
699 rx_queue->notified_count = 0; 701 rx_queue->notified_count = 0;
700 rx_queue->removed_count = 0; 702 rx_queue->removed_count = 0;
701 rx_queue->min_fill = -1U; 703 rx_queue->min_fill = -1U;
704 efx_init_rx_recycle_ring(efx, rx_queue);
705
706 rx_queue->page_remove = 0;
707 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
708 rx_queue->page_recycle_count = 0;
709 rx_queue->page_recycle_failed = 0;
710 rx_queue->page_recycle_full = 0;
702 711
703 /* Initialise limit fields */ 712 /* Initialise limit fields */
704 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; 713 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
705 max_trigger = max_fill - EFX_RX_BATCH; 714 max_trigger =
715 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
706 if (rx_refill_threshold != 0) { 716 if (rx_refill_threshold != 0) {
707 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 717 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
708 if (trigger > max_trigger) 718 if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
722void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 732void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
723{ 733{
724 int i; 734 int i;
735 struct efx_nic *efx = rx_queue->efx;
725 struct efx_rx_buffer *rx_buf; 736 struct efx_rx_buffer *rx_buf;
726 737
727 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 738 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
733 del_timer_sync(&rx_queue->slow_fill); 744 del_timer_sync(&rx_queue->slow_fill);
734 efx_nic_fini_rx(rx_queue); 745 efx_nic_fini_rx(rx_queue);
735 746
736 /* Release RX buffers NB start at index 0 not current HW ptr */ 747 /* Release RX buffers from the current read ptr to the write ptr */
737 if (rx_queue->buffer) { 748 if (rx_queue->buffer) {
738 for (i = 0; i <= rx_queue->ptr_mask; i++) { 749 for (i = rx_queue->removed_count; i < rx_queue->added_count;
739 rx_buf = efx_rx_buffer(rx_queue, i); 750 i++) {
751 unsigned index = i & rx_queue->ptr_mask;
752 rx_buf = efx_rx_buffer(rx_queue, index);
740 efx_fini_rx_buffer(rx_queue, rx_buf); 753 efx_fini_rx_buffer(rx_queue, rx_buf);
741 } 754 }
742 } 755 }
756
757 /* Unmap and release the pages in the recycle ring. Remove the ring. */
758 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
759 struct page *page = rx_queue->page_ring[i];
760 struct efx_rx_page_state *state;
761
762 if (page == NULL)
763 continue;
764
765 state = page_address(page);
766 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
767 PAGE_SIZE << efx->rx_buffer_order,
768 DMA_FROM_DEVICE);
769 put_page(page);
770 }
771 kfree(rx_queue->page_ring);
772 rx_queue->page_ring = NULL;
743} 773}
744 774
745void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 775void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
754} 784}
755 785
756 786
757module_param(rx_alloc_method, int, 0644);
758MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
759
760module_param(rx_refill_threshold, uint, 0444); 787module_param(rx_refill_threshold, uint, 0444);
761MODULE_PARM_DESC(rx_refill_threshold, 788MODULE_PARM_DESC(rx_refill_threshold,
762 "RX descriptor ring refill threshold (%)"); 789 "RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f05..51669244d154 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:
202 202
203static enum reset_type siena_map_reset_reason(enum reset_type reason) 203static enum reset_type siena_map_reset_reason(enum reset_type reason)
204{ 204{
205 return RESET_TYPE_ALL; 205 return RESET_TYPE_RECOVER_OR_ALL;
206} 206}
207 207
208static int siena_map_reset_flags(u32 *flags) 208static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
245 return efx_mcdi_reset_port(efx); 245 return efx_mcdi_reset_port(efx);
246} 246}
247 247
248#ifdef CONFIG_EEH
249/* When a PCI device is isolated from the bus, a subsequent MMIO read is
250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver
251 * was written to minimise MMIO read (for latency) then a periodic call to check
252 * the EEH status of the device is required so that device recovery can happen
253 * in a timely fashion.
254 */
255static void siena_monitor(struct efx_nic *efx)
256{
257 struct eeh_dev *eehdev =
258 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
259
260 eeh_dev_check_failure(eehdev);
261}
262#endif
263
248static int siena_probe_nvconfig(struct efx_nic *efx) 264static int siena_probe_nvconfig(struct efx_nic *efx)
249{ 265{
250 u32 caps = 0; 266 u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
398 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); 414 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
399 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); 415 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
400 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); 416 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
417 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
418 EFX_RX_USR_BUF_SIZE >> 5);
401 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 419 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
402 420
403 /* Set hash key for IPv4 */ 421 /* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
665 .init = siena_init_nic, 683 .init = siena_init_nic,
666 .dimension_resources = siena_dimension_resources, 684 .dimension_resources = siena_dimension_resources,
667 .fini = efx_port_dummy_op_void, 685 .fini = efx_port_dummy_op_void,
686#ifdef CONFIG_EEH
687 .monitor = siena_monitor,
688#else
668 .monitor = NULL, 689 .monitor = NULL,
690#endif
669 .map_reset_reason = siena_map_reset_reason, 691 .map_reset_reason = siena_map_reset_reason,
670 .map_reset_flags = siena_map_reset_flags, 692 .map_reset_flags = siena_map_reset_flags,
671 .reset = siena_reset_hw, 693 .reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
698 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 720 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
699 .rx_buffer_hash_size = 0x10, 721 .rx_buffer_hash_size = 0x10,
700 .rx_buffer_padding = 0, 722 .rx_buffer_padding = 0,
723 .can_rx_scatter = true,
701 .max_interrupt_mode = EFX_INT_MODE_MSIX, 724 .max_interrupt_mode = EFX_INT_MODE_MSIX,
702 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 725 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
703 * interrupt handler only supports 32 726 * interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21b..4bdbaad9932d 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
213{ 213{
214 /* Init TX ring */ 214 /* Init TX ring */
215 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
216 &priv->tx_ring_dma, GFP_ATOMIC); 216 &priv->tx_ring_dma,
217 GFP_ATOMIC | __GFP_ZERO);
217 if (!priv->tx_ring) 218 if (!priv->tx_ring)
218 return -ENOMEM; 219 return -ENOMEM;
219 memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE); 220
220 priv->tx_count = priv->tx_read = priv->tx_write = 0; 221 priv->tx_count = priv->tx_read = priv->tx_write = 0;
221 mace->eth.tx_ring_base = priv->tx_ring_dma; 222 mace->eth.tx_ring_base = priv->tx_ring_dma;
222 /* Now init skb save area */ 223 /* Now init skb save area */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa9..eb4aea3fe793 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1187,8 +1187,14 @@ sis900_init_rx_ring(struct net_device *net_dev)
1187 } 1187 }
1188 sis_priv->rx_skbuff[i] = skb; 1188 sis_priv->rx_skbuff[i] = skb;
1189 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; 1189 sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
1190 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, 1190 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
1191 skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1191 skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1192 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1193 sis_priv->rx_ring[i].bufptr))) {
1194 dev_kfree_skb(skb);
1195 sis_priv->rx_skbuff[i] = NULL;
1196 break;
1197 }
1192 } 1198 }
1193 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); 1199 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1194 1200
@@ -1621,6 +1627,14 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1621 /* set the transmit buffer descriptor and enable Transmit State Machine */ 1627 /* set the transmit buffer descriptor and enable Transmit State Machine */
1622 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, 1628 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
1623 skb->data, skb->len, PCI_DMA_TODEVICE); 1629 skb->data, skb->len, PCI_DMA_TODEVICE);
1630 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1631 sis_priv->tx_ring[entry].bufptr))) {
1632 dev_kfree_skb(skb);
1633 sis_priv->tx_skbuff[entry] = NULL;
1634 net_dev->stats.tx_dropped++;
1635 spin_unlock_irqrestore(&sis_priv->lock, flags);
1636 return NETDEV_TX_OK;
1637 }
1624 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); 1638 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
1625 sw32(cr, TxENA | sr32(cr)); 1639 sw32(cr, TxENA | sr32(cr));
1626 1640
@@ -1824,9 +1838,15 @@ static int sis900_rx(struct net_device *net_dev)
1824refill_rx_ring: 1838refill_rx_ring:
1825 sis_priv->rx_skbuff[entry] = skb; 1839 sis_priv->rx_skbuff[entry] = skb;
1826 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; 1840 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1827 sis_priv->rx_ring[entry].bufptr = 1841 sis_priv->rx_ring[entry].bufptr =
1828 pci_map_single(sis_priv->pci_dev, skb->data, 1842 pci_map_single(sis_priv->pci_dev, skb->data,
1829 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1843 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1844 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1845 sis_priv->rx_ring[entry].bufptr))) {
1846 dev_kfree_skb_irq(skb);
1847 sis_priv->rx_skbuff[entry] = NULL;
1848 break;
1849 }
1830 } 1850 }
1831 sis_priv->cur_rx++; 1851 sis_priv->cur_rx++;
1832 entry = sis_priv->cur_rx % NUM_RX_DESC; 1852 entry = sis_priv->cur_rx % NUM_RX_DESC;
@@ -1841,23 +1861,26 @@ refill_rx_ring:
1841 entry = sis_priv->dirty_rx % NUM_RX_DESC; 1861 entry = sis_priv->dirty_rx % NUM_RX_DESC;
1842 1862
1843 if (sis_priv->rx_skbuff[entry] == NULL) { 1863 if (sis_priv->rx_skbuff[entry] == NULL) {
1844 if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) { 1864 skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
1865 if (skb == NULL) {
1845 /* not enough memory for skbuff, this makes a 1866 /* not enough memory for skbuff, this makes a
1846 * "hole" on the buffer ring, it is not clear 1867 * "hole" on the buffer ring, it is not clear
1847 * how the hardware will react to this kind 1868 * how the hardware will react to this kind
1848 * of degenerated buffer */ 1869 * of degenerated buffer */
1849 if (netif_msg_rx_err(sis_priv))
1850 printk(KERN_INFO "%s: Memory squeeze, "
1851 "deferring packet.\n",
1852 net_dev->name);
1853 net_dev->stats.rx_dropped++; 1870 net_dev->stats.rx_dropped++;
1854 break; 1871 break;
1855 } 1872 }
1856 sis_priv->rx_skbuff[entry] = skb; 1873 sis_priv->rx_skbuff[entry] = skb;
1857 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; 1874 sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1858 sis_priv->rx_ring[entry].bufptr = 1875 sis_priv->rx_ring[entry].bufptr =
1859 pci_map_single(sis_priv->pci_dev, skb->data, 1876 pci_map_single(sis_priv->pci_dev, skb->data,
1860 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1877 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1878 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1879 sis_priv->rx_ring[entry].bufptr))) {
1880 dev_kfree_skb_irq(skb);
1881 sis_priv->rx_skbuff[entry] = NULL;
1882 break;
1883 }
1861 } 1884 }
1862 } 1885 }
1863 /* re-enable the potentially idle receive state matchine */ 1886 /* re-enable the potentially idle receive state matchine */
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1e..e85c2e7e8246 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
1223 dev->stats.multicast++; 1223 dev->stats.multicast++;
1224 1224
1225 skb = netdev_alloc_skb(dev, packet_length + 5); 1225 skb = netdev_alloc_skb(dev, packet_length + 5);
1226
1227 if ( skb == NULL ) { 1226 if ( skb == NULL ) {
1228 printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
1229 dev->stats.rx_dropped++; 1227 dev->stats.rx_dropped++;
1230 goto done; 1228 goto done;
1231 } 1229 }
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de38..dfbf978315df 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
465 */ 465 */
466 skb = netdev_alloc_skb(dev, packet_len); 466 skb = netdev_alloc_skb(dev, packet_len);
467 if (unlikely(skb == NULL)) { 467 if (unlikely(skb == NULL)) {
468 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
469 dev->name);
470 SMC_WAIT_MMU_BUSY(lp); 468 SMC_WAIT_MMU_BUSY(lp);
471 SMC_SET_MMU_CMD(lp, MC_RELEASE); 469 SMC_SET_MMU_CMD(lp, MC_RELEASE);
472 dev->stats.rx_dropped++; 470 dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34c..48e2b99bec51 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
2115 spin_lock_init(&pdata->dev_lock); 2115 spin_lock_init(&pdata->dev_lock);
2116 spin_lock_init(&pdata->mac_lock); 2116 spin_lock_init(&pdata->mac_lock);
2117 2117
2118 if (pdata->ioaddr == 0) { 2118 if (pdata->ioaddr == NULL) {
2119 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); 2119 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
2120 return -ENODEV; 2120 return -ENODEV;
2121 } 2121 }
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d7509..ffa5c4ad1210 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
848 BUG_ON(pd->rx_buffers[index].skb); 848 BUG_ON(pd->rx_buffers[index].skb);
849 BUG_ON(pd->rx_buffers[index].mapping); 849 BUG_ON(pd->rx_buffers[index].mapping);
850 850
851 if (unlikely(!skb)) { 851 if (unlikely(!skb))
852 smsc_warn(RX_ERR, "Failed to allocate new skb!");
853 return -ENOMEM; 852 return -ENOMEM;
854 }
855 853
856 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), 854 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
857 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 855 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d1..f695a50bac47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
5 select MII 5 select MII
6 select PHYLIB 6 select PHYLIB
7 select CRC32 7 select CRC32
8 select PTP_1588_CLOCK
8 ---help--- 9 ---help---
9 This is the driver for the Ethernet IPs are built around a 10 This is the driver for the Ethernet IPs are built around a
10 Synopsys IP Core and only tested on the STMicroelectronics 11 Synopsys IP Core and only tested on the STMicroelectronics
@@ -54,22 +55,4 @@ config STMMAC_DA
54 By default, the DMA arbitration scheme is based on Round-robin 55 By default, the DMA arbitration scheme is based on Round-robin
55 (rx:tx priority is 1:1). 56 (rx:tx priority is 1:1).
56 57
57choice
58 prompt "Select the DMA TX/RX descriptor operating modes"
59 depends on STMMAC_ETH
60 ---help---
61 This driver supports DMA descriptor to operate both in dual buffer
62 (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
63 points to two data buffer pointers whereas in CHAINED mode they
64 points to only one data buffer pointer.
65
66config STMMAC_RING
67 bool "Enable Descriptor Ring Mode"
68
69config STMMAC_CHAINED
70 bool "Enable Descriptor Chained Mode"
71
72endchoice
73
74
75endif 58endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c8e8ea60ac19..356a9dd32be7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,9 +1,7 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
3stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
4stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
5stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
6stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 4stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
7 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 5 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 6 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
9 mmc_core.o $(stmmac-y) 7 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 0668659803ed..d234ab540b29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,9 +28,9 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *) p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
35 unsigned int entry = priv->cur_tx % txsize; 35 unsigned int entry = priv->cur_tx % txsize;
36 struct dma_desc *desc = priv->dma_tx + entry; 36 struct dma_desc *desc = priv->dma_tx + entry;
@@ -47,7 +47,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
47 47
48 desc->des2 = dma_map_single(priv->device, skb->data, 48 desc->des2 = dma_map_single(priv->device, skb->data,
49 bmax, DMA_TO_DEVICE); 49 bmax, DMA_TO_DEVICE);
50 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum); 50 priv->tx_skbuff_dma[entry] = desc->des2;
51 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
51 52
52 while (len != 0) { 53 while (len != 0) {
53 entry = (++priv->cur_tx) % txsize; 54 entry = (++priv->cur_tx) % txsize;
@@ -57,8 +58,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
57 desc->des2 = dma_map_single(priv->device, 58 desc->des2 = dma_map_single(priv->device,
58 (skb->data + bmax * i), 59 (skb->data + bmax * i),
59 bmax, DMA_TO_DEVICE); 60 bmax, DMA_TO_DEVICE);
60 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, 61 priv->tx_skbuff_dma[entry] = desc->des2;
61 csum); 62 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
63 STMMAC_CHAIN_MODE);
62 priv->hw->desc->set_tx_owner(desc); 64 priv->hw->desc->set_tx_owner(desc);
63 priv->tx_skbuff[entry] = NULL; 65 priv->tx_skbuff[entry] = NULL;
64 len -= bmax; 66 len -= bmax;
@@ -67,8 +69,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
67 desc->des2 = dma_map_single(priv->device, 69 desc->des2 = dma_map_single(priv->device,
68 (skb->data + bmax * i), len, 70 (skb->data + bmax * i), len,
69 DMA_TO_DEVICE); 71 DMA_TO_DEVICE);
70 priv->hw->desc->prepare_tx_desc(desc, 0, len, 72 priv->tx_skbuff_dma[entry] = desc->des2;
71 csum); 73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_CHAIN_MODE);
72 priv->hw->desc->set_tx_owner(desc); 75 priv->hw->desc->set_tx_owner(desc);
73 priv->tx_skbuff[entry] = NULL; 76 priv->tx_skbuff[entry] = NULL;
74 len = 0; 77 len = 0;
@@ -89,49 +92,70 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
89 return ret; 92 return ret;
90} 93}
91 94
92static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) 95static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
93{ 96 unsigned int size, unsigned int extend_desc)
94}
95
96static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
97{
98}
99
100static void stmmac_clean_desc3(struct dma_desc *p)
101{
102}
103
104static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
105 unsigned int size)
106{ 97{
107 /* 98 /*
108 * In chained mode the des3 points to the next element in the ring. 99 * In chained mode the des3 points to the next element in the ring.
109 * The latest element has to point to the head. 100 * The latest element has to point to the head.
110 */ 101 */
111 int i; 102 int i;
112 struct dma_desc *p = des;
113 dma_addr_t dma_phy = phy_addr; 103 dma_addr_t dma_phy = phy_addr;
114 104
115 for (i = 0; i < (size - 1); i++) { 105 if (extend_desc) {
116 dma_phy += sizeof(struct dma_desc); 106 struct dma_extended_desc *p = (struct dma_extended_desc *)des;
117 p->des3 = (unsigned int)dma_phy; 107 for (i = 0; i < (size - 1); i++) {
118 p++; 108 dma_phy += sizeof(struct dma_extended_desc);
109 p->basic.des3 = (unsigned int)dma_phy;
110 p++;
111 }
112 p->basic.des3 = (unsigned int)phy_addr;
113
114 } else {
115 struct dma_desc *p = (struct dma_desc *)des;
116 for (i = 0; i < (size - 1); i++) {
117 dma_phy += sizeof(struct dma_desc);
118 p->des3 = (unsigned int)dma_phy;
119 p++;
120 }
121 p->des3 = (unsigned int)phy_addr;
119 } 122 }
120 p->des3 = (unsigned int)phy_addr;
121} 123}
122 124
123static int stmmac_set_16kib_bfsize(int mtu) 125static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
126{
127 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
128
129 if (priv->hwts_rx_en && !priv->extend_desc)
130 /* NOTE: Device will overwrite des3 with timestamp value if
131 * 1588-2002 time stamping is enabled, hence reinitialize it
132 * to keep explicit chaining in the descriptor.
133 */
134 p->des3 = (unsigned int)(priv->dma_rx_phy +
135 (((priv->dirty_rx) + 1) %
136 priv->dma_rx_size) *
137 sizeof(struct dma_desc));
138}
139
140static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
124{ 141{
125 /* Not supported */ 142 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
126 return 0; 143
144 if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
145 /* NOTE: Device will overwrite des3 with timestamp value if
146 * 1588-2002 time stamping is enabled, hence reinitialize it
147 * to keep explicit chaining in the descriptor.
148 */
149 p->des3 = (unsigned int)(priv->dma_tx_phy +
150 (((priv->dirty_tx + 1) %
151 priv->dma_tx_size) *
152 sizeof(struct dma_desc)));
127} 153}
128 154
129const struct stmmac_ring_mode_ops ring_mode_ops = { 155const struct stmmac_chain_mode_ops chain_mode_ops = {
156 .init = stmmac_init_dma_chain,
130 .is_jumbo_frm = stmmac_is_jumbo_frm, 157 .is_jumbo_frm = stmmac_is_jumbo_frm,
131 .jumbo_frm = stmmac_jumbo_frm, 158 .jumbo_frm = stmmac_jumbo_frm,
132 .refill_desc3 = stmmac_refill_desc3, 159 .refill_desc3 = stmmac_refill_desc3,
133 .init_desc3 = stmmac_init_desc3,
134 .init_dma_chain = stmmac_init_dma_chain,
135 .clean_desc3 = stmmac_clean_desc3, 160 .clean_desc3 = stmmac_clean_desc3,
136 .set_16kib_bfsize = stmmac_set_16kib_bfsize,
137}; 161};
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 186d14806122..7788fbe44f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -117,6 +117,36 @@ struct stmmac_extra_stats {
117 unsigned long irq_rx_path_in_lpi_mode_n; 117 unsigned long irq_rx_path_in_lpi_mode_n;
118 unsigned long irq_rx_path_exit_lpi_mode_n; 118 unsigned long irq_rx_path_exit_lpi_mode_n;
119 unsigned long phy_eee_wakeup_error_n; 119 unsigned long phy_eee_wakeup_error_n;
120 /* Extended RDES status */
121 unsigned long ip_hdr_err;
122 unsigned long ip_payload_err;
123 unsigned long ip_csum_bypassed;
124 unsigned long ipv4_pkt_rcvd;
125 unsigned long ipv6_pkt_rcvd;
126 unsigned long rx_msg_type_ext_no_ptp;
127 unsigned long rx_msg_type_sync;
128 unsigned long rx_msg_type_follow_up;
129 unsigned long rx_msg_type_delay_req;
130 unsigned long rx_msg_type_delay_resp;
131 unsigned long rx_msg_type_pdelay_req;
132 unsigned long rx_msg_type_pdelay_resp;
133 unsigned long rx_msg_type_pdelay_follow_up;
134 unsigned long ptp_frame_type;
135 unsigned long ptp_ver;
136 unsigned long timestamp_dropped;
137 unsigned long av_pkt_rcvd;
138 unsigned long av_tagged_pkt_rcvd;
139 unsigned long vlan_tag_priority_val;
140 unsigned long l3_filter_match;
141 unsigned long l4_filter_match;
142 unsigned long l3_l4_filter_no_match;
143 /* PCS */
144 unsigned long irq_pcs_ane_n;
145 unsigned long irq_pcs_link_n;
146 unsigned long irq_rgmii_n;
147 unsigned long pcs_link;
148 unsigned long pcs_duplex;
149 unsigned long pcs_speed;
120}; 150};
121 151
122/* CSR Frequency Access Defines*/ 152/* CSR Frequency Access Defines*/
@@ -138,37 +168,43 @@ struct stmmac_extra_stats {
138#define FLOW_TX 2 168#define FLOW_TX 2
139#define FLOW_AUTO (FLOW_TX | FLOW_RX) 169#define FLOW_AUTO (FLOW_TX | FLOW_RX)
140 170
141#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ 171/* PCS defines */
172#define STMMAC_PCS_RGMII (1 << 0)
173#define STMMAC_PCS_SGMII (1 << 1)
174#define STMMAC_PCS_TBI (1 << 2)
175#define STMMAC_PCS_RTBI (1 << 3)
176
177#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
142 178
143/* DAM HW feature register fields */ 179/* DAM HW feature register fields */
144#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */ 180#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
145#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */ 181#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
146#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */ 182#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
147#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */ 183#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
148#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */ 184#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
149#define DMA_HW_FEAT_ADDMACADRSEL 0x00000020 /* Multiple MAC Addr Reg */ 185#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */
150#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */ 186#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
151#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */ 187#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
152#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */ 188#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
153#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */ 189#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
154#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */ 190#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
155#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */ 191#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
156#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 Timestamp */ 192#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */
157#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 Adv Timestamp */ 193#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */
158#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */ 194#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
159#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */ 195#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
160#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */ 196#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
161#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP csum Offload(Type 1) in Rx */ 197#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */
162#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP csum Offload(Type 2) in Rx */ 198#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */
163#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */ 199#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
164#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. of additional Rx Channels */ 200#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */
165#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. of additional Tx Channels */ 201#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */
166#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate (Enhanced Descriptor) */ 202#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */
167#define DMA_HW_FEAT_INTTSEN 0x02000000 /* Timestamping with Internal 203/* Timestamping with Internal System Time */
168 System Time */ 204#define DMA_HW_FEAT_INTTSEN 0x02000000
169#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ 205#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
170#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */ 206#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */
171#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ 207#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
172#define DEFAULT_DMA_PBL 8 208#define DEFAULT_DMA_PBL 8
173 209
174/* Max/Min RI Watchdog Timer count value */ 210/* Max/Min RI Watchdog Timer count value */
@@ -180,7 +216,8 @@ struct stmmac_extra_stats {
180#define STMMAC_TX_MAX_FRAMES 256 216#define STMMAC_TX_MAX_FRAMES 256
181#define STMMAC_TX_FRAMES 64 217#define STMMAC_TX_FRAMES 64
182 218
183enum rx_frame_status { /* IPC status */ 219/* Rx IPC status */
220enum rx_frame_status {
184 good_frame = 0, 221 good_frame = 0,
185 discard_frame = 1, 222 discard_frame = 1,
186 csum_none = 2, 223 csum_none = 2,
@@ -194,17 +231,25 @@ enum dma_irq_status {
194 handle_tx = 0x8, 231 handle_tx = 0x8,
195}; 232};
196 233
197enum core_specific_irq_mask { 234#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
198 core_mmc_tx_irq = 1, 235#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
199 core_mmc_rx_irq = 2, 236#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
200 core_mmc_rx_csum_offload_irq = 4, 237#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
201 core_irq_receive_pmt_irq = 8, 238
202 core_irq_tx_path_in_lpi_mode = 16, 239#define CORE_PCS_ANE_COMPLETE (1 << 5)
203 core_irq_tx_path_exit_lpi_mode = 32, 240#define CORE_PCS_LINK_STATUS (1 << 6)
204 core_irq_rx_path_in_lpi_mode = 64, 241#define CORE_RGMII_IRQ (1 << 7)
205 core_irq_rx_path_exit_lpi_mode = 128, 242
243struct rgmii_adv {
244 unsigned int pause;
245 unsigned int duplex;
246 unsigned int lp_pause;
247 unsigned int lp_duplex;
206}; 248};
207 249
250#define STMMAC_PCS_PAUSE 1
251#define STMMAC_PCS_ASYM_PAUSE 2
252
208/* DMA HW capabilities */ 253/* DMA HW capabilities */
209struct dma_features { 254struct dma_features {
210 unsigned int mbps_10_100; 255 unsigned int mbps_10_100;
@@ -217,9 +262,9 @@ struct dma_features {
217 unsigned int pmt_remote_wake_up; 262 unsigned int pmt_remote_wake_up;
218 unsigned int pmt_magic_frame; 263 unsigned int pmt_magic_frame;
219 unsigned int rmon; 264 unsigned int rmon;
220 /* IEEE 1588-2002*/ 265 /* IEEE 1588-2002 */
221 unsigned int time_stamp; 266 unsigned int time_stamp;
222 /* IEEE 1588-2008*/ 267 /* IEEE 1588-2008 */
223 unsigned int atime_stamp; 268 unsigned int atime_stamp;
224 /* 802.3az - Energy-Efficient Ethernet (EEE) */ 269 /* 802.3az - Energy-Efficient Ethernet (EEE) */
225 unsigned int eee; 270 unsigned int eee;
@@ -232,7 +277,7 @@ struct dma_features {
232 /* TX and RX number of channels */ 277 /* TX and RX number of channels */
233 unsigned int number_rx_channel; 278 unsigned int number_rx_channel;
234 unsigned int number_tx_channel; 279 unsigned int number_tx_channel;
235 /* Alternate (enhanced) DESC mode*/ 280 /* Alternate (enhanced) DESC mode */
236 unsigned int enh_desc; 281 unsigned int enh_desc;
237}; 282};
238 283
@@ -255,23 +300,26 @@ struct dma_features {
255#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 300#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
256#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 301#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
257 302
303#define STMMAC_CHAIN_MODE 0x1
304#define STMMAC_RING_MODE 0x2
305
258struct stmmac_desc_ops { 306struct stmmac_desc_ops {
259 /* DMA RX descriptor ring initialization */ 307 /* DMA RX descriptor ring initialization */
260 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 308 void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
261 int disable_rx_ic); 309 int end);
262 /* DMA TX descriptor ring initialization */ 310 /* DMA TX descriptor ring initialization */
263 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); 311 void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
264 312
265 /* Invoked by the xmit function to prepare the tx descriptor */ 313 /* Invoked by the xmit function to prepare the tx descriptor */
266 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, 314 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
267 int csum_flag); 315 int csum_flag, int mode);
268 /* Set/get the owner of the descriptor */ 316 /* Set/get the owner of the descriptor */
269 void (*set_tx_owner) (struct dma_desc *p); 317 void (*set_tx_owner) (struct dma_desc *p);
270 int (*get_tx_owner) (struct dma_desc *p); 318 int (*get_tx_owner) (struct dma_desc *p);
271 /* Invoked by the xmit function to close the tx descriptor */ 319 /* Invoked by the xmit function to close the tx descriptor */
272 void (*close_tx_desc) (struct dma_desc *p); 320 void (*close_tx_desc) (struct dma_desc *p);
273 /* Clean the tx descriptor as soon as the tx irq is received */ 321 /* Clean the tx descriptor as soon as the tx irq is received */
274 void (*release_tx_desc) (struct dma_desc *p); 322 void (*release_tx_desc) (struct dma_desc *p, int mode);
275 /* Clear interrupt on tx frame completion. When this bit is 323 /* Clear interrupt on tx frame completion. When this bit is
276 * set an interrupt happens as soon as the frame is transmitted */ 324 * set an interrupt happens as soon as the frame is transmitted */
277 void (*clear_tx_ic) (struct dma_desc *p); 325 void (*clear_tx_ic) (struct dma_desc *p);
@@ -290,12 +338,22 @@ struct stmmac_desc_ops {
290 /* Return the reception status looking at the RDES1 */ 338 /* Return the reception status looking at the RDES1 */
291 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 339 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
292 struct dma_desc *p); 340 struct dma_desc *p);
341 void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
342 struct dma_extended_desc *p);
343 /* Set tx timestamp enable bit */
344 void (*enable_tx_timestamp) (struct dma_desc *p);
345 /* get tx timestamp status */
346 int (*get_tx_timestamp_status) (struct dma_desc *p);
347 /* get timestamp value */
348 u64(*get_timestamp) (void *desc, u32 ats);
349 /* get rx timestamp status */
350 int (*get_rx_timestamp_status) (void *desc, u32 ats);
293}; 351};
294 352
295struct stmmac_dma_ops { 353struct stmmac_dma_ops {
296 /* DMA core initialization */ 354 /* DMA core initialization */
297 int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, 355 int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
298 int burst_len, u32 dma_tx, u32 dma_rx); 356 int burst_len, u32 dma_tx, u32 dma_rx, int atds);
299 /* Dump DMA registers */ 357 /* Dump DMA registers */
300 void (*dump_regs) (void __iomem *ioaddr); 358 void (*dump_regs) (void __iomem *ioaddr);
301 /* Set tx/rx threshold in the csr6 register 359 /* Set tx/rx threshold in the csr6 register
@@ -321,13 +379,14 @@ struct stmmac_dma_ops {
321 379
322struct stmmac_ops { 380struct stmmac_ops {
323 /* MAC core initialization */ 381 /* MAC core initialization */
324 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; 382 void (*core_init) (void __iomem *ioaddr);
325 /* Enable and verify that the IPC module is supported */ 383 /* Enable and verify that the IPC module is supported */
326 int (*rx_ipc) (void __iomem *ioaddr); 384 int (*rx_ipc) (void __iomem *ioaddr);
327 /* Dump MAC registers */ 385 /* Dump MAC registers */
328 void (*dump_regs) (void __iomem *ioaddr); 386 void (*dump_regs) (void __iomem *ioaddr);
329 /* Handle extra events on specific interrupts hw dependent */ 387 /* Handle extra events on specific interrupts hw dependent */
330 int (*host_irq_status) (void __iomem *ioaddr); 388 int (*host_irq_status) (void __iomem *ioaddr,
389 struct stmmac_extra_stats *x);
331 /* Multicast filter setting */ 390 /* Multicast filter setting */
332 void (*set_filter) (struct net_device *dev, int id); 391 void (*set_filter) (struct net_device *dev, int id);
333 /* Flow control setting */ 392 /* Flow control setting */
@@ -344,6 +403,18 @@ struct stmmac_ops {
344 void (*reset_eee_mode) (void __iomem *ioaddr); 403 void (*reset_eee_mode) (void __iomem *ioaddr);
345 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw); 404 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
346 void (*set_eee_pls) (void __iomem *ioaddr, int link); 405 void (*set_eee_pls) (void __iomem *ioaddr, int link);
406 void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
407 void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
408};
409
410struct stmmac_hwtimestamp {
411 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
412 void (*config_sub_second_increment) (void __iomem *ioaddr);
413 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
414 int (*config_addend) (void __iomem *ioaddr, u32 addend);
415 int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
416 int add_sub);
417 u64(*get_systime) (void __iomem *ioaddr);
347}; 418};
348 419
349struct mac_link { 420struct mac_link {
@@ -360,19 +431,28 @@ struct mii_regs {
360struct stmmac_ring_mode_ops { 431struct stmmac_ring_mode_ops {
361 unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 432 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
362 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); 433 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
363 void (*refill_desc3) (int bfsize, struct dma_desc *p); 434 void (*refill_desc3) (void *priv, struct dma_desc *p);
364 void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p); 435 void (*init_desc3) (struct dma_desc *p);
365 void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr, 436 void (*clean_desc3) (void *priv, struct dma_desc *p);
366 unsigned int size);
367 void (*clean_desc3) (struct dma_desc *p);
368 int (*set_16kib_bfsize) (int mtu); 437 int (*set_16kib_bfsize) (int mtu);
369}; 438};
370 439
440struct stmmac_chain_mode_ops {
441 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
442 unsigned int extend_desc);
443 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
444 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
445 void (*refill_desc3) (void *priv, struct dma_desc *p);
446 void (*clean_desc3) (void *priv, struct dma_desc *p);
447};
448
371struct mac_device_info { 449struct mac_device_info {
372 const struct stmmac_ops *mac; 450 const struct stmmac_ops *mac;
373 const struct stmmac_desc_ops *desc; 451 const struct stmmac_desc_ops *desc;
374 const struct stmmac_dma_ops *dma; 452 const struct stmmac_dma_ops *dma;
375 const struct stmmac_ring_mode_ops *ring; 453 const struct stmmac_ring_mode_ops *ring;
454 const struct stmmac_chain_mode_ops *chain;
455 const struct stmmac_hwtimestamp *ptp;
376 struct mii_regs mii; /* MII register Addresses */ 456 struct mii_regs mii; /* MII register Addresses */
377 struct mac_link link; 457 struct mac_link link;
378 unsigned int synopsys_uid; 458 unsigned int synopsys_uid;
@@ -390,5 +470,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
390 470
391extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 471extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
392extern const struct stmmac_ring_mode_ops ring_mode_ops; 472extern const struct stmmac_ring_mode_ops ring_mode_ops;
473extern const struct stmmac_chain_mode_ops chain_mode_ops;
393 474
394#endif /* __COMMON_H__ */ 475#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 223adf95fd03..ad3996038018 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -24,6 +24,7 @@
24#ifndef __DESCS_H__ 24#ifndef __DESCS_H__
25#define __DESCS_H__ 25#define __DESCS_H__
26 26
27/* Basic descriptor structure for normal and alternate descriptors */
27struct dma_desc { 28struct dma_desc {
28 /* Receive descriptor */ 29 /* Receive descriptor */
29 union { 30 union {
@@ -60,7 +61,7 @@ struct dma_desc {
60 } rx; 61 } rx;
61 struct { 62 struct {
62 /* RDES0 */ 63 /* RDES0 */
63 u32 payload_csum_error:1; 64 u32 rx_mac_addr:1;
64 u32 crc_error:1; 65 u32 crc_error:1;
65 u32 dribbling:1; 66 u32 dribbling:1;
66 u32 error_gmii:1; 67 u32 error_gmii:1;
@@ -162,13 +163,57 @@ struct dma_desc {
162 unsigned int des3; 163 unsigned int des3;
163}; 164};
164 165
166/* Extended descriptor structure (supported by new SYNP GMAC generations) */
167struct dma_extended_desc {
168 struct dma_desc basic;
169 union {
170 struct {
171 u32 ip_payload_type:3;
172 u32 ip_hdr_err:1;
173 u32 ip_payload_err:1;
174 u32 ip_csum_bypassed:1;
175 u32 ipv4_pkt_rcvd:1;
176 u32 ipv6_pkt_rcvd:1;
177 u32 msg_type:4;
178 u32 ptp_frame_type:1;
179 u32 ptp_ver:1;
180 u32 timestamp_dropped:1;
181 u32 reserved:1;
182 u32 av_pkt_rcvd:1;
183 u32 av_tagged_pkt_rcvd:1;
184 u32 vlan_tag_priority_val:3;
185 u32 reserved3:3;
186 u32 l3_filter_match:1;
187 u32 l4_filter_match:1;
188 u32 l3_l4_filter_no_match:2;
189 u32 reserved4:4;
190 } erx;
191 struct {
192 u32 reserved;
193 } etx;
194 } des4;
195 unsigned int des5; /* Reserved */
196 unsigned int des6; /* Tx/Rx Timestamp Low */
197 unsigned int des7; /* Tx/Rx Timestamp High */
198};
199
165/* Transmit checksum insertion control */ 200/* Transmit checksum insertion control */
166enum tdes_csum_insertion { 201enum tdes_csum_insertion {
167 cic_disabled = 0, /* Checksum Insertion Control */ 202 cic_disabled = 0, /* Checksum Insertion Control */
168 cic_only_ip = 1, /* Only IP header */ 203 cic_only_ip = 1, /* Only IP header */
169 cic_no_pseudoheader = 2, /* IP header but pseudoheader 204 /* IP header but pseudoheader is not calculated */
170 * is not calculated */ 205 cic_no_pseudoheader = 2,
171 cic_full = 3, /* IP header and pseudoheader */ 206 cic_full = 3, /* IP header and pseudoheader */
172}; 207};
173 208
209/* Extended RDES4 definitions */
210#define RDES_EXT_NO_PTP 0
211#define RDES_EXT_SYNC 0x1
212#define RDES_EXT_FOLLOW_UP 0x2
213#define RDES_EXT_DELAY_REQ 0x3
214#define RDES_EXT_DELAY_RESP 0x4
215#define RDES_EXT_PDELAY_REQ 0x5
216#define RDES_EXT_PDELAY_RESP 0x6
217#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
218
174#endif /* __DESCS_H__ */ 219#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7ee9499a6e38..6f2cc78c5cf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -30,26 +30,28 @@
30#ifndef __DESC_COM_H__ 30#ifndef __DESC_COM_H__
31#define __DESC_COM_H__ 31#define __DESC_COM_H__
32 32
33#if defined(CONFIG_STMMAC_RING) 33/* Specific functions used for Ring mode */
34static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 34
35/* Enhanced descriptors */
36static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
35{ 37{
36 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; 38 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
37 if (end) 39 if (end)
38 p->des01.erx.end_ring = 1; 40 p->des01.erx.end_ring = 1;
39} 41}
40 42
41static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) 43static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
42{ 44{
43 if (end) 45 if (end)
44 p->des01.etx.end_ring = 1; 46 p->des01.etx.end_ring = 1;
45} 47}
46 48
47static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) 49static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
48{ 50{
49 p->des01.etx.end_ring = ter; 51 p->des01.etx.end_ring = ter;
50} 52}
51 53
52static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) 54static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
53{ 55{
54 if (unlikely(len > BUF_SIZE_4KiB)) { 56 if (unlikely(len > BUF_SIZE_4KiB)) {
55 p->des01.etx.buffer1_size = BUF_SIZE_4KiB; 57 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
58 p->des01.etx.buffer1_size = len; 60 p->des01.etx.buffer1_size = len;
59} 61}
60 62
61static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) 63/* Normal descriptors */
64static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
62{ 65{
63 p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; 66 p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
64 if (end) 67 if (end)
65 p->des01.rx.end_ring = 1; 68 p->des01.rx.end_ring = 1;
66} 69}
67 70
68static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end) 71static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
69{ 72{
70 if (end) 73 if (end)
71 p->des01.tx.end_ring = 1; 74 p->des01.tx.end_ring = 1;
72} 75}
73 76
74static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter) 77static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
75{ 78{
76 p->des01.tx.end_ring = ter; 79 p->des01.tx.end_ring = ter;
77} 80}
78 81
79static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) 82static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
80{ 83{
81 if (unlikely(len > BUF_SIZE_2KiB)) { 84 if (unlikely(len > BUF_SIZE_2KiB)) {
82 p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; 85 p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,47 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
85 p->des01.tx.buffer1_size = len; 88 p->des01.tx.buffer1_size = len;
86} 89}
87 90
88#else 91/* Specific functions used for Chain mode */
89 92
90static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 93/* Enhanced descriptors */
94static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
91{ 95{
92 p->des01.erx.second_address_chained = 1; 96 p->des01.erx.second_address_chained = 1;
93} 97}
94 98
95static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) 99static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
96{ 100{
97 p->des01.etx.second_address_chained = 1; 101 p->des01.etx.second_address_chained = 1;
98} 102}
99 103
100static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) 104static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
101{ 105{
102 p->des01.etx.second_address_chained = 1; 106 p->des01.etx.second_address_chained = 1;
103} 107}
104 108
105static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) 109static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
106{ 110{
107 p->des01.etx.buffer1_size = len; 111 p->des01.etx.buffer1_size = len;
108} 112}
109 113
110static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) 114/* Normal descriptors */
115static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
111{ 116{
112 p->des01.rx.second_address_chained = 1; 117 p->des01.rx.second_address_chained = 1;
113} 118}
114 119
115static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size) 120static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
116{ 121{
117 p->des01.tx.second_address_chained = 1; 122 p->des01.tx.second_address_chained = 1;
118} 123}
119 124
120static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter) 125static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
121{ 126{
122 p->des01.tx.second_address_chained = 1; 127 p->des01.tx.second_address_chained = 1;
123} 128}
124 129
125static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) 130static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
126{ 131{
127 p->des01.tx.buffer1_size = len; 132 p->des01.tx.buffer1_size = len;
128} 133}
129#endif
130
131#endif /* __DESC_COM_H__ */ 134#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 7ad56afd6324..c12aabb8cf93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -89,13 +89,46 @@ enum power_event {
89 (reg * 8)) 89 (reg * 8))
90#define GMAC_MAX_PERFECT_ADDRESSES 32 90#define GMAC_MAX_PERFECT_ADDRESSES 32
91 91
92/* PCS registers (AN/TBI/SGMII/RGMII) offset */
92#define GMAC_AN_CTRL 0x000000c0 /* AN control */ 93#define GMAC_AN_CTRL 0x000000c0 /* AN control */
93#define GMAC_AN_STATUS 0x000000c4 /* AN status */ 94#define GMAC_AN_STATUS 0x000000c4 /* AN status */
94#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ 95#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
95#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */ 96#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */
96#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ 97#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
97#define GMAC_TBI 0x000000d4 /* TBI extend status */ 98#define GMAC_TBI 0x000000d4 /* TBI extend status */
98#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ 99#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
100
101/* AN Configuration defines */
102#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
103#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
104#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
105#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
106#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
107#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
108
109/* AN Status defines */
110#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
111#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
112#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
113#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
114
115/* Register 54 (SGMII/RGMII status register) */
116#define GMAC_S_R_GMII_LINK 0x8
117#define GMAC_S_R_GMII_SPEED 0x5
118#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
119#define GMAC_S_R_GMII_MODE 0x1
120#define GMAC_S_R_GMII_SPEED_125 2
121#define GMAC_S_R_GMII_SPEED_25 1
122
123/* Common ADV and LPA defines */
124#define GMAC_ANE_FD (1 << 5)
125#define GMAC_ANE_HD (1 << 6)
126#define GMAC_ANE_PSE (3 << 7)
127#define GMAC_ANE_PSE_SHIFT 7
128
129 /* GMAC Configuration defines */
130#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
131#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
99 132
100/* GMAC Configuration defines */ 133/* GMAC Configuration defines */
101#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ 134#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
@@ -108,19 +141,19 @@ enum inter_frame_gap {
108 GMAC_CONTROL_IFG_80 = 0x00020000, 141 GMAC_CONTROL_IFG_80 = 0x00020000,
109 GMAC_CONTROL_IFG_40 = 0x000e0000, 142 GMAC_CONTROL_IFG_40 = 0x000e0000,
110}; 143};
111#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */ 144#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
112#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ 145#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
113#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ 146#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
114#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ 147#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
115#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ 148#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
116#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ 149#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
117#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ 150#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
118#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ 151#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
119#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ 152#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
120#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */ 153#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
121#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ 154#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
122#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ 155#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
123#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 156#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
124 157
125#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ 158#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
126 GMAC_CONTROL_JE | GMAC_CONTROL_BE) 159 GMAC_CONTROL_JE | GMAC_CONTROL_BE)
@@ -151,15 +184,16 @@ enum inter_frame_gap {
151#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ 184#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
152#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ 185#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
153#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ 186#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
154#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ 187#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
155/* Programmable burst length (passed thorugh platform)*/ 188/* Programmable burst length (passed thorugh platform)*/
156#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ 189#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
157#define DMA_BUS_MODE_PBL_SHIFT 8 190#define DMA_BUS_MODE_PBL_SHIFT 8
191#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
158 192
159enum rx_tx_priority_ratio { 193enum rx_tx_priority_ratio {
160 double_ratio = 0x00004000, /*2:1 */ 194 double_ratio = 0x00004000, /* 2:1 */
161 triple_ratio = 0x00008000, /*3:1 */ 195 triple_ratio = 0x00008000, /* 3:1 */
162 quadruple_ratio = 0x0000c000, /*4:1 */ 196 quadruple_ratio = 0x0000c000, /* 4:1 */
163}; 197};
164 198
165#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ 199#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
@@ -179,9 +213,10 @@ enum rx_tx_priority_ratio {
179#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ 213#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
180 214
181/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ 215/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
182#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */ 216/* Disable Drop TCP/IP csum error */
183#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ 217#define DMA_CONTROL_DT 0x04000000
184#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */ 218#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
219#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */
185/* Threshold for Activating the FC */ 220/* Threshold for Activating the FC */
186enum rfa { 221enum rfa {
187 act_full_minus_1 = 0x00800000, 222 act_full_minus_1 = 0x00800000,
@@ -196,7 +231,7 @@ enum rfd {
196 deac_full_minus_3 = 0x00401000, 231 deac_full_minus_3 = 0x00401000,
197 deac_full_minus_4 = 0x00401800, 232 deac_full_minus_4 = 0x00401800,
198}; 233};
199#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ 234#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
200 235
201enum ttc_control { 236enum ttc_control {
202 DMA_CONTROL_TTC_64 = 0x00000000, 237 DMA_CONTROL_TTC_64 = 0x00000000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index bfe022605498..7e05e8d0f1c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/ethtool.h>
31#include <asm/io.h> 32#include <asm/io.h>
32#include "dwmac1000.h" 33#include "dwmac1000.h"
33 34
@@ -71,22 +72,22 @@ static void dwmac1000_dump_regs(void __iomem *ioaddr)
71} 72}
72 73
73static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, 74static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
74 unsigned int reg_n) 75 unsigned int reg_n)
75{ 76{
76 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 77 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
77 GMAC_ADDR_LOW(reg_n)); 78 GMAC_ADDR_LOW(reg_n));
78} 79}
79 80
80static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, 81static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
81 unsigned int reg_n) 82 unsigned int reg_n)
82{ 83{
83 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 84 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
84 GMAC_ADDR_LOW(reg_n)); 85 GMAC_ADDR_LOW(reg_n));
85} 86}
86 87
87static void dwmac1000_set_filter(struct net_device *dev, int id) 88static void dwmac1000_set_filter(struct net_device *dev, int id)
88{ 89{
89 void __iomem *ioaddr = (void __iomem *) dev->base_addr; 90 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
90 unsigned int value = 0; 91 unsigned int value = 0;
91 unsigned int perfect_addr_number; 92 unsigned int perfect_addr_number;
92 93
@@ -96,7 +97,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
96 if (dev->flags & IFF_PROMISC) 97 if (dev->flags & IFF_PROMISC)
97 value = GMAC_FRAME_FILTER_PR; 98 value = GMAC_FRAME_FILTER_PR;
98 else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) 99 else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
99 || (dev->flags & IFF_ALLMULTI)) { 100 || (dev->flags & IFF_ALLMULTI)) {
100 value = GMAC_FRAME_FILTER_PM; /* pass all multi */ 101 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
101 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH); 102 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
102 writel(0xffffffff, ioaddr + GMAC_HASH_LOW); 103 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
@@ -110,12 +111,13 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
110 memset(mc_filter, 0, sizeof(mc_filter)); 111 memset(mc_filter, 0, sizeof(mc_filter));
111 netdev_for_each_mc_addr(ha, dev) { 112 netdev_for_each_mc_addr(ha, dev) {
112 /* The upper 6 bits of the calculated CRC are used to 113 /* The upper 6 bits of the calculated CRC are used to
113 index the contens of the hash table */ 114 * index the contens of the hash table
114 int bit_nr = 115 */
115 bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; 116 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
116 /* The most significant bit determines the register to 117 /* The most significant bit determines the register to
117 * use (H/L) while the other 5 bits determine the bit 118 * use (H/L) while the other 5 bits determine the bit
118 * within the register. */ 119 * within the register.
120 */
119 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 121 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
120 } 122 }
121 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW); 123 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
@@ -128,10 +130,11 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
128 else 130 else
129 perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2; 131 perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
130 132
131 /* Handle multiple unicast addresses (perfect filtering)*/ 133 /* Handle multiple unicast addresses (perfect filtering) */
132 if (netdev_uc_count(dev) > perfect_addr_number) 134 if (netdev_uc_count(dev) > perfect_addr_number)
133 /* Switch to promiscuous mode is more than 16 addrs 135 /* Switch to promiscuous mode if more than 16 addrs
134 are required */ 136 * are required
137 */
135 value |= GMAC_FRAME_FILTER_PR; 138 value |= GMAC_FRAME_FILTER_PR;
136 else { 139 else {
137 int reg = 1; 140 int reg = 1;
@@ -149,13 +152,13 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
149#endif 152#endif
150 writel(value, ioaddr + GMAC_FRAME_FILTER); 153 writel(value, ioaddr + GMAC_FRAME_FILTER);
151 154
152 CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " 155 CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
153 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), 156 readl(ioaddr + GMAC_FRAME_FILTER),
154 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 157 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
155} 158}
156 159
157static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, 160static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
158 unsigned int fc, unsigned int pause_time) 161 unsigned int fc, unsigned int pause_time)
159{ 162{
160 unsigned int flow = 0; 163 unsigned int flow = 0;
161 164
@@ -193,74 +196,106 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
193 writel(pmt, ioaddr + GMAC_PMT); 196 writel(pmt, ioaddr + GMAC_PMT);
194} 197}
195 198
196 199static int dwmac1000_irq_status(void __iomem *ioaddr,
197static int dwmac1000_irq_status(void __iomem *ioaddr) 200 struct stmmac_extra_stats *x)
198{ 201{
199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 202 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
200 int status = 0; 203 int ret = 0;
201 204
202 /* Not used events (e.g. MMC interrupts) are not handled. */ 205 /* Not used events (e.g. MMC interrupts) are not handled. */
203 if ((intr_status & mmc_tx_irq)) { 206 if ((intr_status & mmc_tx_irq)) {
204 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n", 207 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
205 readl(ioaddr + GMAC_MMC_TX_INTR)); 208 readl(ioaddr + GMAC_MMC_TX_INTR));
206 status |= core_mmc_tx_irq; 209 x->mmc_tx_irq_n++;
207 } 210 }
208 if (unlikely(intr_status & mmc_rx_irq)) { 211 if (unlikely(intr_status & mmc_rx_irq)) {
209 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n", 212 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
210 readl(ioaddr + GMAC_MMC_RX_INTR)); 213 readl(ioaddr + GMAC_MMC_RX_INTR));
211 status |= core_mmc_rx_irq; 214 x->mmc_rx_irq_n++;
212 } 215 }
213 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) { 216 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
214 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n", 217 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
215 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 218 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
216 status |= core_mmc_rx_csum_offload_irq; 219 x->mmc_rx_csum_offload_irq_n++;
217 } 220 }
218 if (unlikely(intr_status & pmt_irq)) { 221 if (unlikely(intr_status & pmt_irq)) {
219 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n"); 222 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
220 /* clear the PMT bits 5 and 6 by reading the PMT 223 /* clear the PMT bits 5 and 6 by reading the PMT status reg */
221 * status register. */
222 readl(ioaddr + GMAC_PMT); 224 readl(ioaddr + GMAC_PMT);
223 status |= core_irq_receive_pmt_irq; 225 x->irq_receive_pmt_irq_n++;
224 } 226 }
225 /* MAC trx/rx EEE LPI entry/exit interrupts */ 227 /* MAC trx/rx EEE LPI entry/exit interrupts */
226 if (intr_status & lpiis_irq) { 228 if (intr_status & lpiis_irq) {
227 /* Clean LPI interrupt by reading the Reg 12 */ 229 /* Clean LPI interrupt by reading the Reg 12 */
228 u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS); 230 ret = readl(ioaddr + LPI_CTRL_STATUS);
229 231
230 if (lpi_status & LPI_CTRL_STATUS_TLPIEN) { 232 if (ret & LPI_CTRL_STATUS_TLPIEN) {
231 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n"); 233 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
232 status |= core_irq_tx_path_in_lpi_mode; 234 x->irq_tx_path_in_lpi_mode_n++;
233 } 235 }
234 if (lpi_status & LPI_CTRL_STATUS_TLPIEX) { 236 if (ret & LPI_CTRL_STATUS_TLPIEX) {
235 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n"); 237 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
236 status |= core_irq_tx_path_exit_lpi_mode; 238 x->irq_tx_path_exit_lpi_mode_n++;
237 } 239 }
238 if (lpi_status & LPI_CTRL_STATUS_RLPIEN) { 240 if (ret & LPI_CTRL_STATUS_RLPIEN) {
239 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n"); 241 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
240 status |= core_irq_rx_path_in_lpi_mode; 242 x->irq_rx_path_in_lpi_mode_n++;
241 } 243 }
242 if (lpi_status & LPI_CTRL_STATUS_RLPIEX) { 244 if (ret & LPI_CTRL_STATUS_RLPIEX) {
243 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n"); 245 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
244 status |= core_irq_rx_path_exit_lpi_mode; 246 x->irq_rx_path_exit_lpi_mode_n++;
247 }
248 }
249
250 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
251 CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
252 readl(ioaddr + GMAC_AN_STATUS);
253 x->irq_pcs_ane_n++;
254 }
255 if (intr_status & rgmii_irq) {
256 u32 status = readl(ioaddr + GMAC_S_R_GMII);
257 CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
258 x->irq_rgmii_n++;
259
260 /* Save and dump the link status. */
261 if (status & GMAC_S_R_GMII_LINK) {
262 int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
263 GMAC_S_R_GMII_SPEED_SHIFT;
264 x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
265
266 if (speed_value == GMAC_S_R_GMII_SPEED_125)
267 x->pcs_speed = SPEED_1000;
268 else if (speed_value == GMAC_S_R_GMII_SPEED_25)
269 x->pcs_speed = SPEED_100;
270 else
271 x->pcs_speed = SPEED_10;
272
273 x->pcs_link = 1;
274 pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed,
275 x->pcs_duplex ? "Full" : "Half");
276 } else {
277 x->pcs_link = 0;
278 pr_debug("Link is Down\n");
245 } 279 }
246 } 280 }
247 281
248 return status; 282 return ret;
249} 283}
250 284
251static void dwmac1000_set_eee_mode(void __iomem *ioaddr) 285static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
252{ 286{
253 u32 value; 287 u32 value;
254 288
255 /* Enable the link status receive on RGMII, SGMII ore SMII 289 /* Enable the link status receive on RGMII, SGMII ore SMII
256 * receive path and instruct the transmit to enter in LPI 290 * receive path and instruct the transmit to enter in LPI
257 * state. */ 291 * state.
292 */
258 value = readl(ioaddr + LPI_CTRL_STATUS); 293 value = readl(ioaddr + LPI_CTRL_STATUS);
259 value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; 294 value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
260 writel(value, ioaddr + LPI_CTRL_STATUS); 295 writel(value, ioaddr + LPI_CTRL_STATUS);
261} 296}
262 297
263static void dwmac1000_reset_eee_mode(void __iomem *ioaddr) 298static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
264{ 299{
265 u32 value; 300 u32 value;
266 301
@@ -269,7 +304,7 @@ static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
269 writel(value, ioaddr + LPI_CTRL_STATUS); 304 writel(value, ioaddr + LPI_CTRL_STATUS);
270} 305}
271 306
272static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link) 307static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
273{ 308{
274 u32 value; 309 u32 value;
275 310
@@ -283,7 +318,7 @@ static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
283 writel(value, ioaddr + LPI_CTRL_STATUS); 318 writel(value, ioaddr + LPI_CTRL_STATUS);
284} 319}
285 320
286static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw) 321static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
287{ 322{
288 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); 323 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
289 324
@@ -297,6 +332,41 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
297 writel(value, ioaddr + LPI_TIMER_CTRL); 332 writel(value, ioaddr + LPI_TIMER_CTRL);
298} 333}
299 334
335static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
336{
337 u32 value;
338
339 value = readl(ioaddr + GMAC_AN_CTRL);
340 /* auto negotiation enable and External Loopback enable */
341 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
342
343 if (restart)
344 value |= GMAC_AN_CTRL_RAN;
345
346 writel(value, ioaddr + GMAC_AN_CTRL);
347}
348
349static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
350{
351 u32 value = readl(ioaddr + GMAC_ANE_ADV);
352
353 if (value & GMAC_ANE_FD)
354 adv->duplex = DUPLEX_FULL;
355 if (value & GMAC_ANE_HD)
356 adv->duplex |= DUPLEX_HALF;
357
358 adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
359
360 value = readl(ioaddr + GMAC_ANE_LPA);
361
362 if (value & GMAC_ANE_FD)
363 adv->lp_duplex = DUPLEX_FULL;
364 if (value & GMAC_ANE_HD)
365 adv->lp_duplex = DUPLEX_HALF;
366
367 adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
368}
369
300static const struct stmmac_ops dwmac1000_ops = { 370static const struct stmmac_ops dwmac1000_ops = {
301 .core_init = dwmac1000_core_init, 371 .core_init = dwmac1000_core_init,
302 .rx_ipc = dwmac1000_rx_ipc_enable, 372 .rx_ipc = dwmac1000_rx_ipc_enable,
@@ -307,10 +377,12 @@ static const struct stmmac_ops dwmac1000_ops = {
307 .pmt = dwmac1000_pmt, 377 .pmt = dwmac1000_pmt,
308 .set_umac_addr = dwmac1000_set_umac_addr, 378 .set_umac_addr = dwmac1000_set_umac_addr,
309 .get_umac_addr = dwmac1000_get_umac_addr, 379 .get_umac_addr = dwmac1000_get_umac_addr,
310 .set_eee_mode = dwmac1000_set_eee_mode, 380 .set_eee_mode = dwmac1000_set_eee_mode,
311 .reset_eee_mode = dwmac1000_reset_eee_mode, 381 .reset_eee_mode = dwmac1000_reset_eee_mode,
312 .set_eee_timer = dwmac1000_set_eee_timer, 382 .set_eee_timer = dwmac1000_set_eee_timer,
313 .set_eee_pls = dwmac1000_set_eee_pls, 383 .set_eee_pls = dwmac1000_set_eee_pls,
384 .ctrl_ane = dwmac1000_ctrl_ane,
385 .get_adv = dwmac1000_get_adv,
314}; 386};
315 387
316struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) 388struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index bf83c03bfd06..2c431b616058 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
30#include "dwmac1000.h" 30#include "dwmac1000.h"
31#include "dwmac_dma.h" 31#include "dwmac_dma.h"
32 32
33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, 33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
34 int mb, int burst_len, u32 dma_tx, u32 dma_rx) 34 int burst_len, u32 dma_tx, u32 dma_rx, int atds)
35{ 35{
36 u32 value = readl(ioaddr + DMA_BUS_MODE); 36 u32 value = readl(ioaddr + DMA_BUS_MODE);
37 int limit; 37 int limit;
@@ -60,7 +60,7 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
60 * depending on pbl value. 60 * depending on pbl value.
61 */ 61 */
62 value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 62 value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
63 (pbl << DMA_BUS_MODE_RPBL_SHIFT)); 63 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
64 64
65 /* Set the Fixed burst mode */ 65 /* Set the Fixed burst mode */
66 if (fb) 66 if (fb)
@@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
73#ifdef CONFIG_STMMAC_DA 73#ifdef CONFIG_STMMAC_DA
74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ 74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
75#endif 75#endif
76
77 if (atds)
78 value |= DMA_BUS_MODE_ATDS;
79
76 writel(value, ioaddr + DMA_BUS_MODE); 80 writel(value, ioaddr + DMA_BUS_MODE);
77 81
78 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE 82 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
@@ -90,14 +94,16 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
90 * 94 *
91 * For Non Fixed Burst Mode: provide the maximum value of the 95 * For Non Fixed Burst Mode: provide the maximum value of the
92 * burst length. Any burst equal or below the provided burst 96 * burst length. Any burst equal or below the provided burst
93 * length would be allowed to perform. */ 97 * length would be allowed to perform.
98 */
94 writel(burst_len, ioaddr + DMA_AXI_BUS_MODE); 99 writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
95 100
96 /* Mask interrupts by writing to CSR7 */ 101 /* Mask interrupts by writing to CSR7 */
97 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 102 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
98 103
99 /* The base address of the RX/TX descriptor lists must be written into 104 /* RX/TX descriptor base address lists must be written into
100 * DMA CSR3 and CSR4, respectively. */ 105 * DMA CSR3 and CSR4, respectively
106 */
101 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); 107 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
102 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); 108 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
103 109
@@ -105,7 +111,7 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
105} 111}
106 112
107static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, 113static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
108 int rxmode) 114 int rxmode)
109{ 115{
110 u32 csr6 = readl(ioaddr + DMA_CONTROL); 116 u32 csr6 = readl(ioaddr + DMA_CONTROL);
111 117
@@ -114,11 +120,12 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
114 /* Transmit COE type 2 cannot be done in cut-through mode. */ 120 /* Transmit COE type 2 cannot be done in cut-through mode. */
115 csr6 |= DMA_CONTROL_TSF; 121 csr6 |= DMA_CONTROL_TSF;
116 /* Operating on second frame increase the performance 122 /* Operating on second frame increase the performance
117 * especially when transmit store-and-forward is used.*/ 123 * especially when transmit store-and-forward is used.
124 */
118 csr6 |= DMA_CONTROL_OSF; 125 csr6 |= DMA_CONTROL_OSF;
119 } else { 126 } else {
120 CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" 127 CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n",
121 " (threshold = %d)\n", txmode); 128 txmode);
122 csr6 &= ~DMA_CONTROL_TSF; 129 csr6 &= ~DMA_CONTROL_TSF;
123 csr6 &= DMA_CONTROL_TC_TX_MASK; 130 csr6 &= DMA_CONTROL_TC_TX_MASK;
124 /* Set the transmit threshold */ 131 /* Set the transmit threshold */
@@ -138,8 +145,8 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
138 CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); 145 CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
139 csr6 |= DMA_CONTROL_RSF; 146 csr6 |= DMA_CONTROL_RSF;
140 } else { 147 } else {
141 CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" 148 CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n",
142 " (threshold = %d)\n", rxmode); 149 rxmode);
143 csr6 &= ~DMA_CONTROL_RSF; 150 csr6 &= ~DMA_CONTROL_RSF;
144 csr6 &= DMA_CONTROL_TC_RX_MASK; 151 csr6 &= DMA_CONTROL_TC_RX_MASK;
145 if (rxmode <= 32) 152 if (rxmode <= 32)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f83210e7c221..007bb2be3f10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -47,8 +47,7 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
47{ 47{
48 pr_info("\t----------------------------------------------\n" 48 pr_info("\t----------------------------------------------\n"
49 "\t DWMAC 100 CSR (base addr = 0x%p)\n" 49 "\t DWMAC 100 CSR (base addr = 0x%p)\n"
50 "\t----------------------------------------------\n", 50 "\t----------------------------------------------\n", ioaddr);
51 ioaddr);
52 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, 51 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
53 readl(ioaddr + MAC_CONTROL)); 52 readl(ioaddr + MAC_CONTROL));
54 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, 53 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -72,7 +71,8 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
72 return 0; 71 return 0;
73} 72}
74 73
75static int dwmac100_irq_status(void __iomem *ioaddr) 74static int dwmac100_irq_status(void __iomem *ioaddr,
75 struct stmmac_extra_stats *x)
76{ 76{
77 return 0; 77 return 0;
78} 78}
@@ -91,7 +91,7 @@ static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
91 91
92static void dwmac100_set_filter(struct net_device *dev, int id) 92static void dwmac100_set_filter(struct net_device *dev, int id)
93{ 93{
94 void __iomem *ioaddr = (void __iomem *) dev->base_addr; 94 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
95 u32 value = readl(ioaddr + MAC_CONTROL); 95 u32 value = readl(ioaddr + MAC_CONTROL);
96 96
97 if (dev->flags & IFF_PROMISC) { 97 if (dev->flags & IFF_PROMISC) {
@@ -112,7 +112,8 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
112 struct netdev_hw_addr *ha; 112 struct netdev_hw_addr *ha;
113 113
114 /* Perfect filter mode for physical address and Hash 114 /* Perfect filter mode for physical address and Hash
115 filter for multicast */ 115 * filter for multicast
116 */
116 value |= MAC_CONTROL_HP; 117 value |= MAC_CONTROL_HP;
117 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | 118 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
118 MAC_CONTROL_IF | MAC_CONTROL_HO); 119 MAC_CONTROL_IF | MAC_CONTROL_HO);
@@ -120,12 +121,13 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
120 memset(mc_filter, 0, sizeof(mc_filter)); 121 memset(mc_filter, 0, sizeof(mc_filter));
121 netdev_for_each_mc_addr(ha, dev) { 122 netdev_for_each_mc_addr(ha, dev) {
122 /* The upper 6 bits of the calculated CRC are used to 123 /* The upper 6 bits of the calculated CRC are used to
123 * index the contens of the hash table */ 124 * index the contens of the hash table
124 int bit_nr = 125 */
125 ether_crc(ETH_ALEN, ha->addr) >> 26; 126 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
126 /* The most significant bit determines the register to 127 /* The most significant bit determines the register to
127 * use (H/L) while the other 5 bits determine the bit 128 * use (H/L) while the other 5 bits determine the bit
128 * within the register. */ 129 * within the register.
130 */
129 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 131 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
130 } 132 }
131 writel(mc_filter[0], ioaddr + MAC_HASH_LOW); 133 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
@@ -134,10 +136,9 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
134 136
135 writel(value, ioaddr + MAC_CONTROL); 137 writel(value, ioaddr + MAC_CONTROL);
136 138
137 CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " 139 CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n",
138 "HI 0x%08x, LO 0x%08x\n", 140 __func__, readl(ioaddr + MAC_CONTROL),
139 __func__, readl(ioaddr + MAC_CONTROL), 141 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
140 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
141} 142}
142 143
143static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, 144static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
@@ -150,9 +151,7 @@ static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
150 writel(flow, ioaddr + MAC_FLOW_CTRL); 151 writel(flow, ioaddr + MAC_FLOW_CTRL);
151} 152}
152 153
153/* No PMT module supported for this Ethernet Controller. 154/* No PMT module supported on ST boards with this Eth chip. */
154 * Tested on ST platforms only.
155 */
156static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode) 155static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
157{ 156{
158 return; 157 return;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c2b4d55a79b6..67551c154138 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
32#include "dwmac100.h" 32#include "dwmac100.h"
33#include "dwmac_dma.h" 33#include "dwmac_dma.h"
34 34
35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, 35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
36 int mb, int burst_len, u32 dma_tx, u32 dma_rx) 36 int burst_len, u32 dma_tx, u32 dma_rx, int atds)
37{ 37{
38 u32 value = readl(ioaddr + DMA_BUS_MODE); 38 u32 value = readl(ioaddr + DMA_BUS_MODE);
39 int limit; 39 int limit;
@@ -52,22 +52,25 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
52 52
53 /* Enable Application Access by writing to DMA CSR0 */ 53 /* Enable Application Access by writing to DMA CSR0 */
54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
55 ioaddr + DMA_BUS_MODE); 55 ioaddr + DMA_BUS_MODE);
56 56
57 /* Mask interrupts by writing to CSR7 */ 57 /* Mask interrupts by writing to CSR7 */
58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
59 59
60 /* The base address of the RX/TX descriptor lists must be written into 60 /* RX/TX descriptor base addr lists must be written into
61 * DMA CSR3 and CSR4, respectively. */ 61 * DMA CSR3 and CSR4, respectively
62 */
62 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); 63 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
63 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); 64 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
64 65
65 return 0; 66 return 0;
66} 67}
67 68
68/* Store and Forward capability is not used at all.. 69/* Store and Forward capability is not used at all.
69 * The transmit threshold can be programmed by 70 *
70 * setting the TTC bits in the DMA control register.*/ 71 * The transmit threshold can be programmed by setting the TTC bits in the DMA
72 * control register.
73 */
71static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, 74static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
72 int rxmode) 75 int rxmode)
73{ 76{
@@ -90,16 +93,15 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
90 CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); 93 CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
91 for (i = 0; i < 9; i++) 94 for (i = 0; i < 9; i++)
92 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, 95 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
93 (DMA_BUS_MODE + i * 4), 96 (DMA_BUS_MODE + i * 4),
94 readl(ioaddr + DMA_BUS_MODE + i * 4)); 97 readl(ioaddr + DMA_BUS_MODE + i * 4));
95 CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", 98 CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
96 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); 99 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
97 CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", 100 CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
98 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); 101 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
99} 102}
100 103
101/* DMA controller has two counters to track the number of 104/* DMA controller has two counters to track the number of the missed frames. */
102 * the receive missed frames. */
103static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 105static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
104 void __iomem *ioaddr) 106 void __iomem *ioaddr)
105{ 107{
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index ab4896ecac1c..8e5662ce488b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -102,7 +102,7 @@
102#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ 102#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
103#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ 103#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
104#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 104#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
105#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 105#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
106 106
107extern void dwmac_enable_dma_transmission(void __iomem *ioaddr); 107extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
108extern void dwmac_enable_dma_irq(void __iomem *ioaddr); 108extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
@@ -112,6 +112,6 @@ extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
112extern void dwmac_dma_start_rx(void __iomem *ioaddr); 112extern void dwmac_dma_start_rx(void __iomem *ioaddr);
113extern void dwmac_dma_stop_rx(void __iomem *ioaddr); 113extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
114extern int dwmac_dma_interrupt(void __iomem *ioaddr, 114extern int dwmac_dma_interrupt(void __iomem *ioaddr,
115 struct stmmac_extra_stats *x); 115 struct stmmac_extra_stats *x);
116 116
117#endif /* __DWMAC_DMA_H__ */ 117#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97a..0fbc8fafa706 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
150 return ret; 150 return ret;
151} 151}
152 152
153static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
154 struct dma_extended_desc *p)
155{
156 if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
157 if (p->des4.erx.ip_hdr_err)
158 x->ip_hdr_err++;
159 if (p->des4.erx.ip_payload_err)
160 x->ip_payload_err++;
161 if (p->des4.erx.ip_csum_bypassed)
162 x->ip_csum_bypassed++;
163 if (p->des4.erx.ipv4_pkt_rcvd)
164 x->ipv4_pkt_rcvd++;
165 if (p->des4.erx.ipv6_pkt_rcvd)
166 x->ipv6_pkt_rcvd++;
167 if (p->des4.erx.msg_type == RDES_EXT_SYNC)
168 x->rx_msg_type_sync++;
169 else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
170 x->rx_msg_type_follow_up++;
171 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
172 x->rx_msg_type_delay_req++;
173 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
174 x->rx_msg_type_delay_resp++;
175 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
176 x->rx_msg_type_pdelay_req++;
177 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
178 x->rx_msg_type_pdelay_resp++;
179 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
180 x->rx_msg_type_pdelay_follow_up++;
181 else
182 x->rx_msg_type_ext_no_ptp++;
183 if (p->des4.erx.ptp_frame_type)
184 x->ptp_frame_type++;
185 if (p->des4.erx.ptp_ver)
186 x->ptp_ver++;
187 if (p->des4.erx.timestamp_dropped)
188 x->timestamp_dropped++;
189 if (p->des4.erx.av_pkt_rcvd)
190 x->av_pkt_rcvd++;
191 if (p->des4.erx.av_tagged_pkt_rcvd)
192 x->av_tagged_pkt_rcvd++;
193 if (p->des4.erx.vlan_tag_priority_val)
194 x->vlan_tag_priority_val++;
195 if (p->des4.erx.l3_filter_match)
196 x->l3_filter_match++;
197 if (p->des4.erx.l4_filter_match)
198 x->l4_filter_match++;
199 if (p->des4.erx.l3_l4_filter_no_match)
200 x->l3_l4_filter_no_match++;
201 }
202}
203
153static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, 204static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
154 struct dma_desc *p) 205 struct dma_desc *p)
155{ 206{
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
198 * At any rate, we need to understand if the CSUM hw computation is ok 249 * At any rate, we need to understand if the CSUM hw computation is ok
199 * and report this info to the upper layers. */ 250 * and report this info to the upper layers. */
200 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, 251 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
201 p->des01.erx.frame_type, p->des01.erx.payload_csum_error); 252 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
202 253
203 if (unlikely(p->des01.erx.dribbling)) { 254 if (unlikely(p->des01.erx.dribbling)) {
204 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); 255 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
@@ -225,34 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
225 x->rx_vlan++; 276 x->rx_vlan++;
226 } 277 }
227#endif 278#endif
279
228 return ret; 280 return ret;
229} 281}
230 282
231static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 283static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
232 int disable_rx_ic) 284 int mode, int end)
233{ 285{
234 int i; 286 p->des01.erx.own = 1;
235 for (i = 0; i < ring_size; i++) { 287 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
236 p->des01.erx.own = 1;
237 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
238 288
239 ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1)); 289 if (mode == STMMAC_CHAIN_MODE)
290 ehn_desc_rx_set_on_chain(p, end);
291 else
292 ehn_desc_rx_set_on_ring(p, end);
240 293
241 if (disable_rx_ic) 294 if (disable_rx_ic)
242 p->des01.erx.disable_ic = 1; 295 p->des01.erx.disable_ic = 1;
243 p++;
244 }
245} 296}
246 297
247static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 298static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
248{ 299{
249 int i; 300 p->des01.etx.own = 0;
250 301 if (mode == STMMAC_CHAIN_MODE)
251 for (i = 0; i < ring_size; i++) { 302 ehn_desc_tx_set_on_chain(p, end);
252 p->des01.etx.own = 0; 303 else
253 ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1)); 304 ehn_desc_tx_set_on_ring(p, end);
254 p++;
255 }
256} 305}
257 306
258static int enh_desc_get_tx_owner(struct dma_desc *p) 307static int enh_desc_get_tx_owner(struct dma_desc *p)
@@ -280,20 +329,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
280 return p->des01.etx.last_segment; 329 return p->des01.etx.last_segment;
281} 330}
282 331
283static void enh_desc_release_tx_desc(struct dma_desc *p) 332static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
284{ 333{
285 int ter = p->des01.etx.end_ring; 334 int ter = p->des01.etx.end_ring;
286 335
287 memset(p, 0, offsetof(struct dma_desc, des2)); 336 memset(p, 0, offsetof(struct dma_desc, des2));
288 enh_desc_end_tx_desc(p, ter); 337 if (mode == STMMAC_CHAIN_MODE)
338 enh_desc_end_tx_desc_on_chain(p, ter);
339 else
340 enh_desc_end_tx_desc_on_ring(p, ter);
289} 341}
290 342
291static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 343static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
292 int csum_flag) 344 int csum_flag, int mode)
293{ 345{
294 p->des01.etx.first_segment = is_fs; 346 p->des01.etx.first_segment = is_fs;
295 347
296 enh_set_tx_desc_len(p, len); 348 if (mode == STMMAC_CHAIN_MODE)
349 enh_set_tx_desc_len_on_chain(p, len);
350 else
351 enh_set_tx_desc_len_on_ring(p, len);
297 352
298 if (likely(csum_flag)) 353 if (likely(csum_flag))
299 p->des01.etx.checksum_insertion = cic_full; 354 p->des01.etx.checksum_insertion = cic_full;
@@ -323,6 +378,49 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
323 return p->des01.erx.frame_length; 378 return p->des01.erx.frame_length;
324} 379}
325 380
381static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
382{
383 p->des01.etx.time_stamp_enable = 1;
384}
385
386static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
387{
388 return p->des01.etx.time_stamp_status;
389}
390
391static u64 enh_desc_get_timestamp(void *desc, u32 ats)
392{
393 u64 ns;
394
395 if (ats) {
396 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
397 ns = p->des6;
398 /* convert high/sec time stamp value to nanosecond */
399 ns += p->des7 * 1000000000ULL;
400 } else {
401 struct dma_desc *p = (struct dma_desc *)desc;
402 ns = p->des2;
403 ns += p->des3 * 1000000000ULL;
404 }
405
406 return ns;
407}
408
409static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
410{
411 if (ats) {
412 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
413 return p->basic.des01.erx.ipc_csum_error;
414 } else {
415 struct dma_desc *p = (struct dma_desc *)desc;
416 if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
417 /* timestamp is corrupted, hence don't store it */
418 return 0;
419 else
420 return 1;
421 }
422}
423
326const struct stmmac_desc_ops enh_desc_ops = { 424const struct stmmac_desc_ops enh_desc_ops = {
327 .tx_status = enh_desc_get_tx_status, 425 .tx_status = enh_desc_get_tx_status,
328 .rx_status = enh_desc_get_rx_status, 426 .rx_status = enh_desc_get_rx_status,
@@ -339,4 +437,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
339 .set_tx_owner = enh_desc_set_tx_owner, 437 .set_tx_owner = enh_desc_set_tx_owner,
340 .set_rx_owner = enh_desc_set_rx_owner, 438 .set_rx_owner = enh_desc_set_rx_owner,
341 .get_rx_frame_len = enh_desc_get_rx_frame_len, 439 .get_rx_frame_len = enh_desc_get_rx_frame_len,
440 .rx_extended_status = enh_desc_get_ext_status,
441 .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
442 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
443 .get_timestamp = enh_desc_get_timestamp,
444 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
342}; 445};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 67995ef25251..48ec001566b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -28,8 +28,7 @@
28/* MMC control register */ 28/* MMC control register */
29/* When set, all counter are reset */ 29/* When set, all counter are reset */
30#define MMC_CNTRL_COUNTER_RESET 0x1 30#define MMC_CNTRL_COUNTER_RESET 0x1
31/* When set, do not roll over zero 31/* When set, do not roll over zero after reaching the max value*/
32 * after reaching the max value*/
33#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2 32#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
34#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */ 33#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
35#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the 34#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68962c549a2d..11775b99afc5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -79,8 +79,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
79 struct net_device_stats *stats = (struct net_device_stats *)data; 79 struct net_device_stats *stats = (struct net_device_stats *)data;
80 80
81 if (unlikely(p->des01.rx.last_descriptor == 0)) { 81 if (unlikely(p->des01.rx.last_descriptor == 0)) {
82 pr_warning("ndesc Error: Oversized Ethernet " 82 pr_warn("%s: Oversized frame spanned multiple buffers\n",
83 "frame spanned multiple buffers\n"); 83 __func__);
84 stats->rx_length_errors++; 84 stats->rx_length_errors++;
85 return discard_frame; 85 return discard_frame;
86 } 86 }
@@ -122,30 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
122 return ret; 122 return ret;
123} 123}
124 124
125static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 125static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
126 int disable_rx_ic) 126 int end)
127{ 127{
128 int i; 128 p->des01.rx.own = 1;
129 for (i = 0; i < ring_size; i++) { 129 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
130 p->des01.rx.own = 1;
131 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
132 130
133 ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1)); 131 if (mode == STMMAC_CHAIN_MODE)
132 ndesc_rx_set_on_chain(p, end);
133 else
134 ndesc_rx_set_on_ring(p, end);
134 135
135 if (disable_rx_ic) 136 if (disable_rx_ic)
136 p->des01.rx.disable_ic = 1; 137 p->des01.rx.disable_ic = 1;
137 p++;
138 }
139} 138}
140 139
141static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 140static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
142{ 141{
143 int i; 142 p->des01.tx.own = 0;
144 for (i = 0; i < ring_size; i++) { 143 if (mode == STMMAC_CHAIN_MODE)
145 p->des01.tx.own = 0; 144 ndesc_tx_set_on_chain(p, end);
146 ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1))); 145 else
147 p++; 146 ndesc_tx_set_on_ring(p, end);
148 }
149} 147}
150 148
151static int ndesc_get_tx_owner(struct dma_desc *p) 149static int ndesc_get_tx_owner(struct dma_desc *p)
@@ -173,19 +171,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
173 return p->des01.tx.last_segment; 171 return p->des01.tx.last_segment;
174} 172}
175 173
176static void ndesc_release_tx_desc(struct dma_desc *p) 174static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
177{ 175{
178 int ter = p->des01.tx.end_ring; 176 int ter = p->des01.tx.end_ring;
179 177
180 memset(p, 0, offsetof(struct dma_desc, des2)); 178 memset(p, 0, offsetof(struct dma_desc, des2));
181 ndesc_end_tx_desc(p, ter); 179 if (mode == STMMAC_CHAIN_MODE)
180 ndesc_end_tx_desc_on_chain(p, ter);
181 else
182 ndesc_end_tx_desc_on_ring(p, ter);
182} 183}
183 184
184static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 185static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
185 int csum_flag) 186 int csum_flag, int mode)
186{ 187{
187 p->des01.tx.first_segment = is_fs; 188 p->des01.tx.first_segment = is_fs;
188 norm_set_tx_desc_len(p, len); 189 if (mode == STMMAC_CHAIN_MODE)
190 norm_set_tx_desc_len_on_chain(p, len);
191 else
192 norm_set_tx_desc_len_on_ring(p, len);
189 193
190 if (likely(csum_flag)) 194 if (likely(csum_flag))
191 p->des01.tx.checksum_insertion = cic_full; 195 p->des01.tx.checksum_insertion = cic_full;
@@ -215,6 +219,39 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
215 return p->des01.rx.frame_length; 219 return p->des01.rx.frame_length;
216} 220}
217 221
222static void ndesc_enable_tx_timestamp(struct dma_desc *p)
223{
224 p->des01.tx.time_stamp_enable = 1;
225}
226
227static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
228{
229 return p->des01.tx.time_stamp_status;
230}
231
232static u64 ndesc_get_timestamp(void *desc, u32 ats)
233{
234 struct dma_desc *p = (struct dma_desc *)desc;
235 u64 ns;
236
237 ns = p->des2;
238 /* convert high/sec time stamp value to nanosecond */
239 ns += p->des3 * 1000000000ULL;
240
241 return ns;
242}
243
244static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
245{
246 struct dma_desc *p = (struct dma_desc *)desc;
247
248 if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
249 /* timestamp is corrupted, hence don't store it */
250 return 0;
251 else
252 return 1;
253}
254
218const struct stmmac_desc_ops ndesc_ops = { 255const struct stmmac_desc_ops ndesc_ops = {
219 .tx_status = ndesc_get_tx_status, 256 .tx_status = ndesc_get_tx_status,
220 .rx_status = ndesc_get_rx_status, 257 .rx_status = ndesc_get_rx_status,
@@ -231,4 +268,8 @@ const struct stmmac_desc_ops ndesc_ops = {
231 .set_tx_owner = ndesc_set_tx_owner, 268 .set_tx_owner = ndesc_set_tx_owner,
232 .set_rx_owner = ndesc_set_rx_owner, 269 .set_rx_owner = ndesc_set_rx_owner,
233 .get_rx_frame_len = ndesc_get_rx_frame_len, 270 .get_rx_frame_len = ndesc_get_rx_frame_len,
271 .enable_tx_timestamp = ndesc_enable_tx_timestamp,
272 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
273 .get_timestamp = ndesc_get_timestamp,
274 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
234}; 275};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 4b785e10f2ed..c9d942a5c335 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -30,7 +30,7 @@
30 30
31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *) p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
35 unsigned int entry = priv->cur_tx % txsize; 35 unsigned int entry = priv->cur_tx % txsize;
36 struct dma_desc *desc = priv->dma_tx + entry; 36 struct dma_desc *desc = priv->dma_tx + entry;
@@ -48,25 +48,30 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
48 48
49 desc->des2 = dma_map_single(priv->device, skb->data, 49 desc->des2 = dma_map_single(priv->device, skb->data,
50 bmax, DMA_TO_DEVICE); 50 bmax, DMA_TO_DEVICE);
51 priv->tx_skbuff_dma[entry] = desc->des2;
51 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 52 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, 53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
53 csum); 54 STMMAC_RING_MODE);
54 wmb(); 55 wmb();
55 entry = (++priv->cur_tx) % txsize; 56 entry = (++priv->cur_tx) % txsize;
56 desc = priv->dma_tx + entry; 57 desc = priv->dma_tx + entry;
57 58
58 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 59 desc->des2 = dma_map_single(priv->device, skb->data + bmax,
59 len, DMA_TO_DEVICE); 60 len, DMA_TO_DEVICE);
61 priv->tx_skbuff_dma[entry] = desc->des2;
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 62 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); 63 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
64 STMMAC_RING_MODE);
62 wmb(); 65 wmb();
63 priv->hw->desc->set_tx_owner(desc); 66 priv->hw->desc->set_tx_owner(desc);
64 priv->tx_skbuff[entry] = NULL; 67 priv->tx_skbuff[entry] = NULL;
65 } else { 68 } else {
66 desc->des2 = dma_map_single(priv->device, skb->data, 69 desc->des2 = dma_map_single(priv->device, skb->data,
67 nopaged_len, DMA_TO_DEVICE); 70 nopaged_len, DMA_TO_DEVICE);
71 priv->tx_skbuff_dma[entry] = desc->des2;
68 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 72 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
69 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum); 73 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
74 STMMAC_RING_MODE);
70 } 75 }
71 76
72 return entry; 77 return entry;
@@ -82,27 +87,23 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
82 return ret; 87 return ret;
83} 88}
84 89
85static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) 90static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
86{ 91{
87 /* Fill DES3 in case of RING mode */ 92 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
88 if (bfsize >= BUF_SIZE_8KiB)
89 p->des3 = p->des2 + BUF_SIZE_8KiB;
90}
91 93
92/* In ring mode we need to fill the desc3 because it is used 94 if (unlikely(priv->plat->has_gmac))
93 * as buffer */ 95 /* Fill DES3 in case of RING mode */
94static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p) 96 if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
95{ 97 p->des3 = p->des2 + BUF_SIZE_8KiB;
96 if (unlikely(des3_as_data_buf))
97 p->des3 = p->des2 + BUF_SIZE_8KiB;
98} 98}
99 99
100static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr, 100/* In ring mode we need to fill the desc3 because it is used as buffer */
101 unsigned int size) 101static void stmmac_init_desc3(struct dma_desc *p)
102{ 102{
103 p->des3 = p->des2 + BUF_SIZE_8KiB;
103} 104}
104 105
105static void stmmac_clean_desc3(struct dma_desc *p) 106static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
106{ 107{
107 if (unlikely(p->des3)) 108 if (unlikely(p->des3))
108 p->des3 = 0; 109 p->des3 = 0;
@@ -121,7 +122,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
121 .jumbo_frm = stmmac_jumbo_frm, 122 .jumbo_frm = stmmac_jumbo_frm,
122 .refill_desc3 = stmmac_refill_desc3, 123 .refill_desc3 = stmmac_refill_desc3,
123 .init_desc3 = stmmac_init_desc3, 124 .init_desc3 = stmmac_init_desc3,
124 .init_dma_chain = stmmac_init_dma_chain,
125 .clean_desc3 = stmmac_clean_desc3, 125 .clean_desc3 = stmmac_clean_desc3,
126 .set_16kib_bfsize = stmmac_set_16kib_bfsize, 126 .set_16kib_bfsize = stmmac_set_16kib_bfsize,
127}; 127};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b05df8983be5..c922fde929a1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,43 +24,56 @@
24#define __STMMAC_H__ 24#define __STMMAC_H__
25 25
26#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
27#define DRV_MODULE_VERSION "Nov_2012" 27#define DRV_MODULE_VERSION "March_2013"
28 28
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/stmmac.h> 30#include <linux/stmmac.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include "common.h" 33#include "common.h"
34#include <linux/ptp_clock_kernel.h>
34 35
35struct stmmac_priv { 36struct stmmac_priv {
36 /* Frequently used values are kept adjacent for cache effect */ 37 /* Frequently used values are kept adjacent for cache effect */
37 struct dma_desc *dma_tx ____cacheline_aligned; 38 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
38 dma_addr_t dma_tx_phy; 39 struct dma_desc *dma_tx;
39 struct sk_buff **tx_skbuff; 40 struct sk_buff **tx_skbuff;
40 unsigned int cur_tx; 41 unsigned int cur_tx;
41 unsigned int dirty_tx; 42 unsigned int dirty_tx;
42 unsigned int dma_tx_size; 43 unsigned int dma_tx_size;
44 u32 tx_count_frames;
45 u32 tx_coal_frames;
46 u32 tx_coal_timer;
47 dma_addr_t *tx_skbuff_dma;
48 dma_addr_t dma_tx_phy;
43 int tx_coalesce; 49 int tx_coalesce;
50 int hwts_tx_en;
51 spinlock_t tx_lock;
52 bool tx_path_in_lpi_mode;
53 struct timer_list txtimer;
44 54
45 struct dma_desc *dma_rx ; 55 struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
56 struct dma_extended_desc *dma_erx;
57 struct sk_buff **rx_skbuff;
46 unsigned int cur_rx; 58 unsigned int cur_rx;
47 unsigned int dirty_rx; 59 unsigned int dirty_rx;
48 struct sk_buff **rx_skbuff; 60 unsigned int dma_rx_size;
61 unsigned int dma_buf_sz;
62 u32 rx_riwt;
63 int hwts_rx_en;
49 dma_addr_t *rx_skbuff_dma; 64 dma_addr_t *rx_skbuff_dma;
65 dma_addr_t dma_rx_phy;
50 66
67 struct napi_struct napi ____cacheline_aligned_in_smp;
68
69 void __iomem *ioaddr;
51 struct net_device *dev; 70 struct net_device *dev;
52 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size;
54 unsigned int dma_buf_sz;
55 struct device *device; 71 struct device *device;
56 struct mac_device_info *hw; 72 struct mac_device_info *hw;
57 void __iomem *ioaddr;
58
59 struct stmmac_extra_stats xstats;
60 struct napi_struct napi;
61 int no_csum_insertion; 73 int no_csum_insertion;
74 spinlock_t lock;
62 75
63 struct phy_device *phydev; 76 struct phy_device *phydev ____cacheline_aligned_in_smp;
64 int oldlink; 77 int oldlink;
65 int speed; 78 int speed;
66 int oldduplex; 79 int oldduplex;
@@ -69,30 +82,31 @@ struct stmmac_priv {
69 struct mii_bus *mii; 82 struct mii_bus *mii;
70 int mii_irq[PHY_MAX_ADDR]; 83 int mii_irq[PHY_MAX_ADDR];
71 84
72 u32 msg_enable; 85 struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
73 spinlock_t lock;
74 spinlock_t tx_lock;
75 int wolopts;
76 int wol_irq;
77 struct plat_stmmacenet_data *plat; 86 struct plat_stmmacenet_data *plat;
78 struct stmmac_counters mmc;
79 struct dma_features dma_cap; 87 struct dma_features dma_cap;
88 struct stmmac_counters mmc;
80 int hw_cap_support; 89 int hw_cap_support;
90 int synopsys_id;
91 u32 msg_enable;
92 int wolopts;
93 int wol_irq;
81 struct clk *stmmac_clk; 94 struct clk *stmmac_clk;
82 int clk_csr; 95 int clk_csr;
83 int synopsys_id;
84 struct timer_list eee_ctrl_timer; 96 struct timer_list eee_ctrl_timer;
85 bool tx_path_in_lpi_mode;
86 int lpi_irq; 97 int lpi_irq;
87 int eee_enabled; 98 int eee_enabled;
88 int eee_active; 99 int eee_active;
89 int tx_lpi_timer; 100 int tx_lpi_timer;
90 struct timer_list txtimer; 101 int pcs;
91 u32 tx_count_frames; 102 unsigned int mode;
92 u32 tx_coal_frames; 103 int extend_desc;
93 u32 tx_coal_timer; 104 struct ptp_clock *ptp_clock;
105 struct ptp_clock_info ptp_clock_ops;
106 unsigned int default_addend;
107 u32 adv_ts;
94 int use_riwt; 108 int use_riwt;
95 u32 rx_riwt; 109 spinlock_t ptp_lock;
96}; 110};
97 111
98extern int phyaddr; 112extern int phyaddr;
@@ -102,6 +116,9 @@ extern int stmmac_mdio_register(struct net_device *ndev);
102extern void stmmac_set_ethtool_ops(struct net_device *netdev); 116extern void stmmac_set_ethtool_ops(struct net_device *netdev);
103extern const struct stmmac_desc_ops enh_desc_ops; 117extern const struct stmmac_desc_ops enh_desc_ops;
104extern const struct stmmac_desc_ops ndesc_ops; 118extern const struct stmmac_desc_ops ndesc_ops;
119extern const struct stmmac_hwtimestamp stmmac_ptp;
120extern int stmmac_ptp_register(struct stmmac_priv *priv);
121extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
105int stmmac_freeze(struct net_device *ndev); 122int stmmac_freeze(struct net_device *ndev);
106int stmmac_restore(struct net_device *ndev); 123int stmmac_restore(struct net_device *ndev);
107int stmmac_resume(struct net_device *ndev); 124int stmmac_resume(struct net_device *ndev);
@@ -125,6 +142,7 @@ static inline int stmmac_register_platform(void)
125 142
126 return err; 143 return err;
127} 144}
145
128static inline void stmmac_unregister_platform(void) 146static inline void stmmac_unregister_platform(void)
129{ 147{
130 platform_driver_unregister(&stmmac_pltfr_driver); 148 platform_driver_unregister(&stmmac_pltfr_driver);
@@ -136,6 +154,7 @@ static inline int stmmac_register_platform(void)
136 154
137 return 0; 155 return 0;
138} 156}
157
139static inline void stmmac_unregister_platform(void) 158static inline void stmmac_unregister_platform(void)
140{ 159{
141} 160}
@@ -153,6 +172,7 @@ static inline int stmmac_register_pci(void)
153 172
154 return err; 173 return err;
155} 174}
175
156static inline void stmmac_unregister_pci(void) 176static inline void stmmac_unregister_pci(void)
157{ 177{
158 pci_unregister_driver(&stmmac_pci_driver); 178 pci_unregister_driver(&stmmac_pci_driver);
@@ -164,6 +184,7 @@ static inline int stmmac_register_pci(void)
164 184
165 return 0; 185 return 0;
166} 186}
187
167static inline void stmmac_unregister_pci(void) 188static inline void stmmac_unregister_pci(void)
168{ 189{
169} 190}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1ac39c1b05d..c5f9cb85c8ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -27,6 +27,7 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/mii.h> 28#include <linux/mii.h>
29#include <linux/phy.h> 29#include <linux/phy.h>
30#include <linux/net_tstamp.h>
30#include <asm/io.h> 31#include <asm/io.h>
31 32
32#include "stmmac.h" 33#include "stmmac.h"
@@ -108,6 +109,33 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
108 STMMAC_STAT(irq_rx_path_in_lpi_mode_n), 109 STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
109 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), 110 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
110 STMMAC_STAT(phy_eee_wakeup_error_n), 111 STMMAC_STAT(phy_eee_wakeup_error_n),
112 /* Extended RDES status */
113 STMMAC_STAT(ip_hdr_err),
114 STMMAC_STAT(ip_payload_err),
115 STMMAC_STAT(ip_csum_bypassed),
116 STMMAC_STAT(ipv4_pkt_rcvd),
117 STMMAC_STAT(ipv6_pkt_rcvd),
118 STMMAC_STAT(rx_msg_type_ext_no_ptp),
119 STMMAC_STAT(rx_msg_type_sync),
120 STMMAC_STAT(rx_msg_type_follow_up),
121 STMMAC_STAT(rx_msg_type_delay_req),
122 STMMAC_STAT(rx_msg_type_delay_resp),
123 STMMAC_STAT(rx_msg_type_pdelay_req),
124 STMMAC_STAT(rx_msg_type_pdelay_resp),
125 STMMAC_STAT(rx_msg_type_pdelay_follow_up),
126 STMMAC_STAT(ptp_frame_type),
127 STMMAC_STAT(ptp_ver),
128 STMMAC_STAT(timestamp_dropped),
129 STMMAC_STAT(av_pkt_rcvd),
130 STMMAC_STAT(av_tagged_pkt_rcvd),
131 STMMAC_STAT(vlan_tag_priority_val),
132 STMMAC_STAT(l3_filter_match),
133 STMMAC_STAT(l4_filter_match),
134 STMMAC_STAT(l3_l4_filter_no_match),
135 /* PCS */
136 STMMAC_STAT(irq_pcs_ane_n),
137 STMMAC_STAT(irq_pcs_link_n),
138 STMMAC_STAT(irq_rgmii_n),
111}; 139};
112#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 140#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
113 141
@@ -219,6 +247,70 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
219 struct stmmac_priv *priv = netdev_priv(dev); 247 struct stmmac_priv *priv = netdev_priv(dev);
220 struct phy_device *phy = priv->phydev; 248 struct phy_device *phy = priv->phydev;
221 int rc; 249 int rc;
250
251 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
252 struct rgmii_adv adv;
253
254 if (!priv->xstats.pcs_link) {
255 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
256 cmd->duplex = DUPLEX_UNKNOWN;
257 return 0;
258 }
259 cmd->duplex = priv->xstats.pcs_duplex;
260
261 ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
262
263 /* Get and convert ADV/LP_ADV from the HW AN registers */
264 if (priv->hw->mac->get_adv)
265 priv->hw->mac->get_adv(priv->ioaddr, &adv);
266 else
267 return -EOPNOTSUPP; /* should never happen indeed */
268
269 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
270
271 if (adv.pause & STMMAC_PCS_PAUSE)
272 cmd->advertising |= ADVERTISED_Pause;
273 if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
274 cmd->advertising |= ADVERTISED_Asym_Pause;
275 if (adv.lp_pause & STMMAC_PCS_PAUSE)
276 cmd->lp_advertising |= ADVERTISED_Pause;
277 if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
278 cmd->lp_advertising |= ADVERTISED_Asym_Pause;
279
280 /* Reg49[3] always set because ANE is always supported */
281 cmd->autoneg = ADVERTISED_Autoneg;
282 cmd->supported |= SUPPORTED_Autoneg;
283 cmd->advertising |= ADVERTISED_Autoneg;
284 cmd->lp_advertising |= ADVERTISED_Autoneg;
285
286 if (adv.duplex) {
287 cmd->supported |= (SUPPORTED_1000baseT_Full |
288 SUPPORTED_100baseT_Full |
289 SUPPORTED_10baseT_Full);
290 cmd->advertising |= (ADVERTISED_1000baseT_Full |
291 ADVERTISED_100baseT_Full |
292 ADVERTISED_10baseT_Full);
293 } else {
294 cmd->supported |= (SUPPORTED_1000baseT_Half |
295 SUPPORTED_100baseT_Half |
296 SUPPORTED_10baseT_Half);
297 cmd->advertising |= (ADVERTISED_1000baseT_Half |
298 ADVERTISED_100baseT_Half |
299 ADVERTISED_10baseT_Half);
300 }
301 if (adv.lp_duplex)
302 cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
303 ADVERTISED_100baseT_Full |
304 ADVERTISED_10baseT_Full);
305 else
306 cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
307 ADVERTISED_100baseT_Half |
308 ADVERTISED_10baseT_Half);
309 cmd->port = PORT_OTHER;
310
311 return 0;
312 }
313
222 if (phy == NULL) { 314 if (phy == NULL) {
223 pr_err("%s: %s: PHY is not registered\n", 315 pr_err("%s: %s: PHY is not registered\n",
224 __func__, dev->name); 316 __func__, dev->name);
@@ -243,6 +335,30 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
243 struct phy_device *phy = priv->phydev; 335 struct phy_device *phy = priv->phydev;
244 int rc; 336 int rc;
245 337
338 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
339 u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
340
341 /* Only support ANE */
342 if (cmd->autoneg != AUTONEG_ENABLE)
343 return -EINVAL;
344
345 if (cmd->autoneg == AUTONEG_ENABLE) {
346 mask &= (ADVERTISED_1000baseT_Half |
347 ADVERTISED_1000baseT_Full |
348 ADVERTISED_100baseT_Half |
349 ADVERTISED_100baseT_Full |
350 ADVERTISED_10baseT_Half |
351 ADVERTISED_10baseT_Full);
352
353 spin_lock(&priv->lock);
354 if (priv->hw->mac->ctrl_ane)
355 priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
356 spin_unlock(&priv->lock);
357 }
358
359 return 0;
360 }
361
246 spin_lock(&priv->lock); 362 spin_lock(&priv->lock);
247 rc = phy_ethtool_sset(phy, cmd); 363 rc = phy_ethtool_sset(phy, cmd);
248 spin_unlock(&priv->lock); 364 spin_unlock(&priv->lock);
@@ -312,6 +428,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
312{ 428{
313 struct stmmac_priv *priv = netdev_priv(netdev); 429 struct stmmac_priv *priv = netdev_priv(netdev);
314 430
431 if (priv->pcs) /* FIXME */
432 return;
433
315 spin_lock(&priv->lock); 434 spin_lock(&priv->lock);
316 435
317 pause->rx_pause = 0; 436 pause->rx_pause = 0;
@@ -335,6 +454,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
335 int new_pause = FLOW_OFF; 454 int new_pause = FLOW_OFF;
336 int ret = 0; 455 int ret = 0;
337 456
457 if (priv->pcs) /* FIXME */
458 return -EOPNOTSUPP;
459
338 spin_lock(&priv->lock); 460 spin_lock(&priv->lock);
339 461
340 if (pause->rx_pause) 462 if (pause->rx_pause)
@@ -604,6 +726,38 @@ static int stmmac_set_coalesce(struct net_device *dev,
604 return 0; 726 return 0;
605} 727}
606 728
/* ethtool .get_ts_info callback: report the HW timestamping capabilities.
 * Only advertises HW timestamping when BOTH TX and RX HW timestamping have
 * been enabled (via the hwtstamp ioctl); otherwise it falls back to the
 * generic SW-only ethtool_op_get_ts_info().
 */
 729static int stmmac_get_ts_info(struct net_device *dev,
 730 struct ethtool_ts_info *info)
 731{
 732 struct stmmac_priv *priv = netdev_priv(dev);
 733
 734 if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
 735
 736 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
 737 SOF_TIMESTAMPING_RX_HARDWARE |
 738 SOF_TIMESTAMPING_RAW_HARDWARE;
 739
 740 if (priv->ptp_clock)
 741 info->phc_index = ptp_clock_index(priv->ptp_clock);
 742
 743 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 744
 745 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
 746 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
 747 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 748 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 749 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
 750 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
 751 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 752 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
 753 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
 754 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
 755 (1 << HWTSTAMP_FILTER_ALL));
 756 return 0;
 757 } else
 758 return ethtool_op_get_ts_info(dev, info);
 759}
760
607static const struct ethtool_ops stmmac_ethtool_ops = { 761static const struct ethtool_ops stmmac_ethtool_ops = {
608 .begin = stmmac_check_if_running, 762 .begin = stmmac_check_if_running,
609 .get_drvinfo = stmmac_ethtool_getdrvinfo, 763 .get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -623,7 +777,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
623 .get_eee = stmmac_ethtool_op_get_eee, 777 .get_eee = stmmac_ethtool_op_get_eee,
624 .set_eee = stmmac_ethtool_op_set_eee, 778 .set_eee = stmmac_ethtool_op_set_eee,
625 .get_sset_count = stmmac_get_sset_count, 779 .get_sset_count = stmmac_get_sset_count,
626 .get_ts_info = ethtool_op_get_ts_info, 780 .get_ts_info = stmmac_get_ts_info,
627 .get_coalesce = stmmac_get_coalesce, 781 .get_coalesce = stmmac_get_coalesce,
628 .set_coalesce = stmmac_set_coalesce, 782 .set_coalesce = stmmac_set_coalesce,
629}; 783};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 000000000000..def7e75e1d57
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,148 @@
1/*******************************************************************************
2 Copyright (C) 2013 Vayavya Labs Pvt Ltd
3
4 This implements all the API for managing HW timestamp & PTP.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24*******************************************************************************/
25
26#include <linux/io.h>
27#include <linux/delay.h>
28#include "common.h"
29#include "stmmac_ptp.h"
30
/* Program the Time Stamp Control register (PTP_TCR) with the given
 * enable/config bits in a single write.
 */
 31static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
 32{
 33 writel(data, ioaddr + PTP_TCR);
 34}
35
/* Program the Sub Second Increment register (PTP_SSIR) from a hard-coded
 * 50MHz PTP reference clock (period = 20ns). If the TSCTRLSSR (digital
 * rollover) bit is set in PTP_TCR, the value is rescaled by 100/465 for
 * the 0.465ns sub-second granularity.
 */
 36static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
 37{
 38 u32 value = readl(ioaddr + PTP_TCR);
 39 unsigned long data;
 40
 41 /* Convert the ptp_clock to nano second
 42 * formula = (1/ptp_clock) * 1000000000
 43 * where, ptp_clock = 50MHz.
 44 */
 45 data = (1000000000ULL / 50000000);
 46
 47 /* 0.465ns accuracy */
 48 if (value & PTP_TCR_TSCTRLSSR)
 49 data = (data * 100) / 465;
 50
 51 writel(data, ioaddr + PTP_SSIR);
 52}
53
/* Load the system time counter with @sec/@nsec: write the update registers,
 * set the TSINIT command bit, then poll (up to 10 x 10ms, busy-wait) until
 * the HW clears the bit. Returns 0 on success, -EBUSY on timeout.
 */
 54static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
 55{
 56 int limit;
 57 u32 value;
 58
 59 writel(sec, ioaddr + PTP_STSUR);
 60 writel(nsec, ioaddr + PTP_STNSUR);
 61 /* issue command to initialize the system time value */
 62 value = readl(ioaddr + PTP_TCR);
 63 value |= PTP_TCR_TSINIT;
 64 writel(value, ioaddr + PTP_TCR);
 65
 66 /* wait for present system time initialize to complete */
 67 limit = 10;
 68 while (limit--) {
 69 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
 70 break;
 71 mdelay(10);
 72 }
 73 if (limit < 0)
 74 return -EBUSY;
 75
 76 return 0;
 77}
78
/* Write a new frequency-adjustment addend (PTP_TAR) and latch it with the
 * TSADDREG command bit; poll (up to 10 x 10ms) until the HW clears the bit.
 * Returns 0 on success, -EBUSY on timeout.
 */
 79static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
 80{
 81 u32 value;
 82 int limit;
 83
 84 writel(addend, ioaddr + PTP_TAR);
 85 /* issue command to update the addend value */
 86 value = readl(ioaddr + PTP_TCR);
 87 value |= PTP_TCR_TSADDREG;
 88 writel(value, ioaddr + PTP_TCR);
 89
 90 /* wait for present addend update to complete */
 91 limit = 10;
 92 while (limit--) {
 93 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
 94 break;
 95 mdelay(10);
 96 }
 97 if (limit < 0)
 98 return -EBUSY;
 99
 100 return 0;
 101}
102
/* Apply a fine offset of @sec/@nsec to the system time. @add_sub selects
 * add (0) or subtract (1) via the ADDSUB bit in PTP_STNSUR; the TSUPDT
 * command bit triggers the update and is polled (up to 10 x 10ms) until
 * the HW clears it. Returns 0 on success, -EBUSY on timeout.
 * NOTE(review): for subtraction, callers are expected to pass @nsec already
 * converted to the HW's complement form - verify against the databook.
 */
 103static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
 104 int add_sub)
 105{
 106 u32 value;
 107 int limit;
 108
 109 writel(sec, ioaddr + PTP_STSUR);
 110 writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
 111 ioaddr + PTP_STNSUR);
 112 /* issue command to initialize the system time value */
 113 value = readl(ioaddr + PTP_TCR);
 114 value |= PTP_TCR_TSUPDT;
 115 writel(value, ioaddr + PTP_TCR);
 116
 117 /* wait for present system time adjust/update to complete */
 118 limit = 10;
 119 while (limit--) {
 120 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
 121 break;
 122 mdelay(10);
 123 }
 124 if (limit < 0)
 125 return -EBUSY;
 126
 127 return 0;
 128}
129
/* Read the current system time as nanoseconds:
 * PTP_STSR (seconds) * 1e9 + PTP_STNSR (nanoseconds).
 * NOTE(review): the two registers are read non-atomically; a seconds
 * rollover between the two reads is not handled here.
 */
 130static u64 stmmac_get_systime(void __iomem *ioaddr)
 131{
 132 u64 ns;
 133
 134 ns = readl(ioaddr + PTP_STNSR);
 135 /* convert sec time value to nanosecond */
 136 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
 137
 138 return ns;
 139}
140
/* HW timestamping operations table, hooked up as priv->hw->ptp by
 * stmmac_init_ptp() in stmmac_main.c.
 */
 141const struct stmmac_hwtimestamp stmmac_ptp = {
 142 .config_hw_tstamping = stmmac_config_hw_tstamping,
 143 .init_systime = stmmac_init_systime,
 144 .config_sub_second_increment = stmmac_config_sub_second_increment,
 145 .config_addend = stmmac_config_addend,
 146 .adjust_systime = stmmac_adjust_systime,
 147 .get_systime = stmmac_get_systime,
 148};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 39c6c5524633..618446ae1ec1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -46,7 +46,9 @@
46#ifdef CONFIG_STMMAC_DEBUG_FS 46#ifdef CONFIG_STMMAC_DEBUG_FS
47#include <linux/debugfs.h> 47#include <linux/debugfs.h>
48#include <linux/seq_file.h> 48#include <linux/seq_file.h>
49#endif 49#endif /* CONFIG_STMMAC_DEBUG_FS */
50#include <linux/net_tstamp.h>
51#include "stmmac_ptp.h"
50#include "stmmac.h" 52#include "stmmac.h"
51 53
52#undef STMMAC_DEBUG 54#undef STMMAC_DEBUG
@@ -79,14 +81,14 @@
79#define JUMBO_LEN 9000 81#define JUMBO_LEN 9000
80 82
81/* Module parameters */ 83/* Module parameters */
82#define TX_TIMEO 5000 /* default 5 seconds */ 84#define TX_TIMEO 5000
83static int watchdog = TX_TIMEO; 85static int watchdog = TX_TIMEO;
84module_param(watchdog, int, S_IRUGO | S_IWUSR); 86module_param(watchdog, int, S_IRUGO | S_IWUSR);
85MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds"); 87MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
86 88
87static int debug = -1; /* -1: default, 0: no output, 16: all */ 89static int debug = -1;
88module_param(debug, int, S_IRUGO | S_IWUSR); 90module_param(debug, int, S_IRUGO | S_IWUSR);
89MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); 91MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
90 92
91int phyaddr = -1; 93int phyaddr = -1;
92module_param(phyaddr, int, S_IRUGO); 94module_param(phyaddr, int, S_IRUGO);
@@ -130,6 +132,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
130MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
131#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) 133#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
132 134
135/* By default the driver will use the ring mode to manage tx and rx descriptors
136 * but passing this value so user can force to use the chain instead of the ring
137 */
138static unsigned int chain_mode;
139module_param(chain_mode, int, S_IRUGO);
140MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
141
133static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 142static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
134 143
135#ifdef CONFIG_STMMAC_DEBUG_FS 144#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -164,6 +173,18 @@ static void stmmac_verify_args(void)
164 eee_timer = STMMAC_DEFAULT_LPI_TIMER; 173 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
165} 174}
166 175
176/**
177 * stmmac_clk_csr_set - dynamically set the MDC clock
178 * @priv: driver private structure
179 * Description: this is to dynamically set the MDC clock according to the csr
180 * clock input.
181 * Note:
182 * If a specific clk_csr value is passed from the platform
183 * this means that the CSR Clock Range selection cannot be
184 * changed at run-time and it is fixed (as reported in the driver
185 * documentation). Viceversa the driver will try to set the MDC
186 * clock dynamically according to the actual clock input.
187 */
167static void stmmac_clk_csr_set(struct stmmac_priv *priv) 188static void stmmac_clk_csr_set(struct stmmac_priv *priv)
168{ 189{
169 u32 clk_rate; 190 u32 clk_rate;
@@ -171,7 +192,12 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
171 clk_rate = clk_get_rate(priv->stmmac_clk); 192 clk_rate = clk_get_rate(priv->stmmac_clk);
172 193
173 /* Platform provided default clk_csr would be assumed valid 194 /* Platform provided default clk_csr would be assumed valid
174 * for all other cases except for the below mentioned ones. */ 195 * for all other cases except for the below mentioned ones.
196 * For values higher than the IEEE 802.3 specified frequency
197 * we can not estimate the proper divider as it is not known
198 * the frequency of clk_csr_i. So we do not change the default
199 * divider.
200 */
175 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 201 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
176 if (clk_rate < CSR_F_35M) 202 if (clk_rate < CSR_F_35M)
177 priv->clk_csr = STMMAC_CSR_20_35M; 203 priv->clk_csr = STMMAC_CSR_20_35M;
@@ -185,10 +211,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
185 priv->clk_csr = STMMAC_CSR_150_250M; 211 priv->clk_csr = STMMAC_CSR_150_250M;
186 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 212 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
187 priv->clk_csr = STMMAC_CSR_250_300M; 213 priv->clk_csr = STMMAC_CSR_250_300M;
188 } /* For values higher than the IEEE 802.3 specified frequency 214 }
189 * we can not estimate the proper divider as it is not known
190 * the frequency of clk_csr_i. So we do not change the default
191 * divider. */
192} 215}
193 216
194#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) 217#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -213,18 +236,25 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
213 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; 236 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
214} 237}
215 238
216/* On some ST platforms, some HW system configuraton registers have to be 239/**
217 * set according to the link speed negotiated. 240 * stmmac_hw_fix_mac_speed: callback for speed selection
241 * @priv: driver private structure
242 * Description: on some platforms (e.g. ST), some HW system configuraton
243 * registers have to be set according to the link speed negotiated.
218 */ 244 */
219static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) 245static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
220{ 246{
221 struct phy_device *phydev = priv->phydev; 247 struct phy_device *phydev = priv->phydev;
222 248
223 if (likely(priv->plat->fix_mac_speed)) 249 if (likely(priv->plat->fix_mac_speed))
224 priv->plat->fix_mac_speed(priv->plat->bsp_priv, 250 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
225 phydev->speed);
226} 251}
227 252
253/**
254 * stmmac_enable_eee_mode: Check and enter in LPI mode
255 * @priv: driver private structure
256 * Description: this function is to verify and enter in LPI mode for EEE.
257 */
228static void stmmac_enable_eee_mode(struct stmmac_priv *priv) 258static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
229{ 259{
230 /* Check and enter in LPI mode */ 260 /* Check and enter in LPI mode */
@@ -233,19 +263,24 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
233 priv->hw->mac->set_eee_mode(priv->ioaddr); 263 priv->hw->mac->set_eee_mode(priv->ioaddr);
234} 264}
235 265
266/**
267 * stmmac_disable_eee_mode: disable/exit from EEE
268 * @priv: driver private structure
269 * Description: this function is to exit and disable EEE in case of
270 * LPI state is true. This is called by the xmit.
271 */
236void stmmac_disable_eee_mode(struct stmmac_priv *priv) 272void stmmac_disable_eee_mode(struct stmmac_priv *priv)
237{ 273{
238 /* Exit and disable EEE in case of we are are in LPI state. */
239 priv->hw->mac->reset_eee_mode(priv->ioaddr); 274 priv->hw->mac->reset_eee_mode(priv->ioaddr);
240 del_timer_sync(&priv->eee_ctrl_timer); 275 del_timer_sync(&priv->eee_ctrl_timer);
241 priv->tx_path_in_lpi_mode = false; 276 priv->tx_path_in_lpi_mode = false;
242} 277}
243 278
244/** 279/**
245 * stmmac_eee_ctrl_timer 280 * stmmac_eee_ctrl_timer: EEE TX SW timer.
246 * @arg : data hook 281 * @arg : data hook
247 * Description: 282 * Description:
248 * If there is no data transfer and if we are not in LPI state, 283 * if there is no data transfer and if we are not in LPI state,
249 * then MAC Transmitter can be moved to LPI state. 284 * then MAC Transmitter can be moved to LPI state.
250 */ 285 */
251static void stmmac_eee_ctrl_timer(unsigned long arg) 286static void stmmac_eee_ctrl_timer(unsigned long arg)
@@ -257,8 +292,8 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
257} 292}
258 293
259/** 294/**
260 * stmmac_eee_init 295 * stmmac_eee_init: init EEE
261 * @priv: private device pointer 296 * @priv: driver private structure
262 * Description: 297 * Description:
263 * If the EEE support has been enabled while configuring the driver, 298 * If the EEE support has been enabled while configuring the driver,
264 * if the GMAC actually supports the EEE (from the HW cap reg) and the 299 * if the GMAC actually supports the EEE (from the HW cap reg) and the
@@ -294,16 +329,359 @@ out:
294 return ret; 329 return ret;
295} 330}
296 331
332/**
333 * stmmac_eee_adjust: adjust HW EEE according to the speed
334 * @priv: driver private structure
335 * Description:
336 * When the EEE has been already initialised we have to
337 * modify the PLS bit in the LPI ctrl & status reg according
338 * to the PHY link status. For this reason.
339 */
297static void stmmac_eee_adjust(struct stmmac_priv *priv) 340static void stmmac_eee_adjust(struct stmmac_priv *priv)
298{ 341{
299 /* When the EEE has been already initialised we have to
300 * modify the PLS bit in the LPI ctrl & status reg according
301 * to the PHY link status. For this reason.
302 */
303 if (priv->eee_enabled) 342 if (priv->eee_enabled)
304 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); 343 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
305} 344}
306 345
 346/* stmmac_get_tx_hwtstamp: get HW TX timestamps
 347 * @priv: driver private structure
 348 * @entry : descriptor index to be used.
 349 * @skb : the socket buffer
 350 * Description :
 351 * This function will read the timestamp from the descriptor and pass it
 352 * to the stack; it also performs some sanity checks.
 353 */
 354static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 355 unsigned int entry, struct sk_buff *skb)
 356{
 357 struct skb_shared_hwtstamps shhwtstamp;
 358 u64 ns;
 359 void *desc = NULL;
 360
 361 if (!priv->hwts_tx_en)
 362 return;
 363
 364 /* exit if skb doesn't support hw tstamp */
 365 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 366 return;
 367
 368 /* adv_ts selects the extended (dma_etx) vs normal (dma_tx) ring */
 369 if (priv->adv_ts)
 370 desc = (priv->dma_etx + entry);
 371 else
 372 desc = (priv->dma_tx + entry);
 373
 374 /* check tx tstamp status */
 375 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
 376 return;
 377
 378 /* get the valid tstamp */
 379 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 380
 381 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 382 shhwtstamp.hwtstamp = ns_to_ktime(ns);
 383 /* pass tstamp to stack */
 384 skb_tstamp_tx(skb, &shhwtstamp);
 385
 386 return; /* NOTE(review): redundant return at end of void function */
 387}
387
 388/* stmmac_get_rx_hwtstamp: get HW RX timestamps
 389 * @priv: driver private structure
 390 * @entry : descriptor index to be used.
 391 * @skb : the socket buffer
 392 * Description :
 393 * This function will read the received packet's timestamp from the
 394 * descriptor and pass it to the stack. It also performs some sanity checks.
 395 */
 396static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
 397 unsigned int entry, struct sk_buff *skb)
 398{
 399 struct skb_shared_hwtstamps *shhwtstamp = NULL;
 400 u64 ns;
 401 void *desc = NULL;
 402
 403 if (!priv->hwts_rx_en)
 404 return;
 405
 406 /* adv_ts selects the extended (dma_erx) vs normal (dma_rx) ring */
 407 if (priv->adv_ts)
 408 desc = (priv->dma_erx + entry);
 409 else
 410 desc = (priv->dma_rx + entry);
 411
 412 /* exit if rx tstamp is not valid */
 413 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
 414 return;
 415
 416 /* get valid tstamp and store it in the skb's shared hwtstamp area */
 417 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 418 shhwtstamp = skb_hwtstamps(skb);
 419 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 420 shhwtstamp->hwtstamp = ns_to_ktime(ns);
 421}
421
 422/**
 423 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 424 * @dev: device pointer.
 425 * @ifr: An IOCTL specific structure, that can contain a pointer to
 426 * a proprietary structure used to pass information to the driver.
 427 * Description:
 428 * This function configures the MAC to enable/disable both outgoing(TX)
 429 * and incoming(RX) packets time stamping based on user input.
 430 * Return Value:
 431 * 0 on success and an appropriate -ve integer on failure.
 432 */
 433static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 434{
 435 struct stmmac_priv *priv = netdev_priv(dev);
 436 struct hwtstamp_config config;
 437 struct timespec now;
 438 u64 temp = 0;
 439 u32 ptp_v2 = 0;
 440 u32 tstamp_all = 0;
 441 u32 ptp_over_ipv4_udp = 0;
 442 u32 ptp_over_ipv6_udp = 0;
 443 u32 ptp_over_ethernet = 0;
 444 u32 snap_type_sel = 0;
 445 u32 ts_master_en = 0;
 446 u32 ts_event_en = 0;
 447 u32 value = 0;
 448
 449 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 450 netdev_alert(priv->dev, "No support for HW time stamping\n");
 451 priv->hwts_tx_en = 0;
 452 priv->hwts_rx_en = 0;
 453
 454 return -EOPNOTSUPP;
 455 }
 456
 457 if (copy_from_user(&config, ifr->ifr_data,
 458 sizeof(struct hwtstamp_config)))
 459 return -EFAULT;
 460
 461 pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 462 __func__, config.flags, config.tx_type, config.rx_filter);
 463
 464 /* reserved for future extensions */
 465 if (config.flags)
 466 return -EINVAL;
 467
 468 switch (config.tx_type) {
 469 case HWTSTAMP_TX_OFF:
 470 priv->hwts_tx_en = 0;
 471 break;
 472 case HWTSTAMP_TX_ON:
 473 priv->hwts_tx_en = 1;
 474 break;
 475 default:
 476 return -ERANGE;
 477 }
 478
 479 if (priv->adv_ts) {
 480 switch (config.rx_filter) {
 481 case HWTSTAMP_FILTER_NONE:
 482 /* time stamp no incoming packet at all */
 483 config.rx_filter = HWTSTAMP_FILTER_NONE;
 484 break;
 485
 486 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 487 /* PTP v1, UDP, any kind of event packet */
 488 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 489 /* take time stamp for all event messages */
 490 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 491
 492 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 493 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 494 break;
 495
 496 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 497 /* PTP v1, UDP, Sync packet */
 498 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 499 /* take time stamp for SYNC messages only */
 500 ts_event_en = PTP_TCR_TSEVNTENA;
 501
 502 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 503 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 504 break;
 505
 506 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 507 /* PTP v1, UDP, Delay_req packet */
 508 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 509 /* take time stamp for Delay_Req messages only */
 510 ts_master_en = PTP_TCR_TSMSTRENA;
 511 ts_event_en = PTP_TCR_TSEVNTENA;
 512
 513 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 514 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 515 break;
 516
 517 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 518 /* PTP v2, UDP, any kind of event packet */
 519 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 520 ptp_v2 = PTP_TCR_TSVER2ENA;
 521 /* take time stamp for all event messages */
 522 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 523
 524 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 525 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 526 break;
 527
 528 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 529 /* PTP v2, UDP, Sync packet */
 530 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 531 ptp_v2 = PTP_TCR_TSVER2ENA;
 532 /* take time stamp for SYNC messages only */
 533 ts_event_en = PTP_TCR_TSEVNTENA;
 534
 535 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 536 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 537 break;
 538
 539 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 540 /* PTP v2, UDP, Delay_req packet */
 541 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 542 ptp_v2 = PTP_TCR_TSVER2ENA;
 543 /* take time stamp for Delay_Req messages only */
 544 ts_master_en = PTP_TCR_TSMSTRENA;
 545 ts_event_en = PTP_TCR_TSEVNTENA;
 546
 547 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 548 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 549 break;
 550
 551 case HWTSTAMP_FILTER_PTP_V2_EVENT:
 552 /* PTP v2/802.AS1 any layer, any kind of event packet */
 553 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 554 ptp_v2 = PTP_TCR_TSVER2ENA;
 555 /* take time stamp for all event messages */
 556 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 557
 558 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 559 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 560 ptp_over_ethernet = PTP_TCR_TSIPENA;
 561 break;
 562
 563 case HWTSTAMP_FILTER_PTP_V2_SYNC:
 564 /* PTP v2/802.AS1, any layer, Sync packet */
 565 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 566 ptp_v2 = PTP_TCR_TSVER2ENA;
 567 /* take time stamp for SYNC messages only */
 568 ts_event_en = PTP_TCR_TSEVNTENA;
 569
 570 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 571 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 572 ptp_over_ethernet = PTP_TCR_TSIPENA;
 573 break;
 574
 575 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 576 /* PTP v2/802.AS1, any layer, Delay_req packet */
 577 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 578 ptp_v2 = PTP_TCR_TSVER2ENA;
 579 /* take time stamp for Delay_Req messages only */
 580 ts_master_en = PTP_TCR_TSMSTRENA;
 581 ts_event_en = PTP_TCR_TSEVNTENA;
 582
 583 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 584 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 585 ptp_over_ethernet = PTP_TCR_TSIPENA;
 586 break;
 587
 588 case HWTSTAMP_FILTER_ALL:
 589 /* time stamp any incoming packet */
 590 config.rx_filter = HWTSTAMP_FILTER_ALL;
 591 tstamp_all = PTP_TCR_TSENALL;
 592 break;
 593
 594 default:
 595 return -ERANGE;
 596 }
 597 } else {
 598 switch (config.rx_filter) {
 599 case HWTSTAMP_FILTER_NONE:
 600 config.rx_filter = HWTSTAMP_FILTER_NONE;
 601 break;
 602 default:
 603 /* PTP v1, UDP, any kind of event packet */
 604 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 605 break;
 606 }
 607 }
 608 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 609
 610 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
 611 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
 612 else {
 613 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 614 tstamp_all | ptp_v2 | ptp_over_ethernet |
 615 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 616 ts_master_en | snap_type_sel);
 617
 618 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
 619
 620 /* program Sub Second Increment reg */
 621 priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
 622
 623 /* calculate the default addend value:
 624 * formula is :
 625 * addend = (2^32)/freq_div_ratio;
 626 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
 627 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
 628 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
 629 * achieve 20ns accuracy.
 630 *
 631 * 2^x * y == (y << x), hence
 632 * 2^32 * 50000000 ==> (50000000 << 32)
 633 */
 634 temp = (u64) (50000000ULL << 32);
 635 priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
 636 priv->hw->ptp->config_addend(priv->ioaddr,
 637 priv->default_addend);
 638
 639 /* initialize system time */
 640 getnstimeofday(&now);
 641 priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
 642 now.tv_nsec);
 643 }
 644
 645 return copy_to_user(ifr->ifr_data, &config,
 646 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
 647}
648
 649/**
 650 * stmmac_init_ptp: init PTP
 651 * @priv: driver private structure
 652 * Description: this is to verify if the HW supports the PTPv1 or v2.
 653 * This is done by looking at the HW cap. register.
 654 * Also it registers the ptp driver.
 655 */
 656static int stmmac_init_ptp(struct stmmac_priv *priv)
 657{
 658 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 659 return -EOPNOTSUPP;
 660
 661 /* NOTE(review): priv->adv_ts is only assigned inside this
 662 * netif_msg_hw() branch, so the advanced-timestamp capability ends up
 663 * depending on the debug message level rather than on the HW cap
 664 * alone - looks unintended; verify.
 665 */
 666 if (netif_msg_hw(priv)) {
 667 if (priv->dma_cap.time_stamp) {
 668 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
 669 priv->adv_ts = 0;
 670 }
 671 if (priv->dma_cap.atime_stamp && priv->extend_desc) {
 672 pr_debug
 673 ("IEEE 1588-2008 Advanced Time Stamp supported\n");
 674 priv->adv_ts = 1;
 675 }
 676 }
 677
 678 priv->hw->ptp = &stmmac_ptp;
 679 priv->hwts_tx_en = 0;
 680 priv->hwts_rx_en = 0;
 681
 682 return stmmac_ptp_register(priv);
 683}
679
/* Tear-down counterpart of stmmac_init_ptp(): unregister the ptp clock. */
 680static void stmmac_release_ptp(struct stmmac_priv *priv)
 681{
 682 stmmac_ptp_unregister(priv);
 683}
684
307/** 685/**
308 * stmmac_adjust_link 686 * stmmac_adjust_link
309 * @dev: net device structure 687 * @dev: net device structure
@@ -349,7 +727,7 @@ static void stmmac_adjust_link(struct net_device *dev)
349 case 1000: 727 case 1000:
350 if (likely(priv->plat->has_gmac)) 728 if (likely(priv->plat->has_gmac))
351 ctrl &= ~priv->hw->link.port; 729 ctrl &= ~priv->hw->link.port;
352 stmmac_hw_fix_mac_speed(priv); 730 stmmac_hw_fix_mac_speed(priv);
353 break; 731 break;
354 case 100: 732 case 100:
355 case 10: 733 case 10:
@@ -367,8 +745,8 @@ static void stmmac_adjust_link(struct net_device *dev)
367 break; 745 break;
368 default: 746 default:
369 if (netif_msg_link(priv)) 747 if (netif_msg_link(priv))
370 pr_warning("%s: Speed (%d) is not 10" 748 pr_warn("%s: Speed (%d) not 10/100\n",
371 " or 100!\n", dev->name, phydev->speed); 749 dev->name, phydev->speed);
372 break; 750 break;
373 } 751 }
374 752
@@ -399,6 +777,31 @@ static void stmmac_adjust_link(struct net_device *dev)
399} 777}
400 778
401/** 779/**
780 * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
781 * @priv: driver private structure
782 * Description: this is to verify if the HW supports the PCS.
783 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
784 * configured for the TBI, RTBI, or SGMII PHY interface.
785 */
786static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
787{
788 int interface = priv->plat->interface;
789
790 if (priv->dma_cap.pcs) {
791 if ((interface & PHY_INTERFACE_MODE_RGMII) ||
792 (interface & PHY_INTERFACE_MODE_RGMII_ID) ||
793 (interface & PHY_INTERFACE_MODE_RGMII_RXID) ||
794 (interface & PHY_INTERFACE_MODE_RGMII_TXID)) {
795 pr_debug("STMMAC: PCS RGMII support enable\n");
796 priv->pcs = STMMAC_PCS_RGMII;
797 } else if (interface & PHY_INTERFACE_MODE_SGMII) {
798 pr_debug("STMMAC: PCS SGMII support enable\n");
799 priv->pcs = STMMAC_PCS_SGMII;
800 }
801 }
802}
803
804/**
402 * stmmac_init_phy - PHY initialization 805 * stmmac_init_phy - PHY initialization
403 * @dev: net device structure 806 * @dev: net device structure
404 * Description: it initializes the driver's PHY state, and attaches the PHY 807 * Description: it initializes the driver's PHY state, and attaches the PHY
@@ -419,10 +822,10 @@ static int stmmac_init_phy(struct net_device *dev)
419 822
420 if (priv->plat->phy_bus_name) 823 if (priv->plat->phy_bus_name)
421 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", 824 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
422 priv->plat->phy_bus_name, priv->plat->bus_id); 825 priv->plat->phy_bus_name, priv->plat->bus_id);
423 else 826 else
424 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", 827 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
425 priv->plat->bus_id); 828 priv->plat->bus_id);
426 829
427 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 830 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
428 priv->plat->phy_addr); 831 priv->plat->phy_addr);
@@ -461,29 +864,57 @@ static int stmmac_init_phy(struct net_device *dev)
461} 864}
462 865
463/** 866/**
464 * display_ring 867 * stmmac_display_ring: display ring
465 * @p: pointer to the ring. 868 * @head: pointer to the head of the ring passed.
466 * @size: size of the ring. 869 * @size: size of the ring.
467 * Description: display all the descriptors within the ring. 870 * @extend_desc: to verify if extended descriptors are used.
871 * Description: display the control/status and buffer descriptors.
468 */ 872 */
469static void display_ring(struct dma_desc *p, int size) 873static void stmmac_display_ring(void *head, int size, int extend_desc)
470{ 874{
471 struct tmp_s {
472 u64 a;
473 unsigned int b;
474 unsigned int c;
475 };
476 int i; 875 int i;
876 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
877 struct dma_desc *p = (struct dma_desc *)head;
878
477 for (i = 0; i < size; i++) { 879 for (i = 0; i < size; i++) {
478 struct tmp_s *x = (struct tmp_s *)(p + i); 880 u64 x;
479 pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 881 if (extend_desc) {
480 i, (unsigned int)virt_to_phys(&p[i]), 882 x = *(u64 *) ep;
481 (unsigned int)(x->a), (unsigned int)((x->a) >> 32), 883 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
482 x->b, x->c); 884 i, (unsigned int)virt_to_phys(ep),
885 (unsigned int)x, (unsigned int)(x >> 32),
886 ep->basic.des2, ep->basic.des3);
887 ep++;
888 } else {
889 x = *(u64 *) p;
890 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
891 i, (unsigned int)virt_to_phys(p),
892 (unsigned int)x, (unsigned int)(x >> 32),
893 p->des2, p->des3);
894 p++;
895 }
483 pr_info("\n"); 896 pr_info("\n");
484 } 897 }
485} 898}
486 899
900static void stmmac_display_rings(struct stmmac_priv *priv)
901{
902 unsigned int txsize = priv->dma_tx_size;
903 unsigned int rxsize = priv->dma_rx_size;
904
905 if (priv->extend_desc) {
906 pr_info("Extended RX descriptor ring:\n");
907 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
908 pr_info("Extended TX descriptor ring:\n");
909 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
910 } else {
911 pr_info("RX descriptor ring:\n");
912 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
913 pr_info("TX descriptor ring:\n");
914 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
915 }
916}
917
487static int stmmac_set_bfsize(int mtu, int bufsize) 918static int stmmac_set_bfsize(int mtu, int bufsize)
488{ 919{
489 int ret = bufsize; 920 int ret = bufsize;
@@ -501,6 +932,65 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
501} 932}
502 933
503/** 934/**
935 * stmmac_clear_descriptors: clear descriptors
936 * @priv: driver private structure
937 * Description: this function is called to clear the tx and rx descriptors
938 * in case of both basic and extended descriptors are used.
939 */
940static void stmmac_clear_descriptors(struct stmmac_priv *priv)
941{
942 int i;
943 unsigned int txsize = priv->dma_tx_size;
944 unsigned int rxsize = priv->dma_rx_size;
945
946 /* Clear the Rx/Tx descriptors */
947 for (i = 0; i < rxsize; i++)
948 if (priv->extend_desc)
949 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
950 priv->use_riwt, priv->mode,
951 (i == rxsize - 1));
952 else
953 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
954 priv->use_riwt, priv->mode,
955 (i == rxsize - 1));
956 for (i = 0; i < txsize; i++)
957 if (priv->extend_desc)
958 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
959 priv->mode,
960 (i == txsize - 1));
961 else
962 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
963 priv->mode,
964 (i == txsize - 1));
965}
966
967static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
968 int i)
969{
970 struct sk_buff *skb;
971
972 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
973 GFP_KERNEL);
974 if (unlikely(skb == NULL)) {
975 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
976 return 1;
977 }
978 skb_reserve(skb, NET_IP_ALIGN);
979 priv->rx_skbuff[i] = skb;
980 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
981 priv->dma_buf_sz,
982 DMA_FROM_DEVICE);
983
984 p->des2 = priv->rx_skbuff_dma[i];
985
986 if ((priv->mode == STMMAC_RING_MODE) &&
987 (priv->dma_buf_sz == BUF_SIZE_16KiB))
988 priv->hw->ring->init_desc3(p);
989
990 return 0;
991}
992
993/**
504 * init_dma_desc_rings - init the RX/TX descriptor rings 994 * init_dma_desc_rings - init the RX/TX descriptor rings
505 * @dev: net device structure 995 * @dev: net device structure
506 * Description: this function initializes the DMA RX/TX descriptors 996 * Description: this function initializes the DMA RX/TX descriptors
@@ -511,110 +1001,114 @@ static void init_dma_desc_rings(struct net_device *dev)
511{ 1001{
512 int i; 1002 int i;
513 struct stmmac_priv *priv = netdev_priv(dev); 1003 struct stmmac_priv *priv = netdev_priv(dev);
514 struct sk_buff *skb;
515 unsigned int txsize = priv->dma_tx_size; 1004 unsigned int txsize = priv->dma_tx_size;
516 unsigned int rxsize = priv->dma_rx_size; 1005 unsigned int rxsize = priv->dma_rx_size;
517 unsigned int bfsize; 1006 unsigned int bfsize = 0;
518 int dis_ic = 0;
519 int des3_as_data_buf = 0;
520 1007
521 /* Set the max buffer size according to the DESC mode 1008 /* Set the max buffer size according to the DESC mode
522 * and the MTU. Note that RING mode allows 16KiB bsize. */ 1009 * and the MTU. Note that RING mode allows 16KiB bsize.
523 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); 1010 */
1011 if (priv->mode == STMMAC_RING_MODE)
1012 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
524 1013
525 if (bfsize == BUF_SIZE_16KiB) 1014 if (bfsize < BUF_SIZE_16KiB)
526 des3_as_data_buf = 1;
527 else
528 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 1015 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
529 1016
530 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", 1017 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
531 txsize, rxsize, bfsize); 1018 txsize, rxsize, bfsize);
532 1019
533 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), 1020 if (priv->extend_desc) {
534 GFP_KERNEL); 1021 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
535 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), 1022 sizeof(struct
536 GFP_KERNEL); 1023 dma_extended_desc),
537 priv->dma_rx = 1024 &priv->dma_rx_phy,
538 (struct dma_desc *)dma_alloc_coherent(priv->device, 1025 GFP_KERNEL);
539 rxsize * 1026 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
1027 sizeof(struct
1028 dma_extended_desc),
1029 &priv->dma_tx_phy,
1030 GFP_KERNEL);
1031 if ((!priv->dma_erx) || (!priv->dma_etx))
1032 return;
1033 } else {
1034 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
540 sizeof(struct dma_desc), 1035 sizeof(struct dma_desc),
541 &priv->dma_rx_phy, 1036 &priv->dma_rx_phy,
542 GFP_KERNEL); 1037 GFP_KERNEL);
543 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), 1038 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
544 GFP_KERNEL);
545 priv->dma_tx =
546 (struct dma_desc *)dma_alloc_coherent(priv->device,
547 txsize *
548 sizeof(struct dma_desc), 1039 sizeof(struct dma_desc),
549 &priv->dma_tx_phy, 1040 &priv->dma_tx_phy,
550 GFP_KERNEL); 1041 GFP_KERNEL);
551 1042 if ((!priv->dma_rx) || (!priv->dma_tx))
552 if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { 1043 return;
553 pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
554 return;
555 } 1044 }
556 1045
557 DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, " 1046 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
558 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", 1047 GFP_KERNEL);
559 dev->name, priv->dma_rx, priv->dma_tx, 1048 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
560 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); 1049 GFP_KERNEL);
1050 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1051 GFP_KERNEL);
1052 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1053 GFP_KERNEL);
1054 if (netif_msg_drv(priv))
1055 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1056 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
561 1057
562 /* RX INITIALIZATION */ 1058 /* RX INITIALIZATION */
563 DBG(probe, INFO, "stmmac: SKB addresses:\n" 1059 DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
564 "skb\t\tskb data\tdma data\n");
565
566 for (i = 0; i < rxsize; i++) { 1060 for (i = 0; i < rxsize; i++) {
567 struct dma_desc *p = priv->dma_rx + i; 1061 struct dma_desc *p;
1062 if (priv->extend_desc)
1063 p = &((priv->dma_erx + i)->basic);
1064 else
1065 p = priv->dma_rx + i;
568 1066
569 skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN, 1067 if (stmmac_init_rx_buffers(priv, p, i))
570 GFP_KERNEL);
571 if (unlikely(skb == NULL)) {
572 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
573 break; 1068 break;
574 }
575 skb_reserve(skb, NET_IP_ALIGN);
576 priv->rx_skbuff[i] = skb;
577 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
578 bfsize, DMA_FROM_DEVICE);
579
580 p->des2 = priv->rx_skbuff_dma[i];
581
582 priv->hw->ring->init_desc3(des3_as_data_buf, p);
583 1069
584 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], 1070 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
585 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); 1071 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
586 } 1072 }
587 priv->cur_rx = 0; 1073 priv->cur_rx = 0;
588 priv->dirty_rx = (unsigned int)(i - rxsize); 1074 priv->dirty_rx = (unsigned int)(i - rxsize);
589 priv->dma_buf_sz = bfsize; 1075 priv->dma_buf_sz = bfsize;
590 buf_sz = bfsize; 1076 buf_sz = bfsize;
591 1077
1078 /* Setup the chained descriptor addresses */
1079 if (priv->mode == STMMAC_CHAIN_MODE) {
1080 if (priv->extend_desc) {
1081 priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
1082 rxsize, 1);
1083 priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
1084 txsize, 1);
1085 } else {
1086 priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
1087 rxsize, 0);
1088 priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
1089 txsize, 0);
1090 }
1091 }
1092
592 /* TX INITIALIZATION */ 1093 /* TX INITIALIZATION */
593 for (i = 0; i < txsize; i++) { 1094 for (i = 0; i < txsize; i++) {
1095 struct dma_desc *p;
1096 if (priv->extend_desc)
1097 p = &((priv->dma_etx + i)->basic);
1098 else
1099 p = priv->dma_tx + i;
1100 p->des2 = 0;
1101 priv->tx_skbuff_dma[i] = 0;
594 priv->tx_skbuff[i] = NULL; 1102 priv->tx_skbuff[i] = NULL;
595 priv->dma_tx[i].des2 = 0;
596 } 1103 }
597 1104
598 /* In case of Chained mode this sets the des3 to the next
599 * element in the chain */
600 priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
601 priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
602
603 priv->dirty_tx = 0; 1105 priv->dirty_tx = 0;
604 priv->cur_tx = 0; 1106 priv->cur_tx = 0;
605 1107
606 if (priv->use_riwt) 1108 stmmac_clear_descriptors(priv);
607 dis_ic = 1;
608 /* Clear the Rx/Tx descriptors */
609 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
610 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
611 1109
612 if (netif_msg_hw(priv)) { 1110 if (netif_msg_hw(priv))
613 pr_info("RX descriptor ring:\n"); 1111 stmmac_display_rings(priv);
614 display_ring(priv->dma_rx, rxsize);
615 pr_info("TX descriptor ring:\n");
616 display_ring(priv->dma_tx, txsize);
617 }
618} 1112}
619 1113
620static void dma_free_rx_skbufs(struct stmmac_priv *priv) 1114static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -637,13 +1131,20 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
637 1131
638 for (i = 0; i < priv->dma_tx_size; i++) { 1132 for (i = 0; i < priv->dma_tx_size; i++) {
639 if (priv->tx_skbuff[i] != NULL) { 1133 if (priv->tx_skbuff[i] != NULL) {
640 struct dma_desc *p = priv->dma_tx + i; 1134 struct dma_desc *p;
641 if (p->des2) 1135 if (priv->extend_desc)
642 dma_unmap_single(priv->device, p->des2, 1136 p = &((priv->dma_etx + i)->basic);
1137 else
1138 p = priv->dma_tx + i;
1139
1140 if (priv->tx_skbuff_dma[i])
1141 dma_unmap_single(priv->device,
1142 priv->tx_skbuff_dma[i],
643 priv->hw->desc->get_tx_len(p), 1143 priv->hw->desc->get_tx_len(p),
644 DMA_TO_DEVICE); 1144 DMA_TO_DEVICE);
645 dev_kfree_skb_any(priv->tx_skbuff[i]); 1145 dev_kfree_skb_any(priv->tx_skbuff[i]);
646 priv->tx_skbuff[i] = NULL; 1146 priv->tx_skbuff[i] = NULL;
1147 priv->tx_skbuff_dma[i] = 0;
647 } 1148 }
648 } 1149 }
649} 1150}
@@ -654,29 +1155,38 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
654 dma_free_rx_skbufs(priv); 1155 dma_free_rx_skbufs(priv);
655 dma_free_tx_skbufs(priv); 1156 dma_free_tx_skbufs(priv);
656 1157
657 /* Free the region of consistent memory previously allocated for 1158 /* Free DMA regions of consistent memory previously allocated */
658 * the DMA */ 1159 if (!priv->extend_desc) {
659 dma_free_coherent(priv->device, 1160 dma_free_coherent(priv->device,
660 priv->dma_tx_size * sizeof(struct dma_desc), 1161 priv->dma_tx_size * sizeof(struct dma_desc),
661 priv->dma_tx, priv->dma_tx_phy); 1162 priv->dma_tx, priv->dma_tx_phy);
662 dma_free_coherent(priv->device, 1163 dma_free_coherent(priv->device,
663 priv->dma_rx_size * sizeof(struct dma_desc), 1164 priv->dma_rx_size * sizeof(struct dma_desc),
664 priv->dma_rx, priv->dma_rx_phy); 1165 priv->dma_rx, priv->dma_rx_phy);
1166 } else {
1167 dma_free_coherent(priv->device, priv->dma_tx_size *
1168 sizeof(struct dma_extended_desc),
1169 priv->dma_etx, priv->dma_tx_phy);
1170 dma_free_coherent(priv->device, priv->dma_rx_size *
1171 sizeof(struct dma_extended_desc),
1172 priv->dma_erx, priv->dma_rx_phy);
1173 }
665 kfree(priv->rx_skbuff_dma); 1174 kfree(priv->rx_skbuff_dma);
666 kfree(priv->rx_skbuff); 1175 kfree(priv->rx_skbuff);
1176 kfree(priv->tx_skbuff_dma);
667 kfree(priv->tx_skbuff); 1177 kfree(priv->tx_skbuff);
668} 1178}
669 1179
670/** 1180/**
671 * stmmac_dma_operation_mode - HW DMA operation mode 1181 * stmmac_dma_operation_mode - HW DMA operation mode
672 * @priv : pointer to the private device structure. 1182 * @priv: driver private structure
673 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 1183 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
674 * or Store-And-Forward capability. 1184 * or Store-And-Forward capability.
675 */ 1185 */
676static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 1186static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
677{ 1187{
678 if (likely(priv->plat->force_sf_dma_mode || 1188 if (likely(priv->plat->force_sf_dma_mode ||
679 ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) { 1189 ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
680 /* 1190 /*
681 * In case of GMAC, SF mode can be enabled 1191 * In case of GMAC, SF mode can be enabled
682 * to perform the TX COE in HW. This depends on: 1192 * to perform the TX COE in HW. This depends on:
@@ -684,8 +1194,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
684 * 2) There is no bugged Jumbo frame support 1194 * 2) There is no bugged Jumbo frame support
685 * that needs to not insert csum in the TDES. 1195 * that needs to not insert csum in the TDES.
686 */ 1196 */
687 priv->hw->dma->dma_mode(priv->ioaddr, 1197 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
688 SF_DMA_MODE, SF_DMA_MODE);
689 tc = SF_DMA_MODE; 1198 tc = SF_DMA_MODE;
690 } else 1199 } else
691 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1200 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
@@ -693,7 +1202,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
693 1202
694/** 1203/**
695 * stmmac_tx_clean: 1204 * stmmac_tx_clean:
696 * @priv: private data pointer 1205 * @priv: driver private structure
697 * Description: it reclaims resources after transmission completes. 1206 * Description: it reclaims resources after transmission completes.
698 */ 1207 */
699static void stmmac_tx_clean(struct stmmac_priv *priv) 1208static void stmmac_tx_clean(struct stmmac_priv *priv)
@@ -708,40 +1217,50 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
708 int last; 1217 int last;
709 unsigned int entry = priv->dirty_tx % txsize; 1218 unsigned int entry = priv->dirty_tx % txsize;
710 struct sk_buff *skb = priv->tx_skbuff[entry]; 1219 struct sk_buff *skb = priv->tx_skbuff[entry];
711 struct dma_desc *p = priv->dma_tx + entry; 1220 struct dma_desc *p;
1221
1222 if (priv->extend_desc)
1223 p = (struct dma_desc *)(priv->dma_etx + entry);
1224 else
1225 p = priv->dma_tx + entry;
712 1226
713 /* Check if the descriptor is owned by the DMA. */ 1227 /* Check if the descriptor is owned by the DMA. */
714 if (priv->hw->desc->get_tx_owner(p)) 1228 if (priv->hw->desc->get_tx_owner(p))
715 break; 1229 break;
716 1230
717 /* Verify tx error by looking at the last segment */ 1231 /* Verify tx error by looking at the last segment. */
718 last = priv->hw->desc->get_tx_ls(p); 1232 last = priv->hw->desc->get_tx_ls(p);
719 if (likely(last)) { 1233 if (likely(last)) {
720 int tx_error = 1234 int tx_error =
721 priv->hw->desc->tx_status(&priv->dev->stats, 1235 priv->hw->desc->tx_status(&priv->dev->stats,
722 &priv->xstats, p, 1236 &priv->xstats, p,
723 priv->ioaddr); 1237 priv->ioaddr);
724 if (likely(tx_error == 0)) { 1238 if (likely(tx_error == 0)) {
725 priv->dev->stats.tx_packets++; 1239 priv->dev->stats.tx_packets++;
726 priv->xstats.tx_pkt_n++; 1240 priv->xstats.tx_pkt_n++;
727 } else 1241 } else
728 priv->dev->stats.tx_errors++; 1242 priv->dev->stats.tx_errors++;
1243
1244 stmmac_get_tx_hwtstamp(priv, entry, skb);
729 } 1245 }
730 TX_DBG("%s: curr %d, dirty %d\n", __func__, 1246 TX_DBG("%s: curr %d, dirty %d\n", __func__,
731 priv->cur_tx, priv->dirty_tx); 1247 priv->cur_tx, priv->dirty_tx);
732 1248
733 if (likely(p->des2)) 1249 if (likely(priv->tx_skbuff_dma[entry])) {
734 dma_unmap_single(priv->device, p->des2, 1250 dma_unmap_single(priv->device,
1251 priv->tx_skbuff_dma[entry],
735 priv->hw->desc->get_tx_len(p), 1252 priv->hw->desc->get_tx_len(p),
736 DMA_TO_DEVICE); 1253 DMA_TO_DEVICE);
737 priv->hw->ring->clean_desc3(p); 1254 priv->tx_skbuff_dma[entry] = 0;
1255 }
1256 priv->hw->ring->clean_desc3(priv, p);
738 1257
739 if (likely(skb != NULL)) { 1258 if (likely(skb != NULL)) {
740 dev_kfree_skb(skb); 1259 dev_kfree_skb(skb);
741 priv->tx_skbuff[entry] = NULL; 1260 priv->tx_skbuff[entry] = NULL;
742 } 1261 }
743 1262
744 priv->hw->desc->release_tx_desc(p); 1263 priv->hw->desc->release_tx_desc(p, priv->mode);
745 1264
746 priv->dirty_tx++; 1265 priv->dirty_tx++;
747 } 1266 }
@@ -749,7 +1268,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
749 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { 1268 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
750 netif_tx_lock(priv->dev); 1269 netif_tx_lock(priv->dev);
751 if (netif_queue_stopped(priv->dev) && 1270 if (netif_queue_stopped(priv->dev) &&
752 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { 1271 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
753 TX_DBG("%s: restart transmit\n", __func__); 1272 TX_DBG("%s: restart transmit\n", __func__);
754 netif_wake_queue(priv->dev); 1273 netif_wake_queue(priv->dev);
755 } 1274 }
@@ -773,20 +1292,29 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
773 priv->hw->dma->disable_dma_irq(priv->ioaddr); 1292 priv->hw->dma->disable_dma_irq(priv->ioaddr);
774} 1293}
775 1294
776
777/** 1295/**
778 * stmmac_tx_err: 1296 * stmmac_tx_err: irq tx error mng function
779 * @priv: pointer to the private device structure 1297 * @priv: driver private structure
780 * Description: it cleans the descriptors and restarts the transmission 1298 * Description: it cleans the descriptors and restarts the transmission
781 * in case of errors. 1299 * in case of errors.
782 */ 1300 */
783static void stmmac_tx_err(struct stmmac_priv *priv) 1301static void stmmac_tx_err(struct stmmac_priv *priv)
784{ 1302{
1303 int i;
1304 int txsize = priv->dma_tx_size;
785 netif_stop_queue(priv->dev); 1305 netif_stop_queue(priv->dev);
786 1306
787 priv->hw->dma->stop_tx(priv->ioaddr); 1307 priv->hw->dma->stop_tx(priv->ioaddr);
788 dma_free_tx_skbufs(priv); 1308 dma_free_tx_skbufs(priv);
789 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1309 for (i = 0; i < txsize; i++)
1310 if (priv->extend_desc)
1311 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1312 priv->mode,
1313 (i == txsize - 1));
1314 else
1315 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1316 priv->mode,
1317 (i == txsize - 1));
790 priv->dirty_tx = 0; 1318 priv->dirty_tx = 0;
791 priv->cur_tx = 0; 1319 priv->cur_tx = 0;
792 priv->hw->dma->start_tx(priv->ioaddr); 1320 priv->hw->dma->start_tx(priv->ioaddr);
@@ -795,6 +1323,14 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
795 netif_wake_queue(priv->dev); 1323 netif_wake_queue(priv->dev);
796} 1324}
797 1325
1326/**
1327 * stmmac_dma_interrupt: DMA ISR
1328 * @priv: driver private structure
1329 * Description: this is the DMA ISR. It is called by the main ISR.
1330 * It calls the dwmac dma routine to understand which type of interrupt
1331 * happened. In case of there is a Normal interrupt and either TX or RX
1332 * interrupt happened so the NAPI is scheduled.
1333 */
798static void stmmac_dma_interrupt(struct stmmac_priv *priv) 1334static void stmmac_dma_interrupt(struct stmmac_priv *priv)
799{ 1335{
800 int status; 1336 int status;
@@ -817,13 +1353,16 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
817 stmmac_tx_err(priv); 1353 stmmac_tx_err(priv);
818} 1354}
819 1355
1356/**
1357 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1358 * @priv: driver private structure
1359 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1360 */
820static void stmmac_mmc_setup(struct stmmac_priv *priv) 1361static void stmmac_mmc_setup(struct stmmac_priv *priv)
821{ 1362{
822 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1363 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
823 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1364 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
824 1365
825 /* Mask MMC irq, counters are managed in SW and registers
826 * are cleared on each READ eventually. */
827 dwmac_mmc_intr_all_mask(priv->ioaddr); 1366 dwmac_mmc_intr_all_mask(priv->ioaddr);
828 1367
829 if (priv->dma_cap.rmon) { 1368 if (priv->dma_cap.rmon) {
@@ -837,8 +1376,7 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
837{ 1376{
838 u32 hwid = priv->hw->synopsys_uid; 1377 u32 hwid = priv->hw->synopsys_uid;
839 1378
840 /* Only check valid Synopsys Id because old MAC chips 1379 /* Check Synopsys Id (not available on old chips) */
841 * have no HW registers where get the ID */
842 if (likely(hwid)) { 1380 if (likely(hwid)) {
843 u32 uid = ((hwid & 0x0000ff00) >> 8); 1381 u32 uid = ((hwid & 0x0000ff00) >> 8);
844 u32 synid = (hwid & 0x000000ff); 1382 u32 synid = (hwid & 0x000000ff);
@@ -852,14 +1390,24 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
852} 1390}
853 1391
854/** 1392/**
855 * stmmac_selec_desc_mode 1393 * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
856 * @priv : private structure 1394 * @priv: driver private structure
857 * Description: select the Enhanced/Alternate or Normal descriptors 1395 * Description: select the Enhanced/Alternate or Normal descriptors.
1396 * In case of Enhanced/Alternate, it looks at the extended descriptors are
1397 * supported by the HW cap. register.
858 */ 1398 */
859static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 1399static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
860{ 1400{
861 if (priv->plat->enh_desc) { 1401 if (priv->plat->enh_desc) {
862 pr_info(" Enhanced/Alternate descriptors\n"); 1402 pr_info(" Enhanced/Alternate descriptors\n");
1403
1404 /* GMAC older than 3.50 has no extended descriptors */
1405 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1406 pr_info("\tEnabled extended descriptors\n");
1407 priv->extend_desc = 1;
1408 } else
1409 pr_warn("Extended descriptors not supported\n");
1410
863 priv->hw->desc = &enh_desc_ops; 1411 priv->hw->desc = &enh_desc_ops;
864 } else { 1412 } else {
865 pr_info(" Normal descriptors\n"); 1413 pr_info(" Normal descriptors\n");
@@ -868,8 +1416,8 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
868} 1416}
869 1417
870/** 1418/**
871 * stmmac_get_hw_features 1419 * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
872 * @priv : private device pointer 1420 * @priv: driver private structure
873 * Description: 1421 * Description:
874 * new GMAC chip generations have a new register to indicate the 1422 * new GMAC chip generations have a new register to indicate the
875 * presence of the optional feature/functions. 1423 * presence of the optional feature/functions.
@@ -887,69 +1435,78 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
887 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; 1435 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
888 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; 1436 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
889 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; 1437 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
890 priv->dma_cap.multi_addr = 1438 priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
891 (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
892 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; 1439 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
893 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; 1440 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
894 priv->dma_cap.pmt_remote_wake_up = 1441 priv->dma_cap.pmt_remote_wake_up =
895 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; 1442 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
896 priv->dma_cap.pmt_magic_frame = 1443 priv->dma_cap.pmt_magic_frame =
897 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; 1444 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
898 /* MMC */ 1445 /* MMC */
899 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; 1446 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
900 /* IEEE 1588-2002*/ 1447 /* IEEE 1588-2002 */
901 priv->dma_cap.time_stamp = 1448 priv->dma_cap.time_stamp =
902 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; 1449 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
903 /* IEEE 1588-2008*/ 1450 /* IEEE 1588-2008 */
904 priv->dma_cap.atime_stamp = 1451 priv->dma_cap.atime_stamp =
905 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; 1452 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
906 /* 802.3az - Energy-Efficient Ethernet (EEE) */ 1453 /* 802.3az - Energy-Efficient Ethernet (EEE) */
907 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; 1454 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
908 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; 1455 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
909 /* TX and RX csum */ 1456 /* TX and RX csum */
910 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; 1457 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
911 priv->dma_cap.rx_coe_type1 = 1458 priv->dma_cap.rx_coe_type1 =
912 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; 1459 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
913 priv->dma_cap.rx_coe_type2 = 1460 priv->dma_cap.rx_coe_type2 =
914 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; 1461 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
915 priv->dma_cap.rxfifo_over_2048 = 1462 priv->dma_cap.rxfifo_over_2048 =
916 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; 1463 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
917 /* TX and RX number of channels */ 1464 /* TX and RX number of channels */
918 priv->dma_cap.number_rx_channel = 1465 priv->dma_cap.number_rx_channel =
919 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; 1466 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
920 priv->dma_cap.number_tx_channel = 1467 priv->dma_cap.number_tx_channel =
921 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; 1468 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
922 /* Alternate (enhanced) DESC mode*/ 1469 /* Alternate (enhanced) DESC mode */
923 priv->dma_cap.enh_desc = 1470 priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
924 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
925 } 1471 }
926 1472
927 return hw_cap; 1473 return hw_cap;
928} 1474}
929 1475
1476/**
1477 * stmmac_check_ether_addr: check if the MAC addr is valid
1478 * @priv: driver private structure
1479 * Description:
1480 * it is to verify if the MAC address is valid, in case of failures it
1481 * generates a random MAC address
1482 */
930static void stmmac_check_ether_addr(struct stmmac_priv *priv) 1483static void stmmac_check_ether_addr(struct stmmac_priv *priv)
931{ 1484{
932 /* verify if the MAC address is valid, in case of failures it
933 * generates a random MAC address */
934 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 1485 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
935 priv->hw->mac->get_umac_addr((void __iomem *) 1486 priv->hw->mac->get_umac_addr((void __iomem *)
936 priv->dev->base_addr, 1487 priv->dev->base_addr,
937 priv->dev->dev_addr, 0); 1488 priv->dev->dev_addr, 0);
938 if (!is_valid_ether_addr(priv->dev->dev_addr)) 1489 if (!is_valid_ether_addr(priv->dev->dev_addr))
939 eth_hw_addr_random(priv->dev); 1490 eth_hw_addr_random(priv->dev);
940 } 1491 }
941 pr_warning("%s: device MAC address %pM\n", priv->dev->name, 1492 pr_warn("%s: device MAC address %pM\n", priv->dev->name,
942 priv->dev->dev_addr); 1493 priv->dev->dev_addr);
943} 1494}
944 1495
1496/**
1497 * stmmac_init_dma_engine: DMA init.
1498 * @priv: driver private structure
1499 * Description:
1500 * It inits the DMA invoking the specific MAC/GMAC callback.
1501 * Some DMA parameters can be passed from the platform;
1502 * in case of these are not passed a default is kept for the MAC or GMAC.
1503 */
945static int stmmac_init_dma_engine(struct stmmac_priv *priv) 1504static int stmmac_init_dma_engine(struct stmmac_priv *priv)
946{ 1505{
947 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; 1506 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
948 int mixed_burst = 0; 1507 int mixed_burst = 0;
1508 int atds = 0;
949 1509
950 /* Some DMA parameters can be passed from the platform;
951 * in case of these are not passed we keep a default
952 * (good for all the chips) and init the DMA! */
953 if (priv->plat->dma_cfg) { 1510 if (priv->plat->dma_cfg) {
954 pbl = priv->plat->dma_cfg->pbl; 1511 pbl = priv->plat->dma_cfg->pbl;
955 fixed_burst = priv->plat->dma_cfg->fixed_burst; 1512 fixed_burst = priv->plat->dma_cfg->fixed_burst;
@@ -957,13 +1514,16 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
957 burst_len = priv->plat->dma_cfg->burst_len; 1514 burst_len = priv->plat->dma_cfg->burst_len;
958 } 1515 }
959 1516
1517 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1518 atds = 1;
1519
960 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, 1520 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
961 burst_len, priv->dma_tx_phy, 1521 burst_len, priv->dma_tx_phy,
962 priv->dma_rx_phy); 1522 priv->dma_rx_phy, atds);
963} 1523}
964 1524
965/** 1525/**
966 * stmmac_tx_timer: 1526 * stmmac_tx_timer: mitigation sw timer for tx.
967 * @data: data pointer 1527 * @data: data pointer
968 * Description: 1528 * Description:
969 * This is the timer handler to directly invoke the stmmac_tx_clean. 1529 * This is the timer handler to directly invoke the stmmac_tx_clean.
@@ -976,8 +1536,8 @@ static void stmmac_tx_timer(unsigned long data)
976} 1536}
977 1537
978/** 1538/**
979 * stmmac_tx_timer: 1539 * stmmac_init_tx_coalesce: init tx mitigation options.
980 * @priv: private data structure 1540 * @priv: driver private structure
981 * Description: 1541 * Description:
982 * This inits the transmit coalesce parameters: i.e. timer rate, 1542 * This inits the transmit coalesce parameters: i.e. timer rate,
983 * timer handler and default threshold used for enabling the 1543 * timer handler and default threshold used for enabling the
@@ -1012,10 +1572,14 @@ static int stmmac_open(struct net_device *dev)
1012 1572
1013 stmmac_check_ether_addr(priv); 1573 stmmac_check_ether_addr(priv);
1014 1574
1015 ret = stmmac_init_phy(dev); 1575 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1016 if (unlikely(ret)) { 1576 priv->pcs != STMMAC_PCS_RTBI) {
1017 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 1577 ret = stmmac_init_phy(dev);
1018 goto open_error; 1578 if (ret) {
1579 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1580 __func__, ret);
1581 goto open_error;
1582 }
1019 } 1583 }
1020 1584
1021 /* Create and initialize the TX/RX descriptors chains. */ 1585 /* Create and initialize the TX/RX descriptors chains. */
@@ -1043,7 +1607,7 @@ static int stmmac_open(struct net_device *dev)
1043 1607
1044 /* Request the IRQ lines */ 1608 /* Request the IRQ lines */
1045 ret = request_irq(dev->irq, stmmac_interrupt, 1609 ret = request_irq(dev->irq, stmmac_interrupt,
1046 IRQF_SHARED, dev->name, dev); 1610 IRQF_SHARED, dev->name, dev);
1047 if (unlikely(ret < 0)) { 1611 if (unlikely(ret < 0)) {
1048 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", 1612 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1049 __func__, dev->irq, ret); 1613 __func__, dev->irq, ret);
@@ -1055,8 +1619,8 @@ static int stmmac_open(struct net_device *dev)
1055 ret = request_irq(priv->wol_irq, stmmac_interrupt, 1619 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1056 IRQF_SHARED, dev->name, dev); 1620 IRQF_SHARED, dev->name, dev);
1057 if (unlikely(ret < 0)) { 1621 if (unlikely(ret < 0)) {
1058 pr_err("%s: ERROR: allocating the ext WoL IRQ %d " 1622 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1059 "(error: %d)\n", __func__, priv->wol_irq, ret); 1623 __func__, priv->wol_irq, ret);
1060 goto open_error_wolirq; 1624 goto open_error_wolirq;
1061 } 1625 }
1062 } 1626 }
@@ -1084,10 +1648,14 @@ static int stmmac_open(struct net_device *dev)
1084 1648
1085 stmmac_mmc_setup(priv); 1649 stmmac_mmc_setup(priv);
1086 1650
1651 ret = stmmac_init_ptp(priv);
1652 if (ret)
1653 pr_warn("%s: failed PTP initialisation\n", __func__);
1654
1087#ifdef CONFIG_STMMAC_DEBUG_FS 1655#ifdef CONFIG_STMMAC_DEBUG_FS
1088 ret = stmmac_init_fs(dev); 1656 ret = stmmac_init_fs(dev);
1089 if (ret < 0) 1657 if (ret < 0)
1090 pr_warning("%s: failed debugFS registration\n", __func__); 1658 pr_warn("%s: failed debugFS registration\n", __func__);
1091#endif 1659#endif
1092 /* Start the ball rolling... */ 1660 /* Start the ball rolling... */
1093 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 1661 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1104,7 +1672,13 @@ static int stmmac_open(struct net_device *dev)
1104 phy_start(priv->phydev); 1672 phy_start(priv->phydev);
1105 1673
1106 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1674 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
1107 priv->eee_enabled = stmmac_eee_init(priv); 1675
1676 /* Using PCS we cannot dial with the phy registers at this stage
1677 * so we do not support extra feature like EEE.
1678 */
1679 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1680 priv->pcs != STMMAC_PCS_RTBI)
1681 priv->eee_enabled = stmmac_eee_init(priv);
1108 1682
1109 stmmac_init_tx_coalesce(priv); 1683 stmmac_init_tx_coalesce(priv);
1110 1684
@@ -1113,6 +1687,9 @@ static int stmmac_open(struct net_device *dev)
1113 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 1687 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1114 } 1688 }
1115 1689
1690 if (priv->pcs && priv->hw->mac->ctrl_ane)
1691 priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
1692
1116 napi_enable(&priv->napi); 1693 napi_enable(&priv->napi);
1117 netif_start_queue(dev); 1694 netif_start_queue(dev);
1118 1695
@@ -1184,21 +1761,25 @@ static int stmmac_release(struct net_device *dev)
1184#endif 1761#endif
1185 clk_disable_unprepare(priv->stmmac_clk); 1762 clk_disable_unprepare(priv->stmmac_clk);
1186 1763
1764 stmmac_release_ptp(priv);
1765
1187 return 0; 1766 return 0;
1188} 1767}
1189 1768
1190/** 1769/**
1191 * stmmac_xmit: 1770 * stmmac_xmit: Tx entry point of the driver
1192 * @skb : the socket buffer 1771 * @skb : the socket buffer
1193 * @dev : device pointer 1772 * @dev : device pointer
1194 * Description : Tx entry point of the driver. 1773 * Description : this is the tx entry point of the driver.
1774 * It programs the chain or the ring and supports oversized frames
1775 * and SG feature.
1195 */ 1776 */
1196static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 1777static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1197{ 1778{
1198 struct stmmac_priv *priv = netdev_priv(dev); 1779 struct stmmac_priv *priv = netdev_priv(dev);
1199 unsigned int txsize = priv->dma_tx_size; 1780 unsigned int txsize = priv->dma_tx_size;
1200 unsigned int entry; 1781 unsigned int entry;
1201 int i, csum_insertion = 0; 1782 int i, csum_insertion = 0, is_jumbo = 0;
1202 int nfrags = skb_shinfo(skb)->nr_frags; 1783 int nfrags = skb_shinfo(skb)->nr_frags;
1203 struct dma_desc *desc, *first; 1784 struct dma_desc *desc, *first;
1204 unsigned int nopaged_len = skb_headlen(skb); 1785 unsigned int nopaged_len = skb_headlen(skb);
@@ -1207,8 +1788,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1207 if (!netif_queue_stopped(dev)) { 1788 if (!netif_queue_stopped(dev)) {
1208 netif_stop_queue(dev); 1789 netif_stop_queue(dev);
1209 /* This is a hard error, log it. */ 1790 /* This is a hard error, log it. */
1210 pr_err("%s: BUG! Tx Ring full when queue awake\n", 1791 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1211 __func__);
1212 } 1792 }
1213 return NETDEV_TX_BUSY; 1793 return NETDEV_TX_BUSY;
1214 } 1794 }
@@ -1222,10 +1802,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1222 1802
1223#ifdef STMMAC_XMIT_DEBUG 1803#ifdef STMMAC_XMIT_DEBUG
1224 if ((skb->len > ETH_FRAME_LEN) || nfrags) 1804 if ((skb->len > ETH_FRAME_LEN) || nfrags)
1225 pr_debug("stmmac xmit: [entry %d]\n" 1805 pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
1226 "\tskb addr %p - len: %d - nopaged_len: %d\n"
1227 "\tn_frags: %d - ip_summed: %d - %s gso\n" 1806 "\tn_frags: %d - ip_summed: %d - %s gso\n"
1228 "\ttx_count_frames %d\n", entry, 1807 "\ttx_count_frames %d\n", __func__, entry,
1229 skb, skb->len, nopaged_len, nfrags, skb->ip_summed, 1808 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
1230 !skb_is_gso(skb) ? "isn't" : "is", 1809 !skb_is_gso(skb) ? "isn't" : "is",
1231 priv->tx_count_frames); 1810 priv->tx_count_frames);
@@ -1233,7 +1812,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1233 1812
1234 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 1813 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1235 1814
1236 desc = priv->dma_tx + entry; 1815 if (priv->extend_desc)
1816 desc = (struct dma_desc *)(priv->dma_etx + entry);
1817 else
1818 desc = priv->dma_tx + entry;
1819
1237 first = desc; 1820 first = desc;
1238 1821
1239#ifdef STMMAC_XMIT_DEBUG 1822#ifdef STMMAC_XMIT_DEBUG
@@ -1244,28 +1827,46 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1244#endif 1827#endif
1245 priv->tx_skbuff[entry] = skb; 1828 priv->tx_skbuff[entry] = skb;
1246 1829
1247 if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) { 1830 /* To program the descriptors according to the size of the frame */
1248 entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion); 1831 if (priv->mode == STMMAC_RING_MODE) {
1249 desc = priv->dma_tx + entry; 1832 is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
1833 priv->plat->enh_desc);
1834 if (unlikely(is_jumbo))
1835 entry = priv->hw->ring->jumbo_frm(priv, skb,
1836 csum_insertion);
1250 } else { 1837 } else {
1838 is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
1839 priv->plat->enh_desc);
1840 if (unlikely(is_jumbo))
1841 entry = priv->hw->chain->jumbo_frm(priv, skb,
1842 csum_insertion);
1843 }
1844 if (likely(!is_jumbo)) {
1251 desc->des2 = dma_map_single(priv->device, skb->data, 1845 desc->des2 = dma_map_single(priv->device, skb->data,
1252 nopaged_len, DMA_TO_DEVICE); 1846 nopaged_len, DMA_TO_DEVICE);
1847 priv->tx_skbuff_dma[entry] = desc->des2;
1253 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, 1848 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1254 csum_insertion); 1849 csum_insertion, priv->mode);
1255 } 1850 } else
1851 desc = first;
1256 1852
1257 for (i = 0; i < nfrags; i++) { 1853 for (i = 0; i < nfrags; i++) {
1258 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1854 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1259 int len = skb_frag_size(frag); 1855 int len = skb_frag_size(frag);
1260 1856
1261 entry = (++priv->cur_tx) % txsize; 1857 entry = (++priv->cur_tx) % txsize;
1262 desc = priv->dma_tx + entry; 1858 if (priv->extend_desc)
1859 desc = (struct dma_desc *)(priv->dma_etx + entry);
1860 else
1861 desc = priv->dma_tx + entry;
1263 1862
1264 TX_DBG("\t[entry %d] segment len: %d\n", entry, len); 1863 TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
1265 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 1864 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1266 DMA_TO_DEVICE); 1865 DMA_TO_DEVICE);
1866 priv->tx_skbuff_dma[entry] = desc->des2;
1267 priv->tx_skbuff[entry] = NULL; 1867 priv->tx_skbuff[entry] = NULL;
1268 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); 1868 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1869 priv->mode);
1269 wmb(); 1870 wmb();
1270 priv->hw->desc->set_tx_owner(desc); 1871 priv->hw->desc->set_tx_owner(desc);
1271 wmb(); 1872 wmb();
@@ -1298,11 +1899,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1298 1899
1299#ifdef STMMAC_XMIT_DEBUG 1900#ifdef STMMAC_XMIT_DEBUG
1300 if (netif_msg_pktdata(priv)) { 1901 if (netif_msg_pktdata(priv)) {
1301 pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, " 1902 pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
1302 "first=%p, nfrags=%d\n", 1903 __func__, (priv->cur_tx % txsize),
1303 (priv->cur_tx % txsize), (priv->dirty_tx % txsize), 1904 (priv->dirty_tx % txsize), entry, first, nfrags);
1304 entry, first, nfrags); 1905 if (priv->extend_desc)
1305 display_ring(priv->dma_tx, txsize); 1906 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1907 else
1908 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1909
1306 pr_info(">>> frame to be transmitted: "); 1910 pr_info(">>> frame to be transmitted: ");
1307 print_pkt(skb->data, skb->len); 1911 print_pkt(skb->data, skb->len);
1308 } 1912 }
@@ -1314,7 +1918,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1314 1918
1315 dev->stats.tx_bytes += skb->len; 1919 dev->stats.tx_bytes += skb->len;
1316 1920
1317 skb_tx_timestamp(skb); 1921 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1922 priv->hwts_tx_en)) {
1923 /* declare that device is doing timestamping */
1924 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1925 priv->hw->desc->enable_tx_timestamp(first);
1926 }
1927
1928 if (!priv->hwts_tx_en)
1929 skb_tx_timestamp(skb);
1318 1930
1319 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 1931 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1320 1932
@@ -1323,14 +1935,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1323 return NETDEV_TX_OK; 1935 return NETDEV_TX_OK;
1324} 1936}
1325 1937
1938/**
1939 * stmmac_rx_refill: refill used skb preallocated buffers
1940 * @priv: driver private structure
1941 * Description : this is to reallocate the skb for the reception process
1942 * that is based on zero-copy.
1943 */
1326static inline void stmmac_rx_refill(struct stmmac_priv *priv) 1944static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1327{ 1945{
1328 unsigned int rxsize = priv->dma_rx_size; 1946 unsigned int rxsize = priv->dma_rx_size;
1329 int bfsize = priv->dma_buf_sz; 1947 int bfsize = priv->dma_buf_sz;
1330 struct dma_desc *p = priv->dma_rx;
1331 1948
1332 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { 1949 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
1333 unsigned int entry = priv->dirty_rx % rxsize; 1950 unsigned int entry = priv->dirty_rx % rxsize;
1951 struct dma_desc *p;
1952
1953 if (priv->extend_desc)
1954 p = (struct dma_desc *)(priv->dma_erx + entry);
1955 else
1956 p = priv->dma_rx + entry;
1957
1334 if (likely(priv->rx_skbuff[entry] == NULL)) { 1958 if (likely(priv->rx_skbuff[entry] == NULL)) {
1335 struct sk_buff *skb; 1959 struct sk_buff *skb;
1336 1960
@@ -1344,80 +1968,116 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1344 dma_map_single(priv->device, skb->data, bfsize, 1968 dma_map_single(priv->device, skb->data, bfsize,
1345 DMA_FROM_DEVICE); 1969 DMA_FROM_DEVICE);
1346 1970
1347 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1971 p->des2 = priv->rx_skbuff_dma[entry];
1348 1972
1349 if (unlikely(priv->plat->has_gmac)) 1973 priv->hw->ring->refill_desc3(priv, p);
1350 priv->hw->ring->refill_desc3(bfsize, p + entry);
1351 1974
1352 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1975 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1353 } 1976 }
1354 wmb(); 1977 wmb();
1355 priv->hw->desc->set_rx_owner(p + entry); 1978 priv->hw->desc->set_rx_owner(p);
1356 wmb(); 1979 wmb();
1357 } 1980 }
1358} 1981}
1359 1982
1983/**
1984 * stmmac_rx_refill: refill used skb preallocated buffers
1985 * @priv: driver private structure
1986 * @limit: napi bugget.
1987 * Description : this the function called by the napi poll method.
1988 * It gets all the frames inside the ring.
1989 */
1360static int stmmac_rx(struct stmmac_priv *priv, int limit) 1990static int stmmac_rx(struct stmmac_priv *priv, int limit)
1361{ 1991{
1362 unsigned int rxsize = priv->dma_rx_size; 1992 unsigned int rxsize = priv->dma_rx_size;
1363 unsigned int entry = priv->cur_rx % rxsize; 1993 unsigned int entry = priv->cur_rx % rxsize;
1364 unsigned int next_entry; 1994 unsigned int next_entry;
1365 unsigned int count = 0; 1995 unsigned int count = 0;
1366 struct dma_desc *p = priv->dma_rx + entry; 1996 int coe = priv->plat->rx_coe;
1367 struct dma_desc *p_next;
1368 1997
1369#ifdef STMMAC_RX_DEBUG 1998#ifdef STMMAC_RX_DEBUG
1370 if (netif_msg_hw(priv)) { 1999 if (netif_msg_hw(priv)) {
1371 pr_debug(">>> stmmac_rx: descriptor ring:\n"); 2000 pr_debug(">>> stmmac_rx: descriptor ring:\n");
1372 display_ring(priv->dma_rx, rxsize); 2001 if (priv->extend_desc)
2002 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
2003 else
2004 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
1373 } 2005 }
1374#endif 2006#endif
1375 while (!priv->hw->desc->get_rx_owner(p)) { 2007 while (count < limit) {
1376 int status; 2008 int status;
2009 struct dma_desc *p;
1377 2010
1378 if (count >= limit) 2011 if (priv->extend_desc)
2012 p = (struct dma_desc *)(priv->dma_erx + entry);
2013 else
2014 p = priv->dma_rx + entry;
2015
2016 if (priv->hw->desc->get_rx_owner(p))
1379 break; 2017 break;
1380 2018
1381 count++; 2019 count++;
1382 2020
1383 next_entry = (++priv->cur_rx) % rxsize; 2021 next_entry = (++priv->cur_rx) % rxsize;
1384 p_next = priv->dma_rx + next_entry; 2022 if (priv->extend_desc)
1385 prefetch(p_next); 2023 prefetch(priv->dma_erx + next_entry);
2024 else
2025 prefetch(priv->dma_rx + next_entry);
1386 2026
1387 /* read the status of the incoming frame */ 2027 /* read the status of the incoming frame */
1388 status = (priv->hw->desc->rx_status(&priv->dev->stats, 2028 status = priv->hw->desc->rx_status(&priv->dev->stats,
1389 &priv->xstats, p)); 2029 &priv->xstats, p);
1390 if (unlikely(status == discard_frame)) 2030 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2031 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2032 &priv->xstats,
2033 priv->dma_erx +
2034 entry);
2035 if (unlikely(status == discard_frame)) {
1391 priv->dev->stats.rx_errors++; 2036 priv->dev->stats.rx_errors++;
1392 else { 2037 if (priv->hwts_rx_en && !priv->extend_desc) {
2038 /* DESC2 & DESC3 will be overwitten by device
2039 * with timestamp value, hence reinitialize
2040 * them in stmmac_rx_refill() function so that
2041 * device can reuse it.
2042 */
2043 priv->rx_skbuff[entry] = NULL;
2044 dma_unmap_single(priv->device,
2045 priv->rx_skbuff_dma[entry],
2046 priv->dma_buf_sz,
2047 DMA_FROM_DEVICE);
2048 }
2049 } else {
1393 struct sk_buff *skb; 2050 struct sk_buff *skb;
1394 int frame_len; 2051 int frame_len;
1395 2052
1396 frame_len = priv->hw->desc->get_rx_frame_len(p, 2053 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
1397 priv->plat->rx_coe); 2054
1398 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 2055 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1399 * Type frames (LLC/LLC-SNAP) */ 2056 * Type frames (LLC/LLC-SNAP)
2057 */
1400 if (unlikely(status != llc_snap)) 2058 if (unlikely(status != llc_snap))
1401 frame_len -= ETH_FCS_LEN; 2059 frame_len -= ETH_FCS_LEN;
1402#ifdef STMMAC_RX_DEBUG 2060#ifdef STMMAC_RX_DEBUG
1403 if (frame_len > ETH_FRAME_LEN) 2061 if (frame_len > ETH_FRAME_LEN)
1404 pr_debug("\tRX frame size %d, COE status: %d\n", 2062 pr_debug("\tRX frame size %d, COE status: %d\n",
1405 frame_len, status); 2063 frame_len, status);
1406 2064
1407 if (netif_msg_hw(priv)) 2065 if (netif_msg_hw(priv))
1408 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2066 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
1409 p, entry, p->des2); 2067 p, entry, p->des2);
1410#endif 2068#endif
1411 skb = priv->rx_skbuff[entry]; 2069 skb = priv->rx_skbuff[entry];
1412 if (unlikely(!skb)) { 2070 if (unlikely(!skb)) {
1413 pr_err("%s: Inconsistent Rx descriptor chain\n", 2071 pr_err("%s: Inconsistent Rx descriptor chain\n",
1414 priv->dev->name); 2072 priv->dev->name);
1415 priv->dev->stats.rx_dropped++; 2073 priv->dev->stats.rx_dropped++;
1416 break; 2074 break;
1417 } 2075 }
1418 prefetch(skb->data - NET_IP_ALIGN); 2076 prefetch(skb->data - NET_IP_ALIGN);
1419 priv->rx_skbuff[entry] = NULL; 2077 priv->rx_skbuff[entry] = NULL;
1420 2078
2079 stmmac_get_rx_hwtstamp(priv, entry, skb);
2080
1421 skb_put(skb, frame_len); 2081 skb_put(skb, frame_len);
1422 dma_unmap_single(priv->device, 2082 dma_unmap_single(priv->device,
1423 priv->rx_skbuff_dma[entry], 2083 priv->rx_skbuff_dma[entry],
@@ -1430,7 +2090,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1430#endif 2090#endif
1431 skb->protocol = eth_type_trans(skb, priv->dev); 2091 skb->protocol = eth_type_trans(skb, priv->dev);
1432 2092
1433 if (unlikely(!priv->plat->rx_coe)) 2093 if (unlikely(!coe))
1434 skb_checksum_none_assert(skb); 2094 skb_checksum_none_assert(skb);
1435 else 2095 else
1436 skb->ip_summed = CHECKSUM_UNNECESSARY; 2096 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1441,7 +2101,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1441 priv->dev->stats.rx_bytes += frame_len; 2101 priv->dev->stats.rx_bytes += frame_len;
1442 } 2102 }
1443 entry = next_entry; 2103 entry = next_entry;
1444 p = p_next; /* use prefetched values */
1445 } 2104 }
1446 2105
1447 stmmac_rx_refill(priv); 2106 stmmac_rx_refill(priv);
@@ -1499,18 +2158,16 @@ static int stmmac_config(struct net_device *dev, struct ifmap *map)
1499 2158
1500 /* Don't allow changing the I/O address */ 2159 /* Don't allow changing the I/O address */
1501 if (map->base_addr != dev->base_addr) { 2160 if (map->base_addr != dev->base_addr) {
1502 pr_warning("%s: can't change I/O address\n", dev->name); 2161 pr_warn("%s: can't change I/O address\n", dev->name);
1503 return -EOPNOTSUPP; 2162 return -EOPNOTSUPP;
1504 } 2163 }
1505 2164
1506 /* Don't allow changing the IRQ */ 2165 /* Don't allow changing the IRQ */
1507 if (map->irq != dev->irq) { 2166 if (map->irq != dev->irq) {
1508 pr_warning("%s: can't change IRQ number %d\n", 2167 pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
1509 dev->name, dev->irq);
1510 return -EOPNOTSUPP; 2168 return -EOPNOTSUPP;
1511 } 2169 }
1512 2170
1513 /* ignore other fields */
1514 return 0; 2171 return 0;
1515} 2172}
1516 2173
@@ -1570,7 +2227,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1570} 2227}
1571 2228
1572static netdev_features_t stmmac_fix_features(struct net_device *dev, 2229static netdev_features_t stmmac_fix_features(struct net_device *dev,
1573 netdev_features_t features) 2230 netdev_features_t features)
1574{ 2231{
1575 struct stmmac_priv *priv = netdev_priv(dev); 2232 struct stmmac_priv *priv = netdev_priv(dev);
1576 2233
@@ -1584,13 +2241,22 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
1584 /* Some GMAC devices have a bugged Jumbo frame support that 2241 /* Some GMAC devices have a bugged Jumbo frame support that
1585 * needs to have the Tx COE disabled for oversized frames 2242 * needs to have the Tx COE disabled for oversized frames
1586 * (due to limited buffer sizes). In this case we disable 2243 * (due to limited buffer sizes). In this case we disable
1587 * the TX csum insertionin the TDES and not use SF. */ 2244 * the TX csum insertionin the TDES and not use SF.
2245 */
1588 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 2246 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
1589 features &= ~NETIF_F_ALL_CSUM; 2247 features &= ~NETIF_F_ALL_CSUM;
1590 2248
1591 return features; 2249 return features;
1592} 2250}
1593 2251
2252/**
2253 * stmmac_interrupt - main ISR
2254 * @irq: interrupt number.
2255 * @dev_id: to pass the net device pointer.
2256 * Description: this is the main driver interrupt service routine.
2257 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
2258 * interrupts.
2259 */
1594static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 2260static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1595{ 2261{
1596 struct net_device *dev = (struct net_device *)dev_id; 2262 struct net_device *dev = (struct net_device *)dev_id;
@@ -1604,30 +2270,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1604 /* To handle GMAC own interrupts */ 2270 /* To handle GMAC own interrupts */
1605 if (priv->plat->has_gmac) { 2271 if (priv->plat->has_gmac) {
1606 int status = priv->hw->mac->host_irq_status((void __iomem *) 2272 int status = priv->hw->mac->host_irq_status((void __iomem *)
1607 dev->base_addr); 2273 dev->base_addr,
2274 &priv->xstats);
1608 if (unlikely(status)) { 2275 if (unlikely(status)) {
1609 if (status & core_mmc_tx_irq)
1610 priv->xstats.mmc_tx_irq_n++;
1611 if (status & core_mmc_rx_irq)
1612 priv->xstats.mmc_rx_irq_n++;
1613 if (status & core_mmc_rx_csum_offload_irq)
1614 priv->xstats.mmc_rx_csum_offload_irq_n++;
1615 if (status & core_irq_receive_pmt_irq)
1616 priv->xstats.irq_receive_pmt_irq_n++;
1617
1618 /* For LPI we need to save the tx status */ 2276 /* For LPI we need to save the tx status */
1619 if (status & core_irq_tx_path_in_lpi_mode) { 2277 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
1620 priv->xstats.irq_tx_path_in_lpi_mode_n++;
1621 priv->tx_path_in_lpi_mode = true; 2278 priv->tx_path_in_lpi_mode = true;
1622 } 2279 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
1623 if (status & core_irq_tx_path_exit_lpi_mode) {
1624 priv->xstats.irq_tx_path_exit_lpi_mode_n++;
1625 priv->tx_path_in_lpi_mode = false; 2280 priv->tx_path_in_lpi_mode = false;
1626 }
1627 if (status & core_irq_rx_path_in_lpi_mode)
1628 priv->xstats.irq_rx_path_in_lpi_mode_n++;
1629 if (status & core_irq_rx_path_exit_lpi_mode)
1630 priv->xstats.irq_rx_path_exit_lpi_mode_n++;
1631 } 2281 }
1632 } 2282 }
1633 2283
@@ -1639,7 +2289,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1639 2289
1640#ifdef CONFIG_NET_POLL_CONTROLLER 2290#ifdef CONFIG_NET_POLL_CONTROLLER
1641/* Polling receive - used by NETCONSOLE and other diagnostic tools 2291/* Polling receive - used by NETCONSOLE and other diagnostic tools
1642 * to allow network I/O with interrupts disabled. */ 2292 * to allow network I/O with interrupts disabled.
2293 */
1643static void stmmac_poll_controller(struct net_device *dev) 2294static void stmmac_poll_controller(struct net_device *dev)
1644{ 2295{
1645 disable_irq(dev->irq); 2296 disable_irq(dev->irq);
@@ -1655,21 +2306,30 @@ static void stmmac_poll_controller(struct net_device *dev)
1655 * a proprietary structure used to pass information to the driver. 2306 * a proprietary structure used to pass information to the driver.
1656 * @cmd: IOCTL command 2307 * @cmd: IOCTL command
1657 * Description: 2308 * Description:
1658 * Currently there are no special functionality supported in IOCTL, just the 2309 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
1659 * phy_mii_ioctl(...) can be invoked.
1660 */ 2310 */
1661static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2311static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1662{ 2312{
1663 struct stmmac_priv *priv = netdev_priv(dev); 2313 struct stmmac_priv *priv = netdev_priv(dev);
1664 int ret; 2314 int ret = -EOPNOTSUPP;
1665 2315
1666 if (!netif_running(dev)) 2316 if (!netif_running(dev))
1667 return -EINVAL; 2317 return -EINVAL;
1668 2318
1669 if (!priv->phydev) 2319 switch (cmd) {
1670 return -EINVAL; 2320 case SIOCGMIIPHY:
1671 2321 case SIOCGMIIREG:
1672 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 2322 case SIOCSMIIREG:
2323 if (!priv->phydev)
2324 return -EINVAL;
2325 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2326 break;
2327 case SIOCSHWTSTAMP:
2328 ret = stmmac_hwtstamp_ioctl(dev, rq);
2329 break;
2330 default:
2331 break;
2332 }
1673 2333
1674 return ret; 2334 return ret;
1675} 2335}
@@ -1679,40 +2339,51 @@ static struct dentry *stmmac_fs_dir;
1679static struct dentry *stmmac_rings_status; 2339static struct dentry *stmmac_rings_status;
1680static struct dentry *stmmac_dma_cap; 2340static struct dentry *stmmac_dma_cap;
1681 2341
1682static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) 2342static void sysfs_display_ring(void *head, int size, int extend_desc,
2343 struct seq_file *seq)
1683{ 2344{
1684 struct tmp_s {
1685 u64 a;
1686 unsigned int b;
1687 unsigned int c;
1688 };
1689 int i; 2345 int i;
1690 struct net_device *dev = seq->private; 2346 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
1691 struct stmmac_priv *priv = netdev_priv(dev); 2347 struct dma_desc *p = (struct dma_desc *)head;
1692 2348
1693 seq_printf(seq, "=======================\n"); 2349 for (i = 0; i < size; i++) {
1694 seq_printf(seq, " RX descriptor ring\n"); 2350 u64 x;
1695 seq_printf(seq, "=======================\n"); 2351 if (extend_desc) {
1696 2352 x = *(u64 *) ep;
1697 for (i = 0; i < priv->dma_rx_size; i++) { 2353 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
1698 struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i); 2354 i, (unsigned int)virt_to_phys(ep),
1699 seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 2355 (unsigned int)x, (unsigned int)(x >> 32),
1700 i, (unsigned int)(x->a), 2356 ep->basic.des2, ep->basic.des3);
1701 (unsigned int)((x->a) >> 32), x->b, x->c); 2357 ep++;
2358 } else {
2359 x = *(u64 *) p;
2360 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2361 i, (unsigned int)virt_to_phys(ep),
2362 (unsigned int)x, (unsigned int)(x >> 32),
2363 p->des2, p->des3);
2364 p++;
2365 }
1702 seq_printf(seq, "\n"); 2366 seq_printf(seq, "\n");
1703 } 2367 }
2368}
1704 2369
1705 seq_printf(seq, "\n"); 2370static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
1706 seq_printf(seq, "=======================\n"); 2371{
1707 seq_printf(seq, " TX descriptor ring\n"); 2372 struct net_device *dev = seq->private;
1708 seq_printf(seq, "=======================\n"); 2373 struct stmmac_priv *priv = netdev_priv(dev);
2374 unsigned int txsize = priv->dma_tx_size;
2375 unsigned int rxsize = priv->dma_rx_size;
1709 2376
1710 for (i = 0; i < priv->dma_tx_size; i++) { 2377 if (priv->extend_desc) {
1711 struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i); 2378 seq_printf(seq, "Extended RX descriptor ring:\n");
1712 seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 2379 sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
1713 i, (unsigned int)(x->a), 2380 seq_printf(seq, "Extended TX descriptor ring:\n");
1714 (unsigned int)((x->a) >> 32), x->b, x->c); 2381 sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
1715 seq_printf(seq, "\n"); 2382 } else {
2383 seq_printf(seq, "RX descriptor ring:\n");
2384 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2385 seq_printf(seq, "TX descriptor ring:\n");
2386 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
1716 } 2387 }
1717 2388
1718 return 0; 2389 return 0;
@@ -1817,8 +2488,8 @@ static int stmmac_init_fs(struct net_device *dev)
1817 2488
1818 /* Entry to report DMA RX/TX rings */ 2489 /* Entry to report DMA RX/TX rings */
1819 stmmac_rings_status = debugfs_create_file("descriptors_status", 2490 stmmac_rings_status = debugfs_create_file("descriptors_status",
1820 S_IRUGO, stmmac_fs_dir, dev, 2491 S_IRUGO, stmmac_fs_dir, dev,
1821 &stmmac_rings_status_fops); 2492 &stmmac_rings_status_fops);
1822 2493
1823 if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { 2494 if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
1824 pr_info("ERROR creating stmmac ring debugfs file\n"); 2495 pr_info("ERROR creating stmmac ring debugfs file\n");
@@ -1868,7 +2539,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1868 2539
1869/** 2540/**
1870 * stmmac_hw_init - Init the MAC device 2541 * stmmac_hw_init - Init the MAC device
1871 * @priv : pointer to the private device structure. 2542 * @priv: driver private structure
1872 * Description: this function detects which MAC device 2543 * Description: this function detects which MAC device
1873 * (GMAC/MAC10-100) has to attached, checks the HW capability 2544 * (GMAC/MAC10-100) has to attached, checks the HW capability
1874 * (if supported) and sets the driver's features (for example 2545 * (if supported) and sets the driver's features (for example
@@ -1877,7 +2548,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1877 */ 2548 */
1878static int stmmac_hw_init(struct stmmac_priv *priv) 2549static int stmmac_hw_init(struct stmmac_priv *priv)
1879{ 2550{
1880 int ret = 0; 2551 int ret;
1881 struct mac_device_info *mac; 2552 struct mac_device_info *mac;
1882 2553
1883 /* Identify the MAC HW device */ 2554 /* Identify the MAC HW device */
@@ -1892,12 +2563,23 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1892 2563
1893 priv->hw = mac; 2564 priv->hw = mac;
1894 2565
1895 /* To use the chained or ring mode */
1896 priv->hw->ring = &ring_mode_ops;
1897
1898 /* Get and dump the chip ID */ 2566 /* Get and dump the chip ID */
1899 priv->synopsys_id = stmmac_get_synopsys_id(priv); 2567 priv->synopsys_id = stmmac_get_synopsys_id(priv);
1900 2568
2569 /* To use alternate (extended) or normal descriptor structures */
2570 stmmac_selec_desc_mode(priv);
2571
2572 /* To use the chained or ring mode */
2573 if (chain_mode) {
2574 priv->hw->chain = &chain_mode_ops;
2575 pr_info(" Chain mode enabled\n");
2576 priv->mode = STMMAC_CHAIN_MODE;
2577 } else {
2578 priv->hw->ring = &ring_mode_ops;
2579 pr_info(" Ring mode enabled\n");
2580 priv->mode = STMMAC_RING_MODE;
2581 }
2582
1901 /* Get the HW capability (new GMAC newer than 3.50a) */ 2583 /* Get the HW capability (new GMAC newer than 3.50a) */
1902 priv->hw_cap_support = stmmac_get_hw_features(priv); 2584 priv->hw_cap_support = stmmac_get_hw_features(priv);
1903 if (priv->hw_cap_support) { 2585 if (priv->hw_cap_support) {
@@ -1921,14 +2603,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1921 } else 2603 } else
1922 pr_info(" No HW DMA feature register supported"); 2604 pr_info(" No HW DMA feature register supported");
1923 2605
1924 /* Select the enhnaced/normal descriptor structures */
1925 stmmac_selec_desc_mode(priv);
1926
1927 /* Enable the IPC (Checksum Offload) and check if the feature has been
1928 * enabled during the core configuration. */
1929 ret = priv->hw->mac->rx_ipc(priv->ioaddr); 2606 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
1930 if (!ret) { 2607 if (!ret) {
1931 pr_warning(" RX IPC Checksum Offload not configured.\n"); 2608 pr_warn(" RX IPC Checksum Offload not configured.\n");
1932 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2609 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1933 } 2610 }
1934 2611
@@ -1943,7 +2620,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1943 device_set_wakeup_capable(priv->device, 1); 2620 device_set_wakeup_capable(priv->device, 1);
1944 } 2621 }
1945 2622
1946 return ret; 2623 return 0;
1947} 2624}
1948 2625
1949/** 2626/**
@@ -1984,12 +2661,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1984 stmmac_verify_args(); 2661 stmmac_verify_args();
1985 2662
1986 /* Override with kernel parameters if supplied XXX CRS XXX 2663 /* Override with kernel parameters if supplied XXX CRS XXX
1987 * this needs to have multiple instances */ 2664 * this needs to have multiple instances
2665 */
1988 if ((phyaddr >= 0) && (phyaddr <= 31)) 2666 if ((phyaddr >= 0) && (phyaddr <= 31))
1989 priv->plat->phy_addr = phyaddr; 2667 priv->plat->phy_addr = phyaddr;
1990 2668
1991 /* Init MAC and get the capabilities */ 2669 /* Init MAC and get the capabilities */
1992 stmmac_hw_init(priv); 2670 ret = stmmac_hw_init(priv);
2671 if (ret)
2672 goto error_free_netdev;
1993 2673
1994 ndev->netdev_ops = &stmmac_netdev_ops; 2674 ndev->netdev_ops = &stmmac_netdev_ops;
1995 2675
@@ -1999,7 +2679,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1999 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 2679 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2000#ifdef STMMAC_VLAN_TAG_USED 2680#ifdef STMMAC_VLAN_TAG_USED
2001 /* Both mac100 and gmac support receive VLAN tag detection */ 2681 /* Both mac100 and gmac support receive VLAN tag detection */
2002 ndev->features |= NETIF_F_HW_VLAN_RX; 2682 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2003#endif 2683#endif
2004 priv->msg_enable = netif_msg_init(debug, default_msg_level); 2684 priv->msg_enable = netif_msg_init(debug, default_msg_level);
2005 2685
@@ -2029,7 +2709,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2029 2709
2030 priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); 2710 priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
2031 if (IS_ERR(priv->stmmac_clk)) { 2711 if (IS_ERR(priv->stmmac_clk)) {
2032 pr_warning("%s: warning: cannot get CSR clock\n", __func__); 2712 pr_warn("%s: warning: cannot get CSR clock\n", __func__);
2033 goto error_clk_get; 2713 goto error_clk_get;
2034 } 2714 }
2035 2715
@@ -2044,12 +2724,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2044 else 2724 else
2045 priv->clk_csr = priv->plat->clk_csr; 2725 priv->clk_csr = priv->plat->clk_csr;
2046 2726
2047 /* MDIO bus Registration */ 2727 stmmac_check_pcs_mode(priv);
2048 ret = stmmac_mdio_register(ndev); 2728
2049 if (ret < 0) { 2729 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
2050 pr_debug("%s: MDIO bus (id: %d) registration failed", 2730 priv->pcs != STMMAC_PCS_RTBI) {
2051 __func__, priv->plat->bus_id); 2731 /* MDIO bus Registration */
2052 goto error_mdio_register; 2732 ret = stmmac_mdio_register(ndev);
2733 if (ret < 0) {
2734 pr_debug("%s: MDIO bus (id: %d) registration failed",
2735 __func__, priv->plat->bus_id);
2736 goto error_mdio_register;
2737 }
2053 } 2738 }
2054 2739
2055 return priv; 2740 return priv;
@@ -2060,6 +2745,7 @@ error_clk_get:
2060 unregister_netdev(ndev); 2745 unregister_netdev(ndev);
2061error_netdev_register: 2746error_netdev_register:
2062 netif_napi_del(&priv->napi); 2747 netif_napi_del(&priv->napi);
2748error_free_netdev:
2063 free_netdev(ndev); 2749 free_netdev(ndev);
2064 2750
2065 return NULL; 2751 return NULL;
@@ -2081,7 +2767,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
2081 priv->hw->dma->stop_tx(priv->ioaddr); 2767 priv->hw->dma->stop_tx(priv->ioaddr);
2082 2768
2083 stmmac_set_mac(priv->ioaddr, false); 2769 stmmac_set_mac(priv->ioaddr, false);
2084 stmmac_mdio_unregister(ndev); 2770 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
2771 priv->pcs != STMMAC_PCS_RTBI)
2772 stmmac_mdio_unregister(ndev);
2085 netif_carrier_off(ndev); 2773 netif_carrier_off(ndev);
2086 unregister_netdev(ndev); 2774 unregister_netdev(ndev);
2087 free_netdev(ndev); 2775 free_netdev(ndev);
@@ -2093,7 +2781,6 @@ int stmmac_dvr_remove(struct net_device *ndev)
2093int stmmac_suspend(struct net_device *ndev) 2781int stmmac_suspend(struct net_device *ndev)
2094{ 2782{
2095 struct stmmac_priv *priv = netdev_priv(ndev); 2783 struct stmmac_priv *priv = netdev_priv(ndev);
2096 int dis_ic = 0;
2097 unsigned long flags; 2784 unsigned long flags;
2098 2785
2099 if (!ndev || !netif_running(ndev)) 2786 if (!ndev || !netif_running(ndev))
@@ -2107,18 +2794,13 @@ int stmmac_suspend(struct net_device *ndev)
2107 netif_device_detach(ndev); 2794 netif_device_detach(ndev);
2108 netif_stop_queue(ndev); 2795 netif_stop_queue(ndev);
2109 2796
2110 if (priv->use_riwt)
2111 dis_ic = 1;
2112
2113 napi_disable(&priv->napi); 2797 napi_disable(&priv->napi);
2114 2798
2115 /* Stop TX/RX DMA */ 2799 /* Stop TX/RX DMA */
2116 priv->hw->dma->stop_tx(priv->ioaddr); 2800 priv->hw->dma->stop_tx(priv->ioaddr);
2117 priv->hw->dma->stop_rx(priv->ioaddr); 2801 priv->hw->dma->stop_rx(priv->ioaddr);
2118 /* Clear the Rx/Tx descriptors */ 2802
2119 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 2803 stmmac_clear_descriptors(priv);
2120 dis_ic);
2121 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2122 2804
2123 /* Enable Power down mode by programming the PMT regs */ 2805 /* Enable Power down mode by programming the PMT regs */
2124 if (device_may_wakeup(priv->device)) 2806 if (device_may_wakeup(priv->device))
@@ -2146,7 +2828,8 @@ int stmmac_resume(struct net_device *ndev)
2146 * automatically as soon as a magic packet or a Wake-up frame 2828 * automatically as soon as a magic packet or a Wake-up frame
2147 * is received. Anyway, it's better to manually clear 2829 * is received. Anyway, it's better to manually clear
2148 * this bit because it can generate problems while resuming 2830 * this bit because it can generate problems while resuming
2149 * from another devices (e.g. serial console). */ 2831 * from another devices (e.g. serial console).
2832 */
2150 if (device_may_wakeup(priv->device)) 2833 if (device_may_wakeup(priv->device))
2151 priv->hw->mac->pmt(priv->ioaddr, 0); 2834 priv->hw->mac->pmt(priv->ioaddr, 0);
2152 else 2835 else
@@ -2257,6 +2940,9 @@ static int __init stmmac_cmdline_opt(char *str)
2257 } else if (!strncmp(opt, "eee_timer:", 10)) { 2940 } else if (!strncmp(opt, "eee_timer:", 10)) {
2258 if (kstrtoint(opt + 10, 0, &eee_timer)) 2941 if (kstrtoint(opt + 10, 0, &eee_timer))
2259 goto err; 2942 goto err;
2943 } else if (!strncmp(opt, "chain_mode:", 11)) {
2944 if (kstrtoint(opt + 11, 0, &chain_mode))
2945 goto err;
2260 } 2946 }
2261 } 2947 }
2262 return 0; 2948 return 0;
@@ -2267,7 +2953,7 @@ err:
2267} 2953}
2268 2954
2269__setup("stmmaceth=", stmmac_cmdline_opt); 2955__setup("stmmaceth=", stmmac_cmdline_opt);
2270#endif 2956#endif /* MODULE */
2271 2957
2272MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 2958MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
2273MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 2959MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0b9829fe3eea..cc15039eaa47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
177 new_bus->write = &stmmac_mdio_write; 177 new_bus->write = &stmmac_mdio_write;
178 new_bus->reset = &stmmac_mdio_reset; 178 new_bus->reset = &stmmac_mdio_reset;
179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", 179 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
180 new_bus->name, priv->plat->bus_id); 180 new_bus->name, priv->plat->bus_id);
181 new_bus->priv = ndev; 181 new_bus->priv = ndev;
182 new_bus->irq = irqlist; 182 new_bus->irq = irqlist;
183 new_bus->phy_mask = mdio_bus_data->phy_mask; 183 new_bus->phy_mask = mdio_bus_data->phy_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 19b3a2567a46..023b7c29cb2f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -88,7 +88,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
88 continue; 88 continue;
89 addr = pci_iomap(pdev, i, 0); 89 addr = pci_iomap(pdev, i, 0);
90 if (addr == NULL) { 90 if (addr == NULL) {
91 pr_err("%s: ERROR: cannot map register memory, aborting", 91 pr_err("%s: ERROR: cannot map register memory aborting",
92 __func__); 92 __func__);
93 ret = -EIO; 93 ret = -EIO;
94 goto err_out_map_failed; 94 goto err_out_map_failed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b43d68b40e50..1d3780f55ba2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -88,11 +88,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
88 if (!res) 88 if (!res)
89 return -ENODEV; 89 return -ENODEV;
90 90
91 addr = devm_request_and_ioremap(dev, res); 91 addr = devm_ioremap_resource(dev, res);
92 if (!addr) { 92 if (IS_ERR(addr))
93 pr_err("%s: ERROR: memory mapping failed", __func__); 93 return PTR_ERR(addr);
94 return -ENOMEM;
95 }
96 94
97 if (pdev->dev.of_node) { 95 if (pdev->dev.of_node) {
98 plat_dat = devm_kzalloc(&pdev->dev, 96 plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 000000000000..b8b0eeed0f92
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,211 @@
1/*******************************************************************************
2 PTP 1588 clock using the STMMAC.
3
4 Copyright (C) 2013 Vayavya Labs Pvt Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23*******************************************************************************/
24#include "stmmac.h"
25#include "stmmac_ptp.h"
26
27/**
28 * stmmac_adjust_freq
29 *
30 * @ptp: pointer to ptp_clock_info structure
31 * @ppb: desired period change in parts ber billion
32 *
33 * Description: this function will adjust the frequency of hardware clock.
34 */
35static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
36{
37 struct stmmac_priv *priv =
38 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
39 unsigned long flags;
40 u32 diff, addend;
41 int neg_adj = 0;
42 u64 adj;
43
44 if (ppb < 0) {
45 neg_adj = 1;
46 ppb = -ppb;
47 }
48
49 addend = priv->default_addend;
50 adj = addend;
51 adj *= ppb;
52 diff = div_u64(adj, 1000000000ULL);
53 addend = neg_adj ? (addend - diff) : (addend + diff);
54
55 spin_lock_irqsave(&priv->ptp_lock, flags);
56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58
59 spin_unlock_irqrestore(&priv->lock, flags);
60
61 return 0;
62}
63
64/**
65 * stmmac_adjust_time
66 *
67 * @ptp: pointer to ptp_clock_info structure
68 * @delta: desired change in nanoseconds
69 *
70 * Description: this function will shift/adjust the hardware clock time.
71 */
72static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
73{
74 struct stmmac_priv *priv =
75 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
76 unsigned long flags;
77 u32 sec, nsec;
78 u32 quotient, reminder;
79 int neg_adj = 0;
80
81 if (delta < 0) {
82 neg_adj = 1;
83 delta = -delta;
84 }
85
86 quotient = div_u64_rem(delta, 1000000000ULL, &reminder);
87 sec = quotient;
88 nsec = reminder;
89
90 spin_lock_irqsave(&priv->ptp_lock, flags);
91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93
94 spin_unlock_irqrestore(&priv->lock, flags);
95
96 return 0;
97}
98
99/**
100 * stmmac_get_time
101 *
102 * @ptp: pointer to ptp_clock_info structure
103 * @ts: pointer to hold time/result
104 *
105 * Description: this function will read the current time from the
106 * hardware clock and store it in @ts.
107 */
108static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
109{
110 struct stmmac_priv *priv =
111 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
112 unsigned long flags;
113 u64 ns;
114 u32 reminder;
115
116 spin_lock_irqsave(&priv->ptp_lock, flags);
117
118 ns = priv->hw->ptp->get_systime(priv->ioaddr);
119
120 spin_unlock_irqrestore(&priv->ptp_lock, flags);
121
122 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
123 ts->tv_nsec = reminder;
124
125 return 0;
126}
127
128/**
129 * stmmac_set_time
130 *
131 * @ptp: pointer to ptp_clock_info structure
132 * @ts: time value to set
133 *
134 * Description: this function will set the current time on the
135 * hardware clock.
136 */
137static int stmmac_set_time(struct ptp_clock_info *ptp,
138 const struct timespec *ts)
139{
140 struct stmmac_priv *priv =
141 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
142 unsigned long flags;
143
144 spin_lock_irqsave(&priv->ptp_lock, flags);
145
146 priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
147
148 spin_unlock_irqrestore(&priv->ptp_lock, flags);
149
150 return 0;
151}
152
153static int stmmac_enable(struct ptp_clock_info *ptp,
154 struct ptp_clock_request *rq, int on)
155{
156 return -EOPNOTSUPP;
157}
158
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
	.owner = THIS_MODULE,
	.name = "stmmac_ptp_clock",
	.max_adj = 62500000,	/* max freq adj in ppb; matches STMMAC_SYSCLOCK in stmmac_ptp.h */
	.n_alarm = 0,		/* no programmable alarms */
	.n_ext_ts = 0,		/* no external timestamp channels */
	.n_per_out = 0,		/* no periodic output signals */
	.pps = 0,		/* no PPS support */
	.adjfreq = stmmac_adjust_freq,
	.adjtime = stmmac_adjust_time,
	.gettime = stmmac_get_time,
	.settime = stmmac_set_time,
	.enable = stmmac_enable,	/* always returns -EOPNOTSUPP */
};
174
175/**
176 * stmmac_ptp_register
177 * @priv: driver private structure
178 * Description: this function will register the ptp clock driver
179 * to kernel. It also does some house keeping work.
180 */
181int stmmac_ptp_register(struct stmmac_priv *priv)
182{
183 spin_lock_init(&priv->ptp_lock);
184 priv->ptp_clock_ops = stmmac_ptp_clock_ops;
185
186 priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
187 priv->device);
188 if (IS_ERR(priv->ptp_clock)) {
189 priv->ptp_clock = NULL;
190 pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
191 } else
192 pr_debug("Added PTP HW clock successfully on %s\n",
193 priv->dev->name);
194
195 return 0;
196}
197
198/**
199 * stmmac_ptp_unregister
200 * @priv: driver private structure
201 * Description: this function will remove/unregister the ptp clock driver
202 * from the kernel.
203 */
204void stmmac_ptp_unregister(struct stmmac_priv *priv)
205{
206 if (priv->ptp_clock) {
207 ptp_clock_unregister(priv->ptp_clock);
208 pr_debug("Removed PTP HW clock successfully on %s\n",
209 priv->dev->name);
210 }
211}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 000000000000..3dbc047622fa
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,74 @@
1/******************************************************************************
2 PTP Header file
3
4 Copyright (C) 2013 Vayavya Labs Pvt Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23******************************************************************************/
24
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__

/* PTP reference clock rate, in Hz -- also the upper bound used for
 * ptp_clock_info.max_adj (ppb) in stmmac_ptp.c.
 */
#define STMMAC_SYSCLOCK 62500000

/* IEEE 1588 PTP register offsets */
#define PTP_TCR 0x0700 /* Timestamp Control Reg */
#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
#define PTP_STSR 0x0708 /* System Time - Seconds Reg */
#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */
#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */
#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */
#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
#define PTP_TSR 0x0728 /* Timestamp Status */

/* Bit in PTP_STNSUR selecting add vs. subtract for a time update */
#define PTP_STNSUR_ADDSUB_SHIFT 31

/* PTP TCR defines */
#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
/* Timestamp Interrupt Trigger Enable */
#define PTP_TCR_TSTRIG 0x00000010
#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
/* Timestamp Digital or Binary Rollover Control */
#define PTP_TCR_TSCTRLSSR 0x00000200

/* Enable PTP packet Processing for Version 2 Format */
#define PTP_TCR_TSVER2ENA 0x00000400
/* Enable Processing of PTP over Ethernet Frames */
#define PTP_TCR_TSIPENA 0x00000800
/* Enable Processing of PTP Frames Sent over IPv6-UDP */
#define PTP_TCR_TSIPV6ENA 0x00001000
/* Enable Processing of PTP Frames Sent over IPv4-UDP */
#define PTP_TCR_TSIPV4ENA 0x00002000
/* Enable Timestamp Snapshot for Event Messages */
#define PTP_TCR_TSEVNTENA 0x00004000
/* Enable Snapshot for Messages Relevant to Master */
#define PTP_TCR_TSMSTRENA 0x00008000
/* Select PTP packets for Taking Snapshots */
#define PTP_TCR_SNAPTYPSEL_1 0x00010000
/* Enable MAC address for PTP Frame Filtering */
#define PTP_TCR_TSENMACADDR 0x00040000

#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e4c1c88e4c2a..95cff98d8a34 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6618 (len << TXHDR_LEN_SHIFT) | 6618 (len << TXHDR_LEN_SHIFT) |
6619 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6619 ((l3off / 2) << TXHDR_L3START_SHIFT) |
6620 (ihl << TXHDR_IHL_SHIFT) | 6620 (ihl << TXHDR_IHL_SHIFT) |
6621 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6621 ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
6622 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6622 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6623 (ipv6 ? TXHDR_IP_VER : 0) | 6623 (ipv6 ? TXHDR_IP_VER : 0) |
6624 csum_bits); 6624 csum_bits);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 5fafca065305..054975939a18 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
1169 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, 1169 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
1170 PAGE_SIZE, 1170 PAGE_SIZE,
1171 &bp->bblock_dvma, GFP_ATOMIC); 1171 &bp->bblock_dvma, GFP_ATOMIC);
1172 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { 1172 if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
1173 printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
1174 goto fail_and_cleanup; 1173 goto fail_and_cleanup;
1175 }
1176 1174
1177 /* Get the board revision of this BigMAC. */ 1175 /* Get the board revision of this BigMAC. */
1178 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, 1176 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index a1bff49a8155..436fa9d5a071 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2752 &hp->hblock_dvma, 2752 &hp->hblock_dvma,
2753 GFP_ATOMIC); 2753 GFP_ATOMIC);
2754 err = -ENOMEM; 2754 err = -ENOMEM;
2755 if (!hp->happy_block) { 2755 if (!hp->happy_block)
2756 printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
2757 goto err_out_iounmap; 2756 goto err_out_iounmap;
2758 }
2759 2757
2760 /* Force check of the link first time we are brought up. */ 2758 /* Force check of the link first time we are brought up. */
2761 hp->linkcheck = 0; 2759 hp->linkcheck = 0;
@@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
3068 hp->happy_bursts = DMA_BURSTBITS; 3066 hp->happy_bursts = DMA_BURSTBITS;
3069#endif 3067#endif
3070 3068
3071 hp->happy_block = (struct hmeal_init_block *) 3069 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3072 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); 3070 &hp->hblock_dvma, GFP_KERNEL);
3073
3074 err = -ENODEV; 3071 err = -ENODEV;
3075 if (!hp->happy_block) { 3072 if (!hp->happy_block)
3076 printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
3077 goto err_out_iounmap; 3073 goto err_out_iounmap;
3078 }
3079 3074
3080 hp->linkcheck = 0; 3075 hp->linkcheck = 0;
3081 hp->timer_state = asleep; 3076 hp->timer_state = asleep;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 49bf3e2eb652..8182591bc187 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
414 struct qe_rxd *this; 414 struct qe_rxd *this;
415 struct sunqe_buffers *qbufs = qep->buffers; 415 struct sunqe_buffers *qbufs = qep->buffers;
416 __u32 qbufs_dvma = qep->buffers_dvma; 416 __u32 qbufs_dvma = qep->buffers_dvma;
417 int elem = qep->rx_new, drops = 0; 417 int elem = qep->rx_new;
418 u32 flags; 418 u32 flags;
419 419
420 this = &rxbase[elem]; 420 this = &rxbase[elem];
@@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep)
436 } else { 436 } else {
437 skb = netdev_alloc_skb(dev, len + 2); 437 skb = netdev_alloc_skb(dev, len + 2);
438 if (skb == NULL) { 438 if (skb == NULL) {
439 drops++;
440 dev->stats.rx_dropped++; 439 dev->stats.rx_dropped++;
441 } else { 440 } else {
442 skb_reserve(skb, 2); 441 skb_reserve(skb, 2);
@@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep)
456 this = &rxbase[elem]; 455 this = &rxbase[elem];
457 } 456 }
458 qep->rx_new = elem; 457 qep->rx_new = elem;
459 if (drops)
460 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
461} 458}
462 459
463static void qe_tx_reclaim(struct sunqe *qep); 460static void qe_tx_reclaim(struct sunqe *qep);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e15cc71b826d..571452e786d5 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -733,7 +733,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
733 * @ndev: network device 733 * @ndev: network device
734 * @vid: VLAN vid to add 734 * @vid: VLAN vid to add
735 */ 735 */
736static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) 736static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
737{ 737{
738 __bdx_vlan_rx_vid(ndev, vid, 1); 738 __bdx_vlan_rx_vid(ndev, vid, 1);
739 return 0; 739 return 0;
@@ -744,7 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
744 * @ndev: network device 744 * @ndev: network device
745 * @vid: VLAN vid to kill 745 * @vid: VLAN vid to kill
746 */ 746 */
747static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) 747static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
748{ 748{
749 __bdx_vlan_rx_vid(ndev, vid, 0); 749 __bdx_vlan_rx_vid(ndev, vid, 0);
750 return 0; 750 return 0;
@@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1102 dno = bdx_rxdb_available(db) - 1; 1102 dno = bdx_rxdb_available(db) - 1;
1103 while (dno > 0) { 1103 while (dno > 0) {
1104 skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN); 1104 skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
1105 if (!skb) { 1105 if (!skb)
1106 pr_err("NO MEM: netdev_alloc_skb failed\n");
1107 break; 1106 break;
1108 } 1107
1109 skb_reserve(skb, NET_IP_ALIGN); 1108 skb_reserve(skb, NET_IP_ALIGN);
1110 1109
1111 idx = bdx_rxdb_alloc_elem(db); 1110 idx = bdx_rxdb_alloc_elem(db);
@@ -1149,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
1149 priv->ndev->name, 1148 priv->ndev->name,
1150 GET_RXD_VLAN_ID(rxd_vlan), 1149 GET_RXD_VLAN_ID(rxd_vlan),
1151 GET_RXD_VTAG(rxd_val1)); 1150 GET_RXD_VTAG(rxd_val1));
1152 __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan)); 1151 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
1153 } 1152 }
1154 netif_receive_skb(skb); 1153 netif_receive_skb(skb);
1155} 1154}
@@ -2018,12 +2017,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2018 * so we can have them same for all ports of the board */ 2017 * so we can have them same for all ports of the board */
2019 ndev->if_port = port; 2018 ndev->if_port = port;
2020 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 2019 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2021 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2020 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2022 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM 2021 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
2023 /*| NETIF_F_FRAGLIST */ 2022 /*| NETIF_F_FRAGLIST */
2024 ; 2023 ;
2025 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 2024 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2026 NETIF_F_TSO | NETIF_F_HW_VLAN_TX; 2025 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
2027 2026
2028 if (pci_using_dac) 2027 if (pci_using_dac)
2029 ndev->features |= NETIF_F_HIGHDMA; 2028 ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4781d3d8e182..59c43918883e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -126,6 +126,13 @@ do { \
126#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) 126#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
127#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) 127#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
128 128
129#define CPSW_INTPACEEN (0x3f << 16)
130#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
131#define CPSW_CMINTMAX_CNT 63
132#define CPSW_CMINTMIN_CNT 2
133#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
134#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
135
129#define cpsw_enable_irq(priv) \ 136#define cpsw_enable_irq(priv) \
130 do { \ 137 do { \
131 u32 i; \ 138 u32 i; \
@@ -139,6 +146,10 @@ do { \
139 disable_irq_nosync(priv->irqs_table[i]); \ 146 disable_irq_nosync(priv->irqs_table[i]); \
140 } while (0); 147 } while (0);
141 148
149#define cpsw_slave_index(priv) \
150 ((priv->data.dual_emac) ? priv->emac_port : \
151 priv->data.active_slave)
152
142static int debug_level; 153static int debug_level;
143module_param(debug_level, int, 0); 154module_param(debug_level, int, 0);
144MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); 155MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -160,6 +171,15 @@ struct cpsw_wr_regs {
160 u32 rx_en; 171 u32 rx_en;
161 u32 tx_en; 172 u32 tx_en;
162 u32 misc_en; 173 u32 misc_en;
174 u32 mem_allign1[8];
175 u32 rx_thresh_stat;
176 u32 rx_stat;
177 u32 tx_stat;
178 u32 misc_stat;
179 u32 mem_allign2[8];
180 u32 rx_imax;
181 u32 tx_imax;
182
163}; 183};
164 184
165struct cpsw_ss_regs { 185struct cpsw_ss_regs {
@@ -314,6 +334,8 @@ struct cpsw_priv {
314 struct cpsw_host_regs __iomem *host_port_regs; 334 struct cpsw_host_regs __iomem *host_port_regs;
315 u32 msg_enable; 335 u32 msg_enable;
316 u32 version; 336 u32 version;
337 u32 coal_intvl;
338 u32 bus_freq_mhz;
317 struct net_device_stats stats; 339 struct net_device_stats stats;
318 int rx_packet_max; 340 int rx_packet_max;
319 int host_port; 341 int host_port;
@@ -326,6 +348,7 @@ struct cpsw_priv {
326 /* snapshot of IRQ numbers */ 348 /* snapshot of IRQ numbers */
327 u32 irqs_table[4]; 349 u32 irqs_table[4];
328 u32 num_irqs; 350 u32 num_irqs;
351 bool irq_enabled;
329 struct cpts *cpts; 352 struct cpts *cpts;
330 u32 emac_port; 353 u32 emac_port;
331}; 354};
@@ -333,12 +356,15 @@ struct cpsw_priv {
333#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 356#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
334#define for_each_slave(priv, func, arg...) \ 357#define for_each_slave(priv, func, arg...) \
335 do { \ 358 do { \
336 int idx; \ 359 struct cpsw_slave *slave; \
360 int n; \
337 if (priv->data.dual_emac) \ 361 if (priv->data.dual_emac) \
338 (func)((priv)->slaves + priv->emac_port, ##arg);\ 362 (func)((priv)->slaves + priv->emac_port, ##arg);\
339 else \ 363 else \
340 for (idx = 0; idx < (priv)->data.slaves; idx++) \ 364 for (n = (priv)->data.slaves, \
341 (func)((priv)->slaves + idx, ##arg); \ 365 slave = (priv)->slaves; \
366 n; n--) \
367 (func)(slave++, ##arg); \
342 } while (0) 368 } while (0)
343#define cpsw_get_slave_ndev(priv, __slave_no__) \ 369#define cpsw_get_slave_ndev(priv, __slave_no__) \
344 (priv->slaves[__slave_no__].ndev) 370 (priv->slaves[__slave_no__].ndev)
@@ -446,62 +472,69 @@ void cpsw_tx_handler(void *token, int len, int status)
446void cpsw_rx_handler(void *token, int len, int status) 472void cpsw_rx_handler(void *token, int len, int status)
447{ 473{
448 struct sk_buff *skb = token; 474 struct sk_buff *skb = token;
475 struct sk_buff *new_skb;
449 struct net_device *ndev = skb->dev; 476 struct net_device *ndev = skb->dev;
450 struct cpsw_priv *priv = netdev_priv(ndev); 477 struct cpsw_priv *priv = netdev_priv(ndev);
451 int ret = 0; 478 int ret = 0;
452 479
453 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 480 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
454 481
455 /* free and bail if we are shutting down */ 482 if (unlikely(status < 0)) {
456 if (unlikely(!netif_running(ndev)) || 483 /* the interface is going down, skbs are purged */
457 unlikely(!netif_carrier_ok(ndev))) {
458 dev_kfree_skb_any(skb); 484 dev_kfree_skb_any(skb);
459 return; 485 return;
460 } 486 }
461 if (likely(status >= 0)) { 487
488 new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
489 if (new_skb) {
462 skb_put(skb, len); 490 skb_put(skb, len);
463 cpts_rx_timestamp(priv->cpts, skb); 491 cpts_rx_timestamp(priv->cpts, skb);
464 skb->protocol = eth_type_trans(skb, ndev); 492 skb->protocol = eth_type_trans(skb, ndev);
465 netif_receive_skb(skb); 493 netif_receive_skb(skb);
466 priv->stats.rx_bytes += len; 494 priv->stats.rx_bytes += len;
467 priv->stats.rx_packets++; 495 priv->stats.rx_packets++;
468 skb = NULL; 496 } else {
469 } 497 priv->stats.rx_dropped++;
470 498 new_skb = skb;
471 if (unlikely(!netif_running(ndev))) {
472 if (skb)
473 dev_kfree_skb_any(skb);
474 return;
475 } 499 }
476 500
477 if (likely(!skb)) { 501 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
478 skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); 502 skb_tailroom(new_skb), 0);
479 if (WARN_ON(!skb)) 503 if (WARN_ON(ret < 0))
480 return; 504 dev_kfree_skb_any(new_skb);
481
482 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
483 skb_tailroom(skb), 0, GFP_KERNEL);
484 }
485 WARN_ON(ret < 0);
486} 505}
487 506
488static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 507static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
489{ 508{
490 struct cpsw_priv *priv = dev_id; 509 struct cpsw_priv *priv = dev_id;
510 u32 rx, tx, rx_thresh;
491 511
492 if (likely(netif_running(priv->ndev))) { 512 rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
493 cpsw_intr_disable(priv); 513 rx = __raw_readl(&priv->wr_regs->rx_stat);
514 tx = __raw_readl(&priv->wr_regs->tx_stat);
515 if (!rx_thresh && !rx && !tx)
516 return IRQ_NONE;
517
518 cpsw_intr_disable(priv);
519 if (priv->irq_enabled == true) {
494 cpsw_disable_irq(priv); 520 cpsw_disable_irq(priv);
521 priv->irq_enabled = false;
522 }
523
524 if (netif_running(priv->ndev)) {
495 napi_schedule(&priv->napi); 525 napi_schedule(&priv->napi);
496 } else { 526 return IRQ_HANDLED;
497 priv = cpsw_get_slave_priv(priv, 1); 527 }
498 if (likely(priv) && likely(netif_running(priv->ndev))) { 528
499 cpsw_intr_disable(priv); 529 priv = cpsw_get_slave_priv(priv, 1);
500 cpsw_disable_irq(priv); 530 if (!priv)
501 napi_schedule(&priv->napi); 531 return IRQ_NONE;
502 } 532
533 if (netif_running(priv->ndev)) {
534 napi_schedule(&priv->napi);
535 return IRQ_HANDLED;
503 } 536 }
504 return IRQ_HANDLED; 537 return IRQ_NONE;
505} 538}
506 539
507static int cpsw_poll(struct napi_struct *napi, int budget) 540static int cpsw_poll(struct napi_struct *napi, int budget)
@@ -515,10 +548,16 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
515 548
516 num_rx = cpdma_chan_process(priv->rxch, budget); 549 num_rx = cpdma_chan_process(priv->rxch, budget);
517 if (num_rx < budget) { 550 if (num_rx < budget) {
551 struct cpsw_priv *prim_cpsw;
552
518 napi_complete(napi); 553 napi_complete(napi);
519 cpsw_intr_enable(priv); 554 cpsw_intr_enable(priv);
520 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 555 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
521 cpsw_enable_irq(priv); 556 prim_cpsw = cpsw_get_slave_priv(priv, 0);
557 if (prim_cpsw->irq_enabled == false) {
558 cpsw_enable_irq(priv);
559 prim_cpsw->irq_enabled = true;
560 }
522 } 561 }
523 562
524 if (num_rx || num_tx) 563 if (num_rx || num_tx)
@@ -612,6 +651,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
612 } 651 }
613} 652}
614 653
654static int cpsw_get_coalesce(struct net_device *ndev,
655 struct ethtool_coalesce *coal)
656{
657 struct cpsw_priv *priv = netdev_priv(ndev);
658
659 coal->rx_coalesce_usecs = priv->coal_intvl;
660 return 0;
661}
662
663static int cpsw_set_coalesce(struct net_device *ndev,
664 struct ethtool_coalesce *coal)
665{
666 struct cpsw_priv *priv = netdev_priv(ndev);
667 u32 int_ctrl;
668 u32 num_interrupts = 0;
669 u32 prescale = 0;
670 u32 addnl_dvdr = 1;
671 u32 coal_intvl = 0;
672
673 if (!coal->rx_coalesce_usecs)
674 return -EINVAL;
675
676 coal_intvl = coal->rx_coalesce_usecs;
677
678 int_ctrl = readl(&priv->wr_regs->int_control);
679 prescale = priv->bus_freq_mhz * 4;
680
681 if (coal_intvl < CPSW_CMINTMIN_INTVL)
682 coal_intvl = CPSW_CMINTMIN_INTVL;
683
684 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
685 /* Interrupt pacer works with 4us Pulse, we can
686 * throttle further by dilating the 4us pulse.
687 */
688 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
689
690 if (addnl_dvdr > 1) {
691 prescale *= addnl_dvdr;
692 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
693 coal_intvl = (CPSW_CMINTMAX_INTVL
694 * addnl_dvdr);
695 } else {
696 addnl_dvdr = 1;
697 coal_intvl = CPSW_CMINTMAX_INTVL;
698 }
699 }
700
701 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
702 writel(num_interrupts, &priv->wr_regs->rx_imax);
703 writel(num_interrupts, &priv->wr_regs->tx_imax);
704
705 int_ctrl |= CPSW_INTPACEEN;
706 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
707 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
708 writel(int_ctrl, &priv->wr_regs->int_control);
709
710 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
711 if (priv->data.dual_emac) {
712 int i;
713
714 for (i = 0; i < priv->data.slaves; i++) {
715 priv = netdev_priv(priv->slaves[i].ndev);
716 priv->coal_intvl = coal_intvl;
717 }
718 } else {
719 priv->coal_intvl = coal_intvl;
720 }
721
722 return 0;
723}
724
615static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) 725static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
616{ 726{
617 static char *leader = "........................................"; 727 static char *leader = "........................................";
@@ -643,14 +753,14 @@ static inline int cpsw_tx_packet_submit(struct net_device *ndev,
643{ 753{
644 if (!priv->data.dual_emac) 754 if (!priv->data.dual_emac)
645 return cpdma_chan_submit(priv->txch, skb, skb->data, 755 return cpdma_chan_submit(priv->txch, skb, skb->data,
646 skb->len, 0, GFP_KERNEL); 756 skb->len, 0);
647 757
648 if (ndev == cpsw_get_slave_ndev(priv, 0)) 758 if (ndev == cpsw_get_slave_ndev(priv, 0))
649 return cpdma_chan_submit(priv->txch, skb, skb->data, 759 return cpdma_chan_submit(priv->txch, skb, skb->data,
650 skb->len, 1, GFP_KERNEL); 760 skb->len, 1);
651 else 761 else
652 return cpdma_chan_submit(priv->txch, skb, skb->data, 762 return cpdma_chan_submit(priv->txch, skb, skb->data,
653 skb->len, 2, GFP_KERNEL); 763 skb->len, 2);
654} 764}
655 765
656static inline void cpsw_add_dual_emac_def_ale_entries( 766static inline void cpsw_add_dual_emac_def_ale_entries(
@@ -774,9 +884,19 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
774 } 884 }
775} 885}
776 886
887static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
888{
889 if (!slave->phy)
890 return;
891 phy_stop(slave->phy);
892 phy_disconnect(slave->phy);
893 slave->phy = NULL;
894}
895
777static int cpsw_ndo_open(struct net_device *ndev) 896static int cpsw_ndo_open(struct net_device *ndev)
778{ 897{
779 struct cpsw_priv *priv = netdev_priv(ndev); 898 struct cpsw_priv *priv = netdev_priv(ndev);
899 struct cpsw_priv *prim_cpsw;
780 int i, ret; 900 int i, ret;
781 u32 reg; 901 u32 reg;
782 902
@@ -819,14 +939,16 @@ static int cpsw_ndo_open(struct net_device *ndev)
819 struct sk_buff *skb; 939 struct sk_buff *skb;
820 940
821 ret = -ENOMEM; 941 ret = -ENOMEM;
822 skb = netdev_alloc_skb_ip_align(priv->ndev, 942 skb = __netdev_alloc_skb_ip_align(priv->ndev,
823 priv->rx_packet_max); 943 priv->rx_packet_max, GFP_KERNEL);
824 if (!skb) 944 if (!skb)
825 break; 945 goto err_cleanup;
826 ret = cpdma_chan_submit(priv->rxch, skb, skb->data, 946 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
827 skb_tailroom(skb), 0, GFP_KERNEL); 947 skb_tailroom(skb), 0);
828 if (WARN_ON(ret < 0)) 948 if (ret < 0) {
829 break; 949 kfree_skb(skb);
950 goto err_cleanup;
951 }
830 } 952 }
831 /* continue even if we didn't manage to submit all 953 /* continue even if we didn't manage to submit all
832 * receive descs 954 * receive descs
@@ -834,6 +956,22 @@ static int cpsw_ndo_open(struct net_device *ndev)
834 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); 956 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
835 } 957 }
836 958
959 /* Enable Interrupt pacing if configured */
960 if (priv->coal_intvl != 0) {
961 struct ethtool_coalesce coal;
962
963 coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
964 cpsw_set_coalesce(ndev, &coal);
965 }
966
967 prim_cpsw = cpsw_get_slave_priv(priv, 0);
968 if (prim_cpsw->irq_enabled == false) {
969 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
970 prim_cpsw->irq_enabled = true;
971 cpsw_enable_irq(prim_cpsw);
972 }
973 }
974
837 cpdma_ctlr_start(priv->dma); 975 cpdma_ctlr_start(priv->dma);
838 cpsw_intr_enable(priv); 976 cpsw_intr_enable(priv);
839 napi_enable(&priv->napi); 977 napi_enable(&priv->napi);
@@ -843,15 +981,13 @@ static int cpsw_ndo_open(struct net_device *ndev)
843 if (priv->data.dual_emac) 981 if (priv->data.dual_emac)
844 priv->slaves[priv->emac_port].open_stat = true; 982 priv->slaves[priv->emac_port].open_stat = true;
845 return 0; 983 return 0;
846}
847 984
848static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) 985err_cleanup:
849{ 986 cpdma_ctlr_stop(priv->dma);
850 if (!slave->phy) 987 for_each_slave(priv, cpsw_slave_stop, priv);
851 return; 988 pm_runtime_put_sync(&priv->pdev->dev);
852 phy_stop(slave->phy); 989 netif_carrier_off(priv->ndev);
853 phy_disconnect(slave->phy); 990 return ret;
854 slave->phy = NULL;
855} 991}
856 992
857static int cpsw_ndo_stop(struct net_device *ndev) 993static int cpsw_ndo_stop(struct net_device *ndev)
@@ -942,7 +1078,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
942 1078
943static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) 1079static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
944{ 1080{
945 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; 1081 struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
946 u32 ts_en, seq_id; 1082 u32 ts_en, seq_id;
947 1083
948 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { 1084 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@ -971,7 +1107,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
971 if (priv->data.dual_emac) 1107 if (priv->data.dual_emac)
972 slave = &priv->slaves[priv->emac_port]; 1108 slave = &priv->slaves[priv->emac_port];
973 else 1109 else
974 slave = &priv->slaves[priv->data.cpts_active_slave]; 1110 slave = &priv->slaves[priv->data.active_slave];
975 1111
976 ctrl = slave_read(slave, CPSW2_CONTROL); 1112 ctrl = slave_read(slave, CPSW2_CONTROL);
977 ctrl &= ~CTRL_ALL_TS_MASK; 1113 ctrl &= ~CTRL_ALL_TS_MASK;
@@ -1056,14 +1192,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1056 1192
1057static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 1193static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1058{ 1194{
1195 struct cpsw_priv *priv = netdev_priv(dev);
1196 struct mii_ioctl_data *data = if_mii(req);
1197 int slave_no = cpsw_slave_index(priv);
1198
1059 if (!netif_running(dev)) 1199 if (!netif_running(dev))
1060 return -EINVAL; 1200 return -EINVAL;
1061 1201
1202 switch (cmd) {
1062#ifdef CONFIG_TI_CPTS 1203#ifdef CONFIG_TI_CPTS
1063 if (cmd == SIOCSHWTSTAMP) 1204 case SIOCSHWTSTAMP:
1064 return cpsw_hwtstamp_ioctl(dev, req); 1205 return cpsw_hwtstamp_ioctl(dev, req);
1065#endif 1206#endif
1066 return -ENOTSUPP; 1207 case SIOCGMIIPHY:
1208 data->phy_id = priv->slaves[slave_no].phy->addr;
1209 break;
1210 default:
1211 return -ENOTSUPP;
1212 }
1213
1214 return 0;
1067} 1215}
1068 1216
1069static void cpsw_ndo_tx_timeout(struct net_device *ndev) 1217static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1138,7 +1286,7 @@ clean_vid:
1138} 1286}
1139 1287
1140static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, 1288static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1141 unsigned short vid) 1289 __be16 proto, u16 vid)
1142{ 1290{
1143 struct cpsw_priv *priv = netdev_priv(ndev); 1291 struct cpsw_priv *priv = netdev_priv(ndev);
1144 1292
@@ -1150,7 +1298,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1150} 1298}
1151 1299
1152static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, 1300static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1153 unsigned short vid) 1301 __be16 proto, u16 vid)
1154{ 1302{
1155 struct cpsw_priv *priv = netdev_priv(ndev); 1303 struct cpsw_priv *priv = netdev_priv(ndev);
1156 int ret; 1304 int ret;
@@ -1244,12 +1392,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
1244 return 0; 1392 return 0;
1245} 1393}
1246 1394
1395static int cpsw_get_settings(struct net_device *ndev,
1396 struct ethtool_cmd *ecmd)
1397{
1398 struct cpsw_priv *priv = netdev_priv(ndev);
1399 int slave_no = cpsw_slave_index(priv);
1400
1401 if (priv->slaves[slave_no].phy)
1402 return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
1403 else
1404 return -EOPNOTSUPP;
1405}
1406
1407static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1408{
1409 struct cpsw_priv *priv = netdev_priv(ndev);
1410 int slave_no = cpsw_slave_index(priv);
1411
1412 if (priv->slaves[slave_no].phy)
1413 return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
1414 else
1415 return -EOPNOTSUPP;
1416}
1417
1247static const struct ethtool_ops cpsw_ethtool_ops = { 1418static const struct ethtool_ops cpsw_ethtool_ops = {
1248 .get_drvinfo = cpsw_get_drvinfo, 1419 .get_drvinfo = cpsw_get_drvinfo,
1249 .get_msglevel = cpsw_get_msglevel, 1420 .get_msglevel = cpsw_get_msglevel,
1250 .set_msglevel = cpsw_set_msglevel, 1421 .set_msglevel = cpsw_set_msglevel,
1251 .get_link = ethtool_op_get_link, 1422 .get_link = ethtool_op_get_link,
1252 .get_ts_info = cpsw_get_ts_info, 1423 .get_ts_info = cpsw_get_ts_info,
1424 .get_settings = cpsw_get_settings,
1425 .set_settings = cpsw_set_settings,
1426 .get_coalesce = cpsw_get_coalesce,
1427 .set_coalesce = cpsw_set_coalesce,
1253}; 1428};
1254 1429
1255static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, 1430static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1282,12 +1457,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1282 } 1457 }
1283 data->slaves = prop; 1458 data->slaves = prop;
1284 1459
1285 if (of_property_read_u32(node, "cpts_active_slave", &prop)) { 1460 if (of_property_read_u32(node, "active_slave", &prop)) {
1286 pr_err("Missing cpts_active_slave property in the DT.\n"); 1461 pr_err("Missing active_slave property in the DT.\n");
1287 ret = -EINVAL; 1462 ret = -EINVAL;
1288 goto error_ret; 1463 goto error_ret;
1289 } 1464 }
1290 data->cpts_active_slave = prop; 1465 data->active_slave = prop;
1291 1466
1292 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1467 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1293 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1468 pr_err("Missing cpts_clock_mult property in the DT.\n");
@@ -1437,6 +1612,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1437 priv_sl2->slaves = priv->slaves; 1612 priv_sl2->slaves = priv->slaves;
1438 priv_sl2->clk = priv->clk; 1613 priv_sl2->clk = priv->clk;
1439 1614
1615 priv_sl2->coal_intvl = 0;
1616 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
1617
1440 priv_sl2->cpsw_res = priv->cpsw_res; 1618 priv_sl2->cpsw_res = priv->cpsw_res;
1441 priv_sl2->regs = priv->regs; 1619 priv_sl2->regs = priv->regs;
1442 priv_sl2->host_port = priv->host_port; 1620 priv_sl2->host_port = priv->host_port;
@@ -1455,8 +1633,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1455 priv_sl2->irqs_table[i] = priv->irqs_table[i]; 1633 priv_sl2->irqs_table[i] = priv->irqs_table[i];
1456 priv_sl2->num_irqs = priv->num_irqs; 1634 priv_sl2->num_irqs = priv->num_irqs;
1457 } 1635 }
1458 1636 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1459 ndev->features |= NETIF_F_HW_VLAN_FILTER;
1460 1637
1461 ndev->netdev_ops = &cpsw_netdev_ops; 1638 ndev->netdev_ops = &cpsw_netdev_ops;
1462 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 1639 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1476,7 +1653,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1476 1653
1477static int cpsw_probe(struct platform_device *pdev) 1654static int cpsw_probe(struct platform_device *pdev)
1478{ 1655{
1479 struct cpsw_platform_data *data = pdev->dev.platform_data; 1656 struct cpsw_platform_data *data;
1480 struct net_device *ndev; 1657 struct net_device *ndev;
1481 struct cpsw_priv *priv; 1658 struct cpsw_priv *priv;
1482 struct cpdma_params dma_params; 1659 struct cpdma_params dma_params;
@@ -1501,6 +1678,7 @@ static int cpsw_probe(struct platform_device *pdev)
1501 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1678 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1502 priv->rx_packet_max = max(rx_packet_max, 128); 1679 priv->rx_packet_max = max(rx_packet_max, 128);
1503 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1681 priv->irq_enabled = true;
1504 if (!ndev) { 1682 if (!ndev) {
1505 pr_err("error allocating cpts\n"); 1683 pr_err("error allocating cpts\n");
1506 goto clean_ndev_ret; 1684 goto clean_ndev_ret;
@@ -1546,6 +1724,8 @@ static int cpsw_probe(struct platform_device *pdev)
1546 ret = -ENODEV; 1724 ret = -ENODEV;
1547 goto clean_slave_ret; 1725 goto clean_slave_ret;
1548 } 1726 }
1727 priv->coal_intvl = 0;
1728 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
1549 1729
1550 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1730 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1551 if (!priv->cpsw_res) { 1731 if (!priv->cpsw_res) {
@@ -1687,12 +1867,12 @@ static int cpsw_probe(struct platform_device *pdev)
1687 goto clean_ale_ret; 1867 goto clean_ale_ret;
1688 } 1868 }
1689 priv->irqs_table[k] = i; 1869 priv->irqs_table[k] = i;
1690 priv->num_irqs = k; 1870 priv->num_irqs = k + 1;
1691 } 1871 }
1692 k++; 1872 k++;
1693 } 1873 }
1694 1874
1695 ndev->features |= NETIF_F_HW_VLAN_FILTER; 1875 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1696 1876
1697 ndev->netdev_ops = &cpsw_netdev_ops; 1877 ndev->netdev_ops = &cpsw_netdev_ops;
1698 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 1878 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1725,7 +1905,8 @@ static int cpsw_probe(struct platform_device *pdev)
1725 return 0; 1905 return 0;
1726 1906
1727clean_irq_ret: 1907clean_irq_ret:
1728 free_irq(ndev->irq, priv); 1908 for (i = 0; i < priv->num_irqs; i++)
1909 free_irq(priv->irqs_table[i], priv);
1729clean_ale_ret: 1910clean_ale_ret:
1730 cpsw_ale_destroy(priv->ale); 1911 cpsw_ale_destroy(priv->ale);
1731clean_dma_ret: 1912clean_dma_ret:
@@ -1748,7 +1929,8 @@ clean_slave_ret:
1748 pm_runtime_disable(&pdev->dev); 1929 pm_runtime_disable(&pdev->dev);
1749 kfree(priv->slaves); 1930 kfree(priv->slaves);
1750clean_ndev_ret: 1931clean_ndev_ret:
1751 free_netdev(ndev); 1932 kfree(priv->data.slave_data);
1933 free_netdev(priv->ndev);
1752 return ret; 1934 return ret;
1753} 1935}
1754 1936
@@ -1756,12 +1938,17 @@ static int cpsw_remove(struct platform_device *pdev)
1756{ 1938{
1757 struct net_device *ndev = platform_get_drvdata(pdev); 1939 struct net_device *ndev = platform_get_drvdata(pdev);
1758 struct cpsw_priv *priv = netdev_priv(ndev); 1940 struct cpsw_priv *priv = netdev_priv(ndev);
1941 int i;
1759 1942
1760 pr_info("removing device");
1761 platform_set_drvdata(pdev, NULL); 1943 platform_set_drvdata(pdev, NULL);
1944 if (priv->data.dual_emac)
1945 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
1946 unregister_netdev(ndev);
1762 1947
1763 cpts_unregister(priv->cpts); 1948 cpts_unregister(priv->cpts);
1764 free_irq(ndev->irq, priv); 1949 for (i = 0; i < priv->num_irqs; i++)
1950 free_irq(priv->irqs_table[i], priv);
1951
1765 cpsw_ale_destroy(priv->ale); 1952 cpsw_ale_destroy(priv->ale);
1766 cpdma_chan_destroy(priv->txch); 1953 cpdma_chan_destroy(priv->txch);
1767 cpdma_chan_destroy(priv->rxch); 1954 cpdma_chan_destroy(priv->rxch);
@@ -1775,8 +1962,10 @@ static int cpsw_remove(struct platform_device *pdev)
1775 pm_runtime_disable(&pdev->dev); 1962 pm_runtime_disable(&pdev->dev);
1776 clk_put(priv->clk); 1963 clk_put(priv->clk);
1777 kfree(priv->slaves); 1964 kfree(priv->slaves);
1965 kfree(priv->data.slave_data);
1966 if (priv->data.dual_emac)
1967 free_netdev(cpsw_get_slave_ndev(priv, 1));
1778 free_netdev(ndev); 1968 free_netdev(ndev);
1779
1780 return 0; 1969 return 0;
1781} 1970}
1782 1971
@@ -1812,6 +2001,7 @@ static const struct of_device_id cpsw_of_mtable[] = {
1812 { .compatible = "ti,cpsw", }, 2001 { .compatible = "ti,cpsw", },
1813 { /* sentinel */ }, 2002 { /* sentinel */ },
1814}; 2003};
2004MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1815 2005
1816static struct platform_driver cpsw_driver = { 2006static struct platform_driver cpsw_driver = {
1817 .driver = { 2007 .driver = {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index ee13dc78430c..49dfd592ac1e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -20,6 +20,7 @@
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/delay.h>
23 24
24#include "davinci_cpdma.h" 25#include "davinci_cpdma.h"
25 26
@@ -312,14 +313,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
312 } 313 }
313 314
314 if (ctlr->params.has_soft_reset) { 315 if (ctlr->params.has_soft_reset) {
315 unsigned long timeout = jiffies + HZ/10; 316 unsigned timeout = 10 * 100;
316 317
317 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); 318 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
318 while (time_before(jiffies, timeout)) { 319 while (timeout) {
319 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) 320 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
320 break; 321 break;
322 udelay(10);
323 timeout--;
321 } 324 }
322 WARN_ON(!time_before(jiffies, timeout)); 325 WARN_ON(!timeout);
323 } 326 }
324 327
325 for (i = 0; i < ctlr->num_chan; i++) { 328 for (i = 0; i < ctlr->num_chan; i++) {
@@ -673,7 +676,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
673} 676}
674 677
675int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, 678int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
676 int len, int directed, gfp_t gfp_mask) 679 int len, int directed)
677{ 680{
678 struct cpdma_ctlr *ctlr = chan->ctlr; 681 struct cpdma_ctlr *ctlr = chan->ctlr;
679 struct cpdma_desc __iomem *desc; 682 struct cpdma_desc __iomem *desc;
@@ -773,6 +776,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
773 struct cpdma_ctlr *ctlr = chan->ctlr; 776 struct cpdma_ctlr *ctlr = chan->ctlr;
774 struct cpdma_desc __iomem *desc; 777 struct cpdma_desc __iomem *desc;
775 int status, outlen; 778 int status, outlen;
779 int cb_status = 0;
776 struct cpdma_desc_pool *pool = ctlr->pool; 780 struct cpdma_desc_pool *pool = ctlr->pool;
777 dma_addr_t desc_dma; 781 dma_addr_t desc_dma;
778 unsigned long flags; 782 unsigned long flags;
@@ -808,8 +812,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
808 } 812 }
809 813
810 spin_unlock_irqrestore(&chan->lock, flags); 814 spin_unlock_irqrestore(&chan->lock, flags);
815 if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
816 cb_status = -ENOSYS;
817 else
818 cb_status = status;
811 819
812 __cpdma_chan_free(chan, desc, outlen, status); 820 __cpdma_chan_free(chan, desc, outlen, cb_status);
813 return status; 821 return status;
814 822
815unlock_ret: 823unlock_ret:
@@ -868,7 +876,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
868 struct cpdma_desc_pool *pool = ctlr->pool; 876 struct cpdma_desc_pool *pool = ctlr->pool;
869 unsigned long flags; 877 unsigned long flags;
870 int ret; 878 int ret;
871 unsigned long timeout; 879 unsigned timeout;
872 880
873 spin_lock_irqsave(&chan->lock, flags); 881 spin_lock_irqsave(&chan->lock, flags);
874 if (chan->state != CPDMA_STATE_ACTIVE) { 882 if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -883,14 +891,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
883 dma_reg_write(ctlr, chan->td, chan_linear(chan)); 891 dma_reg_write(ctlr, chan->td, chan_linear(chan));
884 892
885 /* wait for teardown complete */ 893 /* wait for teardown complete */
886 timeout = jiffies + HZ/10; /* 100 msec */ 894 timeout = 100 * 100; /* 100 ms */
887 while (time_before(jiffies, timeout)) { 895 while (timeout) {
888 u32 cp = chan_read(chan, cp); 896 u32 cp = chan_read(chan, cp);
889 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) 897 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
890 break; 898 break;
891 cpu_relax(); 899 udelay(10);
900 timeout--;
892 } 901 }
893 WARN_ON(!time_before(jiffies, timeout)); 902 WARN_ON(!timeout);
894 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); 903 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
895 904
896 /* handle completed packets */ 905 /* handle completed packets */
@@ -1031,3 +1040,5 @@ unlock_ret:
1031 return ret; 1040 return ret;
1032} 1041}
1033EXPORT_SYMBOL_GPL(cpdma_control_set); 1042EXPORT_SYMBOL_GPL(cpdma_control_set);
1043
1044MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index d9bcc6032fdc..86dee487f2f0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -89,7 +89,7 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
89int cpdma_chan_get_stats(struct cpdma_chan *chan, 89int cpdma_chan_get_stats(struct cpdma_chan *chan,
90 struct cpdma_chan_stats *stats); 90 struct cpdma_chan_stats *stats);
91int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, 91int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
92 int len, int directed, gfp_t gfp_mask); 92 int len, int directed);
93int cpdma_chan_process(struct cpdma_chan *chan, int quota); 93int cpdma_chan_process(struct cpdma_chan *chan, int quota);
94 94
95int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); 95int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 72300bc9e378..860e15ddfbcb 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1037,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
1037 1037
1038recycle: 1038recycle:
1039 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, 1039 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1040 skb_tailroom(skb), 0, GFP_KERNEL); 1040 skb_tailroom(skb), 0);
1041 1041
1042 WARN_ON(ret == -ENOMEM); 1042 WARN_ON(ret == -ENOMEM);
1043 if (unlikely(ret < 0)) 1043 if (unlikely(ret < 0))
@@ -1092,7 +1092,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1092 skb_tx_timestamp(skb); 1092 skb_tx_timestamp(skb);
1093 1093
1094 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, 1094 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
1095 0, GFP_KERNEL); 1095 0);
1096 if (unlikely(ret_code != 0)) { 1096 if (unlikely(ret_code != 0)) {
1097 if (netif_msg_tx_err(priv) && net_ratelimit()) 1097 if (netif_msg_tx_err(priv) && net_ratelimit())
1098 dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); 1098 dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
@@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
1438 * Polled functionality used by netconsole and others in non interrupt mode 1438 * Polled functionality used by netconsole and others in non interrupt mode
1439 * 1439 *
1440 */ 1440 */
1441void emac_poll_controller(struct net_device *ndev) 1441static void emac_poll_controller(struct net_device *ndev)
1442{ 1442{
1443 struct emac_priv *priv = netdev_priv(ndev); 1443 struct emac_priv *priv = netdev_priv(ndev);
1444 1444
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
1558 break; 1558 break;
1559 1559
1560 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, 1560 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1561 skb_tailroom(skb), 0, GFP_KERNEL); 1561 skb_tailroom(skb), 0);
1562 if (WARN_ON(ret < 0)) 1562 if (WARN_ON(ret < 0))
1563 break; 1563 break;
1564 } 1564 }
@@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev)
1865 1865
1866 1866
1867 /* obtain emac clock from kernel */ 1867 /* obtain emac clock from kernel */
1868 emac_clk = clk_get(&pdev->dev, NULL); 1868 emac_clk = devm_clk_get(&pdev->dev, NULL);
1869 if (IS_ERR(emac_clk)) { 1869 if (IS_ERR(emac_clk)) {
1870 dev_err(&pdev->dev, "failed to get EMAC clock\n"); 1870 dev_err(&pdev->dev, "failed to get EMAC clock\n");
1871 return -EBUSY; 1871 return -EBUSY;
1872 } 1872 }
1873 emac_bus_frequency = clk_get_rate(emac_clk); 1873 emac_bus_frequency = clk_get_rate(emac_clk);
1874 clk_put(emac_clk);
1875 1874
1876 /* TODO: Probe PHY here if possible */ 1875 /* TODO: Probe PHY here if possible */
1877 1876
1878 ndev = alloc_etherdev(sizeof(struct emac_priv)); 1877 ndev = alloc_etherdev(sizeof(struct emac_priv));
1879 if (!ndev) { 1878 if (!ndev)
1880 rc = -ENOMEM; 1879 return -ENOMEM;
1881 goto no_ndev;
1882 }
1883 1880
1884 platform_set_drvdata(pdev, ndev); 1881 platform_set_drvdata(pdev, ndev);
1885 priv = netdev_priv(ndev); 1882 priv = netdev_priv(ndev);
@@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1893 if (!pdata) { 1890 if (!pdata) {
1894 dev_err(&pdev->dev, "no platform data\n"); 1891 dev_err(&pdev->dev, "no platform data\n");
1895 rc = -ENODEV; 1892 rc = -ENODEV;
1896 goto probe_quit; 1893 goto no_pdata;
1897 } 1894 }
1898 1895
1899 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1896 /* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev)
1913 if (!res) { 1910 if (!res) {
1914 dev_err(&pdev->dev,"error getting res\n"); 1911 dev_err(&pdev->dev,"error getting res\n");
1915 rc = -ENOENT; 1912 rc = -ENOENT;
1916 goto probe_quit; 1913 goto no_pdata;
1917 } 1914 }
1918 1915
1919 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 1916 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
1920 size = resource_size(res); 1917 size = resource_size(res);
1921 if (!request_mem_region(res->start, size, ndev->name)) { 1918 if (!devm_request_mem_region(&pdev->dev, res->start,
1919 size, ndev->name)) {
1922 dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); 1920 dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
1923 rc = -ENXIO; 1921 rc = -ENXIO;
1924 goto probe_quit; 1922 goto no_pdata;
1925 } 1923 }
1926 1924
1927 priv->remap_addr = ioremap(res->start, size); 1925 priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
1928 if (!priv->remap_addr) { 1926 if (!priv->remap_addr) {
1929 dev_err(&pdev->dev, "unable to map IO\n"); 1927 dev_err(&pdev->dev, "unable to map IO\n");
1930 rc = -ENOMEM; 1928 rc = -ENOMEM;
1931 release_mem_region(res->start, size); 1929 goto no_pdata;
1932 goto probe_quit;
1933 } 1930 }
1934 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; 1931 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
1935 ndev->base_addr = (unsigned long)priv->remap_addr; 1932 ndev->base_addr = (unsigned long)priv->remap_addr;
@@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1962 if (!priv->dma) { 1959 if (!priv->dma) {
1963 dev_err(&pdev->dev, "error initializing DMA\n"); 1960 dev_err(&pdev->dev, "error initializing DMA\n");
1964 rc = -ENOMEM; 1961 rc = -ENOMEM;
1965 goto no_dma; 1962 goto no_pdata;
1966 } 1963 }
1967 1964
1968 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), 1965 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
@@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
1971 emac_rx_handler); 1968 emac_rx_handler);
1972 if (WARN_ON(!priv->txchan || !priv->rxchan)) { 1969 if (WARN_ON(!priv->txchan || !priv->rxchan)) {
1973 rc = -ENOMEM; 1970 rc = -ENOMEM;
1974 goto no_irq_res; 1971 goto no_cpdma_chan;
1975 } 1972 }
1976 1973
1977 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1974 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1978 if (!res) { 1975 if (!res) {
1979 dev_err(&pdev->dev, "error getting irq res\n"); 1976 dev_err(&pdev->dev, "error getting irq res\n");
1980 rc = -ENOENT; 1977 rc = -ENOENT;
1981 goto no_irq_res; 1978 goto no_cpdma_chan;
1982 } 1979 }
1983 ndev->irq = res->start; 1980 ndev->irq = res->start;
1984 1981
@@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
2000 if (rc) { 1997 if (rc) {
2001 dev_err(&pdev->dev, "error in register_netdev\n"); 1998 dev_err(&pdev->dev, "error in register_netdev\n");
2002 rc = -ENODEV; 1999 rc = -ENODEV;
2003 goto no_irq_res; 2000 goto no_cpdma_chan;
2004 } 2001 }
2005 2002
2006 2003
@@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
2015 2012
2016 return 0; 2013 return 0;
2017 2014
2018no_irq_res: 2015no_cpdma_chan:
2019 if (priv->txchan) 2016 if (priv->txchan)
2020 cpdma_chan_destroy(priv->txchan); 2017 cpdma_chan_destroy(priv->txchan);
2021 if (priv->rxchan) 2018 if (priv->rxchan)
2022 cpdma_chan_destroy(priv->rxchan); 2019 cpdma_chan_destroy(priv->rxchan);
2023 cpdma_ctlr_destroy(priv->dma); 2020 cpdma_ctlr_destroy(priv->dma);
2024no_dma: 2021no_pdata:
2025 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2026 release_mem_region(res->start, resource_size(res));
2027 iounmap(priv->remap_addr);
2028
2029probe_quit:
2030 free_netdev(ndev); 2022 free_netdev(ndev);
2031no_ndev:
2032 return rc; 2023 return rc;
2033} 2024}
2034 2025
@@ -2041,14 +2032,12 @@ no_ndev:
2041 */ 2032 */
2042static int davinci_emac_remove(struct platform_device *pdev) 2033static int davinci_emac_remove(struct platform_device *pdev)
2043{ 2034{
2044 struct resource *res;
2045 struct net_device *ndev = platform_get_drvdata(pdev); 2035 struct net_device *ndev = platform_get_drvdata(pdev);
2046 struct emac_priv *priv = netdev_priv(ndev); 2036 struct emac_priv *priv = netdev_priv(ndev);
2047 2037
2048 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 2038 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
2049 2039
2050 platform_set_drvdata(pdev, NULL); 2040 platform_set_drvdata(pdev, NULL);
2051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2052 2041
2053 if (priv->txchan) 2042 if (priv->txchan)
2054 cpdma_chan_destroy(priv->txchan); 2043 cpdma_chan_destroy(priv->txchan);
@@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2056 cpdma_chan_destroy(priv->rxchan); 2045 cpdma_chan_destroy(priv->rxchan);
2057 cpdma_ctlr_destroy(priv->dma); 2046 cpdma_ctlr_destroy(priv->dma);
2058 2047
2059 release_mem_region(res->start, resource_size(res));
2060
2061 unregister_netdev(ndev); 2048 unregister_netdev(ndev);
2062 iounmap(priv->remap_addr);
2063 free_netdev(ndev); 2049 free_netdev(ndev);
2064 2050
2065 return 0; 2051 return 0;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index d04a622b08d4..12aec173564c 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -485,6 +485,7 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
485 { .compatible = "ti,davinci_mdio", }, 485 { .compatible = "ti,davinci_mdio", },
486 { /* sentinel */ }, 486 { /* sentinel */ },
487}; 487};
488MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
488 489
489static struct platform_driver davinci_mdio_driver = { 490static struct platform_driver davinci_mdio_driver = {
490 .driver = { 491 .driver = {
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 22725386c5de..60c400f6d01f 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -320,6 +320,7 @@ static void tlan_remove_one(struct pci_dev *pdev)
320 free_netdev(dev); 320 free_netdev(dev);
321 321
322 pci_set_drvdata(pdev, NULL); 322 pci_set_drvdata(pdev, NULL);
323 cancel_work_sync(&priv->tlan_tqueue);
323} 324}
324 325
325static void tlan_start(struct net_device *dev) 326static void tlan_start(struct net_device *dev)
@@ -1911,10 +1912,8 @@ static void tlan_reset_lists(struct net_device *dev)
1911 list->frame_size = TLAN_MAX_FRAME_SIZE; 1912 list->frame_size = TLAN_MAX_FRAME_SIZE;
1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1913 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 1914 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1914 if (!skb) { 1915 if (!skb)
1915 netdev_err(dev, "Out of memory for received data\n");
1916 break; 1916 break;
1917 }
1918 1917
1919 list->buffer[0].address = pci_map_single(priv->pci_dev, 1918 list->buffer[0].address = pci_map_single(priv->pci_dev,
1920 skb->data, 1919 skb->data,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 445c0595c997..ad32af67e618 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -58,13 +58,6 @@ MODULE_DESCRIPTION("Gelic Network driver");
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59 59
60 60
61static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
62static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
63static inline void gelic_card_disable_txdmac(struct gelic_card *card);
64static inline void gelic_card_reset_chain(struct gelic_card *card,
65 struct gelic_descr_chain *chain,
66 struct gelic_descr *start_descr);
67
68/* set irq_mask */ 61/* set irq_mask */
69int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) 62int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
70{ 63{
@@ -78,12 +71,12 @@ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
78 return status; 71 return status;
79} 72}
80 73
81static inline void gelic_card_rx_irq_on(struct gelic_card *card) 74static void gelic_card_rx_irq_on(struct gelic_card *card)
82{ 75{
83 card->irq_mask |= GELIC_CARD_RXINT; 76 card->irq_mask |= GELIC_CARD_RXINT;
84 gelic_card_set_irq_mask(card, card->irq_mask); 77 gelic_card_set_irq_mask(card, card->irq_mask);
85} 78}
86static inline void gelic_card_rx_irq_off(struct gelic_card *card) 79static void gelic_card_rx_irq_off(struct gelic_card *card)
87{ 80{
88 card->irq_mask &= ~GELIC_CARD_RXINT; 81 card->irq_mask &= ~GELIC_CARD_RXINT;
89 gelic_card_set_irq_mask(card, card->irq_mask); 82 gelic_card_set_irq_mask(card, card->irq_mask);
@@ -127,6 +120,120 @@ static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
127 return 0; 120 return 0;
128} 121}
129 122
123/**
124 * gelic_card_disable_txdmac - disables the transmit DMA controller
125 * @card: card structure
126 *
127 * gelic_card_disable_txdmac terminates processing on the DMA controller by
128 * turing off DMA and issuing a force end
129 */
130static void gelic_card_disable_txdmac(struct gelic_card *card)
131{
132 int status;
133
134 /* this hvc blocks until the DMA in progress really stopped */
135 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
136 if (status)
137 dev_err(ctodev(card),
138 "lv1_net_stop_tx_dma failed, status=%d\n", status);
139}
140
141/**
142 * gelic_card_enable_rxdmac - enables the receive DMA controller
143 * @card: card structure
144 *
145 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
146 * in the GDADMACCNTR register
147 */
148static void gelic_card_enable_rxdmac(struct gelic_card *card)
149{
150 int status;
151
152#ifdef DEBUG
153 if (gelic_descr_get_status(card->rx_chain.head) !=
154 GELIC_DESCR_DMA_CARDOWNED) {
155 printk(KERN_ERR "%s: status=%x\n", __func__,
156 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
157 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
158 be32_to_cpu(card->rx_chain.head->next_descr_addr));
159 printk(KERN_ERR "%s: head=%p\n", __func__,
160 card->rx_chain.head);
161 }
162#endif
163 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
164 card->rx_chain.head->bus_addr, 0);
165 if (status)
166 dev_info(ctodev(card),
167 "lv1_net_start_rx_dma failed, status=%d\n", status);
168}
169
170/**
171 * gelic_card_disable_rxdmac - disables the receive DMA controller
172 * @card: card structure
173 *
174 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
175 * turing off DMA and issuing a force end
176 */
177static void gelic_card_disable_rxdmac(struct gelic_card *card)
178{
179 int status;
180
181 /* this hvc blocks until the DMA in progress really stopped */
182 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
183 if (status)
184 dev_err(ctodev(card),
185 "lv1_net_stop_rx_dma failed, %d\n", status);
186}
187
188/**
189 * gelic_descr_set_status -- sets the status of a descriptor
190 * @descr: descriptor to change
191 * @status: status to set in the descriptor
192 *
193 * changes the status to the specified value. Doesn't change other bits
194 * in the status
195 */
196static void gelic_descr_set_status(struct gelic_descr *descr,
197 enum gelic_descr_dma_status status)
198{
199 descr->dmac_cmd_status = cpu_to_be32(status |
200 (be32_to_cpu(descr->dmac_cmd_status) &
201 ~GELIC_DESCR_DMA_STAT_MASK));
202 /*
203 * dma_cmd_status field is used to indicate whether the descriptor
204 * is valid or not.
205 * Usually caller of this function wants to inform that to the
206 * hardware, so we assure here the hardware sees the change.
207 */
208 wmb();
209}
210
211/**
212 * gelic_card_reset_chain - reset status of a descriptor chain
213 * @card: card structure
214 * @chain: address of chain
215 * @start_descr: address of descriptor array
216 *
217 * Reset the status of dma descriptors to ready state
218 * and re-initialize the hardware chain for later use
219 */
220static void gelic_card_reset_chain(struct gelic_card *card,
221 struct gelic_descr_chain *chain,
222 struct gelic_descr *start_descr)
223{
224 struct gelic_descr *descr;
225
226 for (descr = start_descr; start_descr != descr->next; descr++) {
227 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
228 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
229 }
230
231 chain->head = start_descr;
232 chain->tail = (descr - 1);
233
234 (descr - 1)->next_descr_addr = 0;
235}
236
130void gelic_card_up(struct gelic_card *card) 237void gelic_card_up(struct gelic_card *card)
131{ 238{
132 pr_debug("%s: called\n", __func__); 239 pr_debug("%s: called\n", __func__);
@@ -183,29 +290,6 @@ gelic_descr_get_status(struct gelic_descr *descr)
183} 290}
184 291
185/** 292/**
186 * gelic_descr_set_status -- sets the status of a descriptor
187 * @descr: descriptor to change
188 * @status: status to set in the descriptor
189 *
190 * changes the status to the specified value. Doesn't change other bits
191 * in the status
192 */
193static void gelic_descr_set_status(struct gelic_descr *descr,
194 enum gelic_descr_dma_status status)
195{
196 descr->dmac_cmd_status = cpu_to_be32(status |
197 (be32_to_cpu(descr->dmac_cmd_status) &
198 ~GELIC_DESCR_DMA_STAT_MASK));
199 /*
200 * dma_cmd_status field is used to indicate whether the descriptor
201 * is valid or not.
202 * Usually caller of this function wants to inform that to the
203 * hardware, so we assure here the hardware sees the change.
204 */
205 wmb();
206}
207
208/**
209 * gelic_card_free_chain - free descriptor chain 293 * gelic_card_free_chain - free descriptor chain
210 * @card: card structure 294 * @card: card structure
211 * @descr_in: address of desc 295 * @descr_in: address of desc
@@ -286,31 +370,6 @@ iommu_error:
286} 370}
287 371
288/** 372/**
289 * gelic_card_reset_chain - reset status of a descriptor chain
290 * @card: card structure
291 * @chain: address of chain
292 * @start_descr: address of descriptor array
293 *
294 * Reset the status of dma descriptors to ready state
295 * and re-initialize the hardware chain for later use
296 */
297static void gelic_card_reset_chain(struct gelic_card *card,
298 struct gelic_descr_chain *chain,
299 struct gelic_descr *start_descr)
300{
301 struct gelic_descr *descr;
302
303 for (descr = start_descr; start_descr != descr->next; descr++) {
304 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
305 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
306 }
307
308 chain->head = start_descr;
309 chain->tail = (descr - 1);
310
311 (descr - 1)->next_descr_addr = 0;
312}
313/**
314 * gelic_descr_prepare_rx - reinitializes a rx descriptor 373 * gelic_descr_prepare_rx - reinitializes a rx descriptor
315 * @card: card structure 374 * @card: card structure
316 * @descr: descriptor to re-init 375 * @descr: descriptor to re-init
@@ -599,71 +658,6 @@ void gelic_net_set_multi(struct net_device *netdev)
599} 658}
600 659
601/** 660/**
602 * gelic_card_enable_rxdmac - enables the receive DMA controller
603 * @card: card structure
604 *
605 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
606 * in the GDADMACCNTR register
607 */
608static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
609{
610 int status;
611
612#ifdef DEBUG
613 if (gelic_descr_get_status(card->rx_chain.head) !=
614 GELIC_DESCR_DMA_CARDOWNED) {
615 printk(KERN_ERR "%s: status=%x\n", __func__,
616 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
617 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
618 be32_to_cpu(card->rx_chain.head->next_descr_addr));
619 printk(KERN_ERR "%s: head=%p\n", __func__,
620 card->rx_chain.head);
621 }
622#endif
623 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
624 card->rx_chain.head->bus_addr, 0);
625 if (status)
626 dev_info(ctodev(card),
627 "lv1_net_start_rx_dma failed, status=%d\n", status);
628}
629
630/**
631 * gelic_card_disable_rxdmac - disables the receive DMA controller
632 * @card: card structure
633 *
634 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
635 * turing off DMA and issuing a force end
636 */
637static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
638{
639 int status;
640
641 /* this hvc blocks until the DMA in progress really stopped */
642 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
643 if (status)
644 dev_err(ctodev(card),
645 "lv1_net_stop_rx_dma failed, %d\n", status);
646}
647
648/**
649 * gelic_card_disable_txdmac - disables the transmit DMA controller
650 * @card: card structure
651 *
652 * gelic_card_disable_txdmac terminates processing on the DMA controller by
653 * turing off DMA and issuing a force end
654 */
655static inline void gelic_card_disable_txdmac(struct gelic_card *card)
656{
657 int status;
658
659 /* this hvc blocks until the DMA in progress really stopped */
660 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
661 if (status)
662 dev_err(ctodev(card),
663 "lv1_net_stop_tx_dma failed, status=%d\n", status);
664}
665
666/**
667 * gelic_net_stop - called upon ifconfig down 661 * gelic_net_stop - called upon ifconfig down
668 * @netdev: interface device structure 662 * @netdev: interface device structure
669 * 663 *
@@ -746,7 +740,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
746 } 740 }
747} 741}
748 742
749static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, 743static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
750 unsigned short tag) 744 unsigned short tag)
751{ 745{
752 struct vlan_ethhdr *veth; 746 struct vlan_ethhdr *veth;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f1b91fd7e41c..c655fe60121e 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
352 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); 352 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
353 353
354 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 354 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
355 &chain->dma_addr, GFP_KERNEL); 355 &chain->dma_addr, GFP_KERNEL);
356
357 if (!chain->hwring) 356 if (!chain->hwring)
358 return -ENOMEM; 357 return -ENOMEM;
359 358
@@ -2330,8 +2329,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
2330 if (SPIDER_NET_RX_CSUM_DEFAULT) 2329 if (SPIDER_NET_RX_CSUM_DEFAULT)
2331 netdev->features |= NETIF_F_RXCSUM; 2330 netdev->features |= NETIF_F_RXCSUM;
2332 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; 2331 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
2333 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2332 /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2334 * NETIF_F_HW_VLAN_FILTER */ 2333 * NETIF_F_HW_VLAN_CTAG_FILTER */
2335 2334
2336 netdev->irq = card->pdev->irq; 2335 netdev->irq = card->pdev->irq;
2337 card->num_rx_ints = 0; 2336 card->num_rx_ints = 0;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fa947a2d929..3c69a0460832 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev)
1308 data->id, dev->irq, dev->name); 1308 data->id, dev->irq, dev->name);
1309 } 1309 }
1310 1310
1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, 1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
1312 &data->rxdma, GFP_KERNEL); 1312 GFP_KERNEL | __GFP_ZERO);
1313 1313 if (!data->rxring)
1314 if (!data->rxring) {
1315 printk(KERN_DEBUG
1316 "TSI108_ETH: failed to allocate memory for rxring!\n");
1317 return -ENOMEM; 1314 return -ENOMEM;
1318 } else {
1319 memset(data->rxring, 0, rxring_size);
1320 }
1321
1322 data->txring = dma_alloc_coherent(NULL, txring_size,
1323 &data->txdma, GFP_KERNEL);
1324 1315
1316 data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
1317 GFP_KERNEL | __GFP_ZERO);
1325 if (!data->txring) { 1318 if (!data->txring) {
1326 printk(KERN_DEBUG
1327 "TSI108_ETH: failed to allocate memory for txring!\n");
1328 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); 1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
1329 return -ENOMEM; 1320 return -ENOMEM;
1330 } else {
1331 memset(data->txring, 0, txring_size);
1332 } 1321 }
1333 1322
1334 for (i = 0; i < TSI108_RXRING_LEN; i++) { 1323 for (i = 0; i < TSI108_RXRING_LEN; i++) {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 185c721c52d7..ca98acabf1b4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -508,8 +508,10 @@ static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
508static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 508static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509static const struct ethtool_ops netdev_ethtool_ops; 509static const struct ethtool_ops netdev_ethtool_ops;
510static int rhine_close(struct net_device *dev); 510static int rhine_close(struct net_device *dev);
511static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); 511static int rhine_vlan_rx_add_vid(struct net_device *dev,
512static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); 512 __be16 proto, u16 vid);
513static int rhine_vlan_rx_kill_vid(struct net_device *dev,
514 __be16 proto, u16 vid);
513static void rhine_restart_tx(struct net_device *dev); 515static void rhine_restart_tx(struct net_device *dev);
514 516
515static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) 517static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
@@ -1026,8 +1028,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1026 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 1028 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1027 1029
1028 if (pdev->revision >= VT6105M) 1030 if (pdev->revision >= VT6105M)
1029 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 1031 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1030 NETIF_F_HW_VLAN_FILTER; 1032 NETIF_F_HW_VLAN_CTAG_RX |
1033 NETIF_F_HW_VLAN_CTAG_FILTER;
1031 1034
1032 /* dev->name not defined before register_netdev()! */ 1035 /* dev->name not defined before register_netdev()! */
1033 rc = register_netdev(dev); 1036 rc = register_netdev(dev);
@@ -1414,7 +1417,7 @@ static void rhine_update_vcam(struct net_device *dev)
1414 rhine_set_vlan_cam_mask(ioaddr, vCAMmask); 1417 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1415} 1418}
1416 1419
1417static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1420static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1418{ 1421{
1419 struct rhine_private *rp = netdev_priv(dev); 1422 struct rhine_private *rp = netdev_priv(dev);
1420 1423
@@ -1425,7 +1428,7 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1425 return 0; 1428 return 0;
1426} 1429}
1427 1430
1428static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1431static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1429{ 1432{
1430 struct rhine_private *rp = netdev_priv(dev); 1433 struct rhine_private *rp = netdev_priv(dev);
1431 1434
@@ -1933,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1933 skb->protocol = eth_type_trans(skb, dev); 1936 skb->protocol = eth_type_trans(skb, dev);
1934 1937
1935 if (unlikely(desc_length & DescTag)) 1938 if (unlikely(desc_length & DescTag))
1936 __vlan_hwaccel_put_tag(skb, vlan_tci); 1939 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1937 netif_receive_skb(skb); 1940 netif_receive_skb(skb);
1938 1941
1939 u64_stats_update_begin(&rp->rx_stats.syncp); 1942 u64_stats_update_begin(&rp->rx_stats.syncp);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1bc7f9fd2583..fb6248956ee2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -525,7 +525,8 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
525 mac_set_vlan_cam_mask(regs, vptr->vCAMmask); 525 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
526} 526}
527 527
528static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 528static int velocity_vlan_rx_add_vid(struct net_device *dev,
529 __be16 proto, u16 vid)
529{ 530{
530 struct velocity_info *vptr = netdev_priv(dev); 531 struct velocity_info *vptr = netdev_priv(dev);
531 532
@@ -536,7 +537,8 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
536 return 0; 537 return 0;
537} 538}
538 539
539static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 540static int velocity_vlan_rx_kill_vid(struct net_device *dev,
541 __be16 proto, u16 vid)
540{ 542{
541 struct velocity_info *vptr = netdev_priv(dev); 543 struct velocity_info *vptr = netdev_priv(dev);
542 544
@@ -2078,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2078 if (rd->rdesc0.RSR & RSR_DETAG) { 2080 if (rd->rdesc0.RSR & RSR_DETAG) {
2079 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); 2081 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2080 2082
2081 __vlan_hwaccel_put_tag(skb, vid); 2083 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2082 } 2084 }
2083 netif_rx(skb); 2085 netif_rx(skb);
2084 2086
@@ -2810,9 +2812,10 @@ static int velocity_found1(struct pci_dev *pdev,
2810 dev->ethtool_ops = &velocity_ethtool_ops; 2812 dev->ethtool_ops = &velocity_ethtool_ops;
2811 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2813 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2812 2814
2813 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX; 2815 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2814 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2816 NETIF_F_HW_VLAN_CTAG_TX;
2815 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; 2817 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
2818 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
2816 2819
2817 ret = register_netdev(dev); 2820 ret = register_netdev(dev);
2818 if (ret < 0) 2821 if (ret < 0)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 545043cc4c0b..a518dcab396e 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -754,7 +754,7 @@ static int w5100_remove(struct platform_device *pdev)
754 return 0; 754 return 0;
755} 755}
756 756
757#ifdef CONFIG_PM 757#ifdef CONFIG_PM_SLEEP
758static int w5100_suspend(struct device *dev) 758static int w5100_suspend(struct device *dev)
759{ 759{
760 struct platform_device *pdev = to_platform_device(dev); 760 struct platform_device *pdev = to_platform_device(dev);
@@ -787,7 +787,7 @@ static int w5100_resume(struct device *dev)
787 } 787 }
788 return 0; 788 return 0;
789} 789}
790#endif /* CONFIG_PM */ 790#endif /* CONFIG_PM_SLEEP */
791 791
792static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); 792static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
793 793
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 7cbd0e6fc6f3..6e00e3f94ce4 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -666,7 +666,7 @@ static int w5300_remove(struct platform_device *pdev)
666 return 0; 666 return 0;
667} 667}
668 668
669#ifdef CONFIG_PM 669#ifdef CONFIG_PM_SLEEP
670static int w5300_suspend(struct device *dev) 670static int w5300_suspend(struct device *dev)
671{ 671{
672 struct platform_device *pdev = to_platform_device(dev); 672 struct platform_device *pdev = to_platform_device(dev);
@@ -699,7 +699,7 @@ static int w5300_resume(struct device *dev)
699 } 699 }
700 return 0; 700 return 0;
701} 701}
702#endif /* CONFIG_PM */ 702#endif /* CONFIG_PM_SLEEP */
703 703
704static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume); 704static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
705 705
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9fc2ada4c3c2..57c2e5ef2804 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev)
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL); 248 &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
249 if (!lp->tx_bd_v) { 249 if (!lp->tx_bd_v)
250 dev_err(&ndev->dev,
251 "unable to allocate DMA TX buffer descriptors");
252 goto out; 250 goto out;
253 } 251
254 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
255 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
256 &lp->rx_bd_p, GFP_KERNEL); 254 &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
257 if (!lp->rx_bd_v) { 255 if (!lp->rx_bd_v)
258 dev_err(&ndev->dev,
259 "unable to allocate DMA RX buffer descriptors");
260 goto out; 256 goto out;
261 }
262 257
263 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
264 for (i = 0; i < TX_BD_NUM; i++) { 258 for (i = 0; i < TX_BD_NUM; i++) {
265 lp->tx_bd_v[i].next = lp->tx_bd_p + 259 lp->tx_bd_v[i].next = lp->tx_bd_p +
266 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); 260 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
267 } 261 }
268 262
269 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
270 for (i = 0; i < RX_BD_NUM; i++) { 263 for (i = 0; i < RX_BD_NUM; i++) {
271 lp->rx_bd_v[i].next = lp->rx_bd_p + 264 lp->rx_bd_v[i].next = lp->rx_bd_p +
272 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); 265 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
273 266
274 skb = netdev_alloc_skb_ip_align(ndev, 267 skb = netdev_alloc_skb_ip_align(ndev,
275 XTE_MAX_JUMBO_FRAME_SIZE); 268 XTE_MAX_JUMBO_FRAME_SIZE);
276 269 if (!skb)
277 if (skb == 0) {
278 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
279 goto out; 270 goto out;
280 } 271
281 lp->rx_skb[i] = skb; 272 lp->rx_skb[i] = skb;
282 /* returns physical address of skb->data */ 273 /* returns physical address of skb->data */
283 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 274 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev)
789 780
790 new_skb = netdev_alloc_skb_ip_align(ndev, 781 new_skb = netdev_alloc_skb_ip_align(ndev,
791 XTE_MAX_JUMBO_FRAME_SIZE); 782 XTE_MAX_JUMBO_FRAME_SIZE);
792 783 if (!new_skb) {
793 if (new_skb == 0) {
794 dev_err(&ndev->dev, "no memory for new sk_buff\n");
795 spin_unlock_irqrestore(&lp->rx_lock, flags); 784 spin_unlock_irqrestore(&lp->rx_lock, flags);
796 return; 785 return;
797 } 786 }
@@ -1029,9 +1018,9 @@ static int temac_of_probe(struct platform_device *op)
1029 ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ 1018 ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
1030 ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ 1019 ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
1031 ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ 1020 ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
1032 ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */ 1021 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
1033 ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */ 1022 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
1034 ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */ 1023 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
1035 ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ 1024 ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
1036 ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ 1025 ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
1037 ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ 1026 ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 278c9db3b5b8..24748e8367a1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
205 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 205 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
206 &lp->tx_bd_p, 206 &lp->tx_bd_p,
207 GFP_KERNEL); 207 GFP_KERNEL | __GFP_ZERO);
208 if (!lp->tx_bd_v) { 208 if (!lp->tx_bd_v)
209 dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
210 "descriptors");
211 goto out; 209 goto out;
212 }
213 210
214 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 211 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
215 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 212 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
216 &lp->rx_bd_p, 213 &lp->rx_bd_p,
217 GFP_KERNEL); 214 GFP_KERNEL | __GFP_ZERO);
218 if (!lp->rx_bd_v) { 215 if (!lp->rx_bd_v)
219 dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
220 "descriptors");
221 goto out; 216 goto out;
222 }
223 217
224 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
225 for (i = 0; i < TX_BD_NUM; i++) { 218 for (i = 0; i < TX_BD_NUM; i++) {
226 lp->tx_bd_v[i].next = lp->tx_bd_p + 219 lp->tx_bd_v[i].next = lp->tx_bd_p +
227 sizeof(*lp->tx_bd_v) * 220 sizeof(*lp->tx_bd_v) *
228 ((i + 1) % TX_BD_NUM); 221 ((i + 1) % TX_BD_NUM);
229 } 222 }
230 223
231 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
232 for (i = 0; i < RX_BD_NUM; i++) { 224 for (i = 0; i < RX_BD_NUM; i++) {
233 lp->rx_bd_v[i].next = lp->rx_bd_p + 225 lp->rx_bd_v[i].next = lp->rx_bd_p +
234 sizeof(*lp->rx_bd_v) * 226 sizeof(*lp->rx_bd_v) *
235 ((i + 1) % RX_BD_NUM); 227 ((i + 1) % RX_BD_NUM);
236 228
237 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 229 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
238 if (!skb) { 230 if (!skb)
239 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
240 goto out; 231 goto out;
241 }
242 232
243 lp->rx_bd_v[i].sw_id_offset = (u32) skb; 233 lp->rx_bd_v[i].sw_id_offset = (u32) skb;
244 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 234 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev)
777 packets++; 767 packets++;
778 768
779 new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 769 new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
780 if (!new_skb) { 770 if (!new_skb)
781 dev_err(&ndev->dev, "no memory for new sk_buff\n");
782 return; 771 return;
783 } 772
784 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, 773 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
785 lp->max_frm_size, 774 lp->max_frm_size,
786 DMA_FROM_DEVICE); 775 DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 1025b4e937d2..bdd20b888cf6 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
1041 /* 1 extra so we can use insw */ 1041 /* 1 extra so we can use insw */
1042 skb = netdev_alloc_skb(dev, pktlen + 3); 1042 skb = netdev_alloc_skb(dev, pktlen + 3);
1043 if (!skb) { 1043 if (!skb) {
1044 pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
1045 dev->stats.rx_dropped++; 1044 dev->stats.rx_dropped++;
1046 } else { /* okay get the packet */ 1045 } else { /* okay get the packet */
1047 skb_reserve(skb, 2); 1046 skb_reserve(skb, 2);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 502c8ff1d985..4c8ddc944d51 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1070,13 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
1070 (PI_ALIGN_K_DESC_BLK - 1); 1070 (PI_ALIGN_K_DESC_BLK - 1);
1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, 1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1072 &bp->kmalloced_dma, 1072 &bp->kmalloced_dma,
1073 GFP_ATOMIC); 1073 GFP_ATOMIC | __GFP_ZERO);
1074 if (top_v == NULL) { 1074 if (top_v == NULL)
1075 printk("%s: Could not allocate memory for host buffers "
1076 "and structures!\n", print_name);
1077 return DFX_K_FAILURE; 1075 return DFX_K_FAILURE;
1078 } 1076
1079 memset(top_v, 0, alloc_size); /* zero out memory before continuing */
1080 top_p = bp->kmalloced_dma; /* get physical address of buffer */ 1077 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1081 1078
1082 /* 1079 /*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4cf8f1017aad..b2d863f2ea42 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -866,7 +866,7 @@ static int yam_open(struct net_device *dev)
866 866
867 printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq); 867 printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);
868 868
869 if (!dev || !yp->bitrate) 869 if (!yp->bitrate)
870 return -ENXIO; 870 return -ENXIO;
871 if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT || 871 if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
872 dev->irq < 2 || dev->irq > 15) { 872 dev->irq < 2 || dev->irq > 15) {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f5f0f09e4cc5..2b0480416b31 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -522,7 +522,7 @@ int netvsc_send(struct hv_device *device,
522 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; 522 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
523 523
524 if (packet->completion.send.send_completion) 524 if (packet->completion.send.send_completion)
525 req_id = (u64)packet; 525 req_id = (ulong)packet;
526 else 526 else
527 req_id = 0; 527 req_id = 0;
528 528
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8341b62e5521..088c55496191 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -429,7 +429,7 @@ static int netvsc_probe(struct hv_device *dev,
429 429
430 /* TODO: Add GSO and Checksum offload */ 430 /* TODO: Add GSO and Checksum offload */
431 net->hw_features = NETIF_F_SG; 431 net->hw_features = NETIF_F_SG;
432 net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX; 432 net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
433 433
434 SET_ETHTOOL_OPS(net, &ethtool_ops); 434 SET_ETHTOOL_OPS(net, &ethtool_ops);
435 SET_NETDEV_DEV(net, &dev->device); 435 SET_NETDEV_DEV(net, &dev->device);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index fc1687ea4a42..6f10b4964726 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -51,7 +51,7 @@ struct at86rf230_local {
51 struct ieee802154_dev *dev; 51 struct ieee802154_dev *dev;
52 52
53 spinlock_t lock; 53 spinlock_t lock;
54 bool irq_disabled; 54 bool irq_busy;
55 bool is_tx; 55 bool is_tx;
56}; 56};
57 57
@@ -219,6 +219,9 @@ struct at86rf230_local {
219#define IRQ_PLL_UNL (1 << 1) 219#define IRQ_PLL_UNL (1 << 1)
220#define IRQ_PLL_LOCK (1 << 0) 220#define IRQ_PLL_LOCK (1 << 0)
221 221
222#define IRQ_ACTIVE_HIGH 0
223#define IRQ_ACTIVE_LOW 1
224
222#define STATE_P_ON 0x00 /* BUSY */ 225#define STATE_P_ON 0x00 /* BUSY */
223#define STATE_BUSY_RX 0x01 226#define STATE_BUSY_RX 0x01
224#define STATE_BUSY_TX 0x02 227#define STATE_BUSY_TX 0x02
@@ -233,8 +236,8 @@ struct at86rf230_local {
233#define STATE_SLEEP 0x0F 236#define STATE_SLEEP 0x0F
234#define STATE_BUSY_RX_AACK 0x11 237#define STATE_BUSY_RX_AACK 0x11
235#define STATE_BUSY_TX_ARET 0x12 238#define STATE_BUSY_TX_ARET 0x12
236#define STATE_BUSY_RX_AACK_ON 0x16 239#define STATE_RX_AACK_ON 0x16
237#define STATE_BUSY_TX_ARET_ON 0x19 240#define STATE_TX_ARET_ON 0x19
238#define STATE_RX_ON_NOCLK 0x1C 241#define STATE_RX_ON_NOCLK 0x1C
239#define STATE_RX_AACK_ON_NOCLK 0x1D 242#define STATE_RX_AACK_ON_NOCLK 0x1D
240#define STATE_BUSY_RX_AACK_NOCLK 0x1E 243#define STATE_BUSY_RX_AACK_NOCLK 0x1E
@@ -544,7 +547,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
544 unsigned long flags; 547 unsigned long flags;
545 548
546 spin_lock(&lp->lock); 549 spin_lock(&lp->lock);
547 if (lp->irq_disabled) { 550 if (lp->irq_busy) {
548 spin_unlock(&lp->lock); 551 spin_unlock(&lp->lock);
549 return -EBUSY; 552 return -EBUSY;
550 } 553 }
@@ -619,6 +622,52 @@ err:
619 return -EINVAL; 622 return -EINVAL;
620} 623}
621 624
625static int
626at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
627 struct ieee802154_hw_addr_filt *filt,
628 unsigned long changed)
629{
630 struct at86rf230_local *lp = dev->priv;
631
632 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
633 dev_vdbg(&lp->spi->dev,
634 "at86rf230_set_hw_addr_filt called for saddr\n");
635 __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
636 __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
637 }
638
639 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
640 dev_vdbg(&lp->spi->dev,
641 "at86rf230_set_hw_addr_filt called for pan id\n");
642 __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
643 __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
644 }
645
646 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
647 dev_vdbg(&lp->spi->dev,
648 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
649 at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
650 at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
651 at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
652 at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
653 at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
654 at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
655 at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
656 at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
657 }
658
659 if (changed & IEEE802515_AFILT_PANC_CHANGED) {
660 dev_vdbg(&lp->spi->dev,
661 "at86rf230_set_hw_addr_filt called for panc change\n");
662 if (filt->pan_coord)
663 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
664 else
665 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 0);
666 }
667
668 return 0;
669}
670
622static struct ieee802154_ops at86rf230_ops = { 671static struct ieee802154_ops at86rf230_ops = {
623 .owner = THIS_MODULE, 672 .owner = THIS_MODULE,
624 .xmit = at86rf230_xmit, 673 .xmit = at86rf230_xmit,
@@ -626,6 +675,7 @@ static struct ieee802154_ops at86rf230_ops = {
626 .set_channel = at86rf230_channel, 675 .set_channel = at86rf230_channel,
627 .start = at86rf230_start, 676 .start = at86rf230_start,
628 .stop = at86rf230_stop, 677 .stop = at86rf230_stop,
678 .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
629}; 679};
630 680
631static void at86rf230_irqwork(struct work_struct *work) 681static void at86rf230_irqwork(struct work_struct *work)
@@ -658,8 +708,16 @@ static void at86rf230_irqwork(struct work_struct *work)
658 } 708 }
659 709
660 spin_lock_irqsave(&lp->lock, flags); 710 spin_lock_irqsave(&lp->lock, flags);
661 lp->irq_disabled = 0; 711 lp->irq_busy = 0;
662 spin_unlock_irqrestore(&lp->lock, flags); 712 spin_unlock_irqrestore(&lp->lock, flags);
713}
714
715static void at86rf230_irqwork_level(struct work_struct *work)
716{
717 struct at86rf230_local *lp =
718 container_of(work, struct at86rf230_local, irqwork);
719
720 at86rf230_irqwork(work);
663 721
664 enable_irq(lp->spi->irq); 722 enable_irq(lp->spi->irq);
665} 723}
@@ -668,10 +726,8 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
668{ 726{
669 struct at86rf230_local *lp = data; 727 struct at86rf230_local *lp = data;
670 728
671 disable_irq_nosync(irq);
672
673 spin_lock(&lp->lock); 729 spin_lock(&lp->lock);
674 lp->irq_disabled = 1; 730 lp->irq_busy = 1;
675 spin_unlock(&lp->lock); 731 spin_unlock(&lp->lock);
676 732
677 schedule_work(&lp->irqwork); 733 schedule_work(&lp->irqwork);
@@ -679,11 +735,23 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
679 return IRQ_HANDLED; 735 return IRQ_HANDLED;
680} 736}
681 737
738static irqreturn_t at86rf230_isr_level(int irq, void *data)
739{
740 disable_irq_nosync(irq);
741
742 return at86rf230_isr(irq, data);
743}
744
745static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
746{
747 return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
748}
682 749
683static int at86rf230_hw_init(struct at86rf230_local *lp) 750static int at86rf230_hw_init(struct at86rf230_local *lp)
684{ 751{
752 struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
753 int rc, irq_pol;
685 u8 status; 754 u8 status;
686 int rc;
687 755
688 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); 756 rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
689 if (rc) 757 if (rc)
@@ -701,12 +769,17 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
701 dev_info(&lp->spi->dev, "Status: %02x\n", status); 769 dev_info(&lp->spi->dev, "Status: %02x\n", status);
702 } 770 }
703 771
704 rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR | 772 /* configure irq polarity, defaults to high active */
705 * IRQ_CCA_ED | 773 if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
706 * IRQ_TRX_END | 774 irq_pol = IRQ_ACTIVE_LOW;
707 * IRQ_PLL_UNL | 775 else
708 * IRQ_PLL_LOCK 776 irq_pol = IRQ_ACTIVE_HIGH;
709 */ 777
778 rc = at86rf230_irq_polarity(lp, irq_pol);
779 if (rc)
780 return rc;
781
782 rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, IRQ_TRX_END);
710 if (rc) 783 if (rc)
711 return rc; 784 return rc;
712 785
@@ -751,37 +824,38 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
751 return 0; 824 return 0;
752} 825}
753 826
754static int at86rf230_fill_data(struct spi_device *spi) 827static void at86rf230_fill_data(struct spi_device *spi)
755{ 828{
756 struct at86rf230_local *lp = spi_get_drvdata(spi); 829 struct at86rf230_local *lp = spi_get_drvdata(spi);
757 struct at86rf230_platform_data *pdata = spi->dev.platform_data; 830 struct at86rf230_platform_data *pdata = spi->dev.platform_data;
758 831
759 if (!pdata) {
760 dev_err(&spi->dev, "no platform_data\n");
761 return -EINVAL;
762 }
763
764 lp->rstn = pdata->rstn; 832 lp->rstn = pdata->rstn;
765 lp->slp_tr = pdata->slp_tr; 833 lp->slp_tr = pdata->slp_tr;
766 lp->dig2 = pdata->dig2; 834 lp->dig2 = pdata->dig2;
767
768 return 0;
769} 835}
770 836
771static int at86rf230_probe(struct spi_device *spi) 837static int at86rf230_probe(struct spi_device *spi)
772{ 838{
839 struct at86rf230_platform_data *pdata;
773 struct ieee802154_dev *dev; 840 struct ieee802154_dev *dev;
774 struct at86rf230_local *lp; 841 struct at86rf230_local *lp;
775 u8 man_id_0, man_id_1; 842 u8 man_id_0, man_id_1, status;
776 int rc; 843 irq_handler_t irq_handler;
844 work_func_t irq_worker;
845 int rc, supported = 0;
777 const char *chip; 846 const char *chip;
778 int supported = 0;
779 847
780 if (!spi->irq) { 848 if (!spi->irq) {
781 dev_err(&spi->dev, "no IRQ specified\n"); 849 dev_err(&spi->dev, "no IRQ specified\n");
782 return -EINVAL; 850 return -EINVAL;
783 } 851 }
784 852
853 pdata = spi->dev.platform_data;
854 if (!pdata) {
855 dev_err(&spi->dev, "no platform_data\n");
856 return -EINVAL;
857 }
858
785 dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); 859 dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
786 if (!dev) 860 if (!dev)
787 return -ENOMEM; 861 return -ENOMEM;
@@ -791,23 +865,28 @@ static int at86rf230_probe(struct spi_device *spi)
791 865
792 lp->spi = spi; 866 lp->spi = spi;
793 867
794 dev->priv = lp;
795 dev->parent = &spi->dev; 868 dev->parent = &spi->dev;
796 dev->extra_tx_headroom = 0; 869 dev->extra_tx_headroom = 0;
797 /* We do support only 2.4 Ghz */ 870 /* We do support only 2.4 Ghz */
798 dev->phy->channels_supported[0] = 0x7FFF800; 871 dev->phy->channels_supported[0] = 0x7FFF800;
799 dev->flags = IEEE802154_HW_OMIT_CKSUM; 872 dev->flags = IEEE802154_HW_OMIT_CKSUM;
800 873
874 if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
875 irq_worker = at86rf230_irqwork;
876 irq_handler = at86rf230_isr;
877 } else {
878 irq_worker = at86rf230_irqwork_level;
879 irq_handler = at86rf230_isr_level;
880 }
881
801 mutex_init(&lp->bmux); 882 mutex_init(&lp->bmux);
802 INIT_WORK(&lp->irqwork, at86rf230_irqwork); 883 INIT_WORK(&lp->irqwork, irq_worker);
803 spin_lock_init(&lp->lock); 884 spin_lock_init(&lp->lock);
804 init_completion(&lp->tx_complete); 885 init_completion(&lp->tx_complete);
805 886
806 spi_set_drvdata(spi, lp); 887 spi_set_drvdata(spi, lp);
807 888
808 rc = at86rf230_fill_data(spi); 889 at86rf230_fill_data(spi);
809 if (rc)
810 goto err_fill;
811 890
812 rc = gpio_request(lp->rstn, "rstn"); 891 rc = gpio_request(lp->rstn, "rstn");
813 if (rc) 892 if (rc)
@@ -882,18 +961,23 @@ static int at86rf230_probe(struct spi_device *spi)
882 if (rc) 961 if (rc)
883 goto err_gpio_dir; 962 goto err_gpio_dir;
884 963
885 rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED, 964 rc = request_irq(spi->irq, irq_handler,
965 IRQF_SHARED | pdata->irq_type,
886 dev_name(&spi->dev), lp); 966 dev_name(&spi->dev), lp);
887 if (rc) 967 if (rc)
888 goto err_gpio_dir; 968 goto err_gpio_dir;
889 969
970 /* Read irq status register to reset irq line */
971 rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
972 if (rc)
973 goto err_irq;
974
890 rc = ieee802154_register_device(lp->dev); 975 rc = ieee802154_register_device(lp->dev);
891 if (rc) 976 if (rc)
892 goto err_irq; 977 goto err_irq;
893 978
894 return rc; 979 return rc;
895 980
896 ieee802154_unregister_device(lp->dev);
897err_irq: 981err_irq:
898 free_irq(spi->irq, lp); 982 free_irq(spi->irq, lp);
899 flush_work(&lp->irqwork); 983 flush_work(&lp->irqwork);
@@ -903,7 +987,6 @@ err_gpio_dir:
903err_slp_tr: 987err_slp_tr:
904 gpio_free(lp->rstn); 988 gpio_free(lp->rstn);
905err_rstn: 989err_rstn:
906err_fill:
907 spi_set_drvdata(spi, NULL); 990 spi_set_drvdata(spi, NULL);
908 mutex_destroy(&lp->bmux); 991 mutex_destroy(&lp->bmux);
909 ieee802154_free_device(lp->dev); 992 ieee802154_free_device(lp->dev);
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 8f1c25676d44..bf0d55e2dd63 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -106,26 +106,6 @@ static u8 fake_get_dsn(const struct net_device *dev)
106} 106}
107 107
108/** 108/**
109 * fake_get_bsn - Retrieve the BSN of the device.
110 * @dev: The network device to retrieve the BSN for.
111 *
112 * Returns the IEEE 802.15.4 BSN for the network device.
113 * The BSN is the sequence number which will be added to each
114 * beacon frame sent by the MAC.
115 *
116 * BSN means 'Beacon Sequence Number'.
117 *
118 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
119 * document.
120 */
121static u8 fake_get_bsn(const struct net_device *dev)
122{
123 BUG_ON(dev->type != ARPHRD_IEEE802154);
124
125 return 0x00; /* BSN are implemented in HW, so return just 0 */
126}
127
128/**
129 * fake_assoc_req - Make an association request to the HW. 109 * fake_assoc_req - Make an association request to the HW.
130 * @dev: The network device which we are associating to a network. 110 * @dev: The network device which we are associating to a network.
131 * @addr: The coordinator with which we wish to associate. 111 * @addr: The coordinator with which we wish to associate.
@@ -264,7 +244,6 @@ static struct ieee802154_mlme_ops fake_mlme = {
264 .get_pan_id = fake_get_pan_id, 244 .get_pan_id = fake_get_pan_id,
265 .get_short_addr = fake_get_short_addr, 245 .get_short_addr = fake_get_short_addr,
266 .get_dsn = fake_get_dsn, 246 .get_dsn = fake_get_dsn,
267 .get_bsn = fake_get_bsn,
268}; 247};
269 248
270static int ieee802154_fake_open(struct net_device *dev) 249static int ieee802154_fake_open(struct net_device *dev)
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 3f2c7aaf28c4..ede3ce4912f9 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -22,8 +22,10 @@
22#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/pinctrl/consumer.h>
25#include <net/wpan-phy.h> 26#include <net/wpan-phy.h>
26#include <net/mac802154.h> 27#include <net/mac802154.h>
28#include <net/ieee802154.h>
27 29
28/* MRF24J40 Short Address Registers */ 30/* MRF24J40 Short Address Registers */
29#define REG_RXMCR 0x00 /* Receive MAC control */ 31#define REG_RXMCR 0x00 /* Receive MAC control */
@@ -91,9 +93,8 @@ struct mrf24j40 {
91#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5) 93#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
92#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) 94#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
93 95
94/* Maximum speed to run the device at. TODO: Get the real max value from 96/* The datasheet indicates the theoretical maximum for SCK to be 10MHz */
95 * someone at Microchip since it isn't in the datasheet. */ 97#define MAX_SPI_SPEED_HZ 10000000
96#define MAX_SPI_SPEED_HZ 1000000
97 98
98#define printdev(X) (&X->spi->dev) 99#define printdev(X) (&X->spi->dev)
99 100
@@ -349,7 +350,9 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
349 if (ret) 350 if (ret)
350 goto err; 351 goto err;
351 val |= 0x1; 352 val |= 0x1;
352 val &= ~0x4; 353 /* Set TXNACKREQ if the ACK bit is set in the packet. */
354 if (skb->data[0] & IEEE802154_FC_ACK_REQ)
355 val |= 0x4;
353 write_short_reg(devrec, REG_TXNCON, val); 356 write_short_reg(devrec, REG_TXNCON, val);
354 357
355 INIT_COMPLETION(devrec->tx_complete); 358 INIT_COMPLETION(devrec->tx_complete);
@@ -361,6 +364,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
361 if (ret == -ERESTARTSYS) 364 if (ret == -ERESTARTSYS)
362 goto err; 365 goto err;
363 if (ret == 0) { 366 if (ret == 0) {
367 dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
364 ret = -ETIMEDOUT; 368 ret = -ETIMEDOUT;
365 goto err; 369 goto err;
366 } 370 }
@@ -370,7 +374,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
370 if (ret) 374 if (ret)
371 goto err; 375 goto err;
372 if (val & 0x1) { 376 if (val & 0x1) {
373 dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n"); 377 dev_dbg(printdev(devrec), "Error Sending. Retry count exceeded\n");
374 ret = -ECOMM; /* TODO: Better error code ? */ 378 ret = -ECOMM; /* TODO: Better error code ? */
375 } else 379 } else
376 dev_dbg(printdev(devrec), "Packet Sent\n"); 380 dev_dbg(printdev(devrec), "Packet Sent\n");
@@ -477,7 +481,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
477 int i; 481 int i;
478 for (i = 0; i < 8; i++) 482 for (i = 0; i < 8; i++)
479 write_short_reg(devrec, REG_EADR0+i, 483 write_short_reg(devrec, REG_EADR0+i,
480 filt->ieee_addr[i]); 484 filt->ieee_addr[7-i]);
481 485
482#ifdef DEBUG 486#ifdef DEBUG
483 printk(KERN_DEBUG "Set long addr to: "); 487 printk(KERN_DEBUG "Set long addr to: ");
@@ -623,6 +627,7 @@ static int mrf24j40_probe(struct spi_device *spi)
623 int ret = -ENOMEM; 627 int ret = -ENOMEM;
624 u8 val; 628 u8 val;
625 struct mrf24j40 *devrec; 629 struct mrf24j40 *devrec;
630 struct pinctrl *pinctrl;
626 631
627 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); 632 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
628 633
@@ -633,6 +638,11 @@ static int mrf24j40_probe(struct spi_device *spi)
633 if (!devrec->buf) 638 if (!devrec->buf)
634 goto err_buf; 639 goto err_buf;
635 640
641 pinctrl = devm_pinctrl_get_select_default(&spi->dev);
642 if (IS_ERR(pinctrl))
643 dev_warn(&spi->dev,
644 "pinctrl pins are not configured from the driver");
645
636 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ 646 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
637 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) 647 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
638 spi->max_speed_hz = MAX_SPI_SPEED_HZ; 648 spi->max_speed_hz = MAX_SPI_SPEED_HZ;
@@ -641,7 +651,7 @@ static int mrf24j40_probe(struct spi_device *spi)
641 init_completion(&devrec->tx_complete); 651 init_completion(&devrec->tx_complete);
642 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); 652 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
643 devrec->spi = spi; 653 devrec->spi = spi;
644 dev_set_drvdata(&spi->dev, devrec); 654 spi_set_drvdata(spi, devrec);
645 655
646 /* Register with the 802154 subsystem */ 656 /* Register with the 802154 subsystem */
647 657
@@ -713,7 +723,7 @@ err_devrec:
713 723
714static int mrf24j40_remove(struct spi_device *spi) 724static int mrf24j40_remove(struct spi_device *spi)
715{ 725{
716 struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev); 726 struct mrf24j40 *devrec = spi_get_drvdata(spi);
717 727
718 dev_dbg(printdev(devrec), "remove\n"); 728 dev_dbg(printdev(devrec), "remove\n");
719 729
@@ -725,7 +735,7 @@ static int mrf24j40_remove(struct spi_device *spi)
725 * complete? */ 735 * complete? */
726 736
727 /* Clean up the SPI stuff. */ 737 /* Clean up the SPI stuff. */
728 dev_set_drvdata(&spi->dev, NULL); 738 spi_set_drvdata(spi, NULL);
729 kfree(devrec->buf); 739 kfree(devrec->buf);
730 kfree(devrec); 740 kfree(devrec);
731 return 0; 741 return 0;
@@ -749,18 +759,7 @@ static struct spi_driver mrf24j40_driver = {
749 .remove = mrf24j40_remove, 759 .remove = mrf24j40_remove,
750}; 760};
751 761
752static int __init mrf24j40_init(void) 762module_spi_driver(mrf24j40_driver);
753{
754 return spi_register_driver(&mrf24j40_driver);
755}
756
757static void __exit mrf24j40_exit(void)
758{
759 spi_unregister_driver(&mrf24j40_driver);
760}
761
762module_init(mrf24j40_init);
763module_exit(mrf24j40_exit);
764 763
765MODULE_LICENSE("GPL"); 764MODULE_LICENSE("GPL");
766MODULE_AUTHOR("Alan Ott"); 765MODULE_AUTHOR("Alan Ott");
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 82164381f778..dc9f6a45515d 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -166,7 +166,8 @@ static const struct net_device_ops ifb_netdev_ops = {
166 166
167#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ 167#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
168 NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ 168 NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
169 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX) 169 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
170 NETIF_F_HW_VLAN_STAG_TX)
170 171
171static void ifb_setup(struct net_device *dev) 172static void ifb_setup(struct net_device *dev)
172{ 173{
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9cea451a6081..3adb43ce138f 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info)
352 /* Allocate memory if needed */ 352 /* Allocate memory if needed */
353 self->rx_buff.head = 353 self->rx_buff.head =
354 dma_alloc_coherent(NULL, self->rx_buff.truesize, 354 dma_alloc_coherent(NULL, self->rx_buff.truesize,
355 &self->rx_buff_dma, GFP_KERNEL); 355 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
356 if (self->rx_buff.head == NULL) { 356 if (self->rx_buff.head == NULL) {
357 err = -ENOMEM; 357 err = -ENOMEM;
358 goto err_out2; 358 goto err_out2;
359 } 359 }
360 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
361 360
362 self->tx_buff.head = 361 self->tx_buff.head =
363 dma_alloc_coherent(NULL, self->tx_buff.truesize, 362 dma_alloc_coherent(NULL, self->tx_buff.truesize,
364 &self->tx_buff_dma, GFP_KERNEL); 363 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
365 if (self->tx_buff.head == NULL) { 364 if (self->tx_buff.head == NULL) {
366 err = -ENOMEM; 365 err = -ENOMEM;
367 goto err_out3; 366 goto err_out3;
368 } 367 }
369 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
370 368
371 self->rx_buff.in_frame = FALSE; 369 self->rx_buff.in_frame = FALSE;
372 self->rx_buff.state = OUTSIDE_FRAME; 370 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5151e4ced61..7a1f684edcb5 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/time.h> 28#include <linux/time.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/ioport.h>
30 31
31#include <net/irda/irda.h> 32#include <net/irda/irda.h>
32#include <net/irda/irmod.h> 33#include <net/irda/irmod.h>
@@ -882,12 +883,12 @@ static int au1k_irda_probe(struct platform_device *pdev)
882 goto out; 883 goto out;
883 884
884 err = -EBUSY; 885 err = -EBUSY;
885 aup->ioarea = request_mem_region(r->start, r->end - r->start + 1, 886 aup->ioarea = request_mem_region(r->start, resource_size(r),
886 pdev->name); 887 pdev->name);
887 if (!aup->ioarea) 888 if (!aup->ioarea)
888 goto out; 889 goto out;
889 890
890 aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1); 891 aup->iobase = ioremap_nocache(r->start, resource_size(r));
891 if (!aup->iobase) 892 if (!aup->iobase)
892 goto out2; 893 goto out2;
893 894
@@ -952,18 +953,7 @@ static struct platform_driver au1k_irda_driver = {
952 .remove = au1k_irda_remove, 953 .remove = au1k_irda_remove,
953}; 954};
954 955
955static int __init au1k_irda_load(void) 956module_platform_driver(au1k_irda_driver);
956{
957 return platform_driver_register(&au1k_irda_driver);
958}
959
960static void __exit au1k_irda_unload(void)
961{
962 return platform_driver_unregister(&au1k_irda_driver);
963}
964 957
965MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>"); 958MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
966MODULE_DESCRIPTION("Au1000 IrDA Device Driver"); 959MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
967
968module_init(au1k_irda_load);
969module_exit(au1k_irda_unload);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index fed4a05d55c7..a06fca61c9a0 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -389,7 +389,8 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
389 set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); 389 set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
390 set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); 390 set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
391 391
392 port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); 392 port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
393 &dma_handle, GFP_DMA);
393 port->rx_dma_buf.head = 0; 394 port->rx_dma_buf.head = 0;
394 port->rx_dma_buf.tail = 0; 395 port->rx_dma_buf.tail = 0;
395 port->rx_dma_nrows = 0; 396 port->rx_dma_nrows = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2a4f2f153244..9cf836b57c49 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info)
431 /* Allocate memory if needed */ 431 /* Allocate memory if needed */
432 self->rx_buff.head = 432 self->rx_buff.head =
433 dma_alloc_coherent(NULL, self->rx_buff.truesize, 433 dma_alloc_coherent(NULL, self->rx_buff.truesize,
434 &self->rx_buff_dma, GFP_KERNEL); 434 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
435 if (self->rx_buff.head == NULL) { 435 if (self->rx_buff.head == NULL) {
436 err = -ENOMEM; 436 err = -ENOMEM;
437 goto out2; 437 goto out2;
438 438
439 } 439 }
440 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
441 440
442 self->tx_buff.head = 441 self->tx_buff.head =
443 dma_alloc_coherent(NULL, self->tx_buff.truesize, 442 dma_alloc_coherent(NULL, self->tx_buff.truesize,
444 &self->tx_buff_dma, GFP_KERNEL); 443 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
445 if (self->tx_buff.head == NULL) { 444 if (self->tx_buff.head == NULL) {
446 err = -ENOMEM; 445 err = -ENOMEM;
447 goto out3; 446 goto out3;
448 } 447 }
449 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
450 448
451 self->rx_buff.in_frame = FALSE; 449 self->rx_buff.in_frame = FALSE;
452 self->rx_buff.state = OUTSIDE_FRAME; 450 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 858de05bdb7d..964b116a0ab7 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev)
700 700
701 err = -ENOMEM; 701 err = -ENOMEM;
702 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 702 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
703 &si->dma_rx_buff_phy, GFP_KERNEL ); 703 &si->dma_rx_buff_phy, GFP_KERNEL);
704 if (!si->dma_rx_buff) 704 if (!si->dma_rx_buff)
705 goto err_dma_rx_buff; 705 goto err_dma_rx_buff;
706 706
707 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 707 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
708 &si->dma_tx_buff_phy, GFP_KERNEL ); 708 &si->dma_tx_buff_phy, GFP_KERNEL);
709 if (!si->dma_tx_buff) 709 if (!si->dma_tx_buff)
710 goto err_dma_tx_buff; 710 goto err_dma_tx_buff;
711 711
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5290952b60c2..aa05dad75335 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -563,24 +563,15 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
563 563
564 self->rx_buff.head = 564 self->rx_buff.head =
565 dma_alloc_coherent(NULL, self->rx_buff.truesize, 565 dma_alloc_coherent(NULL, self->rx_buff.truesize,
566 &self->rx_buff_dma, GFP_KERNEL); 566 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
567 if (self->rx_buff.head == NULL) { 567 if (self->rx_buff.head == NULL)
568 IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
569 driver_name);
570 goto err_out2; 568 goto err_out2;
571 }
572 569
573 self->tx_buff.head = 570 self->tx_buff.head =
574 dma_alloc_coherent(NULL, self->tx_buff.truesize, 571 dma_alloc_coherent(NULL, self->tx_buff.truesize,
575 &self->tx_buff_dma, GFP_KERNEL); 572 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
576 if (self->tx_buff.head == NULL) { 573 if (self->tx_buff.head == NULL)
577 IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
578 driver_name);
579 goto err_out3; 574 goto err_out3;
580 }
581
582 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
583 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
584 575
585 self->rx_buff.in_frame = FALSE; 576 self->rx_buff.in_frame = FALSE;
586 self->rx_buff.state = OUTSIDE_FRAME; 577 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f9033c6a888c..51f2bc376101 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
364 /* Allocate memory if needed */ 364 /* Allocate memory if needed */
365 self->rx_buff.head = 365 self->rx_buff.head =
366 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, 366 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
367 &self->rx_buff_dma, GFP_KERNEL); 367 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
368 if (self->rx_buff.head == NULL) { 368 if (self->rx_buff.head == NULL) {
369 err = -ENOMEM; 369 err = -ENOMEM;
370 goto err_out2; 370 goto err_out2;
371 } 371 }
372 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
373 372
374 self->tx_buff.head = 373 self->tx_buff.head =
375 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, 374 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
376 &self->tx_buff_dma, GFP_KERNEL); 375 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
377 if (self->tx_buff.head == NULL) { 376 if (self->tx_buff.head == NULL) {
378 err = -ENOMEM; 377 err = -ENOMEM;
379 goto err_out3; 378 goto err_out3;
380 } 379 }
381 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
382 380
383 self->rx_buff.in_frame = FALSE; 381 self->rx_buff.in_frame = FALSE;
384 self->rx_buff.state = OUTSIDE_FRAME; 382 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f5bb92f15880..bb8857a158a6 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
216 /* Allocate memory if needed */ 216 /* Allocate memory if needed */
217 self->rx_buff.head = 217 self->rx_buff.head =
218 dma_alloc_coherent(NULL, self->rx_buff.truesize, 218 dma_alloc_coherent(NULL, self->rx_buff.truesize,
219 &self->rx_buff_dma, GFP_KERNEL); 219 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
220 if (self->rx_buff.head == NULL) { 220 if (self->rx_buff.head == NULL) {
221 err = -ENOMEM; 221 err = -ENOMEM;
222 goto err_out1; 222 goto err_out1;
223 } 223 }
224 224
225 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
226
227 self->tx_buff.head = 225 self->tx_buff.head =
228 dma_alloc_coherent(NULL, self->tx_buff.truesize, 226 dma_alloc_coherent(NULL, self->tx_buff.truesize,
229 &self->tx_buff_dma, GFP_KERNEL); 227 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
230 if (self->tx_buff.head == NULL) { 228 if (self->tx_buff.head == NULL) {
231 err = -ENOMEM; 229 err = -ENOMEM;
232 goto err_out2; 230 goto err_out2;
233 } 231 }
234 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
235 232
236 self->rx_buff.in_frame = FALSE; 233 self->rx_buff.in_frame = FALSE;
237 self->rx_buff.state = OUTSIDE_FRAME; 234 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 73abbc1655d5..d5a141c7c4e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -46,9 +46,16 @@ struct macvlan_port {
46 46
47static void macvlan_port_destroy(struct net_device *dev); 47static void macvlan_port_destroy(struct net_device *dev);
48 48
49#define macvlan_port_get_rcu(dev) \ 49static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
50 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 50{
51#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 51 return rcu_dereference(dev->rx_handler_data);
52}
53
54static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
55{
56 return rtnl_dereference(dev->rx_handler_data);
57}
58
52#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) 59#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
53 60
54static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 61static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
@@ -464,7 +471,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
464 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 471 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
465 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 472 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
466 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ 473 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
467 NETIF_F_HW_VLAN_FILTER) 474 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
468 475
469#define MACVLAN_STATE_MASK \ 476#define MACVLAN_STATE_MASK \
470 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 477 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -560,21 +567,21 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
560} 567}
561 568
562static int macvlan_vlan_rx_add_vid(struct net_device *dev, 569static int macvlan_vlan_rx_add_vid(struct net_device *dev,
563 unsigned short vid) 570 __be16 proto, u16 vid)
564{ 571{
565 struct macvlan_dev *vlan = netdev_priv(dev); 572 struct macvlan_dev *vlan = netdev_priv(dev);
566 struct net_device *lowerdev = vlan->lowerdev; 573 struct net_device *lowerdev = vlan->lowerdev;
567 574
568 return vlan_vid_add(lowerdev, vid); 575 return vlan_vid_add(lowerdev, proto, vid);
569} 576}
570 577
571static int macvlan_vlan_rx_kill_vid(struct net_device *dev, 578static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
572 unsigned short vid) 579 __be16 proto, u16 vid)
573{ 580{
574 struct macvlan_dev *vlan = netdev_priv(dev); 581 struct macvlan_dev *vlan = netdev_priv(dev);
575 struct net_device *lowerdev = vlan->lowerdev; 582 struct net_device *lowerdev = vlan->lowerdev;
576 583
577 vlan_vid_del(lowerdev, vid); 584 vlan_vid_del(lowerdev, proto, vid);
578 return 0; 585 return 0;
579} 586}
580 587
@@ -703,7 +710,7 @@ static int macvlan_port_create(struct net_device *dev)
703 710
704static void macvlan_port_destroy(struct net_device *dev) 711static void macvlan_port_destroy(struct net_device *dev)
705{ 712{
706 struct macvlan_port *port = macvlan_port_get(dev); 713 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
707 714
708 dev->priv_flags &= ~IFF_MACVLAN_PORT; 715 dev->priv_flags &= ~IFF_MACVLAN_PORT;
709 netdev_rx_handler_unregister(dev); 716 netdev_rx_handler_unregister(dev);
@@ -772,7 +779,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
772 if (err < 0) 779 if (err < 0)
773 return err; 780 return err;
774 } 781 }
775 port = macvlan_port_get(lowerdev); 782 port = macvlan_port_get_rtnl(lowerdev);
776 783
777 /* Only 1 macvlan device can be created in passthru mode */ 784 /* Only 1 macvlan device can be created in passthru mode */
778 if (port->passthru) 785 if (port->passthru)
@@ -921,7 +928,7 @@ static int macvlan_device_event(struct notifier_block *unused,
921 if (!macvlan_port_exists(dev)) 928 if (!macvlan_port_exists(dev))
922 return NOTIFY_DONE; 929 return NOTIFY_DONE;
923 930
924 port = macvlan_port_get(dev); 931 port = macvlan_port_get_rtnl(dev);
925 932
926 switch (event) { 933 switch (event) {
927 case NETDEV_CHANGE: 934 case NETDEV_CHANGE:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a449439bd653..59e9605de316 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -725,6 +725,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
725 goto err_kfree; 725 goto err_kfree;
726 } 726 }
727 727
728 skb_probe_transport_header(skb, ETH_HLEN);
729
728 rcu_read_lock_bh(); 730 rcu_read_lock_bh();
729 vlan = rcu_dereference_bh(q->vlan); 731 vlan = rcu_dereference_bh(q->vlan);
730 /* copy skb_ubuf_info for callback when skb has no error */ 732 /* copy skb_ubuf_info for callback when skb has no error */
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ec40ba882f61..ff2e45e9cb54 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -159,7 +159,7 @@ static int lxt973a2_update_link(struct phy_device *phydev)
159 return 0; 159 return 0;
160} 160}
161 161
162int lxt973a2_read_status(struct phy_device *phydev) 162static int lxt973a2_read_status(struct phy_device *phydev)
163{ 163{
164 int adv; 164 int adv;
165 int err; 165 int err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22dec9c7ef05..202fe1ff1987 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -7,6 +7,8 @@
7 * 7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc. 8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 * 9 *
10 * Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
11 *
10 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -80,6 +82,28 @@
80#define MII_88E1318S_PHY_MSCR1_REG 16 82#define MII_88E1318S_PHY_MSCR1_REG 16
81#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) 83#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
82 84
85/* Copper Specific Interrupt Enable Register */
86#define MII_88E1318S_PHY_CSIER 0x12
87/* WOL Event Interrupt Enable */
88#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
89
90/* LED Timer Control Register */
91#define MII_88E1318S_PHY_LED_PAGE 0x03
92#define MII_88E1318S_PHY_LED_TCR 0x12
93#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
94#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
95#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
96
97/* Magic Packet MAC address registers */
98#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17
99#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
100#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
101
102#define MII_88E1318S_PHY_WOL_PAGE 0x11
103#define MII_88E1318S_PHY_WOL_CTRL 0x10
104#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
105#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
106
83#define MII_88E1121_PHY_LED_CTRL 16 107#define MII_88E1121_PHY_LED_CTRL 16
84#define MII_88E1121_PHY_LED_PAGE 3 108#define MII_88E1121_PHY_LED_PAGE 3
85#define MII_88E1121_PHY_LED_DEF 0x0030 109#define MII_88E1121_PHY_LED_DEF 0x0030
@@ -696,6 +720,107 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
696 return 0; 720 return 0;
697} 721}
698 722
723static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
724{
725 wol->supported = WAKE_MAGIC;
726 wol->wolopts = 0;
727
728 if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
729 MII_88E1318S_PHY_WOL_PAGE) < 0)
730 return;
731
732 if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
733 MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
734 wol->wolopts |= WAKE_MAGIC;
735
736 if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
737 return;
738}
739
740static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
741{
742 int err, oldpage, temp;
743
744 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
745
746 if (wol->wolopts & WAKE_MAGIC) {
747 /* Explicitly switch to page 0x00, just to be sure */
748 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
749 if (err < 0)
750 return err;
751
752 /* Enable the WOL interrupt */
753 temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
754 temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
755 err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
756 if (err < 0)
757 return err;
758
759 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
760 MII_88E1318S_PHY_LED_PAGE);
761 if (err < 0)
762 return err;
763
764 /* Setup LED[2] as interrupt pin (active low) */
765 temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
766 temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
767 temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
768 temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
769 err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
770 if (err < 0)
771 return err;
772
773 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
774 MII_88E1318S_PHY_WOL_PAGE);
775 if (err < 0)
776 return err;
777
778 /* Store the device address for the magic packet */
779 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
780 ((phydev->attached_dev->dev_addr[5] << 8) |
781 phydev->attached_dev->dev_addr[4]));
782 if (err < 0)
783 return err;
784 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
785 ((phydev->attached_dev->dev_addr[3] << 8) |
786 phydev->attached_dev->dev_addr[2]));
787 if (err < 0)
788 return err;
789 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
790 ((phydev->attached_dev->dev_addr[1] << 8) |
791 phydev->attached_dev->dev_addr[0]));
792 if (err < 0)
793 return err;
794
795 /* Clear WOL status and enable magic packet matching */
796 temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
797 temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
798 temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
799 err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
800 if (err < 0)
801 return err;
802 } else {
803 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
804 MII_88E1318S_PHY_WOL_PAGE);
805 if (err < 0)
806 return err;
807
808 /* Clear WOL status and disable magic packet matching */
809 temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
810 temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
811 temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
812 err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
813 if (err < 0)
814 return err;
815 }
816
817 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
818 if (err < 0)
819 return err;
820
821 return 0;
822}
823
699static struct phy_driver marvell_drivers[] = { 824static struct phy_driver marvell_drivers[] = {
700 { 825 {
701 .phy_id = MARVELL_PHY_ID_88E1101, 826 .phy_id = MARVELL_PHY_ID_88E1101,
@@ -772,6 +897,8 @@ static struct phy_driver marvell_drivers[] = {
772 .ack_interrupt = &marvell_ack_interrupt, 897 .ack_interrupt = &marvell_ack_interrupt,
773 .config_intr = &marvell_config_intr, 898 .config_intr = &marvell_config_intr,
774 .did_interrupt = &m88e1121_did_interrupt, 899 .did_interrupt = &m88e1121_did_interrupt,
900 .get_wol = &m88e1318_get_wol,
901 .set_wol = &m88e1318_set_wol,
775 .driver = { .owner = THIS_MODULE }, 902 .driver = { .owner = THIS_MODULE },
776 }, 903 },
777 { 904 {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 27274986ab56..a47f9236d966 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -235,17 +235,7 @@ static struct platform_driver mdio_gpio_driver = {
235 }, 235 },
236}; 236};
237 237
238static int __init mdio_gpio_init(void) 238module_platform_driver(mdio_gpio_driver);
239{
240 return platform_driver_register(&mdio_gpio_driver);
241}
242module_init(mdio_gpio_init);
243
244static void __exit mdio_gpio_exit(void)
245{
246 platform_driver_unregister(&mdio_gpio_driver);
247}
248module_exit(mdio_gpio_exit);
249 239
250MODULE_ALIAS("platform:mdio-gpio"); 240MODULE_ALIAS("platform:mdio-gpio");
251MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas"); 241MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 09297fe05ae5..b51fa1f469b0 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2009,2011 Cavium, Inc. 6 * Copyright (C) 2009-2012 Cavium, Inc.
7 */ 7 */
8 8
9#include <linux/platform_device.h> 9#include <linux/platform_device.h>
@@ -27,30 +27,98 @@
27#define SMI_CLK 0x18 27#define SMI_CLK 0x18
28#define SMI_EN 0x20 28#define SMI_EN 0x20
29 29
30enum octeon_mdiobus_mode {
31 UNINIT = 0,
32 C22,
33 C45
34};
35
30struct octeon_mdiobus { 36struct octeon_mdiobus {
31 struct mii_bus *mii_bus; 37 struct mii_bus *mii_bus;
32 u64 register_base; 38 u64 register_base;
33 resource_size_t mdio_phys; 39 resource_size_t mdio_phys;
34 resource_size_t regsize; 40 resource_size_t regsize;
41 enum octeon_mdiobus_mode mode;
35 int phy_irq[PHY_MAX_ADDR]; 42 int phy_irq[PHY_MAX_ADDR];
36}; 43};
37 44
45static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
46 enum octeon_mdiobus_mode m)
47{
48 union cvmx_smix_clk smi_clk;
49
50 if (m == p->mode)
51 return;
52
53 smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
54 smi_clk.s.mode = (m == C45) ? 1 : 0;
55 smi_clk.s.preamble = 1;
56 cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
57 p->mode = m;
58}
59
60static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
61 int phy_id, int regnum)
62{
63 union cvmx_smix_cmd smi_cmd;
64 union cvmx_smix_wr_dat smi_wr;
65 int timeout = 1000;
66
67 octeon_mdiobus_set_mode(p, C45);
68
69 smi_wr.u64 = 0;
70 smi_wr.s.dat = regnum & 0xffff;
71 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
72
73 regnum = (regnum >> 16) & 0x1f;
74
75 smi_cmd.u64 = 0;
76 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
77 smi_cmd.s.phy_adr = phy_id;
78 smi_cmd.s.reg_adr = regnum;
79 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
80
81 do {
82 /* Wait 1000 clocks so we don't saturate the RSL bus
83 * doing reads.
84 */
85 __delay(1000);
86 smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
87 } while (smi_wr.s.pending && --timeout);
88
89 if (timeout <= 0)
90 return -EIO;
91 return 0;
92}
93
38static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) 94static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
39{ 95{
40 struct octeon_mdiobus *p = bus->priv; 96 struct octeon_mdiobus *p = bus->priv;
41 union cvmx_smix_cmd smi_cmd; 97 union cvmx_smix_cmd smi_cmd;
42 union cvmx_smix_rd_dat smi_rd; 98 union cvmx_smix_rd_dat smi_rd;
99 unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
43 int timeout = 1000; 100 int timeout = 1000;
44 101
102 if (regnum & MII_ADDR_C45) {
103 int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
104 if (r < 0)
105 return r;
106
107 regnum = (regnum >> 16) & 0x1f;
108 op = 3; /* MDIO_CLAUSE_45_READ */
109 } else {
110 octeon_mdiobus_set_mode(p, C22);
111 }
112
113
45 smi_cmd.u64 = 0; 114 smi_cmd.u64 = 0;
46 smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ 115 smi_cmd.s.phy_op = op;
47 smi_cmd.s.phy_adr = phy_id; 116 smi_cmd.s.phy_adr = phy_id;
48 smi_cmd.s.reg_adr = regnum; 117 smi_cmd.s.reg_adr = regnum;
49 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); 118 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
50 119
51 do { 120 do {
52 /* 121 /* Wait 1000 clocks so we don't saturate the RSL bus
53 * Wait 1000 clocks so we don't saturate the RSL bus
54 * doing reads. 122 * doing reads.
55 */ 123 */
56 __delay(1000); 124 __delay(1000);
@@ -69,21 +137,33 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
69 struct octeon_mdiobus *p = bus->priv; 137 struct octeon_mdiobus *p = bus->priv;
70 union cvmx_smix_cmd smi_cmd; 138 union cvmx_smix_cmd smi_cmd;
71 union cvmx_smix_wr_dat smi_wr; 139 union cvmx_smix_wr_dat smi_wr;
140 unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
72 int timeout = 1000; 141 int timeout = 1000;
73 142
143
144 if (regnum & MII_ADDR_C45) {
145 int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
146 if (r < 0)
147 return r;
148
149 regnum = (regnum >> 16) & 0x1f;
150 op = 1; /* MDIO_CLAUSE_45_WRITE */
151 } else {
152 octeon_mdiobus_set_mode(p, C22);
153 }
154
74 smi_wr.u64 = 0; 155 smi_wr.u64 = 0;
75 smi_wr.s.dat = val; 156 smi_wr.s.dat = val;
76 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); 157 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
77 158
78 smi_cmd.u64 = 0; 159 smi_cmd.u64 = 0;
79 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ 160 smi_cmd.s.phy_op = op;
80 smi_cmd.s.phy_adr = phy_id; 161 smi_cmd.s.phy_adr = phy_id;
81 smi_cmd.s.reg_adr = regnum; 162 smi_cmd.s.reg_adr = regnum;
82 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); 163 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
83 164
84 do { 165 do {
85 /* 166 /* Wait 1000 clocks so we don't saturate the RSL bus
86 * Wait 1000 clocks so we don't saturate the RSL bus
87 * doing reads. 167 * doing reads.
88 */ 168 */
89 __delay(1000); 169 __delay(1000);
@@ -197,18 +277,7 @@ void octeon_mdiobus_force_mod_depencency(void)
197} 277}
198EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); 278EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
199 279
200static int __init octeon_mdiobus_mod_init(void) 280module_platform_driver(octeon_mdiobus_driver);
201{
202 return platform_driver_register(&octeon_mdiobus_driver);
203}
204
205static void __exit octeon_mdiobus_mod_exit(void)
206{
207 platform_driver_unregister(&octeon_mdiobus_driver);
208}
209
210module_init(octeon_mdiobus_mod_init);
211module_exit(octeon_mdiobus_mod_exit);
212 281
213MODULE_DESCRIPTION(DRV_DESCRIPTION); 282MODULE_DESCRIPTION(DRV_DESCRIPTION);
214MODULE_VERSION(DRV_VERSION); 283MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index abf7b6153d00..2510435f34ed 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -53,6 +53,18 @@
53#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 53#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
54#define KSZ8051_RMII_50MHZ_CLK (1 << 7) 54#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
55 55
56static int ksz_config_flags(struct phy_device *phydev)
57{
58 int regval;
59
60 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
61 regval = phy_read(phydev, MII_KSZPHY_CTRL);
62 regval |= KSZ8051_RMII_50MHZ_CLK;
63 return phy_write(phydev, MII_KSZPHY_CTRL, regval);
64 }
65 return 0;
66}
67
56static int kszphy_ack_interrupt(struct phy_device *phydev) 68static int kszphy_ack_interrupt(struct phy_device *phydev)
57{ 69{
58 /* bit[7..0] int status, which is a read and clear register. */ 70 /* bit[7..0] int status, which is a read and clear register. */
@@ -114,22 +126,19 @@ static int kszphy_config_init(struct phy_device *phydev)
114 126
115static int ksz8021_config_init(struct phy_device *phydev) 127static int ksz8021_config_init(struct phy_device *phydev)
116{ 128{
129 int rc;
117 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; 130 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
118 phy_write(phydev, MII_KSZPHY_OMSO, val); 131 phy_write(phydev, MII_KSZPHY_OMSO, val);
119 return 0; 132 rc = ksz_config_flags(phydev);
133 return rc < 0 ? rc : 0;
120} 134}
121 135
122static int ks8051_config_init(struct phy_device *phydev) 136static int ks8051_config_init(struct phy_device *phydev)
123{ 137{
124 int regval; 138 int rc;
125
126 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
127 regval = phy_read(phydev, MII_KSZPHY_CTRL);
128 regval |= KSZ8051_RMII_50MHZ_CLK;
129 phy_write(phydev, MII_KSZPHY_CTRL, regval);
130 }
131 139
132 return 0; 140 rc = ksz_config_flags(phydev);
141 return rc < 0 ? rc : 0;
133} 142}
134 143
135#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 144#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -192,6 +201,19 @@ static struct phy_driver ksphy_driver[] = {
192 .config_intr = kszphy_config_intr, 201 .config_intr = kszphy_config_intr,
193 .driver = { .owner = THIS_MODULE,}, 202 .driver = { .owner = THIS_MODULE,},
194}, { 203}, {
204 .phy_id = PHY_ID_KSZ8031,
205 .phy_id_mask = 0x00ffffff,
206 .name = "Micrel KSZ8031",
207 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
208 SUPPORTED_Asym_Pause),
209 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
210 .config_init = ksz8021_config_init,
211 .config_aneg = genphy_config_aneg,
212 .read_status = genphy_read_status,
213 .ack_interrupt = kszphy_ack_interrupt,
214 .config_intr = kszphy_config_intr,
215 .driver = { .owner = THIS_MODULE,},
216}, {
195 .phy_id = PHY_ID_KSZ8041, 217 .phy_id = PHY_ID_KSZ8041,
196 .phy_id_mask = 0x00fffff0, 218 .phy_id_mask = 0x00fffff0,
197 .name = "Micrel KSZ8041", 219 .name = "Micrel KSZ8041",
@@ -325,6 +347,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
325 { PHY_ID_KSZ8001, 0x00ffffff }, 347 { PHY_ID_KSZ8001, 0x00ffffff },
326 { PHY_ID_KS8737, 0x00fffff0 }, 348 { PHY_ID_KS8737, 0x00fffff0 },
327 { PHY_ID_KSZ8021, 0x00ffffff }, 349 { PHY_ID_KSZ8021, 0x00ffffff },
350 { PHY_ID_KSZ8031, 0x00ffffff },
328 { PHY_ID_KSZ8041, 0x00fffff0 }, 351 { PHY_ID_KSZ8041, 0x00fffff0 },
329 { PHY_ID_KSZ8051, 0x00fffff0 }, 352 { PHY_ID_KSZ8051, 0x00fffff0 },
330 { PHY_ID_KSZ8061, 0x00fffff0 }, 353 { PHY_ID_KSZ8061, 0x00fffff0 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef9ea9248223..c14f14741b3f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -463,33 +463,6 @@ void phy_stop_machine(struct phy_device *phydev)
463} 463}
464 464
465/** 465/**
466 * phy_force_reduction - reduce PHY speed/duplex settings by one step
467 * @phydev: target phy_device struct
468 *
469 * Description: Reduces the speed/duplex settings by one notch,
470 * in this order--
471 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
472 * The function bottoms out at 10/HALF.
473 */
474static void phy_force_reduction(struct phy_device *phydev)
475{
476 int idx;
477
478 idx = phy_find_setting(phydev->speed, phydev->duplex);
479
480 idx++;
481
482 idx = phy_find_valid(idx, phydev->supported);
483
484 phydev->speed = settings[idx].speed;
485 phydev->duplex = settings[idx].duplex;
486
487 pr_info("Trying %d/%s\n",
488 phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
489}
490
491
492/**
493 * phy_error - enter HALTED state for this PHY device 466 * phy_error - enter HALTED state for this PHY device
494 * @phydev: target phy_device struct 467 * @phydev: target phy_device struct
495 * 468 *
@@ -818,30 +791,11 @@ void phy_state_machine(struct work_struct *work)
818 phydev->adjust_link(phydev->attached_dev); 791 phydev->adjust_link(phydev->attached_dev);
819 792
820 } else if (0 == phydev->link_timeout--) { 793 } else if (0 == phydev->link_timeout--) {
821 int idx;
822
823 needs_aneg = 1; 794 needs_aneg = 1;
824 /* If we have the magic_aneg bit, 795 /* If we have the magic_aneg bit,
825 * we try again */ 796 * we try again */
826 if (phydev->drv->flags & PHY_HAS_MAGICANEG) 797 if (phydev->drv->flags & PHY_HAS_MAGICANEG)
827 break; 798 break;
828
829 /* The timer expired, and we still
830 * don't have a setting, so we try
831 * forcing it until we find one that
832 * works, starting from the fastest speed,
833 * and working our way down */
834 idx = phy_find_valid(0, phydev->supported);
835
836 phydev->speed = settings[idx].speed;
837 phydev->duplex = settings[idx].duplex;
838
839 phydev->autoneg = AUTONEG_DISABLE;
840
841 pr_info("Trying %d/%s\n",
842 phydev->speed,
843 DUPLEX_FULL == phydev->duplex ?
844 "FULL" : "HALF");
845 } 799 }
846 break; 800 break;
847 case PHY_NOLINK: 801 case PHY_NOLINK:
@@ -866,10 +820,8 @@ void phy_state_machine(struct work_struct *work)
866 phydev->state = PHY_RUNNING; 820 phydev->state = PHY_RUNNING;
867 netif_carrier_on(phydev->attached_dev); 821 netif_carrier_on(phydev->attached_dev);
868 } else { 822 } else {
869 if (0 == phydev->link_timeout--) { 823 if (0 == phydev->link_timeout--)
870 phy_force_reduction(phydev);
871 needs_aneg = 1; 824 needs_aneg = 1;
872 }
873 } 825 }
874 826
875 phydev->adjust_link(phydev->attached_dev); 827 phydev->adjust_link(phydev->attached_dev);
@@ -1188,3 +1140,19 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1188 return 0; 1140 return 0;
1189} 1141}
1190EXPORT_SYMBOL(phy_ethtool_set_eee); 1142EXPORT_SYMBOL(phy_ethtool_set_eee);
1143
1144int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1145{
1146 if (phydev->drv->set_wol)
1147 return phydev->drv->set_wol(phydev, wol);
1148
1149 return -EOPNOTSUPP;
1150}
1151EXPORT_SYMBOL(phy_ethtool_set_wol);
1152
1153void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1154{
1155 if (phydev->drv->get_wol)
1156 phydev->drv->get_wol(phydev, wol);
1157}
1158EXPORT_SYMBOL(phy_ethtool_get_wol);
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 5c87eef40bf9..d11c93e69e03 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -281,7 +281,7 @@ static int ks8995_probe(struct spi_device *spi)
281 mutex_init(&ks->lock); 281 mutex_init(&ks->lock);
282 ks->pdata = pdata; 282 ks->pdata = pdata;
283 ks->spi = spi_dev_get(spi); 283 ks->spi = spi_dev_get(spi);
284 dev_set_drvdata(&spi->dev, ks); 284 spi_set_drvdata(spi, ks);
285 285
286 spi->mode = SPI_MODE_0; 286 spi->mode = SPI_MODE_0;
287 spi->bits_per_word = 8; 287 spi->bits_per_word = 8;
@@ -325,7 +325,7 @@ static int ks8995_probe(struct spi_device *spi)
325 return 0; 325 return 0;
326 326
327err_drvdata: 327err_drvdata:
328 dev_set_drvdata(&spi->dev, NULL); 328 spi_set_drvdata(spi, NULL);
329 kfree(ks); 329 kfree(ks);
330 return err; 330 return err;
331} 331}
@@ -334,10 +334,10 @@ static int ks8995_remove(struct spi_device *spi)
334{ 334{
335 struct ks8995_data *ks8995; 335 struct ks8995_data *ks8995;
336 336
337 ks8995 = dev_get_drvdata(&spi->dev); 337 ks8995 = spi_get_drvdata(spi);
338 sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); 338 sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
339 339
340 dev_set_drvdata(&spi->dev, NULL); 340 spi_set_drvdata(spi, NULL);
341 kfree(ks8995); 341 kfree(ks8995);
342 342
343 return 0; 343 return 0;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2585c383e623..3492b5391273 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,7 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
61MODULE_AUTHOR("Kriston Carson"); 61MODULE_AUTHOR("Kriston Carson");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64int vsc824x_add_skew(struct phy_device *phydev) 64static int vsc824x_add_skew(struct phy_device *phydev)
65{ 65{
66 int err; 66 int err;
67 int extcon; 67 int extcon;
@@ -81,7 +81,6 @@ int vsc824x_add_skew(struct phy_device *phydev)
81 81
82 return err; 82 return err;
83} 83}
84EXPORT_SYMBOL(vsc824x_add_skew);
85 84
86static int vsc824x_config_init(struct phy_device *phydev) 85static int vsc824x_config_init(struct phy_device *phydev)
87{ 86{
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index bed62d9c53c8..1f7bef90b467 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -560,7 +560,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
560 * so don't forget to remove it. 560 * so don't forget to remove it.
561 */ 561 */
562 562
563 if (ntohs(eth->h_proto) >= 1536) 563 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
564 return eth->h_proto; 564 return eth->h_proto;
565 565
566 rawp = skb->data; 566 rawp = skb->data;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index bdf3b13a71a8..925d3e295bac 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -105,64 +105,15 @@ static const struct ppp_channel_ops sync_ops = {
105}; 105};
106 106
107/* 107/*
108 * Utility procedures to print a buffer in hex/ascii 108 * Utility procedure to print a buffer in hex/ascii
109 */ 109 */
110static void 110static void
111ppp_print_hex (register __u8 * out, const __u8 * in, int count)
112{
113 register __u8 next_ch;
114 static const char hex[] = "0123456789ABCDEF";
115
116 while (count-- > 0) {
117 next_ch = *in++;
118 *out++ = hex[(next_ch >> 4) & 0x0F];
119 *out++ = hex[next_ch & 0x0F];
120 ++out;
121 }
122}
123
124static void
125ppp_print_char (register __u8 * out, const __u8 * in, int count)
126{
127 register __u8 next_ch;
128
129 while (count-- > 0) {
130 next_ch = *in++;
131
132 if (next_ch < 0x20 || next_ch > 0x7e)
133 *out++ = '.';
134 else {
135 *out++ = next_ch;
136 if (next_ch == '%') /* printk/syslogd has a bug !! */
137 *out++ = '%';
138 }
139 }
140 *out = '\0';
141}
142
143static void
144ppp_print_buffer (const char *name, const __u8 *buf, int count) 111ppp_print_buffer (const char *name, const __u8 *buf, int count)
145{ 112{
146 __u8 line[44];
147
148 if (name != NULL) 113 if (name != NULL)
149 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count); 114 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
150 115
151 while (count > 8) { 116 print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
152 memset (line, 32, 44);
153 ppp_print_hex (line, buf, 8);
154 ppp_print_char (&line[8 * 3], buf, 8);
155 printk(KERN_DEBUG "%s\n", line);
156 count -= 8;
157 buf += 8;
158 }
159
160 if (count > 0) {
161 memset (line, 32, 44);
162 ppp_print_hex (line, buf, count);
163 ppp_print_char (&line[8 * 3], buf, count);
164 printk(KERN_DEBUG "%s\n", line);
165 }
166} 117}
167 118
168 119
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index c3011af68e91..c853d84fd99f 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN
37 To compile this team mode as a module, choose M here: the module 37 To compile this team mode as a module, choose M here: the module
38 will be called team_mode_roundrobin. 38 will be called team_mode_roundrobin.
39 39
40config NET_TEAM_MODE_RANDOM
41 tristate "Random mode support"
42 depends on NET_TEAM
43 ---help---
44 Basic mode where port used for transmitting packets is selected
45 randomly.
46
47 All added ports are setup to have team's device address.
48
49 To compile this team mode as a module, choose M here: the module
50 will be called team_mode_random.
51
40config NET_TEAM_MODE_ACTIVEBACKUP 52config NET_TEAM_MODE_ACTIVEBACKUP
41 tristate "Active-backup mode support" 53 tristate "Active-backup mode support"
42 depends on NET_TEAM 54 depends on NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 975763014e5a..c57e85889751 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,5 +5,6 @@
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o 6obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
7obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 7obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
8obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o
8obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 9obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
9obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o 10obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bf3419297875..7c43261975bd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port)
73 return __set_port_dev_addr(port->dev, port->orig.dev_addr); 73 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
74} 74}
75 75
76int team_port_set_team_dev_addr(struct team_port *port) 76static int team_port_set_team_dev_addr(struct team *team,
77 struct team_port *port)
78{
79 return __set_port_dev_addr(port->dev, team->dev->dev_addr);
80}
81
82int team_modeop_port_enter(struct team *team, struct team_port *port)
83{
84 return team_port_set_team_dev_addr(team, port);
85}
86EXPORT_SYMBOL(team_modeop_port_enter);
87
88void team_modeop_port_change_dev_addr(struct team *team,
89 struct team_port *port)
77{ 90{
78 return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); 91 team_port_set_team_dev_addr(team, port);
79} 92}
80EXPORT_SYMBOL(team_port_set_team_dev_addr); 93EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
81 94
82static void team_refresh_port_linkup(struct team_port *port) 95static void team_refresh_port_linkup(struct team_port *port)
83{ 96{
@@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
490 return false; 503 return false;
491} 504}
492 505
493rx_handler_result_t team_dummy_receive(struct team *team, 506static rx_handler_result_t team_dummy_receive(struct team *team,
494 struct team_port *port, 507 struct team_port *port,
495 struct sk_buff *skb) 508 struct sk_buff *skb)
496{ 509{
497 return RX_HANDLER_ANOTHER; 510 return RX_HANDLER_ANOTHER;
498} 511}
@@ -1491,8 +1504,8 @@ static void team_set_rx_mode(struct net_device *dev)
1491 1504
1492 rcu_read_lock(); 1505 rcu_read_lock();
1493 list_for_each_entry_rcu(port, &team->port_list, list) { 1506 list_for_each_entry_rcu(port, &team->port_list, list) {
1494 dev_uc_sync(port->dev, dev); 1507 dev_uc_sync_multiple(port->dev, dev);
1495 dev_mc_sync(port->dev, dev); 1508 dev_mc_sync_multiple(port->dev, dev);
1496 } 1509 }
1497 rcu_read_unlock(); 1510 rcu_read_unlock();
1498} 1511}
@@ -1585,7 +1598,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1585 return stats; 1598 return stats;
1586} 1599}
1587 1600
1588static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) 1601static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1589{ 1602{
1590 struct team *team = netdev_priv(dev); 1603 struct team *team = netdev_priv(dev);
1591 struct team_port *port; 1604 struct team_port *port;
@@ -1597,7 +1610,7 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1597 */ 1610 */
1598 mutex_lock(&team->lock); 1611 mutex_lock(&team->lock);
1599 list_for_each_entry(port, &team->port_list, list) { 1612 list_for_each_entry(port, &team->port_list, list) {
1600 err = vlan_vid_add(port->dev, vid); 1613 err = vlan_vid_add(port->dev, proto, vid);
1601 if (err) 1614 if (err)
1602 goto unwind; 1615 goto unwind;
1603 } 1616 }
@@ -1607,20 +1620,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1607 1620
1608unwind: 1621unwind:
1609 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1622 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1610 vlan_vid_del(port->dev, vid); 1623 vlan_vid_del(port->dev, proto, vid);
1611 mutex_unlock(&team->lock); 1624 mutex_unlock(&team->lock);
1612 1625
1613 return err; 1626 return err;
1614} 1627}
1615 1628
1616static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) 1629static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1617{ 1630{
1618 struct team *team = netdev_priv(dev); 1631 struct team *team = netdev_priv(dev);
1619 struct team_port *port; 1632 struct team_port *port;
1620 1633
1621 rcu_read_lock(); 1634 rcu_read_lock();
1622 list_for_each_entry_rcu(port, &team->port_list, list) 1635 list_for_each_entry_rcu(port, &team->port_list, list)
1623 vlan_vid_del(port->dev, vid); 1636 vlan_vid_del(port->dev, proto, vid);
1624 rcu_read_unlock(); 1637 rcu_read_unlock();
1625 1638
1626 return 0; 1639 return 0;
@@ -1828,9 +1841,9 @@ static void team_setup(struct net_device *dev)
1828 dev->features |= NETIF_F_LLTX; 1841 dev->features |= NETIF_F_LLTX;
1829 dev->features |= NETIF_F_GRO; 1842 dev->features |= NETIF_F_GRO;
1830 dev->hw_features = TEAM_VLAN_FEATURES | 1843 dev->hw_features = TEAM_VLAN_FEATURES |
1831 NETIF_F_HW_VLAN_TX | 1844 NETIF_F_HW_VLAN_CTAG_TX |
1832 NETIF_F_HW_VLAN_RX | 1845 NETIF_F_HW_VLAN_CTAG_RX |
1833 NETIF_F_HW_VLAN_FILTER; 1846 NETIF_F_HW_VLAN_CTAG_FILTER;
1834 1847
1835 dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); 1848 dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
1836 dev->features |= dev->hw_features; 1849 dev->features |= dev->hw_features;
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c5db428e73fa..c366cd299c06 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
46 return sum_ret; 46 return sum_ret;
47} 47}
48 48
49static int bc_port_enter(struct team *team, struct team_port *port)
50{
51 return team_port_set_team_dev_addr(port);
52}
53
54static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
55{
56 team_port_set_team_dev_addr(port);
57}
58
59static const struct team_mode_ops bc_mode_ops = { 49static const struct team_mode_ops bc_mode_ops = {
60 .transmit = bc_transmit, 50 .transmit = bc_transmit,
61 .port_enter = bc_port_enter, 51 .port_enter = team_modeop_port_enter,
62 .port_change_dev_addr = bc_port_change_dev_addr, 52 .port_change_dev_addr = team_modeop_port_change_dev_addr,
63}; 53};
64 54
65static const struct team_mode bc_mode = { 55static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
new file mode 100644
index 000000000000..9eabfaa22f3e
--- /dev/null
+++ b/drivers/net/team/team_mode_random.c
@@ -0,0 +1,71 @@
1/*
2 * drivers/net/team/team_mode_random.c - Random mode for team
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/skbuff.h>
16#include <linux/reciprocal_div.h>
17#include <linux/if_team.h>
18
19static u32 random_N(unsigned int N)
20{
21 return reciprocal_divide(random32(), N);
22}
23
24static bool rnd_transmit(struct team *team, struct sk_buff *skb)
25{
26 struct team_port *port;
27 int port_index;
28
29 port_index = random_N(team->en_port_count);
30 port = team_get_port_by_index_rcu(team, port_index);
31 port = team_get_first_port_txable_rcu(team, port);
32 if (unlikely(!port))
33 goto drop;
34 if (team_dev_queue_xmit(team, port, skb))
35 return false;
36 return true;
37
38drop:
39 dev_kfree_skb_any(skb);
40 return false;
41}
42
43static const struct team_mode_ops rnd_mode_ops = {
44 .transmit = rnd_transmit,
45 .port_enter = team_modeop_port_enter,
46 .port_change_dev_addr = team_modeop_port_change_dev_addr,
47};
48
49static const struct team_mode rnd_mode = {
50 .kind = "random",
51 .owner = THIS_MODULE,
52 .ops = &rnd_mode_ops,
53};
54
55static int __init rnd_init_module(void)
56{
57 return team_mode_register(&rnd_mode);
58}
59
60static void __exit rnd_cleanup_module(void)
61{
62 team_mode_unregister(&rnd_mode);
63}
64
65module_init(rnd_init_module);
66module_exit(rnd_cleanup_module);
67
68MODULE_LICENSE("GPL v2");
69MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
70MODULE_DESCRIPTION("Random mode for team");
71MODULE_ALIAS("team-mode-random");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 105135aa8f05..d268e4de781b 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team)
25 return (struct rr_priv *) &team->mode_priv; 25 return (struct rr_priv *) &team->mode_priv;
26} 26}
27 27
28static struct team_port *__get_first_port_up(struct team *team,
29 struct team_port *port)
30{
31 struct team_port *cur;
32
33 if (team_port_txable(port))
34 return port;
35 cur = port;
36 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
37 if (team_port_txable(port))
38 return cur;
39 list_for_each_entry_rcu(cur, &team->port_list, list) {
40 if (cur == port)
41 break;
42 if (team_port_txable(port))
43 return cur;
44 }
45 return NULL;
46}
47
48static bool rr_transmit(struct team *team, struct sk_buff *skb) 28static bool rr_transmit(struct team *team, struct sk_buff *skb)
49{ 29{
50 struct team_port *port; 30 struct team_port *port;
@@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
52 32
53 port_index = rr_priv(team)->sent_packets++ % team->en_port_count; 33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
54 port = team_get_port_by_index_rcu(team, port_index); 34 port = team_get_port_by_index_rcu(team, port_index);
55 port = __get_first_port_up(team, port); 35 port = team_get_first_port_txable_rcu(team, port);
56 if (unlikely(!port)) 36 if (unlikely(!port))
57 goto drop; 37 goto drop;
58 if (team_dev_queue_xmit(team, port, skb)) 38 if (team_dev_queue_xmit(team, port, skb))
@@ -64,20 +44,10 @@ drop:
64 return false; 44 return false;
65} 45}
66 46
67static int rr_port_enter(struct team *team, struct team_port *port)
68{
69 return team_port_set_team_dev_addr(port);
70}
71
72static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
73{
74 team_port_set_team_dev_addr(port);
75}
76
77static const struct team_mode_ops rr_mode_ops = { 47static const struct team_mode_ops rr_mode_ops = {
78 .transmit = rr_transmit, 48 .transmit = rr_transmit,
79 .port_enter = rr_port_enter, 49 .port_enter = team_modeop_port_enter,
80 .port_change_dev_addr = rr_port_change_dev_addr, 50 .port_change_dev_addr = team_modeop_port_change_dev_addr,
81}; 51};
82 52
83static const struct team_mode rr_mode = { 53static const struct team_mode rr_mode = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 729ed533bb33..f042b0373e5d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -409,14 +409,12 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
409{ 409{
410 struct tun_file *ntfile; 410 struct tun_file *ntfile;
411 struct tun_struct *tun; 411 struct tun_struct *tun;
412 struct net_device *dev;
413 412
414 tun = rtnl_dereference(tfile->tun); 413 tun = rtnl_dereference(tfile->tun);
415 414
416 if (tun && !tfile->detached) { 415 if (tun && !tfile->detached) {
417 u16 index = tfile->queue_index; 416 u16 index = tfile->queue_index;
418 BUG_ON(index >= tun->numqueues); 417 BUG_ON(index >= tun->numqueues);
419 dev = tun->dev;
420 418
421 rcu_assign_pointer(tun->tfiles[index], 419 rcu_assign_pointer(tun->tfiles[index],
422 tun->tfiles[tun->numqueues - 1]); 420 tun->tfiles[tun->numqueues - 1]);
@@ -1205,6 +1203,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1205 } 1203 }
1206 1204
1207 skb_reset_network_header(skb); 1205 skb_reset_network_header(skb);
1206 skb_probe_transport_header(skb, 0);
1207
1208 rxhash = skb_get_rxhash(skb); 1208 rxhash = skb_get_rxhash(skb);
1209 netif_rx_ni(skb); 1209 netif_rx_ni(skb);
1210 1210
@@ -1471,14 +1471,17 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1471 if (!tun) 1471 if (!tun)
1472 return -EBADFD; 1472 return -EBADFD;
1473 1473
1474 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1474 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
1475 return -EINVAL; 1475 ret = -EINVAL;
1476 goto out;
1477 }
1476 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len, 1478 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1477 flags & MSG_DONTWAIT); 1479 flags & MSG_DONTWAIT);
1478 if (ret > total_len) { 1480 if (ret > total_len) {
1479 m->msg_flags |= MSG_TRUNC; 1481 m->msg_flags |= MSG_TRUNC;
1480 ret = flags & MSG_TRUNC ? ret : total_len; 1482 ret = flags & MSG_TRUNC ? ret : total_len;
1481 } 1483 }
1484out:
1482 tun_put(tun); 1485 tun_put(tun);
1483 return ret; 1486 return ret;
1484} 1487}
@@ -1593,8 +1596,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1593 return err; 1596 return err;
1594 1597
1595 if (tun->flags & TUN_TAP_MQ && 1598 if (tun->flags & TUN_TAP_MQ &&
1596 (tun->numqueues + tun->numdisabled > 1)) 1599 (tun->numqueues + tun->numdisabled > 1)) {
1597 return -EBUSY; 1600 /* One or more queue has already been attached, no need
1601 * to initialize the device again.
1602 */
1603 return 0;
1604 }
1598 } 1605 }
1599 else { 1606 else {
1600 char *name; 1607 char *name;
@@ -1656,6 +1663,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1656 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 1663 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1657 TUN_USER_FEATURES; 1664 TUN_USER_FEATURES;
1658 dev->features = dev->hw_features; 1665 dev->features = dev->hw_features;
1666 dev->vlan_features = dev->features;
1659 1667
1660 INIT_LIST_HEAD(&tun->disabled); 1668 INIT_LIST_HEAD(&tun->disabled);
1661 err = tun_attach(tun, file); 1669 err = tun_attach(tun, file);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 709753469099..ad5d1e4384db 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -55,11 +55,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
55 event = urb->transfer_buffer; 55 event = urb->transfer_buffer;
56 link = event->link & 0x01; 56 link = event->link & 0x01;
57 if (netif_carrier_ok(dev->net) != link) { 57 if (netif_carrier_ok(dev->net) != link) {
58 if (link) { 58 usbnet_link_change(dev, link, 1);
59 netif_carrier_on(dev->net);
60 usbnet_defer_kevent (dev, EVENT_LINK_RESET );
61 } else
62 netif_carrier_off(dev->net);
63 netdev_dbg(dev->net, "Link Status is: %d\n", link); 59 netdev_dbg(dev->net, "Link Status is: %d\n", link);
64 } 60 }
65} 61}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 71c27d8d214f..bd8758fa38c1 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -352,11 +352,7 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
352 link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16; 352 link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
353 353
354 if (netif_carrier_ok(dev->net) != link) { 354 if (netif_carrier_ok(dev->net) != link) {
355 if (link) 355 usbnet_link_change(dev, link, 1);
356 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
357 else
358 netif_carrier_off(dev->net);
359
360 netdev_info(dev->net, "ax88179 - Link status is: %d\n", link); 356 netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
361 } 357 }
362} 358}
@@ -455,7 +451,7 @@ static int ax88179_resume(struct usb_interface *intf)
455 u16 tmp16; 451 u16 tmp16;
456 u8 tmp8; 452 u8 tmp8;
457 453
458 netif_carrier_off(dev->net); 454 usbnet_link_change(dev, 0, 0);
459 455
460 /* Power up ethernet PHY */ 456 /* Power up ethernet PHY */
461 tmp16 = 0; 457 tmp16 = 0;
@@ -1068,7 +1064,7 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
1068 /* Restart autoneg */ 1064 /* Restart autoneg */
1069 mii_nway_restart(&dev->mii); 1065 mii_nway_restart(&dev->mii);
1070 1066
1071 netif_carrier_off(dev->net); 1067 usbnet_link_change(dev, 0, 0);
1072 1068
1073 return 0; 1069 return 0;
1074} 1070}
@@ -1356,7 +1352,7 @@ static int ax88179_reset(struct usbnet *dev)
1356 /* Restart autoneg */ 1352 /* Restart autoneg */
1357 mii_nway_restart(&dev->mii); 1353 mii_nway_restart(&dev->mii);
1358 1354
1359 netif_carrier_off(dev->net); 1355 usbnet_link_change(dev, 0, 0);
1360 1356
1361 return 0; 1357 return 0;
1362} 1358}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 57136dc1b887..4ff71d619cd8 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -406,10 +406,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
406 case USB_CDC_NOTIFY_NETWORK_CONNECTION: 406 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
407 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", 407 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
408 event->wValue ? "on" : "off"); 408 event->wValue ? "on" : "off");
409 if (event->wValue) 409 usbnet_link_change(dev, !!event->wValue, 0);
410 netif_carrier_on(dev->net);
411 else
412 netif_carrier_off(dev->net);
413 break; 410 break;
414 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 411 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
415 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", 412 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 32a76059e7da..872819851aef 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -101,7 +101,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
101 dev->net->flags |= IFF_NOARP; 101 dev->net->flags |= IFF_NOARP;
102 102
103 /* no need to put the VLAN tci in the packet headers */ 103 /* no need to put the VLAN tci in the packet headers */
104 dev->net->features |= NETIF_F_HW_VLAN_TX; 104 dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
105err: 105err:
106 return ret; 106 return ret;
107} 107}
@@ -221,7 +221,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
221 221
222 /* map MBIM session to VLAN */ 222 /* map MBIM session to VLAN */
223 if (tci) 223 if (tci)
224 vlan_put_tag(skb, tci); 224 vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
225err: 225err:
226 return skb; 226 return skb;
227} 227}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4709fa3497cf..43afde8f48d2 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -362,8 +362,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
362 u8 iface_no; 362 u8 iface_no;
363 363
364 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 364 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
365 if (ctx == NULL) 365 if (!ctx)
366 return -ENODEV; 366 return -ENOMEM;
367 367
368 hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 368 hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
369 ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; 369 ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
@@ -610,7 +610,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
610 * (carrier is OFF) during attach, so the IP network stack does not 610 * (carrier is OFF) during attach, so the IP network stack does not
611 * start IPv6 negotiation and more. 611 * start IPv6 negotiation and more.
612 */ 612 */
613 netif_carrier_off(dev->net); 613 usbnet_link_change(dev, 0, 0);
614 return ret; 614 return ret;
615} 615}
616 616
@@ -1106,12 +1106,9 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1106 " %sconnected\n", 1106 " %sconnected\n",
1107 ctx->netdev->name, ctx->connected ? "" : "dis"); 1107 ctx->netdev->name, ctx->connected ? "" : "dis");
1108 1108
1109 if (ctx->connected) 1109 usbnet_link_change(dev, ctx->connected, 0);
1110 netif_carrier_on(dev->net); 1110 if (!ctx->connected)
1111 else {
1112 netif_carrier_off(dev->net);
1113 ctx->tx_speed = ctx->rx_speed = 0; 1111 ctx->tx_speed = ctx->rx_speed = 0;
1114 }
1115 break; 1112 break;
1116 1113
1117 case USB_CDC_NOTIFY_SPEED_CHANGE: 1114 case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1124,8 +1121,9 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1124 break; 1121 break;
1125 1122
1126 default: 1123 default:
1127 dev_err(&dev->udev->dev, "NCM: unexpected " 1124 dev_dbg(&dev->udev->dev,
1128 "notification 0x%02x!\n", event->bNotificationType); 1125 "NCM: unexpected notification 0x%02x!\n",
1126 event->bNotificationType);
1129 break; 1127 break;
1130 } 1128 }
1131} 1129}
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 174e5ecea4cc..2dbb9460349d 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -524,12 +524,7 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
524 524
525 link = !!(buf[0] & 0x40); 525 link = !!(buf[0] & 0x40);
526 if (netif_carrier_ok(dev->net) != link) { 526 if (netif_carrier_ok(dev->net) != link) {
527 if (link) { 527 usbnet_link_change(dev, link, 1);
528 netif_carrier_on(dev->net);
529 usbnet_defer_kevent (dev, EVENT_LINK_RESET);
530 }
531 else
532 netif_carrier_off(dev->net);
533 netdev_dbg(dev->net, "Link Status is: %d\n", link); 528 netdev_dbg(dev->net, "Link Status is: %d\n", link);
534 } 529 }
535} 530}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 3f3f566afa0b..03832d3780aa 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -576,11 +576,7 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
576 */ 576 */
577 if (data->link_counter > 20) { 577 if (data->link_counter > 20) {
578 data->link_counter = 0; 578 data->link_counter = 0;
579 if (link) { 579 usbnet_link_change(dev, link, 0);
580 netif_carrier_on(dev->net);
581 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
582 } else
583 netif_carrier_off(dev->net);
584 netdev_dbg(dev->net, "Link Status is: %d\n", link); 580 netdev_dbg(dev->net, "Link Status is: %d\n", link);
585 } 581 }
586 } else 582 } else
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 73051d10ead2..09699054b54f 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 1999-2005 Petko Manolov (petkan@users.sourceforge.net) 2 * Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +26,9 @@
26 * v0.5.1 ethtool support added 26 * v0.5.1 ethtool support added
27 * v0.5.5 rx socket buffers are in a pool and the their allocation 27 * v0.5.5 rx socket buffers are in a pool and the their allocation
28 * is out of the interrupt routine. 28 * is out of the interrupt routine.
29 * ...
30 * v0.9.3 simplified [get|set]_register(s), async update registers
31 * logic revisited, receive skb_pool removed.
29 */ 32 */
30 33
31#include <linux/sched.h> 34#include <linux/sched.h>
@@ -45,8 +48,8 @@
45/* 48/*
46 * Version Information 49 * Version Information
47 */ 50 */
48#define DRIVER_VERSION "v0.6.14 (2006/09/27)" 51#define DRIVER_VERSION "v0.9.3 (2013/04/25)"
49#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>" 52#define DRIVER_AUTHOR "Petko Manolov <petkan@nucleusys.com>"
50#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver" 53#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
51 54
52static const char driver_name[] = "pegasus"; 55static const char driver_name[] = "pegasus";
@@ -108,251 +111,137 @@ MODULE_PARM_DESC(msg_level, "Override default message level");
108MODULE_DEVICE_TABLE(usb, pegasus_ids); 111MODULE_DEVICE_TABLE(usb, pegasus_ids);
109static const struct net_device_ops pegasus_netdev_ops; 112static const struct net_device_ops pegasus_netdev_ops;
110 113
111static int update_eth_regs_async(pegasus_t *); 114/*****/
112/* Aargh!!! I _really_ hate such tweaks */ 115
113static void ctrl_callback(struct urb *urb) 116static void async_ctrl_callback(struct urb *urb)
114{ 117{
115 pegasus_t *pegasus = urb->context; 118 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
116 int status = urb->status; 119 int status = urb->status;
117 120
118 if (!pegasus) 121 if (status < 0)
119 return; 122 dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status);
120 123 kfree(req);
121 switch (status) { 124 usb_free_urb(urb);
122 case 0:
123 if (pegasus->flags & ETH_REGS_CHANGE) {
124 pegasus->flags &= ~ETH_REGS_CHANGE;
125 pegasus->flags |= ETH_REGS_CHANGED;
126 update_eth_regs_async(pegasus);
127 return;
128 }
129 break;
130 case -EINPROGRESS:
131 return;
132 case -ENOENT:
133 break;
134 default:
135 if (net_ratelimit())
136 netif_dbg(pegasus, drv, pegasus->net,
137 "%s, status %d\n", __func__, status);
138 break;
139 }
140 pegasus->flags &= ~ETH_REGS_CHANGED;
141 wake_up(&pegasus->ctrl_wait);
142} 125}
143 126
144static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, 127static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
145 void *data)
146{ 128{
147 int ret; 129 int ret;
148 char *buffer;
149 DECLARE_WAITQUEUE(wait, current);
150
151 buffer = kmalloc(size, GFP_KERNEL);
152 if (!buffer)
153 return -ENOMEM;
154
155 add_wait_queue(&pegasus->ctrl_wait, &wait);
156 set_current_state(TASK_UNINTERRUPTIBLE);
157 while (pegasus->flags & ETH_REGS_CHANGED)
158 schedule();
159 remove_wait_queue(&pegasus->ctrl_wait, &wait);
160 set_current_state(TASK_RUNNING);
161
162 pegasus->dr.bRequestType = PEGASUS_REQT_READ;
163 pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
164 pegasus->dr.wValue = cpu_to_le16(0);
165 pegasus->dr.wIndex = cpu_to_le16(indx);
166 pegasus->dr.wLength = cpu_to_le16(size);
167 pegasus->ctrl_urb->transfer_buffer_length = size;
168
169 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
170 usb_rcvctrlpipe(pegasus->usb, 0),
171 (char *) &pegasus->dr,
172 buffer, size, ctrl_callback, pegasus);
173
174 add_wait_queue(&pegasus->ctrl_wait, &wait);
175 set_current_state(TASK_UNINTERRUPTIBLE);
176
177 /* using ATOMIC, we'd never wake up if we slept */
178 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
179 set_current_state(TASK_RUNNING);
180 if (ret == -ENODEV)
181 netif_device_detach(pegasus->net);
182 if (net_ratelimit())
183 netif_err(pegasus, drv, pegasus->net,
184 "%s, status %d\n", __func__, ret);
185 goto out;
186 }
187
188 schedule();
189out:
190 remove_wait_queue(&pegasus->ctrl_wait, &wait);
191 memcpy(data, buffer, size);
192 kfree(buffer);
193 130
131 ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
132 PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
133 indx, data, size, 1000);
134 if (ret < 0)
135 netif_dbg(pegasus, drv, pegasus->net,
136 "%s returned %d\n", __func__, ret);
194 return ret; 137 return ret;
195} 138}
196 139
197static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, 140static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
198 void *data)
199{ 141{
200 int ret; 142 int ret;
201 char *buffer;
202 DECLARE_WAITQUEUE(wait, current);
203
204 buffer = kmemdup(data, size, GFP_KERNEL);
205 if (!buffer) {
206 netif_warn(pegasus, drv, pegasus->net,
207 "out of memory in %s\n", __func__);
208 return -ENOMEM;
209 }
210
211 add_wait_queue(&pegasus->ctrl_wait, &wait);
212 set_current_state(TASK_UNINTERRUPTIBLE);
213 while (pegasus->flags & ETH_REGS_CHANGED)
214 schedule();
215 remove_wait_queue(&pegasus->ctrl_wait, &wait);
216 set_current_state(TASK_RUNNING);
217
218 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
219 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
220 pegasus->dr.wValue = cpu_to_le16(0);
221 pegasus->dr.wIndex = cpu_to_le16(indx);
222 pegasus->dr.wLength = cpu_to_le16(size);
223 pegasus->ctrl_urb->transfer_buffer_length = size;
224
225 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
226 usb_sndctrlpipe(pegasus->usb, 0),
227 (char *) &pegasus->dr,
228 buffer, size, ctrl_callback, pegasus);
229
230 add_wait_queue(&pegasus->ctrl_wait, &wait);
231 set_current_state(TASK_UNINTERRUPTIBLE);
232
233 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
234 if (ret == -ENODEV)
235 netif_device_detach(pegasus->net);
236 netif_err(pegasus, drv, pegasus->net,
237 "%s, status %d\n", __func__, ret);
238 goto out;
239 }
240
241 schedule();
242out:
243 remove_wait_queue(&pegasus->ctrl_wait, &wait);
244 kfree(buffer);
245 143
144 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
145 PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
146 indx, data, size, 100);
147 if (ret < 0)
148 netif_dbg(pegasus, drv, pegasus->net,
149 "%s returned %d\n", __func__, ret);
246 return ret; 150 return ret;
247} 151}
248 152
249static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) 153static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
250{ 154{
251 int ret; 155 int ret;
252 char *tmp;
253 DECLARE_WAITQUEUE(wait, current);
254
255 tmp = kmemdup(&data, 1, GFP_KERNEL);
256 if (!tmp) {
257 netif_warn(pegasus, drv, pegasus->net,
258 "out of memory in %s\n", __func__);
259 return -ENOMEM;
260 }
261 add_wait_queue(&pegasus->ctrl_wait, &wait);
262 set_current_state(TASK_UNINTERRUPTIBLE);
263 while (pegasus->flags & ETH_REGS_CHANGED)
264 schedule();
265 remove_wait_queue(&pegasus->ctrl_wait, &wait);
266 set_current_state(TASK_RUNNING);
267
268 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
269 pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
270 pegasus->dr.wValue = cpu_to_le16(data);
271 pegasus->dr.wIndex = cpu_to_le16(indx);
272 pegasus->dr.wLength = cpu_to_le16(1);
273 pegasus->ctrl_urb->transfer_buffer_length = 1;
274
275 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
276 usb_sndctrlpipe(pegasus->usb, 0),
277 (char *) &pegasus->dr,
278 tmp, 1, ctrl_callback, pegasus);
279
280 add_wait_queue(&pegasus->ctrl_wait, &wait);
281 set_current_state(TASK_UNINTERRUPTIBLE);
282
283 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
284 if (ret == -ENODEV)
285 netif_device_detach(pegasus->net);
286 if (net_ratelimit())
287 netif_err(pegasus, drv, pegasus->net,
288 "%s, status %d\n", __func__, ret);
289 goto out;
290 }
291
292 schedule();
293out:
294 remove_wait_queue(&pegasus->ctrl_wait, &wait);
295 kfree(tmp);
296 156
157 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
158 PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
159 indx, &data, 1, 1000);
160 if (ret < 0)
161 netif_dbg(pegasus, drv, pegasus->net,
162 "%s returned %d\n", __func__, ret);
297 return ret; 163 return ret;
298} 164}
299 165
300static int update_eth_regs_async(pegasus_t *pegasus) 166static int update_eth_regs_async(pegasus_t *pegasus)
301{ 167{
302 int ret; 168 int ret = -ENOMEM;
303 169 struct urb *async_urb;
304 pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; 170 struct usb_ctrlrequest *req;
305 pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; 171
306 pegasus->dr.wValue = cpu_to_le16(0); 172 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
307 pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); 173 if (req == NULL)
308 pegasus->dr.wLength = cpu_to_le16(3); 174 return ret;
309 pegasus->ctrl_urb->transfer_buffer_length = 3; 175
310 176 async_urb = usb_alloc_urb(0, GFP_ATOMIC);
311 usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb, 177 if (async_urb == NULL) {
312 usb_sndctrlpipe(pegasus->usb, 0), 178 kfree(req);
313 (char *) &pegasus->dr, 179 return ret;
314 pegasus->eth_regs, 3, ctrl_callback, pegasus); 180 }
315 181 req->bRequestType = PEGASUS_REQT_WRITE;
316 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 182 req->bRequest = PEGASUS_REQ_SET_REGS;
183 req->wValue = cpu_to_le16(0);
184 req->wIndex = cpu_to_le16(EthCtrl0);
185 req->wLength = cpu_to_le16(3);
186
187 usb_fill_control_urb(async_urb, pegasus->usb,
188 usb_sndctrlpipe(pegasus->usb, 0), (void *)req,
189 pegasus->eth_regs, 3, async_ctrl_callback, req);
190
191 ret = usb_submit_urb(async_urb, GFP_ATOMIC);
192 if (ret) {
317 if (ret == -ENODEV) 193 if (ret == -ENODEV)
318 netif_device_detach(pegasus->net); 194 netif_device_detach(pegasus->net);
319 netif_err(pegasus, drv, pegasus->net, 195 netif_err(pegasus, drv, pegasus->net,
320 "%s, status %d\n", __func__, ret); 196 "%s returned %d\n", __func__, ret);
321 } 197 }
322
323 return ret; 198 return ret;
324} 199}
325 200
326/* Returns 0 on success, error on failure */ 201static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
327static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
328{ 202{
329 int i; 203 int i;
330 __u8 data[4] = { phy, 0, 0, indx }; 204 __u8 data[4] = { phy, 0, 0, indx };
331 __le16 regdi; 205 __le16 regdi;
332 int ret; 206 int ret = -ETIMEDOUT;
333 207
334 set_register(pegasus, PhyCtrl, 0); 208 if (cmd & PHY_WRITE) {
335 set_registers(pegasus, PhyAddr, sizeof(data), data); 209 __le16 *t = (__le16 *) & data[1];
336 set_register(pegasus, PhyCtrl, (indx | PHY_READ)); 210 *t = cpu_to_le16(*regd);
211 }
212 set_register(p, PhyCtrl, 0);
213 set_registers(p, PhyAddr, sizeof(data), data);
214 set_register(p, PhyCtrl, (indx | cmd));
337 for (i = 0; i < REG_TIMEOUT; i++) { 215 for (i = 0; i < REG_TIMEOUT; i++) {
338 ret = get_registers(pegasus, PhyCtrl, 1, data); 216 ret = get_registers(p, PhyCtrl, 1, data);
339 if (ret == -ESHUTDOWN) 217 if (ret < 0)
340 goto fail; 218 goto fail;
341 if (data[0] & PHY_DONE) 219 if (data[0] & PHY_DONE)
342 break; 220 break;
343 } 221 }
344
345 if (i >= REG_TIMEOUT) 222 if (i >= REG_TIMEOUT)
346 goto fail; 223 goto fail;
347 224 if (cmd & PHY_READ) {
348 ret = get_registers(pegasus, PhyData, 2, &regdi); 225 ret = get_registers(p, PhyData, 2, &regdi);
349 *regd = le16_to_cpu(regdi); 226 *regd = le16_to_cpu(regdi);
227 return ret;
228 }
229 return 0;
230fail:
231 netif_dbg(p, drv, p->net, "%s failed\n", __func__);
350 return ret; 232 return ret;
233}
351 234
352fail: 235/* Returns non-negative int on success, error on failure */
353 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); 236static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
237{
238 return __mii_op(pegasus, phy, indx, regd, PHY_READ);
239}
354 240
355 return ret; 241/* Returns zero on success, error on failure */
242static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
243{
244 return __mii_op(pegasus, phy, indx, regd, PHY_WRITE);
356} 245}
357 246
358static int mdio_read(struct net_device *dev, int phy_id, int loc) 247static int mdio_read(struct net_device *dev, int phy_id, int loc)
@@ -364,40 +253,11 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
364 return (int)res; 253 return (int)res;
365} 254}
366 255
367static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd)
368{
369 int i;
370 __u8 data[4] = { phy, 0, 0, indx };
371 int ret;
372
373 data[1] = (u8) regd;
374 data[2] = (u8) (regd >> 8);
375 set_register(pegasus, PhyCtrl, 0);
376 set_registers(pegasus, PhyAddr, sizeof(data), data);
377 set_register(pegasus, PhyCtrl, (indx | PHY_WRITE));
378 for (i = 0; i < REG_TIMEOUT; i++) {
379 ret = get_registers(pegasus, PhyCtrl, 1, data);
380 if (ret == -ESHUTDOWN)
381 goto fail;
382 if (data[0] & PHY_DONE)
383 break;
384 }
385
386 if (i >= REG_TIMEOUT)
387 goto fail;
388
389 return ret;
390
391fail:
392 netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
393 return -ETIMEDOUT;
394}
395
396static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) 256static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
397{ 257{
398 pegasus_t *pegasus = netdev_priv(dev); 258 pegasus_t *pegasus = netdev_priv(dev);
399 259
400 write_mii_word(pegasus, phy_id, loc, val); 260 write_mii_word(pegasus, phy_id, loc, (__u16 *)&val);
401} 261}
402 262
403static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) 263static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
@@ -434,7 +294,6 @@ fail:
434static inline void enable_eprom_write(pegasus_t *pegasus) 294static inline void enable_eprom_write(pegasus_t *pegasus)
435{ 295{
436 __u8 tmp; 296 __u8 tmp;
437 int ret;
438 297
439 get_registers(pegasus, EthCtrl2, 1, &tmp); 298 get_registers(pegasus, EthCtrl2, 1, &tmp);
440 set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE); 299 set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
@@ -443,7 +302,6 @@ static inline void enable_eprom_write(pegasus_t *pegasus)
443static inline void disable_eprom_write(pegasus_t *pegasus) 302static inline void disable_eprom_write(pegasus_t *pegasus)
444{ 303{
445 __u8 tmp; 304 __u8 tmp;
446 int ret;
447 305
448 get_registers(pegasus, EthCtrl2, 1, &tmp); 306 get_registers(pegasus, EthCtrl2, 1, &tmp);
449 set_register(pegasus, EpromCtrl, 0); 307 set_register(pegasus, EpromCtrl, 0);
@@ -537,7 +395,8 @@ static inline int reset_mac(pegasus_t *pegasus)
537 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) { 395 if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
538 __u16 auxmode; 396 __u16 auxmode;
539 read_mii_word(pegasus, 3, 0x1b, &auxmode); 397 read_mii_word(pegasus, 3, 0x1b, &auxmode);
540 write_mii_word(pegasus, 3, 0x1b, auxmode | 4); 398 auxmode |= 4;
399 write_mii_word(pegasus, 3, 0x1b, &auxmode);
541 } 400 }
542 401
543 return 0; 402 return 0;
@@ -569,57 +428,13 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
569 usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { 428 usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
570 u16 auxmode; 429 u16 auxmode;
571 read_mii_word(pegasus, 0, 0x1b, &auxmode); 430 read_mii_word(pegasus, 0, 0x1b, &auxmode);
572 write_mii_word(pegasus, 0, 0x1b, auxmode | 4); 431 auxmode |= 4;
432 write_mii_word(pegasus, 0, 0x1b, &auxmode);
573 } 433 }
574 434
575 return ret; 435 return ret;
576} 436}
577 437
578static void fill_skb_pool(pegasus_t *pegasus)
579{
580 int i;
581
582 for (i = 0; i < RX_SKBS; i++) {
583 if (pegasus->rx_pool[i])
584 continue;
585 pegasus->rx_pool[i] = dev_alloc_skb(PEGASUS_MTU + 2);
586 /*
587 ** we give up if the allocation fail. the tasklet will be
588 ** rescheduled again anyway...
589 */
590 if (pegasus->rx_pool[i] == NULL)
591 return;
592 skb_reserve(pegasus->rx_pool[i], 2);
593 }
594}
595
596static void free_skb_pool(pegasus_t *pegasus)
597{
598 int i;
599
600 for (i = 0; i < RX_SKBS; i++) {
601 if (pegasus->rx_pool[i]) {
602 dev_kfree_skb(pegasus->rx_pool[i]);
603 pegasus->rx_pool[i] = NULL;
604 }
605 }
606}
607
608static inline struct sk_buff *pull_skb(pegasus_t * pegasus)
609{
610 int i;
611 struct sk_buff *skb;
612
613 for (i = 0; i < RX_SKBS; i++) {
614 if (likely(pegasus->rx_pool[i] != NULL)) {
615 skb = pegasus->rx_pool[i];
616 pegasus->rx_pool[i] = NULL;
617 return skb;
618 }
619 }
620 return NULL;
621}
622
623static void read_bulk_callback(struct urb *urb) 438static void read_bulk_callback(struct urb *urb)
624{ 439{
625 pegasus_t *pegasus = urb->context; 440 pegasus_t *pegasus = urb->context;
@@ -704,9 +519,8 @@ static void read_bulk_callback(struct urb *urb)
704 if (pegasus->flags & PEGASUS_UNPLUG) 519 if (pegasus->flags & PEGASUS_UNPLUG)
705 return; 520 return;
706 521
707 spin_lock(&pegasus->rx_pool_lock); 522 pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net, PEGASUS_MTU,
708 pegasus->rx_skb = pull_skb(pegasus); 523 GFP_ATOMIC);
709 spin_unlock(&pegasus->rx_pool_lock);
710 524
711 if (pegasus->rx_skb == NULL) 525 if (pegasus->rx_skb == NULL)
712 goto tl_sched; 526 goto tl_sched;
@@ -734,24 +548,23 @@ tl_sched:
734static void rx_fixup(unsigned long data) 548static void rx_fixup(unsigned long data)
735{ 549{
736 pegasus_t *pegasus; 550 pegasus_t *pegasus;
737 unsigned long flags;
738 int status; 551 int status;
739 552
740 pegasus = (pegasus_t *) data; 553 pegasus = (pegasus_t *) data;
741 if (pegasus->flags & PEGASUS_UNPLUG) 554 if (pegasus->flags & PEGASUS_UNPLUG)
742 return; 555 return;
743 556
744 spin_lock_irqsave(&pegasus->rx_pool_lock, flags);
745 fill_skb_pool(pegasus);
746 if (pegasus->flags & PEGASUS_RX_URB_FAIL) 557 if (pegasus->flags & PEGASUS_RX_URB_FAIL)
747 if (pegasus->rx_skb) 558 if (pegasus->rx_skb)
748 goto try_again; 559 goto try_again;
749 if (pegasus->rx_skb == NULL) 560 if (pegasus->rx_skb == NULL)
750 pegasus->rx_skb = pull_skb(pegasus); 561 pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net,
562 PEGASUS_MTU,
563 GFP_ATOMIC);
751 if (pegasus->rx_skb == NULL) { 564 if (pegasus->rx_skb == NULL) {
752 netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n"); 565 netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
753 tasklet_schedule(&pegasus->rx_tl); 566 tasklet_schedule(&pegasus->rx_tl);
754 goto done; 567 return;
755 } 568 }
756 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 569 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
757 usb_rcvbulkpipe(pegasus->usb, 1), 570 usb_rcvbulkpipe(pegasus->usb, 1),
@@ -767,8 +580,6 @@ try_again:
767 } else { 580 } else {
768 pegasus->flags &= ~PEGASUS_RX_URB_FAIL; 581 pegasus->flags &= ~PEGASUS_RX_URB_FAIL;
769 } 582 }
770done:
771 spin_unlock_irqrestore(&pegasus->rx_pool_lock, flags);
772} 583}
773 584
774static void write_bulk_callback(struct urb *urb) 585static void write_bulk_callback(struct urb *urb)
@@ -963,7 +774,6 @@ static void free_all_urbs(pegasus_t *pegasus)
963 usb_free_urb(pegasus->intr_urb); 774 usb_free_urb(pegasus->intr_urb);
964 usb_free_urb(pegasus->tx_urb); 775 usb_free_urb(pegasus->tx_urb);
965 usb_free_urb(pegasus->rx_urb); 776 usb_free_urb(pegasus->rx_urb);
966 usb_free_urb(pegasus->ctrl_urb);
967} 777}
968 778
969static void unlink_all_urbs(pegasus_t *pegasus) 779static void unlink_all_urbs(pegasus_t *pegasus)
@@ -971,48 +781,42 @@ static void unlink_all_urbs(pegasus_t *pegasus)
971 usb_kill_urb(pegasus->intr_urb); 781 usb_kill_urb(pegasus->intr_urb);
972 usb_kill_urb(pegasus->tx_urb); 782 usb_kill_urb(pegasus->tx_urb);
973 usb_kill_urb(pegasus->rx_urb); 783 usb_kill_urb(pegasus->rx_urb);
974 usb_kill_urb(pegasus->ctrl_urb);
975} 784}
976 785
977static int alloc_urbs(pegasus_t *pegasus) 786static int alloc_urbs(pegasus_t *pegasus)
978{ 787{
979 pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); 788 int res = -ENOMEM;
980 if (!pegasus->ctrl_urb) 789
981 return 0;
982 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 790 pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
983 if (!pegasus->rx_urb) { 791 if (!pegasus->rx_urb) {
984 usb_free_urb(pegasus->ctrl_urb); 792 return res;
985 return 0;
986 } 793 }
987 pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 794 pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
988 if (!pegasus->tx_urb) { 795 if (!pegasus->tx_urb) {
989 usb_free_urb(pegasus->rx_urb); 796 usb_free_urb(pegasus->rx_urb);
990 usb_free_urb(pegasus->ctrl_urb); 797 return res;
991 return 0;
992 } 798 }
993 pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL); 799 pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
994 if (!pegasus->intr_urb) { 800 if (!pegasus->intr_urb) {
995 usb_free_urb(pegasus->tx_urb); 801 usb_free_urb(pegasus->tx_urb);
996 usb_free_urb(pegasus->rx_urb); 802 usb_free_urb(pegasus->rx_urb);
997 usb_free_urb(pegasus->ctrl_urb); 803 return res;
998 return 0;
999 } 804 }
1000 805
1001 return 1; 806 return 0;
1002} 807}
1003 808
1004static int pegasus_open(struct net_device *net) 809static int pegasus_open(struct net_device *net)
1005{ 810{
1006 pegasus_t *pegasus = netdev_priv(net); 811 pegasus_t *pegasus = netdev_priv(net);
1007 int res; 812 int res=-ENOMEM;
1008 813
1009 if (pegasus->rx_skb == NULL) 814 if (pegasus->rx_skb == NULL)
1010 pegasus->rx_skb = pull_skb(pegasus); 815 pegasus->rx_skb = __netdev_alloc_skb_ip_align(pegasus->net,
1011 /* 816 PEGASUS_MTU,
1012 ** Note: no point to free the pool. it is empty :-) 817 GFP_KERNEL);
1013 */
1014 if (!pegasus->rx_skb) 818 if (!pegasus->rx_skb)
1015 return -ENOMEM; 819 goto exit;
1016 820
1017 res = set_registers(pegasus, EthID, 6, net->dev_addr); 821 res = set_registers(pegasus, EthID, 6, net->dev_addr);
1018 822
@@ -1038,13 +842,13 @@ static int pegasus_open(struct net_device *net)
1038 usb_kill_urb(pegasus->rx_urb); 842 usb_kill_urb(pegasus->rx_urb);
1039 goto exit; 843 goto exit;
1040 } 844 }
1041 if ((res = enable_net_traffic(net, pegasus->usb))) { 845 res = enable_net_traffic(net, pegasus->usb);
846 if (res < 0) {
1042 netif_dbg(pegasus, ifup, net, 847 netif_dbg(pegasus, ifup, net,
1043 "can't enable_net_traffic() - %d\n", res); 848 "can't enable_net_traffic() - %d\n", res);
1044 res = -EIO; 849 res = -EIO;
1045 usb_kill_urb(pegasus->rx_urb); 850 usb_kill_urb(pegasus->rx_urb);
1046 usb_kill_urb(pegasus->intr_urb); 851 usb_kill_urb(pegasus->intr_urb);
1047 free_skb_pool(pegasus);
1048 goto exit; 852 goto exit;
1049 } 853 }
1050 set_carrier(net); 854 set_carrier(net);
@@ -1195,7 +999,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
1195 case SIOCDEVPRIVATE + 2: 999 case SIOCDEVPRIVATE + 2:
1196 if (!capable(CAP_NET_ADMIN)) 1000 if (!capable(CAP_NET_ADMIN))
1197 return -EPERM; 1001 return -EPERM;
1198 write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, data[2]); 1002 write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, &data[2]);
1199 res = 0; 1003 res = 0;
1200 break; 1004 break;
1201 default: 1005 default:
@@ -1219,11 +1023,7 @@ static void pegasus_set_multicast(struct net_device *net)
1219 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST; 1023 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
1220 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1024 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1221 } 1025 }
1222 1026 update_eth_regs_async(pegasus);
1223 pegasus->ctrl_urb->status = 0;
1224
1225 pegasus->flags |= ETH_REGS_CHANGE;
1226 ctrl_callback(pegasus->ctrl_urb);
1227} 1027}
1228 1028
1229static __u8 mii_phy_probe(pegasus_t *pegasus) 1029static __u8 mii_phy_probe(pegasus_t *pegasus)
@@ -1340,9 +1140,9 @@ static int pegasus_probe(struct usb_interface *intf,
1340 1140
1341 pegasus = netdev_priv(net); 1141 pegasus = netdev_priv(net);
1342 pegasus->dev_index = dev_index; 1142 pegasus->dev_index = dev_index;
1343 init_waitqueue_head(&pegasus->ctrl_wait);
1344 1143
1345 if (!alloc_urbs(pegasus)) { 1144 res = alloc_urbs(pegasus);
1145 if (res < 0) {
1346 dev_err(&intf->dev, "can't allocate %s\n", "urbs"); 1146 dev_err(&intf->dev, "can't allocate %s\n", "urbs");
1347 goto out1; 1147 goto out1;
1348 } 1148 }
@@ -1364,7 +1164,6 @@ static int pegasus_probe(struct usb_interface *intf,
1364 pegasus->mii.mdio_write = mdio_write; 1164 pegasus->mii.mdio_write = mdio_write;
1365 pegasus->mii.phy_id_mask = 0x1f; 1165 pegasus->mii.phy_id_mask = 0x1f;
1366 pegasus->mii.reg_num_mask = 0x1f; 1166 pegasus->mii.reg_num_mask = 0x1f;
1367 spin_lock_init(&pegasus->rx_pool_lock);
1368 pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV 1167 pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
1369 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1168 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1370 1169
@@ -1376,7 +1175,6 @@ static int pegasus_probe(struct usb_interface *intf,
1376 goto out2; 1175 goto out2;
1377 } 1176 }
1378 set_ethernet_addr(pegasus); 1177 set_ethernet_addr(pegasus);
1379 fill_skb_pool(pegasus);
1380 if (pegasus->features & PEGASUS_II) { 1178 if (pegasus->features & PEGASUS_II) {
1381 dev_info(&intf->dev, "setup Pegasus II specific registers\n"); 1179 dev_info(&intf->dev, "setup Pegasus II specific registers\n");
1382 setup_pegasus_II(pegasus); 1180 setup_pegasus_II(pegasus);
@@ -1394,17 +1192,13 @@ static int pegasus_probe(struct usb_interface *intf,
1394 if (res) 1192 if (res)
1395 goto out3; 1193 goto out3;
1396 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1194 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1397 CARRIER_CHECK_DELAY); 1195 CARRIER_CHECK_DELAY);
1398 1196 dev_info(&intf->dev, "%s, %s, %pM\n", net->name,
1399 dev_info(&intf->dev, "%s, %s, %pM\n", 1197 usb_dev_id[dev_index].name, net->dev_addr);
1400 net->name,
1401 usb_dev_id[dev_index].name,
1402 net->dev_addr);
1403 return 0; 1198 return 0;
1404 1199
1405out3: 1200out3:
1406 usb_set_intfdata(intf, NULL); 1201 usb_set_intfdata(intf, NULL);
1407 free_skb_pool(pegasus);
1408out2: 1202out2:
1409 free_all_urbs(pegasus); 1203 free_all_urbs(pegasus);
1410out1: 1204out1:
@@ -1429,7 +1223,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
1429 unregister_netdev(pegasus->net); 1223 unregister_netdev(pegasus->net);
1430 unlink_all_urbs(pegasus); 1224 unlink_all_urbs(pegasus);
1431 free_all_urbs(pegasus); 1225 free_all_urbs(pegasus);
1432 free_skb_pool(pegasus);
1433 if (pegasus->rx_skb != NULL) { 1226 if (pegasus->rx_skb != NULL) {
1434 dev_kfree_skb(pegasus->rx_skb); 1227 dev_kfree_skb(pegasus->rx_skb);
1435 pegasus->rx_skb = NULL; 1228 pegasus->rx_skb = NULL;
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index 65b78b35b73c..d15646244fdf 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 1999-2003 Petko Manolov - Petkan (petkan@users.sourceforge.net) 2 * Copyright (c) 1999-2013 Petko Manolov (petkan@nucleusys.com)
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as published 5 * it under the terms of the GNU General Public License version 2 as published
@@ -13,7 +13,6 @@
13#define HAS_HOME_PNA 0x40000000 13#define HAS_HOME_PNA 0x40000000
14 14
15#define PEGASUS_MTU 1536 15#define PEGASUS_MTU 1536
16#define RX_SKBS 4
17 16
18#define EPROM_WRITE 0x01 17#define EPROM_WRITE 0x01
19#define EPROM_READ 0x02 18#define EPROM_READ 0x02
@@ -34,8 +33,6 @@
34#define CTRL_URB_SLEEP 0x00000020 33#define CTRL_URB_SLEEP 0x00000020
35#define PEGASUS_UNPLUG 0x00000040 34#define PEGASUS_UNPLUG 0x00000040
36#define PEGASUS_RX_URB_FAIL 0x00000080 35#define PEGASUS_RX_URB_FAIL 0x00000080
37#define ETH_REGS_CHANGE 0x40000000
38#define ETH_REGS_CHANGED 0x80000000
39 36
40#define RX_MULTICAST 2 37#define RX_MULTICAST 2
41#define RX_PROMISCUOUS 4 38#define RX_PROMISCUOUS 4
@@ -96,12 +93,8 @@ typedef struct pegasus {
96 int intr_interval; 93 int intr_interval;
97 struct tasklet_struct rx_tl; 94 struct tasklet_struct rx_tl;
98 struct delayed_work carrier_check; 95 struct delayed_work carrier_check;
99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; 96 struct urb *rx_urb, *tx_urb, *intr_urb;
100 struct sk_buff *rx_pool[RX_SKBS];
101 struct sk_buff *rx_skb; 97 struct sk_buff *rx_skb;
102 struct usb_ctrlrequest dr;
103 wait_queue_head_t ctrl_wait;
104 spinlock_t rx_pool_lock;
105 int chip; 98 int chip;
106 unsigned char intr_buff[8]; 99 unsigned char intr_buff[8];
107 __u8 tx_buff[PEGASUS_MTU]; 100 __u8 tx_buff[PEGASUS_MTU];
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 79ab2435d9d3..a923d61c6fc5 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -413,11 +413,10 @@ static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
413 if (link_up) { 413 if (link_up) {
414 sierra_net_set_ctx_index(priv, hh->msgspecific.byte); 414 sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
415 priv->link_up = 1; 415 priv->link_up = 1;
416 netif_carrier_on(dev->net);
417 } else { 416 } else {
418 priv->link_up = 0; 417 priv->link_up = 0;
419 netif_carrier_off(dev->net);
420 } 418 }
419 usbnet_link_change(dev, link_up, 0);
421} 420}
422 421
423static void sierra_net_dosync(struct usbnet *dev) 422static void sierra_net_dosync(struct usbnet *dev)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 51f3192f3931..1e5a9b72650e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -938,6 +938,27 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
938 938
939/*-------------------------------------------------------------------------*/ 939/*-------------------------------------------------------------------------*/
940 940
941static void __handle_link_change(struct usbnet *dev)
942{
943 if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
944 return;
945
946 if (!netif_carrier_ok(dev->net)) {
947 /* kill URBs for reading packets to save bus bandwidth */
948 unlink_urbs(dev, &dev->rxq);
949
950 /*
951 * tx_timeout will unlink URBs for sending packets and
952 * tx queue is stopped by netcore after link becomes off
953 */
954 } else {
955 /* submitting URBs for reading packets */
956 tasklet_schedule(&dev->bh);
957 }
958
959 clear_bit(EVENT_LINK_CHANGE, &dev->flags);
960}
961
941/* work that cannot be done in interrupt context uses keventd. 962/* work that cannot be done in interrupt context uses keventd.
942 * 963 *
943 * NOTE: with 2.5 we could do more of this using completion callbacks, 964 * NOTE: with 2.5 we could do more of this using completion callbacks,
@@ -1035,8 +1056,14 @@ skip_reset:
1035 } else { 1056 } else {
1036 usb_autopm_put_interface(dev->intf); 1057 usb_autopm_put_interface(dev->intf);
1037 } 1058 }
1059
1060 /* handle link change from link resetting */
1061 __handle_link_change(dev);
1038 } 1062 }
1039 1063
1064 if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
1065 __handle_link_change(dev);
1066
1040 if (dev->flags) 1067 if (dev->flags)
1041 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); 1068 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1042} 1069}
@@ -1286,6 +1313,7 @@ static void usbnet_bh (unsigned long param)
1286 // or are we maybe short a few urbs? 1313 // or are we maybe short a few urbs?
1287 } else if (netif_running (dev->net) && 1314 } else if (netif_running (dev->net) &&
1288 netif_device_present (dev->net) && 1315 netif_device_present (dev->net) &&
1316 netif_carrier_ok(dev->net) &&
1289 !timer_pending (&dev->delay) && 1317 !timer_pending (&dev->delay) &&
1290 !test_bit (EVENT_RX_HALT, &dev->flags)) { 1318 !test_bit (EVENT_RX_HALT, &dev->flags)) {
1291 int temp = dev->rxq.qlen; 1319 int temp = dev->rxq.qlen;
@@ -1521,7 +1549,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1521 netif_device_attach (net); 1549 netif_device_attach (net);
1522 1550
1523 if (dev->driver_info->flags & FLAG_LINK_INTR) 1551 if (dev->driver_info->flags & FLAG_LINK_INTR)
1524 netif_carrier_off(net); 1552 usbnet_link_change(dev, 0, 0);
1525 1553
1526 return 0; 1554 return 0;
1527 1555
@@ -1653,6 +1681,21 @@ int usbnet_manage_power(struct usbnet *dev, int on)
1653} 1681}
1654EXPORT_SYMBOL(usbnet_manage_power); 1682EXPORT_SYMBOL(usbnet_manage_power);
1655 1683
1684void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
1685{
1686 /* update link after link is reseted */
1687 if (link && !need_reset)
1688 netif_carrier_on(dev->net);
1689 else
1690 netif_carrier_off(dev->net);
1691
1692 if (need_reset && link)
1693 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
1694 else
1695 usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
1696}
1697EXPORT_SYMBOL(usbnet_link_change);
1698
1656/*-------------------------------------------------------------------------*/ 1699/*-------------------------------------------------------------------------*/
1657static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, 1700static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1658 u16 value, u16 index, void *data, u16 size) 1701 u16 value, u16 index, void *data, u16 size)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 07a4af0aa3dc..177f911f5946 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -255,7 +255,8 @@ static const struct net_device_ops veth_netdev_ops = {
255 255
256#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 256#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
257 NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ 257 NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
258 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX) 258 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
259 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
259 260
260static void veth_setup(struct net_device *dev) 261static void veth_setup(struct net_device *dev)
261{ 262{
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 57ac4b0294bc..50077753a0e5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -154,7 +154,7 @@ struct padded_vnet_hdr {
154 */ 154 */
155static int vq2txq(struct virtqueue *vq) 155static int vq2txq(struct virtqueue *vq)
156{ 156{
157 return (virtqueue_get_queue_index(vq) - 1) / 2; 157 return (vq->index - 1) / 2;
158} 158}
159 159
160static int txq2vq(int txq) 160static int txq2vq(int txq)
@@ -164,7 +164,7 @@ static int txq2vq(int txq)
164 164
165static int vq2rxq(struct virtqueue *vq) 165static int vq2rxq(struct virtqueue *vq)
166{ 166{
167 return virtqueue_get_queue_index(vq) / 2; 167 return vq->index / 2;
168} 168}
169 169
170static int rxq2vq(int rxq) 170static int rxq2vq(int rxq)
@@ -1006,7 +1006,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1006 kfree(buf); 1006 kfree(buf);
1007} 1007}
1008 1008
1009static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) 1009static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1010 __be16 proto, u16 vid)
1010{ 1011{
1011 struct virtnet_info *vi = netdev_priv(dev); 1012 struct virtnet_info *vi = netdev_priv(dev);
1012 struct scatterlist sg; 1013 struct scatterlist sg;
@@ -1019,7 +1020,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
1019 return 0; 1020 return 0;
1020} 1021}
1021 1022
1022static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) 1023static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1024 __be16 proto, u16 vid)
1023{ 1025{
1024 struct virtnet_info *vi = netdev_priv(dev); 1026 struct virtnet_info *vi = netdev_priv(dev);
1025 struct scatterlist sg; 1027 struct scatterlist sg;
@@ -1376,7 +1378,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
1376 if (vi->has_cvq) { 1378 if (vi->has_cvq) {
1377 vi->cvq = vqs[total_vqs - 1]; 1379 vi->cvq = vqs[total_vqs - 1];
1378 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 1380 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1379 vi->dev->features |= NETIF_F_HW_VLAN_FILTER; 1381 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1380 } 1382 }
1381 1383
1382 for (i = 0; i < vi->max_queue_pairs; i++) { 1384 for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1511,6 +1513,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1511 /* (!csum && gso) case will be fixed by register_netdev() */ 1513 /* (!csum && gso) case will be fixed by register_netdev() */
1512 } 1514 }
1513 1515
1516 dev->vlan_features = dev->features;
1517
1514 /* Configuration may specify what MAC to use. Otherwise random. */ 1518 /* Configuration may specify what MAC to use. Otherwise random. */
1515 if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, 1519 if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
1516 offsetof(struct virtio_net_config, mac), 1520 offsetof(struct virtio_net_config, mac),
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index eae7a03d4f9b..55a62cae2cb4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1293,7 +1293,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1293 skb->protocol = eth_type_trans(skb, adapter->netdev); 1293 skb->protocol = eth_type_trans(skb, adapter->netdev);
1294 1294
1295 if (unlikely(rcd->ts)) 1295 if (unlikely(rcd->ts))
1296 __vlan_hwaccel_put_tag(skb, rcd->tci); 1296 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1297 1297
1298 if (adapter->netdev->features & NETIF_F_LRO) 1298 if (adapter->netdev->features & NETIF_F_LRO)
1299 netif_receive_skb(skb); 1299 netif_receive_skb(skb);
@@ -1931,7 +1931,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1931 1931
1932 1932
1933static int 1933static int
1934vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1934vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1935{ 1935{
1936 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1936 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1937 1937
@@ -1953,7 +1953,7 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1953 1953
1954 1954
1955static int 1955static int
1956vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1956vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
1957{ 1957{
1958 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1958 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1959 1959
@@ -2107,7 +2107,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2107 devRead->misc.uptFeatures |= UPT1_F_LRO; 2107 devRead->misc.uptFeatures |= UPT1_F_LRO;
2108 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2108 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2109 } 2109 }
2110 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) 2110 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2111 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2111 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2112 2112
2113 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2113 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2669,14 +2669,15 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2669 struct net_device *netdev = adapter->netdev; 2669 struct net_device *netdev = adapter->netdev;
2670 2670
2671 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 2671 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2672 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | 2672 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2673 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 | 2673 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
2674 NETIF_F_LRO; 2674 NETIF_F_LRO;
2675 if (dma64) 2675 if (dma64)
2676 netdev->hw_features |= NETIF_F_HIGHDMA; 2676 netdev->hw_features |= NETIF_F_HIGHDMA;
2677 netdev->vlan_features = netdev->hw_features & 2677 netdev->vlan_features = netdev->hw_features &
2678 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 2678 ~(NETIF_F_HW_VLAN_CTAG_TX |
2679 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; 2679 NETIF_F_HW_VLAN_CTAG_RX);
2680 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
2680} 2681}
2681 2682
2682 2683
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 63a124340cbe..600ab56c0008 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -263,7 +263,8 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
263 unsigned long flags; 263 unsigned long flags;
264 netdev_features_t changed = features ^ netdev->features; 264 netdev_features_t changed = features ^ netdev->features;
265 265
266 if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { 266 if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
267 NETIF_F_HW_VLAN_CTAG_RX)) {
267 if (features & NETIF_F_RXCSUM) 268 if (features & NETIF_F_RXCSUM)
268 adapter->shared->devRead.misc.uptFeatures |= 269 adapter->shared->devRead.misc.uptFeatures |=
269 UPT1_F_RXCSUM; 270 UPT1_F_RXCSUM;
@@ -279,7 +280,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
279 adapter->shared->devRead.misc.uptFeatures &= 280 adapter->shared->devRead.misc.uptFeatures &=
280 ~UPT1_F_LRO; 281 ~UPT1_F_LRO;
281 282
282 if (features & NETIF_F_HW_VLAN_RX) 283 if (features & NETIF_F_HW_VLAN_CTAG_RX)
283 adapter->shared->devRead.misc.uptFeatures |= 284 adapter->shared->devRead.misc.uptFeatures |=
284 UPT1_F_RXVLAN; 285 UPT1_F_RXVLAN;
285 else 286 else
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7cee7a3068ec..ba81f3c39a83 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1,14 +1,13 @@
1/* 1/*
2 * VXLAN: Virtual eXtensible Local Area Network 2 * VXLAN: Virtual eXtensible Local Area Network
3 * 3 *
4 * Copyright (c) 2012 Vyatta Inc. 4 * Copyright (c) 2012-2013 Vyatta Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * TODO 10 * TODO
11 * - use IANA UDP port number (when defined)
12 * - IPv6 (not in RFC) 11 * - IPv6 (not in RFC)
13 */ 12 */
14 13
@@ -33,7 +32,7 @@
33#include <net/arp.h> 32#include <net/arp.h>
34#include <net/ndisc.h> 33#include <net/ndisc.h>
35#include <net/ip.h> 34#include <net/ip.h>
36#include <net/ipip.h> 35#include <net/ip_tunnels.h>
37#include <net/icmp.h> 36#include <net/icmp.h>
38#include <net/udp.h> 37#include <net/udp.h>
39#include <net/rtnetlink.h> 38#include <net/rtnetlink.h>
@@ -65,7 +64,10 @@ struct vxlanhdr {
65 __be32 vx_vni; 64 __be32 vx_vni;
66}; 65};
67 66
68/* UDP port for VXLAN traffic. */ 67/* UDP port for VXLAN traffic.
68 * The IANA assigned port is 4789, but the Linux default is 8472
69 * for compatability with early adopters.
70 */
69static unsigned int vxlan_port __read_mostly = 8472; 71static unsigned int vxlan_port __read_mostly = 8472;
70module_param_named(udp_port, vxlan_port, uint, 0444); 72module_param_named(udp_port, vxlan_port, uint, 0444);
71MODULE_PARM_DESC(udp_port, "Destination UDP port"); 73MODULE_PARM_DESC(udp_port, "Destination UDP port");
@@ -81,35 +83,34 @@ struct vxlan_net {
81 struct hlist_head vni_list[VNI_HASH_SIZE]; 83 struct hlist_head vni_list[VNI_HASH_SIZE];
82}; 84};
83 85
86struct vxlan_rdst {
87 struct rcu_head rcu;
88 __be32 remote_ip;
89 __be16 remote_port;
90 u32 remote_vni;
91 u32 remote_ifindex;
92 struct vxlan_rdst *remote_next;
93};
94
84/* Forwarding table entry */ 95/* Forwarding table entry */
85struct vxlan_fdb { 96struct vxlan_fdb {
86 struct hlist_node hlist; /* linked list of entries */ 97 struct hlist_node hlist; /* linked list of entries */
87 struct rcu_head rcu; 98 struct rcu_head rcu;
88 unsigned long updated; /* jiffies */ 99 unsigned long updated; /* jiffies */
89 unsigned long used; 100 unsigned long used;
90 __be32 remote_ip; 101 struct vxlan_rdst remote;
91 u16 state; /* see ndm_state */ 102 u16 state; /* see ndm_state */
103 u8 flags; /* see ndm_flags */
92 u8 eth_addr[ETH_ALEN]; 104 u8 eth_addr[ETH_ALEN];
93}; 105};
94 106
95/* Per-cpu network traffic stats */
96struct vxlan_stats {
97 u64 rx_packets;
98 u64 rx_bytes;
99 u64 tx_packets;
100 u64 tx_bytes;
101 struct u64_stats_sync syncp;
102};
103
104/* Pseudo network device */ 107/* Pseudo network device */
105struct vxlan_dev { 108struct vxlan_dev {
106 struct hlist_node hlist; 109 struct hlist_node hlist;
107 struct net_device *dev; 110 struct net_device *dev;
108 struct vxlan_stats __percpu *stats; 111 struct vxlan_rdst default_dst; /* default destination */
109 __u32 vni; /* virtual network id */
110 __be32 gaddr; /* multicast group */
111 __be32 saddr; /* source address */ 112 __be32 saddr; /* source address */
112 unsigned int link; /* link to multicast over */ 113 __be16 dst_port;
113 __u16 port_min; /* source port range */ 114 __u16 port_min; /* source port range */
114 __u16 port_max; 115 __u16 port_max;
115 __u8 tos; /* TOS override */ 116 __u8 tos; /* TOS override */
@@ -147,7 +148,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
147 struct vxlan_dev *vxlan; 148 struct vxlan_dev *vxlan;
148 149
149 hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) { 150 hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
150 if (vxlan->vni == id) 151 if (vxlan->default_dst.remote_vni == id)
151 return vxlan; 152 return vxlan;
152 } 153 }
153 154
@@ -157,7 +158,8 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
157/* Fill in neighbour message in skbuff. */ 158/* Fill in neighbour message in skbuff. */
158static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 159static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
159 const struct vxlan_fdb *fdb, 160 const struct vxlan_fdb *fdb,
160 u32 portid, u32 seq, int type, unsigned int flags) 161 u32 portid, u32 seq, int type, unsigned int flags,
162 const struct vxlan_rdst *rdst)
161{ 163{
162 unsigned long now = jiffies; 164 unsigned long now = jiffies;
163 struct nda_cacheinfo ci; 165 struct nda_cacheinfo ci;
@@ -176,19 +178,29 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
176 178
177 if (type == RTM_GETNEIGH) { 179 if (type == RTM_GETNEIGH) {
178 ndm->ndm_family = AF_INET; 180 ndm->ndm_family = AF_INET;
179 send_ip = fdb->remote_ip != 0; 181 send_ip = rdst->remote_ip != htonl(INADDR_ANY);
180 send_eth = !is_zero_ether_addr(fdb->eth_addr); 182 send_eth = !is_zero_ether_addr(fdb->eth_addr);
181 } else 183 } else
182 ndm->ndm_family = AF_BRIDGE; 184 ndm->ndm_family = AF_BRIDGE;
183 ndm->ndm_state = fdb->state; 185 ndm->ndm_state = fdb->state;
184 ndm->ndm_ifindex = vxlan->dev->ifindex; 186 ndm->ndm_ifindex = vxlan->dev->ifindex;
185 ndm->ndm_flags = NTF_SELF; 187 ndm->ndm_flags = fdb->flags;
186 ndm->ndm_type = NDA_DST; 188 ndm->ndm_type = NDA_DST;
187 189
188 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 190 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
189 goto nla_put_failure; 191 goto nla_put_failure;
190 192
191 if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip)) 193 if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
194 goto nla_put_failure;
195
196 if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
197 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
198 goto nla_put_failure;
199 if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
200 nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
201 goto nla_put_failure;
202 if (rdst->remote_ifindex &&
203 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
192 goto nla_put_failure; 204 goto nla_put_failure;
193 205
194 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 206 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -211,6 +223,9 @@ static inline size_t vxlan_nlmsg_size(void)
211 return NLMSG_ALIGN(sizeof(struct ndmsg)) 223 return NLMSG_ALIGN(sizeof(struct ndmsg))
212 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 224 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
213 + nla_total_size(sizeof(__be32)) /* NDA_DST */ 225 + nla_total_size(sizeof(__be32)) /* NDA_DST */
226 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
227 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
228 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
214 + nla_total_size(sizeof(struct nda_cacheinfo)); 229 + nla_total_size(sizeof(struct nda_cacheinfo));
215} 230}
216 231
@@ -225,7 +240,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
225 if (skb == NULL) 240 if (skb == NULL)
226 goto errout; 241 goto errout;
227 242
228 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0); 243 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
229 if (err < 0) { 244 if (err < 0) {
230 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 245 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
231 WARN_ON(err == -EMSGSIZE); 246 WARN_ON(err == -EMSGSIZE);
@@ -247,7 +262,8 @@ static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
247 262
248 memset(&f, 0, sizeof f); 263 memset(&f, 0, sizeof f);
249 f.state = NUD_STALE; 264 f.state = NUD_STALE;
250 f.remote_ip = ipa; /* goes to NDA_DST */ 265 f.remote.remote_ip = ipa; /* goes to NDA_DST */
266 f.remote.remote_vni = VXLAN_N_VID;
251 267
252 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 268 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
253} 269}
@@ -300,10 +316,39 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
300 return NULL; 316 return NULL;
301} 317}
302 318
319/* Add/update destinations for multicast */
320static int vxlan_fdb_append(struct vxlan_fdb *f,
321 __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
322{
323 struct vxlan_rdst *rd_prev, *rd;
324
325 rd_prev = NULL;
326 for (rd = &f->remote; rd; rd = rd->remote_next) {
327 if (rd->remote_ip == ip &&
328 rd->remote_port == port &&
329 rd->remote_vni == vni &&
330 rd->remote_ifindex == ifindex)
331 return 0;
332 rd_prev = rd;
333 }
334 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
335 if (rd == NULL)
336 return -ENOBUFS;
337 rd->remote_ip = ip;
338 rd->remote_port = port;
339 rd->remote_vni = vni;
340 rd->remote_ifindex = ifindex;
341 rd->remote_next = NULL;
342 rd_prev->remote_next = rd;
343 return 1;
344}
345
303/* Add new entry to forwarding table -- assumes lock held */ 346/* Add new entry to forwarding table -- assumes lock held */
304static int vxlan_fdb_create(struct vxlan_dev *vxlan, 347static int vxlan_fdb_create(struct vxlan_dev *vxlan,
305 const u8 *mac, __be32 ip, 348 const u8 *mac, __be32 ip,
306 __u16 state, __u16 flags) 349 __u16 state, __u16 flags,
350 __be16 port, __u32 vni, __u32 ifindex,
351 __u8 ndm_flags)
307{ 352{
308 struct vxlan_fdb *f; 353 struct vxlan_fdb *f;
309 int notify = 0; 354 int notify = 0;
@@ -320,6 +365,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
320 f->updated = jiffies; 365 f->updated = jiffies;
321 notify = 1; 366 notify = 1;
322 } 367 }
368 if (f->flags != ndm_flags) {
369 f->flags = ndm_flags;
370 f->updated = jiffies;
371 notify = 1;
372 }
373 if ((flags & NLM_F_APPEND) &&
374 is_multicast_ether_addr(f->eth_addr)) {
375 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
376
377 if (rc < 0)
378 return rc;
379 notify |= rc;
380 }
323 } else { 381 } else {
324 if (!(flags & NLM_F_CREATE)) 382 if (!(flags & NLM_F_CREATE))
325 return -ENOENT; 383 return -ENOENT;
@@ -333,8 +391,13 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
333 return -ENOMEM; 391 return -ENOMEM;
334 392
335 notify = 1; 393 notify = 1;
336 f->remote_ip = ip; 394 f->remote.remote_ip = ip;
395 f->remote.remote_port = port;
396 f->remote.remote_vni = vni;
397 f->remote.remote_ifindex = ifindex;
398 f->remote.remote_next = NULL;
337 f->state = state; 399 f->state = state;
400 f->flags = ndm_flags;
338 f->updated = f->used = jiffies; 401 f->updated = f->used = jiffies;
339 memcpy(f->eth_addr, mac, ETH_ALEN); 402 memcpy(f->eth_addr, mac, ETH_ALEN);
340 403
@@ -349,6 +412,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
349 return 0; 412 return 0;
350} 413}
351 414
415static void vxlan_fdb_free(struct rcu_head *head)
416{
417 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
418
419 while (f->remote.remote_next) {
420 struct vxlan_rdst *rd = f->remote.remote_next;
421
422 f->remote.remote_next = rd->remote_next;
423 kfree(rd);
424 }
425 kfree(f);
426}
427
352static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 428static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
353{ 429{
354 netdev_dbg(vxlan->dev, 430 netdev_dbg(vxlan->dev,
@@ -358,7 +434,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
358 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); 434 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
359 435
360 hlist_del_rcu(&f->hlist); 436 hlist_del_rcu(&f->hlist);
361 kfree_rcu(f, rcu); 437 call_rcu(&f->rcu, vxlan_fdb_free);
362} 438}
363 439
364/* Add static entry (via netlink) */ 440/* Add static entry (via netlink) */
@@ -367,7 +443,10 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
367 const unsigned char *addr, u16 flags) 443 const unsigned char *addr, u16 flags)
368{ 444{
369 struct vxlan_dev *vxlan = netdev_priv(dev); 445 struct vxlan_dev *vxlan = netdev_priv(dev);
446 struct net *net = dev_net(vxlan->dev);
370 __be32 ip; 447 __be32 ip;
448 __be16 port;
449 u32 vni, ifindex;
371 int err; 450 int err;
372 451
373 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 452 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -384,8 +463,36 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
384 463
385 ip = nla_get_be32(tb[NDA_DST]); 464 ip = nla_get_be32(tb[NDA_DST]);
386 465
466 if (tb[NDA_PORT]) {
467 if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
468 return -EINVAL;
469 port = nla_get_be16(tb[NDA_PORT]);
470 } else
471 port = vxlan->dst_port;
472
473 if (tb[NDA_VNI]) {
474 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
475 return -EINVAL;
476 vni = nla_get_u32(tb[NDA_VNI]);
477 } else
478 vni = vxlan->default_dst.remote_vni;
479
480 if (tb[NDA_IFINDEX]) {
481 struct net_device *tdev;
482
483 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
484 return -EINVAL;
485 ifindex = nla_get_u32(tb[NDA_IFINDEX]);
486 tdev = dev_get_by_index(net, ifindex);
487 if (!tdev)
488 return -EADDRNOTAVAIL;
489 dev_put(tdev);
490 } else
491 ifindex = 0;
492
387 spin_lock_bh(&vxlan->hash_lock); 493 spin_lock_bh(&vxlan->hash_lock);
388 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags); 494 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
495 port, vni, ifindex, ndm->ndm_flags);
389 spin_unlock_bh(&vxlan->hash_lock); 496 spin_unlock_bh(&vxlan->hash_lock);
390 497
391 return err; 498 return err;
@@ -423,18 +530,21 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
423 int err; 530 int err;
424 531
425 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 532 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
426 if (idx < cb->args[0]) 533 struct vxlan_rdst *rd;
427 goto skip; 534 for (rd = &f->remote; rd; rd = rd->remote_next) {
428 535 if (idx < cb->args[0])
429 err = vxlan_fdb_info(skb, vxlan, f, 536 goto skip;
430 NETLINK_CB(cb->skb).portid, 537
431 cb->nlh->nlmsg_seq, 538 err = vxlan_fdb_info(skb, vxlan, f,
432 RTM_NEWNEIGH, 539 NETLINK_CB(cb->skb).portid,
433 NLM_F_MULTI); 540 cb->nlh->nlmsg_seq,
434 if (err < 0) 541 RTM_NEWNEIGH,
435 break; 542 NLM_F_MULTI, rd);
543 if (err < 0)
544 break;
436skip: 545skip:
437 ++idx; 546 ++idx;
547 }
438 } 548 }
439 } 549 }
440 550
@@ -454,22 +564,25 @@ static void vxlan_snoop(struct net_device *dev,
454 f = vxlan_find_mac(vxlan, src_mac); 564 f = vxlan_find_mac(vxlan, src_mac);
455 if (likely(f)) { 565 if (likely(f)) {
456 f->used = jiffies; 566 f->used = jiffies;
457 if (likely(f->remote_ip == src_ip)) 567 if (likely(f->remote.remote_ip == src_ip))
458 return; 568 return;
459 569
460 if (net_ratelimit()) 570 if (net_ratelimit())
461 netdev_info(dev, 571 netdev_info(dev,
462 "%pM migrated from %pI4 to %pI4\n", 572 "%pM migrated from %pI4 to %pI4\n",
463 src_mac, &f->remote_ip, &src_ip); 573 src_mac, &f->remote.remote_ip, &src_ip);
464 574
465 f->remote_ip = src_ip; 575 f->remote.remote_ip = src_ip;
466 f->updated = jiffies; 576 f->updated = jiffies;
467 } else { 577 } else {
468 /* learned new entry */ 578 /* learned new entry */
469 spin_lock(&vxlan->hash_lock); 579 spin_lock(&vxlan->hash_lock);
470 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 580 err = vxlan_fdb_create(vxlan, src_mac, src_ip,
471 NUD_REACHABLE, 581 NUD_REACHABLE,
472 NLM_F_EXCL|NLM_F_CREATE); 582 NLM_F_EXCL|NLM_F_CREATE,
583 vxlan->dst_port,
584 vxlan->default_dst.remote_vni,
585 0, NTF_SELF);
473 spin_unlock(&vxlan->hash_lock); 586 spin_unlock(&vxlan->hash_lock);
474 } 587 }
475} 588}
@@ -490,7 +603,7 @@ static bool vxlan_group_used(struct vxlan_net *vn,
490 if (!netif_running(vxlan->dev)) 603 if (!netif_running(vxlan->dev))
491 continue; 604 continue;
492 605
493 if (vxlan->gaddr == this->gaddr) 606 if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip)
494 return true; 607 return true;
495 } 608 }
496 609
@@ -504,8 +617,8 @@ static int vxlan_join_group(struct net_device *dev)
504 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 617 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
505 struct sock *sk = vn->sock->sk; 618 struct sock *sk = vn->sock->sk;
506 struct ip_mreqn mreq = { 619 struct ip_mreqn mreq = {
507 .imr_multiaddr.s_addr = vxlan->gaddr, 620 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
508 .imr_ifindex = vxlan->link, 621 .imr_ifindex = vxlan->default_dst.remote_ifindex,
509 }; 622 };
510 int err; 623 int err;
511 624
@@ -532,8 +645,8 @@ static int vxlan_leave_group(struct net_device *dev)
532 int err = 0; 645 int err = 0;
533 struct sock *sk = vn->sock->sk; 646 struct sock *sk = vn->sock->sk;
534 struct ip_mreqn mreq = { 647 struct ip_mreqn mreq = {
535 .imr_multiaddr.s_addr = vxlan->gaddr, 648 .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
536 .imr_ifindex = vxlan->link, 649 .imr_ifindex = vxlan->default_dst.remote_ifindex,
537 }; 650 };
538 651
539 /* Only leave group when last vxlan is done. */ 652 /* Only leave group when last vxlan is done. */
@@ -556,7 +669,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
556 struct iphdr *oip; 669 struct iphdr *oip;
557 struct vxlanhdr *vxh; 670 struct vxlanhdr *vxh;
558 struct vxlan_dev *vxlan; 671 struct vxlan_dev *vxlan;
559 struct vxlan_stats *stats; 672 struct pcpu_tstats *stats;
560 __u32 vni; 673 __u32 vni;
561 int err; 674 int err;
562 675
@@ -632,7 +745,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
632 } 745 }
633 } 746 }
634 747
635 stats = this_cpu_ptr(vxlan->stats); 748 stats = this_cpu_ptr(vxlan->dev->tstats);
636 u64_stats_update_begin(&stats->syncp); 749 u64_stats_update_begin(&stats->syncp);
637 stats->rx_packets++; 750 stats->rx_packets++;
638 stats->rx_bytes += skb->len; 751 stats->rx_bytes += skb->len;
@@ -691,7 +804,6 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
691 n = neigh_lookup(&arp_tbl, &tip, dev); 804 n = neigh_lookup(&arp_tbl, &tip, dev);
692 805
693 if (n) { 806 if (n) {
694 struct vxlan_dev *vxlan = netdev_priv(dev);
695 struct vxlan_fdb *f; 807 struct vxlan_fdb *f;
696 struct sk_buff *reply; 808 struct sk_buff *reply;
697 809
@@ -701,7 +813,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
701 } 813 }
702 814
703 f = vxlan_find_mac(vxlan, n->ha); 815 f = vxlan_find_mac(vxlan, n->ha);
704 if (f && f->remote_ip == 0) { 816 if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
705 /* bridge-local neighbor */ 817 /* bridge-local neighbor */
706 neigh_release(n); 818 neigh_release(n);
707 goto out; 819 goto out;
@@ -763,28 +875,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
763 return false; 875 return false;
764} 876}
765 877
766/* Extract dsfield from inner protocol */
767static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
768 const struct sk_buff *skb)
769{
770 if (skb->protocol == htons(ETH_P_IP))
771 return iph->tos;
772 else if (skb->protocol == htons(ETH_P_IPV6))
773 return ipv6_get_dsfield((const struct ipv6hdr *)iph);
774 else
775 return 0;
776}
777
778/* Propogate ECN bits out */
779static inline u8 vxlan_ecn_encap(u8 tos,
780 const struct iphdr *iph,
781 const struct sk_buff *skb)
782{
783 u8 inner = vxlan_get_dsfield(iph, skb);
784
785 return INET_ECN_encapsulate(tos, inner);
786}
787
788static void vxlan_sock_free(struct sk_buff *skb) 878static void vxlan_sock_free(struct sk_buff *skb)
789{ 879{
790 sock_put(skb->sk); 880 sock_put(skb->sk);
@@ -807,7 +897,7 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
807 * better and maybe available from hardware 897 * better and maybe available from hardware
808 * secondary choice is to use jhash on the Ethernet header 898 * secondary choice is to use jhash on the Ethernet header
809 */ 899 */
810static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) 900static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
811{ 901{
812 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1; 902 unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
813 u32 hash; 903 u32 hash;
@@ -817,71 +907,78 @@ static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
817 hash = jhash(skb->data, 2 * ETH_ALEN, 907 hash = jhash(skb->data, 2 * ETH_ALEN,
818 (__force u32) skb->protocol); 908 (__force u32) skb->protocol);
819 909
820 return (((u64) hash * range) >> 32) + vxlan->port_min; 910 return htons((((u64) hash * range) >> 32) + vxlan->port_min);
821} 911}
822 912
823/* Transmit local packets over Vxlan 913static int handle_offloads(struct sk_buff *skb)
824 * 914{
825 * Outer IP header inherits ECN and DF from inner header. 915 if (skb_is_gso(skb)) {
826 * Outer UDP destination is the VXLAN assigned port. 916 int err = skb_unclone(skb, GFP_ATOMIC);
827 * source port is based on hash of flow 917 if (unlikely(err))
828 */ 918 return err;
829static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 919
920 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
921 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
922 skb->ip_summed = CHECKSUM_NONE;
923
924 return 0;
925}
926
927/* Bypass encapsulation if the destination is local */
928static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
929 struct vxlan_dev *dst_vxlan)
930{
931 struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
932 struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
933
934 skb->pkt_type = PACKET_HOST;
935 skb->encapsulation = 0;
936 skb->dev = dst_vxlan->dev;
937 __skb_pull(skb, skb_network_offset(skb));
938
939 if (dst_vxlan->flags & VXLAN_F_LEARN)
940 vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
941 eth_hdr(skb)->h_source);
942
943 u64_stats_update_begin(&tx_stats->syncp);
944 tx_stats->tx_packets++;
945 tx_stats->tx_bytes += skb->len;
946 u64_stats_update_end(&tx_stats->syncp);
947
948 if (netif_rx(skb) == NET_RX_SUCCESS) {
949 u64_stats_update_begin(&rx_stats->syncp);
950 rx_stats->rx_packets++;
951 rx_stats->rx_bytes += skb->len;
952 u64_stats_update_end(&rx_stats->syncp);
953 } else {
954 skb->dev->stats.rx_dropped++;
955 }
956}
957
958static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
959 struct vxlan_rdst *rdst, bool did_rsc)
830{ 960{
831 struct vxlan_dev *vxlan = netdev_priv(dev); 961 struct vxlan_dev *vxlan = netdev_priv(dev);
832 struct rtable *rt; 962 struct rtable *rt;
833 const struct iphdr *old_iph; 963 const struct iphdr *old_iph;
834 struct ethhdr *eth;
835 struct iphdr *iph; 964 struct iphdr *iph;
836 struct vxlanhdr *vxh; 965 struct vxlanhdr *vxh;
837 struct udphdr *uh; 966 struct udphdr *uh;
838 struct flowi4 fl4; 967 struct flowi4 fl4;
839 unsigned int pkt_len = skb->len;
840 __be32 dst; 968 __be32 dst;
841 __u16 src_port; 969 __be16 src_port, dst_port;
970 u32 vni;
842 __be16 df = 0; 971 __be16 df = 0;
843 __u8 tos, ttl; 972 __u8 tos, ttl;
844 int err;
845 bool did_rsc = false;
846 const struct vxlan_fdb *f;
847
848 skb_reset_mac_header(skb);
849 eth = eth_hdr(skb);
850 973
851 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP) 974 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
852 return arp_reduce(dev, skb); 975 vni = rdst->remote_vni;
853 else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP) 976 dst = rdst->remote_ip;
854 did_rsc = route_shortcircuit(dev, skb);
855
856 f = vxlan_find_mac(vxlan, eth->h_dest);
857 if (f == NULL) {
858 did_rsc = false;
859 dst = vxlan->gaddr;
860 if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
861 !is_multicast_ether_addr(eth->h_dest))
862 vxlan_fdb_miss(vxlan, eth->h_dest);
863 } else
864 dst = f->remote_ip;
865 977
866 if (!dst) { 978 if (!dst) {
867 if (did_rsc) { 979 if (did_rsc) {
868 __skb_pull(skb, skb_network_offset(skb));
869 skb->ip_summed = CHECKSUM_NONE;
870 skb->pkt_type = PACKET_HOST;
871
872 /* short-circuited back to local bridge */ 980 /* short-circuited back to local bridge */
873 if (netif_rx(skb) == NET_RX_SUCCESS) { 981 vxlan_encap_bypass(skb, vxlan, vxlan);
874 struct vxlan_stats *stats =
875 this_cpu_ptr(vxlan->stats);
876
877 u64_stats_update_begin(&stats->syncp);
878 stats->tx_packets++;
879 stats->tx_bytes += pkt_len;
880 u64_stats_update_end(&stats->syncp);
881 } else {
882 dev->stats.tx_errors++;
883 dev->stats.tx_aborted_errors++;
884 }
885 return NETDEV_TX_OK; 982 return NETDEV_TX_OK;
886 } 983 }
887 goto drop; 984 goto drop;
@@ -904,12 +1001,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
904 1001
905 tos = vxlan->tos; 1002 tos = vxlan->tos;
906 if (tos == 1) 1003 if (tos == 1)
907 tos = vxlan_get_dsfield(old_iph, skb); 1004 tos = ip_tunnel_get_dsfield(old_iph, skb);
908 1005
909 src_port = vxlan_src_port(vxlan, skb); 1006 src_port = vxlan_src_port(vxlan, skb);
910 1007
911 memset(&fl4, 0, sizeof(fl4)); 1008 memset(&fl4, 0, sizeof(fl4));
912 fl4.flowi4_oif = vxlan->link; 1009 fl4.flowi4_oif = rdst->remote_ifindex;
913 fl4.flowi4_tos = RT_TOS(tos); 1010 fl4.flowi4_tos = RT_TOS(tos);
914 fl4.daddr = dst; 1011 fl4.daddr = dst;
915 fl4.saddr = vxlan->saddr; 1012 fl4.saddr = vxlan->saddr;
@@ -928,6 +1025,19 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
928 goto tx_error; 1025 goto tx_error;
929 } 1026 }
930 1027
1028 /* Bypass encapsulation if the destination is local */
1029 if (rt->rt_flags & RTCF_LOCAL &&
1030 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
1031 struct vxlan_dev *dst_vxlan;
1032
1033 ip_rt_put(rt);
1034 dst_vxlan = vxlan_find_vni(dev_net(dev), vni);
1035 if (!dst_vxlan)
1036 goto tx_error;
1037 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1038 return NETDEV_TX_OK;
1039 }
1040
931 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1041 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
932 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 1042 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
933 IPSKB_REROUTED); 1043 IPSKB_REROUTED);
@@ -936,14 +1046,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
936 1046
937 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1047 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
938 vxh->vx_flags = htonl(VXLAN_FLAGS); 1048 vxh->vx_flags = htonl(VXLAN_FLAGS);
939 vxh->vx_vni = htonl(vxlan->vni << 8); 1049 vxh->vx_vni = htonl(vni << 8);
940 1050
941 __skb_push(skb, sizeof(*uh)); 1051 __skb_push(skb, sizeof(*uh));
942 skb_reset_transport_header(skb); 1052 skb_reset_transport_header(skb);
943 uh = udp_hdr(skb); 1053 uh = udp_hdr(skb);
944 1054
945 uh->dest = htons(vxlan_port); 1055 uh->dest = dst_port;
946 uh->source = htons(src_port); 1056 uh->source = src_port;
947 1057
948 uh->len = htons(skb->len); 1058 uh->len = htons(skb->len);
949 uh->check = 0; 1059 uh->check = 0;
@@ -955,7 +1065,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
955 iph->ihl = sizeof(struct iphdr) >> 2; 1065 iph->ihl = sizeof(struct iphdr) >> 2;
956 iph->frag_off = df; 1066 iph->frag_off = df;
957 iph->protocol = IPPROTO_UDP; 1067 iph->protocol = IPPROTO_UDP;
958 iph->tos = vxlan_ecn_encap(tos, old_iph, skb); 1068 iph->tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
959 iph->daddr = dst; 1069 iph->daddr = dst;
960 iph->saddr = fl4.saddr; 1070 iph->saddr = fl4.saddr;
961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 1071 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -965,22 +1075,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
965 1075
966 vxlan_set_owner(dev, skb); 1076 vxlan_set_owner(dev, skb);
967 1077
968 /* See iptunnel_xmit() */ 1078 if (handle_offloads(skb))
969 if (skb->ip_summed != CHECKSUM_PARTIAL) 1079 goto drop;
970 skb->ip_summed = CHECKSUM_NONE;
971
972 err = ip_local_out(skb);
973 if (likely(net_xmit_eval(err) == 0)) {
974 struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
975 1080
976 u64_stats_update_begin(&stats->syncp); 1081 iptunnel_xmit(skb, dev);
977 stats->tx_packets++;
978 stats->tx_bytes += pkt_len;
979 u64_stats_update_end(&stats->syncp);
980 } else {
981 dev->stats.tx_errors++;
982 dev->stats.tx_aborted_errors++;
983 }
984 return NETDEV_TX_OK; 1082 return NETDEV_TX_OK;
985 1083
986drop: 1084drop:
@@ -994,6 +1092,65 @@ tx_free:
994 return NETDEV_TX_OK; 1092 return NETDEV_TX_OK;
995} 1093}
996 1094
1095/* Transmit local packets over Vxlan
1096 *
1097 * Outer IP header inherits ECN and DF from inner header.
1098 * Outer UDP destination is the VXLAN assigned port.
1099 * source port is based on hash of flow
1100 */
1101static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1102{
1103 struct vxlan_dev *vxlan = netdev_priv(dev);
1104 struct ethhdr *eth;
1105 bool did_rsc = false;
1106 struct vxlan_rdst *rdst0, *rdst;
1107 struct vxlan_fdb *f;
1108 int rc1, rc;
1109
1110 skb_reset_mac_header(skb);
1111 eth = eth_hdr(skb);
1112
1113 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
1114 return arp_reduce(dev, skb);
1115
1116 f = vxlan_find_mac(vxlan, eth->h_dest);
1117 did_rsc = false;
1118
1119 if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
1120 ntohs(eth->h_proto) == ETH_P_IP) {
1121 did_rsc = route_shortcircuit(dev, skb);
1122 if (did_rsc)
1123 f = vxlan_find_mac(vxlan, eth->h_dest);
1124 }
1125
1126 if (f == NULL) {
1127 rdst0 = &vxlan->default_dst;
1128
1129 if (rdst0->remote_ip == htonl(INADDR_ANY) &&
1130 (vxlan->flags & VXLAN_F_L2MISS) &&
1131 !is_multicast_ether_addr(eth->h_dest))
1132 vxlan_fdb_miss(vxlan, eth->h_dest);
1133 } else
1134 rdst0 = &f->remote;
1135
1136 rc = NETDEV_TX_OK;
1137
1138 /* if there are multiple destinations, send copies */
1139 for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
1140 struct sk_buff *skb1;
1141
1142 skb1 = skb_clone(skb, GFP_ATOMIC);
1143 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1144 if (rc == NETDEV_TX_OK)
1145 rc = rc1;
1146 }
1147
1148 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
1149 if (rc == NETDEV_TX_OK)
1150 rc = rc1;
1151 return rc;
1152}
1153
997/* Walk the forwarding table and purge stale entries */ 1154/* Walk the forwarding table and purge stale entries */
998static void vxlan_cleanup(unsigned long arg) 1155static void vxlan_cleanup(unsigned long arg)
999{ 1156{
@@ -1034,10 +1191,8 @@ static void vxlan_cleanup(unsigned long arg)
1034/* Setup stats when device is created */ 1191/* Setup stats when device is created */
1035static int vxlan_init(struct net_device *dev) 1192static int vxlan_init(struct net_device *dev)
1036{ 1193{
1037 struct vxlan_dev *vxlan = netdev_priv(dev); 1194 dev->tstats = alloc_percpu(struct pcpu_tstats);
1038 1195 if (!dev->tstats)
1039 vxlan->stats = alloc_percpu(struct vxlan_stats);
1040 if (!vxlan->stats)
1041 return -ENOMEM; 1196 return -ENOMEM;
1042 1197
1043 return 0; 1198 return 0;
@@ -1049,7 +1204,7 @@ static int vxlan_open(struct net_device *dev)
1049 struct vxlan_dev *vxlan = netdev_priv(dev); 1204 struct vxlan_dev *vxlan = netdev_priv(dev);
1050 int err; 1205 int err;
1051 1206
1052 if (vxlan->gaddr) { 1207 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
1053 err = vxlan_join_group(dev); 1208 err = vxlan_join_group(dev);
1054 if (err) 1209 if (err)
1055 return err; 1210 return err;
@@ -1083,7 +1238,7 @@ static int vxlan_stop(struct net_device *dev)
1083{ 1238{
1084 struct vxlan_dev *vxlan = netdev_priv(dev); 1239 struct vxlan_dev *vxlan = netdev_priv(dev);
1085 1240
1086 if (vxlan->gaddr) 1241 if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)))
1087 vxlan_leave_group(dev); 1242 vxlan_leave_group(dev);
1088 1243
1089 del_timer_sync(&vxlan->age_timer); 1244 del_timer_sync(&vxlan->age_timer);
@@ -1093,49 +1248,6 @@ static int vxlan_stop(struct net_device *dev)
1093 return 0; 1248 return 0;
1094} 1249}
1095 1250
1096/* Merge per-cpu statistics */
1097static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
1098 struct rtnl_link_stats64 *stats)
1099{
1100 struct vxlan_dev *vxlan = netdev_priv(dev);
1101 struct vxlan_stats tmp, sum = { 0 };
1102 unsigned int cpu;
1103
1104 for_each_possible_cpu(cpu) {
1105 unsigned int start;
1106 const struct vxlan_stats *stats
1107 = per_cpu_ptr(vxlan->stats, cpu);
1108
1109 do {
1110 start = u64_stats_fetch_begin_bh(&stats->syncp);
1111 memcpy(&tmp, stats, sizeof(tmp));
1112 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1113
1114 sum.tx_bytes += tmp.tx_bytes;
1115 sum.tx_packets += tmp.tx_packets;
1116 sum.rx_bytes += tmp.rx_bytes;
1117 sum.rx_packets += tmp.rx_packets;
1118 }
1119
1120 stats->tx_bytes = sum.tx_bytes;
1121 stats->tx_packets = sum.tx_packets;
1122 stats->rx_bytes = sum.rx_bytes;
1123 stats->rx_packets = sum.rx_packets;
1124
1125 stats->multicast = dev->stats.multicast;
1126 stats->rx_length_errors = dev->stats.rx_length_errors;
1127 stats->rx_frame_errors = dev->stats.rx_frame_errors;
1128 stats->rx_errors = dev->stats.rx_errors;
1129
1130 stats->tx_dropped = dev->stats.tx_dropped;
1131 stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
1132 stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
1133 stats->collisions = dev->stats.collisions;
1134 stats->tx_errors = dev->stats.tx_errors;
1135
1136 return stats;
1137}
1138
1139/* Stub, nothing needs to be done. */ 1251/* Stub, nothing needs to be done. */
1140static void vxlan_set_multicast_list(struct net_device *dev) 1252static void vxlan_set_multicast_list(struct net_device *dev)
1141{ 1253{
@@ -1146,7 +1258,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
1146 .ndo_open = vxlan_open, 1258 .ndo_open = vxlan_open,
1147 .ndo_stop = vxlan_stop, 1259 .ndo_stop = vxlan_stop,
1148 .ndo_start_xmit = vxlan_xmit, 1260 .ndo_start_xmit = vxlan_xmit,
1149 .ndo_get_stats64 = vxlan_stats64, 1261 .ndo_get_stats64 = ip_tunnel_get_stats64,
1150 .ndo_set_rx_mode = vxlan_set_multicast_list, 1262 .ndo_set_rx_mode = vxlan_set_multicast_list,
1151 .ndo_change_mtu = eth_change_mtu, 1263 .ndo_change_mtu = eth_change_mtu,
1152 .ndo_validate_addr = eth_validate_addr, 1264 .ndo_validate_addr = eth_validate_addr,
@@ -1163,9 +1275,7 @@ static struct device_type vxlan_type = {
1163 1275
1164static void vxlan_free(struct net_device *dev) 1276static void vxlan_free(struct net_device *dev)
1165{ 1277{
1166 struct vxlan_dev *vxlan = netdev_priv(dev); 1278 free_percpu(dev->tstats);
1167
1168 free_percpu(vxlan->stats);
1169 free_netdev(dev); 1279 free_netdev(dev);
1170} 1280}
1171 1281
@@ -1189,8 +1299,10 @@ static void vxlan_setup(struct net_device *dev)
1189 dev->features |= NETIF_F_NETNS_LOCAL; 1299 dev->features |= NETIF_F_NETNS_LOCAL;
1190 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 1300 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1191 dev->features |= NETIF_F_RXCSUM; 1301 dev->features |= NETIF_F_RXCSUM;
1302 dev->features |= NETIF_F_GSO_SOFTWARE;
1192 1303
1193 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 1304 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1305 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1194 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1306 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1195 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1307 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1196 1308
@@ -1203,6 +1315,7 @@ static void vxlan_setup(struct net_device *dev)
1203 inet_get_local_port_range(&low, &high); 1315 inet_get_local_port_range(&low, &high);
1204 vxlan->port_min = low; 1316 vxlan->port_min = low;
1205 vxlan->port_max = high; 1317 vxlan->port_max = high;
1318 vxlan->dst_port = htons(vxlan_port);
1206 1319
1207 vxlan->dev = dev; 1320 vxlan->dev = dev;
1208 1321
@@ -1225,6 +1338,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
1225 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 1338 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
1226 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 1339 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
1227 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 1340 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
1341 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
1228}; 1342};
1229 1343
1230static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 1344static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1250,14 +1364,6 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
1250 return -ERANGE; 1364 return -ERANGE;
1251 } 1365 }
1252 1366
1253 if (data[IFLA_VXLAN_GROUP]) {
1254 __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1255 if (!IN_MULTICAST(ntohl(gaddr))) {
1256 pr_debug("group address is not IPv4 multicast\n");
1257 return -EADDRNOTAVAIL;
1258 }
1259 }
1260
1261 if (data[IFLA_VXLAN_PORT_RANGE]) { 1367 if (data[IFLA_VXLAN_PORT_RANGE]) {
1262 const struct ifla_vxlan_port_range *p 1368 const struct ifla_vxlan_port_range *p
1263 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 1369 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
@@ -1288,6 +1394,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1288 struct nlattr *tb[], struct nlattr *data[]) 1394 struct nlattr *tb[], struct nlattr *data[])
1289{ 1395{
1290 struct vxlan_dev *vxlan = netdev_priv(dev); 1396 struct vxlan_dev *vxlan = netdev_priv(dev);
1397 struct vxlan_rdst *dst = &vxlan->default_dst;
1291 __u32 vni; 1398 __u32 vni;
1292 int err; 1399 int err;
1293 1400
@@ -1299,21 +1406,21 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1299 pr_info("duplicate VNI %u\n", vni); 1406 pr_info("duplicate VNI %u\n", vni);
1300 return -EEXIST; 1407 return -EEXIST;
1301 } 1408 }
1302 vxlan->vni = vni; 1409 dst->remote_vni = vni;
1303 1410
1304 if (data[IFLA_VXLAN_GROUP]) 1411 if (data[IFLA_VXLAN_GROUP])
1305 vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]); 1412 dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);
1306 1413
1307 if (data[IFLA_VXLAN_LOCAL]) 1414 if (data[IFLA_VXLAN_LOCAL])
1308 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]); 1415 vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
1309 1416
1310 if (data[IFLA_VXLAN_LINK] && 1417 if (data[IFLA_VXLAN_LINK] &&
1311 (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) { 1418 (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
1312 struct net_device *lowerdev 1419 struct net_device *lowerdev
1313 = __dev_get_by_index(net, vxlan->link); 1420 = __dev_get_by_index(net, dst->remote_ifindex);
1314 1421
1315 if (!lowerdev) { 1422 if (!lowerdev) {
1316 pr_info("ifindex %d does not exist\n", vxlan->link); 1423 pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
1317 return -ENODEV; 1424 return -ENODEV;
1318 } 1425 }
1319 1426
@@ -1361,11 +1468,14 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1361 vxlan->port_max = ntohs(p->high); 1468 vxlan->port_max = ntohs(p->high);
1362 } 1469 }
1363 1470
1471 if (data[IFLA_VXLAN_PORT])
1472 vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
1473
1364 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 1474 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
1365 1475
1366 err = register_netdevice(dev); 1476 err = register_netdevice(dev);
1367 if (!err) 1477 if (!err)
1368 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni)); 1478 hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni));
1369 1479
1370 return err; 1480 return err;
1371} 1481}
@@ -1396,24 +1506,26 @@ static size_t vxlan_get_size(const struct net_device *dev)
1396 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 1506 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1397 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 1507 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1398 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 1508 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
1509 nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */
1399 0; 1510 0;
1400} 1511}
1401 1512
1402static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) 1513static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1403{ 1514{
1404 const struct vxlan_dev *vxlan = netdev_priv(dev); 1515 const struct vxlan_dev *vxlan = netdev_priv(dev);
1516 const struct vxlan_rdst *dst = &vxlan->default_dst;
1405 struct ifla_vxlan_port_range ports = { 1517 struct ifla_vxlan_port_range ports = {
1406 .low = htons(vxlan->port_min), 1518 .low = htons(vxlan->port_min),
1407 .high = htons(vxlan->port_max), 1519 .high = htons(vxlan->port_max),
1408 }; 1520 };
1409 1521
1410 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) 1522 if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
1411 goto nla_put_failure; 1523 goto nla_put_failure;
1412 1524
1413 if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) 1525 if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
1414 goto nla_put_failure; 1526 goto nla_put_failure;
1415 1527
1416 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link)) 1528 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
1417 goto nla_put_failure; 1529 goto nla_put_failure;
1418 1530
1419 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 1531 if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
@@ -1431,7 +1543,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1431 nla_put_u8(skb, IFLA_VXLAN_L3MISS, 1543 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
1432 !!(vxlan->flags & VXLAN_F_L3MISS)) || 1544 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
1433 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || 1545 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
1434 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) 1546 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
1547 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
1435 goto nla_put_failure; 1548 goto nla_put_failure;
1436 1549
1437 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 1550 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
@@ -1555,10 +1668,11 @@ static void __exit vxlan_cleanup_module(void)
1555{ 1668{
1556 rtnl_link_unregister(&vxlan_link_ops); 1669 rtnl_link_unregister(&vxlan_link_ops);
1557 unregister_pernet_device(&vxlan_net_ops); 1670 unregister_pernet_device(&vxlan_net_ops);
1671 rcu_barrier();
1558} 1672}
1559module_exit(vxlan_cleanup_module); 1673module_exit(vxlan_cleanup_module);
1560 1674
1561MODULE_LICENSE("GPL"); 1675MODULE_LICENSE("GPL");
1562MODULE_VERSION(VXLAN_VERSION); 1676MODULE_VERSION(VXLAN_VERSION);
1563MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>"); 1677MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
1564MODULE_ALIAS_RTNL_LINK("vxlan"); 1678MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3d339e04efb7..f9a24e599dee 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1293,7 +1293,8 @@ static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
1293{ 1293{
1294 struct adm8211_priv *priv = dev->priv; 1294 struct adm8211_priv *priv = dev->priv;
1295 struct ieee80211_conf *conf = &dev->conf; 1295 struct ieee80211_conf *conf = &dev->conf;
1296 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1296 int channel =
1297 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
1297 1298
1298 if (channel != priv->channel) { 1299 if (channel != priv->channel) {
1299 priv->channel = channel; 1300 priv->channel = channel;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 5ac5f7ae2721..34c8a33cac06 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1943,12 +1943,12 @@ static int at76_config(struct ieee80211_hw *hw, u32 changed)
1943 struct at76_priv *priv = hw->priv; 1943 struct at76_priv *priv = hw->priv;
1944 1944
1945 at76_dbg(DBG_MAC80211, "%s(): channel %d", 1945 at76_dbg(DBG_MAC80211, "%s(): channel %d",
1946 __func__, hw->conf.channel->hw_value); 1946 __func__, hw->conf.chandef.chan->hw_value);
1947 at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:"); 1947 at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:");
1948 1948
1949 mutex_lock(&priv->mtx); 1949 mutex_lock(&priv->mtx);
1950 1950
1951 priv->channel = hw->conf.channel->hw_value; 1951 priv->channel = hw->conf.chandef.chan->hw_value;
1952 1952
1953 if (is_valid_ether_addr(priv->bssid)) 1953 if (is_valid_ether_addr(priv->bssid))
1954 at76_join(priv); 1954 at76_join(priv);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 7157f7d311c5..17d7fece35d2 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -457,14 +457,14 @@ static int ar5523_set_chan(struct ar5523 *ar)
457 memset(&reset, 0, sizeof(reset)); 457 memset(&reset, 0, sizeof(reset));
458 reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ); 458 reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
459 reset.flags |= cpu_to_be32(UATH_CHAN_OFDM); 459 reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
460 reset.freq = cpu_to_be32(conf->channel->center_freq); 460 reset.freq = cpu_to_be32(conf->chandef.chan->center_freq);
461 reset.maxrdpower = cpu_to_be32(50); /* XXX */ 461 reset.maxrdpower = cpu_to_be32(50); /* XXX */
462 reset.channelchange = cpu_to_be32(1); 462 reset.channelchange = cpu_to_be32(1);
463 reset.keeprccontent = cpu_to_be32(0); 463 reset.keeprccontent = cpu_to_be32(0);
464 464
465 ar5523_dbg(ar, "set chan flags 0x%x freq %d\n", 465 ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
466 be32_to_cpu(reset.flags), 466 be32_to_cpu(reset.flags),
467 conf->channel->center_freq); 467 conf->chandef.chan->center_freq);
468 return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0); 468 return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
469} 469}
470 470
@@ -594,7 +594,7 @@ static void ar5523_data_rx_cb(struct urb *urb)
594 rx_status = IEEE80211_SKB_RXCB(data->skb); 594 rx_status = IEEE80211_SKB_RXCB(data->skb);
595 memset(rx_status, 0, sizeof(*rx_status)); 595 memset(rx_status, 0, sizeof(*rx_status));
596 rx_status->freq = be32_to_cpu(desc->channel); 596 rx_status->freq = be32_to_cpu(desc->channel);
597 rx_status->band = hw->conf.channel->band; 597 rx_status->band = hw->conf.chandef.chan->band;
598 rx_status->signal = -95 + be32_to_cpu(desc->rssi); 598 rx_status->signal = -95 + be32_to_cpu(desc->rssi);
599 599
600 ieee80211_rx_irqsafe(hw, data->skb); 600 ieee80211_rx_irqsafe(hw, data->skb);
@@ -1091,7 +1091,7 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1091 return ret; 1091 return ret;
1092} 1092}
1093 1093
1094static void ar5523_flush(struct ieee80211_hw *hw, bool drop) 1094static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1095{ 1095{
1096 struct ar5523 *ar = hw->priv; 1096 struct ar5523 *ar = hw->priv;
1097 1097
@@ -1153,13 +1153,13 @@ static int ar5523_get_wlan_mode(struct ar5523 *ar,
1153 struct ieee80211_sta *sta; 1153 struct ieee80211_sta *sta;
1154 u32 sta_rate_set; 1154 u32 sta_rate_set;
1155 1155
1156 band = ar->hw->wiphy->bands[ar->hw->conf.channel->band]; 1156 band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
1157 sta = ieee80211_find_sta(ar->vif, bss_conf->bssid); 1157 sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
1158 if (!sta) { 1158 if (!sta) {
1159 ar5523_info(ar, "STA not found!\n"); 1159 ar5523_info(ar, "STA not found!\n");
1160 return WLAN_MODE_11b; 1160 return WLAN_MODE_11b;
1161 } 1161 }
1162 sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band]; 1162 sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
1163 1163
1164 for (bit = 0; bit < band->n_bitrates; bit++) { 1164 for (bit = 0; bit < band->n_bitrates; bit++) {
1165 if (sta_rate_set & 1) { 1165 if (sta_rate_set & 1) {
@@ -1197,11 +1197,11 @@ static void ar5523_create_rateset(struct ar5523 *ar,
1197 ar5523_info(ar, "STA not found. Cannot set rates\n"); 1197 ar5523_info(ar, "STA not found. Cannot set rates\n");
1198 sta_rate_set = bss_conf->basic_rates; 1198 sta_rate_set = bss_conf->basic_rates;
1199 } else 1199 } else
1200 sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band]; 1200 sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band];
1201 1201
1202 ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set); 1202 ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
1203 1203
1204 band = ar->hw->wiphy->bands[ar->hw->conf.channel->band]; 1204 band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
1205 for (bit = 0; bit < band->n_bitrates; bit++) { 1205 for (bit = 0; bit < band->n_bitrates; bit++) {
1206 BUG_ON(i >= AR5523_MAX_NRATES); 1206 BUG_ON(i >= AR5523_MAX_NRATES);
1207 ar5523_dbg(ar, "Considering rate %d : %d\n", 1207 ar5523_dbg(ar, "Considering rate %d : %d\n",
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index f60b3899afc4..1b3a34f7f224 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -10,6 +10,7 @@ ath5k-y += phy.o
10ath5k-y += reset.o 10ath5k-y += reset.o
11ath5k-y += attach.o 11ath5k-y += attach.o
12ath5k-y += base.o 12ath5k-y += base.o
13CFLAGS_base.o += -I$(src)
13ath5k-y += led.o 14ath5k-y += led.o
14ath5k-y += rfkill.o 15ath5k-y += rfkill.o
15ath5k-y += ani.o 16ath5k-y += ani.o
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 3150def17193..2d691b8b95b9 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1523,7 +1523,8 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah);
1523/* EEPROM access functions */ 1523/* EEPROM access functions */
1524int ath5k_eeprom_init(struct ath5k_hw *ah); 1524int ath5k_eeprom_init(struct ath5k_hw *ah);
1525void ath5k_eeprom_detach(struct ath5k_hw *ah); 1525void ath5k_eeprom_detach(struct ath5k_hw *ah);
1526 1526int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
1527 struct ieee80211_channel *channel);
1527 1528
1528/* Protocol Control Unit Functions */ 1529/* Protocol Control Unit Functions */
1529/* Helpers */ 1530/* Helpers */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 1d264c0f5a9b..9b20d9ee2719 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2639,7 +2639,7 @@ int ath5k_start(struct ieee80211_hw *hw)
2639 * be followed by initialization of the appropriate bits 2639 * be followed by initialization of the appropriate bits
2640 * and then setup of the interrupt mask. 2640 * and then setup of the interrupt mask.
2641 */ 2641 */
2642 ah->curchan = ah->hw->conf.channel; 2642 ah->curchan = ah->hw->conf.chandef.chan;
2643 ah->imask = AR5K_INT_RXOK 2643 ah->imask = AR5K_INT_RXOK
2644 | AR5K_INT_RXERR 2644 | AR5K_INT_RXERR
2645 | AR5K_INT_RXEOL 2645 | AR5K_INT_RXEOL
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index b7e0258887e7..94d34ee02265 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1779,7 +1779,8 @@ ath5k_eeprom_detach(struct ath5k_hw *ah)
1779} 1779}
1780 1780
1781int 1781int
1782ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel) 1782ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
1783 struct ieee80211_channel *channel)
1783{ 1784{
1784 switch (channel->hw_value) { 1785 switch (channel->hw_value) {
1785 case AR5K_MODE_11A: 1786 case AR5K_MODE_11A:
@@ -1789,6 +1790,7 @@ ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
1789 case AR5K_MODE_11B: 1790 case AR5K_MODE_11B:
1790 return AR5K_EEPROM_MODE_11B; 1791 return AR5K_EEPROM_MODE_11B;
1791 default: 1792 default:
1792 return -1; 1793 ATH5K_WARN(ah, "channel is not A/B/G!");
1794 return AR5K_EEPROM_MODE_11A;
1793 } 1795 }
1794} 1796}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 94a9bbea6874..693296ee9693 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -493,6 +493,3 @@ struct ath5k_eeprom_info {
493 /* Antenna raw switch tables */ 493 /* Antenna raw switch tables */
494 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 494 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
495}; 495};
496
497int
498ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 4264341533ea..06f86f435711 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
202 mutex_lock(&ah->lock); 202 mutex_lock(&ah->lock);
203 203
204 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 204 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
205 ret = ath5k_chan_set(ah, conf->channel); 205 ret = ath5k_chan_set(ah, conf->chandef.chan);
206 if (ret < 0) 206 if (ret < 0)
207 goto unlock; 207 goto unlock;
208 } 208 }
@@ -678,7 +678,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
678 678
679 memcpy(survey, &ah->survey, sizeof(*survey)); 679 memcpy(survey, &ah->survey, sizeof(*survey));
680 680
681 survey->channel = conf->channel; 681 survey->channel = conf->chandef.chan;
682 survey->noise = ah->ah_noise_floor; 682 survey->noise = ah->ah_noise_floor;
683 survey->filled = SURVEY_INFO_NOISE_DBM | 683 survey->filled = SURVEY_INFO_NOISE_DBM |
684 SURVEY_INFO_CHANNEL_TIME | 684 SURVEY_INFO_CHANNEL_TIME |
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index a78afa98c650..d6bc7cb61bfb 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1612,11 +1612,7 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1612 1612
1613 ah->ah_cal_mask |= AR5K_CALIBRATION_NF; 1613 ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
1614 1614
1615 ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); 1615 ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
1616 if (WARN_ON(ee_mode < 0)) {
1617 ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
1618 return;
1619 }
1620 1616
1621 /* completed NF calibration, test threshold */ 1617 /* completed NF calibration, test threshold */
1622 nf = ath5k_hw_read_measured_noise_floor(ah); 1618 nf = ath5k_hw_read_measured_noise_floor(ah);
@@ -2317,12 +2313,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
2317 2313
2318 def_ant = ah->ah_def_ant; 2314 def_ant = ah->ah_def_ant;
2319 2315
2320 ee_mode = ath5k_eeprom_mode_from_channel(channel); 2316 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
2321 if (ee_mode < 0) {
2322 ATH5K_ERR(ah,
2323 "invalid channel: %d\n", channel->center_freq);
2324 return;
2325 }
2326 2317
2327 switch (ant_mode) { 2318 switch (ant_mode) {
2328 case AR5K_ANTMODE_DEFAULT: 2319 case AR5K_ANTMODE_DEFAULT:
@@ -3622,12 +3613,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3622 return -EINVAL; 3613 return -EINVAL;
3623 } 3614 }
3624 3615
3625 ee_mode = ath5k_eeprom_mode_from_channel(channel); 3616 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
3626 if (ee_mode < 0) {
3627 ATH5K_ERR(ah,
3628 "invalid channel: %d\n", channel->center_freq);
3629 return -EINVAL;
3630 }
3631 3617
3632 /* Initialize TX power table */ 3618 /* Initialize TX power table */
3633 switch (ah->ah_radio) { 3619 switch (ah->ah_radio) {
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index e2d8b2cf19eb..a3399c4f13a9 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -984,9 +984,7 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
984 if (ah->ah_version == AR5K_AR5210) 984 if (ah->ah_version == AR5K_AR5210)
985 return; 985 return;
986 986
987 ee_mode = ath5k_eeprom_mode_from_channel(channel); 987 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
988 if (WARN_ON(ee_mode < 0))
989 return;
990 988
991 /* Adjust power delta for channel 14 */ 989 /* Adjust power delta for channel 14 */
992 if (channel->center_freq == 2484) 990 if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 00f015819344..c6eef519bb61 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -97,7 +97,7 @@ TRACE_EVENT(ath5k_tx_complete,
97#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__) 97#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
98 98
99#undef TRACE_INCLUDE_PATH 99#undef TRACE_INCLUDE_PATH
100#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k 100#define TRACE_INCLUDE_PATH .
101#undef TRACE_INCLUDE_FILE 101#undef TRACE_INCLUDE_FILE
102#define TRACE_INCLUDE_FILE trace 102#define TRACE_INCLUDE_FILE trace
103 103
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 630c83db056e..e39e5860a2e9 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,6 +30,15 @@ config ATH6KL_DEBUG
30 ---help--- 30 ---help---
31 Enables debug support 31 Enables debug support
32 32
33config ATH6KL_TRACING
34 bool "Atheros ath6kl tracing support"
35 depends on ATH6KL
36 depends on EVENT_TRACING
37 ---help---
38	  Select this to enable tracing infrastructure in ath6kl.
39
40 If unsure, say Y to make it easier to debug problems.
41
33config ATH6KL_REGDOMAIN 42config ATH6KL_REGDOMAIN
34 bool "Atheros ath6kl regdomain support" 43 bool "Atheros ath6kl regdomain support"
35 depends on ATH6KL 44 depends on ATH6KL
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index cab0ec0d5380..dc2b3b46781e 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -35,10 +35,15 @@ ath6kl_core-y += txrx.o
35ath6kl_core-y += wmi.o 35ath6kl_core-y += wmi.o
36ath6kl_core-y += core.o 36ath6kl_core-y += core.o
37ath6kl_core-y += recovery.o 37ath6kl_core-y += recovery.o
38
38ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o 39ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
40ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
39 41
40obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o 42obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
41ath6kl_sdio-y += sdio.o 43ath6kl_sdio-y += sdio.o
42 44
43obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o 45obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
44ath6kl_usb-y += usb.o 46ath6kl_usb-y += usb.o
47
48# for tracing framework to find trace.h
49CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 752ffc4f4166..5c9736a94e54 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -402,7 +402,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
402 if (type == NL80211_IFTYPE_STATION || 402 if (type == NL80211_IFTYPE_STATION ||
403 type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) { 403 type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
404 for (i = 0; i < ar->vif_max; i++) { 404 for (i = 0; i < ar->vif_max; i++) {
405 if ((ar->avail_idx_map >> i) & BIT(0)) { 405 if ((ar->avail_idx_map) & BIT(i)) {
406 *if_idx = i; 406 *if_idx = i;
407 return true; 407 return true;
408 } 408 }
@@ -412,7 +412,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
412 if (type == NL80211_IFTYPE_P2P_CLIENT || 412 if (type == NL80211_IFTYPE_P2P_CLIENT ||
413 type == NL80211_IFTYPE_P2P_GO) { 413 type == NL80211_IFTYPE_P2P_GO) {
414 for (i = ar->max_norm_iface; i < ar->vif_max; i++) { 414 for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
415 if ((ar->avail_idx_map >> i) & BIT(0)) { 415 if ((ar->avail_idx_map) & BIT(i)) {
416 *if_idx = i; 416 *if_idx = i;
417 return true; 417 return true;
418 } 418 }
@@ -1535,7 +1535,9 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
1535 1535
1536 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); 1536 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
1537 1537
1538 rtnl_lock();
1538 ath6kl_cfg80211_vif_cleanup(vif); 1539 ath6kl_cfg80211_vif_cleanup(vif);
1540 rtnl_unlock();
1539 1541
1540 return 0; 1542 return 0;
1541} 1543}
@@ -2990,13 +2992,15 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
2990{ 2992{
2991 struct ath6kl *ar = ath6kl_priv(dev); 2993 struct ath6kl *ar = ath6kl_priv(dev);
2992 struct ath6kl_vif *vif = netdev_priv(dev); 2994 struct ath6kl_vif *vif = netdev_priv(dev);
2995 int err;
2993 2996
2994 if (vif->nw_type != AP_NETWORK) 2997 if (vif->nw_type != AP_NETWORK)
2995 return -EOPNOTSUPP; 2998 return -EOPNOTSUPP;
2996 2999
2997 /* Use this only for authorizing/unauthorizing a station */ 3000 err = cfg80211_check_station_change(wiphy, params,
2998 if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) 3001 CFG80211_STA_AP_MLME_CLIENT);
2999 return -EOPNOTSUPP; 3002 if (err)
3003 return err;
3000 3004
3001 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 3005 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
3002 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, 3006 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
@@ -3659,7 +3663,6 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3659 vif->sme_state = SME_DISCONNECTED; 3663 vif->sme_state = SME_DISCONNECTED;
3660 set_bit(WLAN_ENABLED, &vif->flags); 3664 set_bit(WLAN_ENABLED, &vif->flags);
3661 ar->wlan_pwr_state = WLAN_POWER_STATE_ON; 3665 ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
3662 set_bit(NETDEV_REGISTERED, &vif->flags);
3663 3666
3664 if (type == NL80211_IFTYPE_ADHOC) 3667 if (type == NL80211_IFTYPE_ADHOC)
3665 ar->ibss_if_active = true; 3668 ar->ibss_if_active = true;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 61b2f98b4e77..26b0f92424e1 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -560,7 +560,6 @@ enum ath6kl_vif_state {
560 WMM_ENABLED, 560 WMM_ENABLED,
561 NETQ_STOPPED, 561 NETQ_STOPPED,
562 DTIM_EXPIRED, 562 DTIM_EXPIRED,
563 NETDEV_REGISTERED,
564 CLEAR_BSSFILTER_ON_BEACON, 563 CLEAR_BSSFILTER_ON_BEACON,
565 DTIM_PERIOD_AVAIL, 564 DTIM_PERIOD_AVAIL,
566 WLAN_ENABLED, 565 WLAN_ENABLED,
@@ -936,8 +935,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
936 u8 win_sz); 935 u8 win_sz);
937void ath6kl_wakeup_event(void *dev); 936void ath6kl_wakeup_event(void *dev);
938 937
939void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
940 bool wait_fot_compltn, bool cold_reset);
941void ath6kl_init_control_info(struct ath6kl_vif *vif); 938void ath6kl_init_control_info(struct ath6kl_vif *vif);
942struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); 939struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
943void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready); 940void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 15cfe30e54fd..fe38b836cb26 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -56,6 +56,60 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
56} 56}
57EXPORT_SYMBOL(ath6kl_printk); 57EXPORT_SYMBOL(ath6kl_printk);
58 58
59int ath6kl_info(const char *fmt, ...)
60{
61 struct va_format vaf = {
62 .fmt = fmt,
63 };
64 va_list args;
65 int ret;
66
67 va_start(args, fmt);
68 vaf.va = &args;
69 ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
70 trace_ath6kl_log_info(&vaf);
71 va_end(args);
72
73 return ret;
74}
75EXPORT_SYMBOL(ath6kl_info);
76
77int ath6kl_err(const char *fmt, ...)
78{
79 struct va_format vaf = {
80 .fmt = fmt,
81 };
82 va_list args;
83 int ret;
84
85 va_start(args, fmt);
86 vaf.va = &args;
87 ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
88 trace_ath6kl_log_err(&vaf);
89 va_end(args);
90
91 return ret;
92}
93EXPORT_SYMBOL(ath6kl_err);
94
95int ath6kl_warn(const char *fmt, ...)
96{
97 struct va_format vaf = {
98 .fmt = fmt,
99 };
100 va_list args;
101 int ret;
102
103 va_start(args, fmt);
104 vaf.va = &args;
105 ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
106 trace_ath6kl_log_warn(&vaf);
107 va_end(args);
108
109 return ret;
110}
111EXPORT_SYMBOL(ath6kl_warn);
112
59#ifdef CONFIG_ATH6KL_DEBUG 113#ifdef CONFIG_ATH6KL_DEBUG
60 114
61void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...) 115void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
@@ -63,15 +117,15 @@ void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
63 struct va_format vaf; 117 struct va_format vaf;
64 va_list args; 118 va_list args;
65 119
66 if (!(debug_mask & mask))
67 return;
68
69 va_start(args, fmt); 120 va_start(args, fmt);
70 121
71 vaf.fmt = fmt; 122 vaf.fmt = fmt;
72 vaf.va = &args; 123 vaf.va = &args;
73 124
74 ath6kl_printk(KERN_DEBUG, "%pV", &vaf); 125 if (debug_mask & mask)
126 ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
127
128 trace_ath6kl_log_dbg(mask, &vaf);
75 129
76 va_end(args); 130 va_end(args);
77} 131}
@@ -87,6 +141,10 @@ void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
87 141
88 print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); 142 print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
89 } 143 }
144
145 /* tracing code doesn't like null strings :/ */
146 trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
147 buf, len);
90} 148}
91EXPORT_SYMBOL(ath6kl_dbg_dump); 149EXPORT_SYMBOL(ath6kl_dbg_dump);
92 150
@@ -1752,8 +1810,10 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
1752 debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar, 1810 debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
1753 &fops_tgt_stats); 1811 &fops_tgt_stats);
1754 1812
1755 debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, 1813 if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO)
1756 &fops_credit_dist_stats); 1814 debugfs_create_file("credit_dist_stats", S_IRUSR,
1815 ar->debugfs_phy, ar,
1816 &fops_credit_dist_stats);
1757 1817
1758 debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR, 1818 debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
1759 ar->debugfs_phy, ar, &fops_endpoint_stats); 1819 ar->debugfs_phy, ar, &fops_endpoint_stats);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index f97cd4ead543..74369de00fb5 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -19,6 +19,7 @@
19#define DEBUG_H 19#define DEBUG_H
20 20
21#include "hif.h" 21#include "hif.h"
22#include "trace.h"
22 23
23enum ATH6K_DEBUG_MASK { 24enum ATH6K_DEBUG_MASK {
24 ATH6KL_DBG_CREDIT = BIT(0), 25 ATH6KL_DBG_CREDIT = BIT(0),
@@ -51,13 +52,9 @@ enum ATH6K_DEBUG_MASK {
51extern unsigned int debug_mask; 52extern unsigned int debug_mask;
52extern __printf(2, 3) 53extern __printf(2, 3)
53int ath6kl_printk(const char *level, const char *fmt, ...); 54int ath6kl_printk(const char *level, const char *fmt, ...);
54 55extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
55#define ath6kl_info(fmt, ...) \ 56extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
56 ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__) 57extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
57#define ath6kl_err(fmt, ...) \
58 ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
59#define ath6kl_warn(fmt, ...) \
60 ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
61 58
62enum ath6kl_war { 59enum ath6kl_war {
63 ATH6KL_WAR_INVALID_RATE, 60 ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index a6b614421fa4..fea7709b5dda 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -22,6 +22,7 @@
22#include "target.h" 22#include "target.h"
23#include "hif-ops.h" 23#include "hif-ops.h"
24#include "debug.h" 24#include "debug.h"
25#include "trace.h"
25 26
26#define MAILBOX_FOR_BLOCK_SIZE 1 27#define MAILBOX_FOR_BLOCK_SIZE 1
27 28
@@ -436,6 +437,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
436 437
437 ath6kl_dump_registers(dev, &dev->irq_proc_reg, 438 ath6kl_dump_registers(dev, &dev->irq_proc_reg,
438 &dev->irq_en_reg); 439 &dev->irq_en_reg);
440 trace_ath6kl_sdio_irq(&dev->irq_en_reg,
441 sizeof(dev->irq_en_reg));
439 442
440 /* Update only those registers that are enabled */ 443 /* Update only those registers that are enabled */
441 host_int_status = dev->irq_proc_reg.host_int_status & 444 host_int_status = dev->irq_proc_reg.host_int_status &
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index fbb78dfe078f..65e5b719093d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -19,6 +19,8 @@
19#include "hif.h" 19#include "hif.h"
20#include "debug.h" 20#include "debug.h"
21#include "hif-ops.h" 21#include "hif-ops.h"
22#include "trace.h"
23
22#include <asm/unaligned.h> 24#include <asm/unaligned.h>
23 25
24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 26#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
@@ -537,6 +539,8 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
537 packet->buf, padded_len, 539 packet->buf, padded_len,
538 HIF_WR_ASYNC_BLOCK_INC, packet); 540 HIF_WR_ASYNC_BLOCK_INC, packet);
539 541
542 trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
543
540 return status; 544 return status;
541} 545}
542 546
@@ -757,7 +761,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
757{ 761{
758 struct htc_target *target = endpoint->target; 762 struct htc_target *target = endpoint->target;
759 struct hif_scatter_req *scat_req = NULL; 763 struct hif_scatter_req *scat_req = NULL;
760 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0; 764 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
765 struct htc_packet *packet;
761 int status; 766 int status;
762 u32 txb_mask; 767 u32 txb_mask;
763 u8 ac = WMM_NUM_AC; 768 u8 ac = WMM_NUM_AC;
@@ -832,6 +837,13 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
832 ath6kl_dbg(ATH6KL_DBG_HTC, 837 ath6kl_dbg(ATH6KL_DBG_HTC,
833 "htc tx scatter bytes %d entries %d\n", 838 "htc tx scatter bytes %d entries %d\n",
834 scat_req->len, scat_req->scat_entries); 839 scat_req->len, scat_req->scat_entries);
840
841 for (i = 0; i < scat_req->scat_entries; i++) {
842 packet = scat_req->scat_list[i].packet;
843 trace_ath6kl_htc_tx(packet->status, packet->endpoint,
844 packet->buf, packet->act_len);
845 }
846
835 ath6kl_hif_submit_scat_req(target->dev, scat_req, false); 847 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
836 848
837 if (status) 849 if (status)
@@ -1903,6 +1915,7 @@ static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1903 ath6kl_dbg(ATH6KL_DBG_HTC, 1915 ath6kl_dbg(ATH6KL_DBG_HTC,
1904 "htc rx complete ep %d packet 0x%p\n", 1916 "htc rx complete ep %d packet 0x%p\n",
1905 endpoint->eid, packet); 1917 endpoint->eid, packet);
1918
1906 endpoint->ep_cb.rx(endpoint->target, packet); 1919 endpoint->ep_cb.rx(endpoint->target, packet);
1907} 1920}
1908 1921
@@ -2011,6 +2024,9 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
2011 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { 2024 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
2012 ep = &target->endpoint[packet->endpoint]; 2025 ep = &target->endpoint[packet->endpoint];
2013 2026
2027 trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2028 packet->buf, packet->act_len);
2029
2014 /* process header for each of the recv packet */ 2030 /* process header for each of the recv packet */
2015 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds, 2031 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
2016 n_lk_ahd); 2032 n_lk_ahd);
@@ -2291,6 +2307,9 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2291 if (ath6kl_htc_rx_packet(target, packet, packet->act_len)) 2307 if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2292 goto fail_ctrl_rx; 2308 goto fail_ctrl_rx;
2293 2309
2310 trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2311 packet->buf, packet->act_len);
2312
2294 /* process receive header */ 2313 /* process receive header */
2295 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL); 2314 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2296 2315
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 281390178e3d..67aa924ed8b3 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -988,8 +988,6 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
988 988
989 htc_hdr = (struct htc_frame_hdr *) netdata; 989 htc_hdr = (struct htc_frame_hdr *) netdata;
990 990
991 ep = &target->endpoint[htc_hdr->eid];
992
993 if (htc_hdr->eid >= ENDPOINT_MAX) { 991 if (htc_hdr->eid >= ENDPOINT_MAX) {
994 ath6kl_dbg(ATH6KL_DBG_HTC, 992 ath6kl_dbg(ATH6KL_DBG_HTC,
995 "HTC Rx: invalid EndpointID=%d\n", 993 "HTC Rx: invalid EndpointID=%d\n",
@@ -997,6 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
997 status = -EINVAL; 995 status = -EINVAL;
998 goto free_skb; 996 goto free_skb;
999 } 997 }
998 ep = &target->endpoint[htc_hdr->eid];
1000 999
1001 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); 1000 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1002 1001
@@ -1168,8 +1167,8 @@ static int htc_wait_recv_ctrl_message(struct htc_target *target)
1168 } 1167 }
1169 1168
1170 if (count <= 0) { 1169 if (count <= 0) {
1171 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__); 1170 ath6kl_warn("htc pipe control receive timeout!\n");
1172 return -ECOMM; 1171 return -ETIMEDOUT;
1173 } 1172 }
1174 1173
1175 return 0; 1174 return 0;
@@ -1582,16 +1581,16 @@ static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1582 return status; 1581 return status;
1583 1582
1584 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) { 1583 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1585 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n", 1584 ath6kl_warn("invalid htc pipe ready msg len: %d\n",
1586 target->pipe.ctrl_response_len); 1585 target->pipe.ctrl_response_len);
1587 return -ECOMM; 1586 return -ECOMM;
1588 } 1587 }
1589 1588
1590 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf; 1589 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1591 1590
1592 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) { 1591 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1593 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n", 1592 ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
1594 ready_msg->ver2_0_info.msg_id); 1593 ready_msg->ver2_0_info.msg_id);
1595 return -ECOMM; 1594 return -ECOMM;
1596 } 1595 }
1597 1596
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 5d434cf88f35..40ffee6184fd 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -201,8 +201,8 @@ struct sk_buff *ath6kl_buf_alloc(int size)
201 u16 reserved; 201 u16 reserved;
202 202
203 /* Add chacheline space at front and back of buffer */ 203 /* Add chacheline space at front and back of buffer */
204 reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET + 204 reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
205 sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES; 205 sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
206 skb = dev_alloc_skb(size + reserved); 206 skb = dev_alloc_skb(size + reserved);
207 207
208 if (skb) 208 if (skb)
@@ -1549,10 +1549,89 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
1549 return NULL; 1549 return NULL;
1550} 1550}
1551 1551
1552
1553static const struct fw_capa_str_map {
1554 int id;
1555 const char *name;
1556} fw_capa_map[] = {
1557 { ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
1558 { ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
1559 { ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
1560 { ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
1561 { ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
1562 { ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
1563 { ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
1564 { ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
1565 { ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
1566 { ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
1567 { ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
1568 { ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
1569 { ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
1570 { ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
1571};
1572
1573static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
1574{
1575 int i;
1576
1577 for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
1578 if (fw_capa_map[i].id == id)
1579 return fw_capa_map[i].name;
1580 }
1581
1582 return "<unknown>";
1583}
1584
1585static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
1586{
1587 u8 *data = (u8 *) ar->fw_capabilities;
1588 size_t trunc_len, len = 0;
1589 int i, index, bit;
1590 char *trunc = "...";
1591
1592 for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
1593 index = i / 8;
1594 bit = i % 8;
1595
1596 if (index >= sizeof(ar->fw_capabilities) * 4)
1597 break;
1598
1599 if (buf_len - len < 4) {
1600 ath6kl_warn("firmware capability buffer too small!\n");
1601
1602 /* add "..." to the end of string */
1603 trunc_len = strlen(trunc) + 1;
1604 strncpy(buf + buf_len - trunc_len, trunc, trunc_len);
1605
1606 return;
1607 }
1608
1609 if (data[index] & (1 << bit)) {
1610 len += scnprintf(buf + len, buf_len - len, "%s,",
1611 ath6kl_init_get_fw_capa_name(i));
1612 }
1613 }
1614
1615 /* overwrite the last comma */
1616 if (len > 0)
1617 len--;
1618
1619 buf[len] = '\0';
1620}
1621
1622static int ath6kl_init_hw_reset(struct ath6kl *ar)
1623{
1624 ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");
1625
1626 return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
1627 cpu_to_le32(RESET_CONTROL_COLD_RST));
1628}
1629
1552static int __ath6kl_init_hw_start(struct ath6kl *ar) 1630static int __ath6kl_init_hw_start(struct ath6kl *ar)
1553{ 1631{
1554 long timeleft; 1632 long timeleft;
1555 int ret, i; 1633 int ret, i;
1634 char buf[200];
1556 1635
1557 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n"); 1636 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
1558 1637
@@ -1569,24 +1648,35 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1569 goto err_power_off; 1648 goto err_power_off;
1570 1649
1571 /* Do we need to finish the BMI phase */ 1650 /* Do we need to finish the BMI phase */
1572 /* FIXME: return error from ath6kl_bmi_done() */ 1651 ret = ath6kl_bmi_done(ar);
1573 if (ath6kl_bmi_done(ar)) { 1652 if (ret)
1574 ret = -EIO;
1575 goto err_power_off; 1653 goto err_power_off;
1576 }
1577 1654
1578 /* 1655 /*
1579 * The reason we have to wait for the target here is that the 1656 * The reason we have to wait for the target here is that the
1580 * driver layer has to init BMI in order to set the host block 1657 * driver layer has to init BMI in order to set the host block
1581 * size. 1658 * size.
1582 */ 1659 */
1583 if (ath6kl_htc_wait_target(ar->htc_target)) { 1660 ret = ath6kl_htc_wait_target(ar->htc_target);
1584 ret = -EIO; 1661
1662 if (ret == -ETIMEDOUT) {
1663 /*
1664 * Most likely USB target is in odd state after reboot and
1665 * needs a reset. A cold reset makes the whole device
1666 * disappear from USB bus and initialisation starts from
1667 * beginning.
1668 */
1669 ath6kl_warn("htc wait target timed out, resetting device\n");
1670 ath6kl_init_hw_reset(ar);
1671 goto err_power_off;
1672 } else if (ret) {
1673 ath6kl_err("htc wait target failed: %d\n", ret);
1585 goto err_power_off; 1674 goto err_power_off;
1586 } 1675 }
1587 1676
1588 if (ath6kl_init_service_ep(ar)) { 1677 ret = ath6kl_init_service_ep(ar);
1589 ret = -EIO; 1678 if (ret) {
1679 ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
1590 goto err_cleanup_scatter; 1680 goto err_cleanup_scatter;
1591 } 1681 }
1592 1682
@@ -1617,6 +1707,8 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1617 ar->wiphy->fw_version, 1707 ar->wiphy->fw_version,
1618 ar->fw_api, 1708 ar->fw_api,
1619 test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); 1709 test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
1710 ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
1711 ath6kl_info("firmware supports: %s\n", buf);
1620 } 1712 }
1621 1713
1622 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { 1714 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
@@ -1765,9 +1857,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
1765 * Try to reset the device if we can. The driver may have been 1857 * Try to reset the device if we can. The driver may have been
1766 * configure NOT to reset the target during a debug session. 1858 * configure NOT to reset the target during a debug session.
1767 */ 1859 */
1768 ath6kl_dbg(ATH6KL_DBG_TRC, 1860 ath6kl_init_hw_reset(ar);
1769 "attempting to reset target on instance destroy\n");
1770 ath6kl_reset_device(ar, ar->target_type, true, true);
1771 1861
1772 up(&ar->sem); 1862 up(&ar->sem);
1773} 1863}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index bd50b6b7b492..d4fcfcad57d0 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -345,39 +345,6 @@ out:
345 return ret; 345 return ret;
346} 346}
347 347
348/* FIXME: move to a better place, target.h? */
349#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
350#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
351
352void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
353 bool wait_fot_compltn, bool cold_reset)
354{
355 int status = 0;
356 u32 address;
357 __le32 data;
358
359 if (target_type != TARGET_TYPE_AR6003 &&
360 target_type != TARGET_TYPE_AR6004)
361 return;
362
363 data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
364 cpu_to_le32(RESET_CONTROL_MBOX_RST);
365
366 switch (target_type) {
367 case TARGET_TYPE_AR6003:
368 address = AR6003_RESET_CONTROL_ADDRESS;
369 break;
370 case TARGET_TYPE_AR6004:
371 address = AR6004_RESET_CONTROL_ADDRESS;
372 break;
373 }
374
375 status = ath6kl_diag_write32(ar, address, data);
376
377 if (status)
378 ath6kl_err("failed to reset target\n");
379}
380
381static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) 348static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
382{ 349{
383 u8 index; 350 u8 index;
@@ -1327,9 +1294,11 @@ void init_netdev(struct net_device *dev)
1327 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 1294 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1328 1295
1329 dev->needed_headroom = ETH_HLEN; 1296 dev->needed_headroom = ETH_HLEN;
1330 dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) + 1297 dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) +
1331 sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH 1298 sizeof(struct wmi_data_hdr) +
1332 + WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES; 1299 HTC_HDR_LENGTH +
1300 WMI_MAX_TX_META_SZ +
1301 ATH6KL_HTC_ALIGN_BYTES, 4);
1333 1302
1334 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1303 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1335 1304
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index d111980d44c0..fb141454c6d2 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -28,6 +28,7 @@
28#include "target.h" 28#include "target.h"
29#include "debug.h" 29#include "debug.h"
30#include "cfg80211.h" 30#include "cfg80211.h"
31#include "trace.h"
31 32
32struct ath6kl_sdio { 33struct ath6kl_sdio {
33 struct sdio_func *func; 34 struct sdio_func *func;
@@ -179,6 +180,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
179 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 180 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
180 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); 181 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
181 182
183 trace_ath6kl_sdio(addr, request, buf, len);
184
182 return ret; 185 return ret;
183} 186}
184 187
@@ -309,6 +312,13 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
309 sdio_claim_host(ar_sdio->func); 312 sdio_claim_host(ar_sdio->func);
310 313
311 mmc_set_data_timeout(&data, ar_sdio->func->card); 314 mmc_set_data_timeout(&data, ar_sdio->func->card);
315
316 trace_ath6kl_sdio_scat(scat_req->addr,
317 scat_req->req,
318 scat_req->len,
319 scat_req->scat_entries,
320 scat_req->scat_list);
321
312 /* synchronous call to process request */ 322 /* synchronous call to process request */
313 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 323 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
314 324
@@ -1123,10 +1133,12 @@ static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1123 1133
1124 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1134 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1125 HIF_WR_SYNC_BYTE_INC); 1135 HIF_WR_SYNC_BYTE_INC);
1126 if (ret) 1136 if (ret) {
1127 ath6kl_err("unable to send the bmi data to the device\n"); 1137 ath6kl_err("unable to send the bmi data to the device\n");
1138 return ret;
1139 }
1128 1140
1129 return ret; 1141 return 0;
1130} 1142}
1131 1143
1132static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1144static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a98c12ba70c1..a580a629a0da 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -25,7 +25,7 @@
25#define AR6004_BOARD_DATA_SZ 6144 25#define AR6004_BOARD_DATA_SZ 6144
26#define AR6004_BOARD_EXT_DATA_SZ 0 26#define AR6004_BOARD_EXT_DATA_SZ 0
27 27
28#define RESET_CONTROL_ADDRESS 0x00000000 28#define RESET_CONTROL_ADDRESS 0x00004000
29#define RESET_CONTROL_COLD_RST 0x00000100 29#define RESET_CONTROL_COLD_RST 0x00000100
30#define RESET_CONTROL_MBOX_RST 0x00000004 30#define RESET_CONTROL_MBOX_RST 0x00000004
31 31
diff --git a/drivers/net/wireless/ath/ath6kl/trace.c b/drivers/net/wireless/ath/ath6kl/trace.c
new file mode 100644
index 000000000000..e7d64b1285cb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.c
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h>
18
19#define CREATE_TRACE_POINTS
20#include "trace.h"
21
22EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio);
23EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat);
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
new file mode 100644
index 000000000000..1a1ea7881b4d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -0,0 +1,332 @@
1#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2
3#include <net/cfg80211.h>
4#include <linux/skbuff.h>
5#include <linux/tracepoint.h>
6#include "wmi.h"
7#include "hif.h"
8
9#if !defined(_ATH6KL_TRACE_H)
10static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len)
11{
12 struct wmi_cmd_hdr *hdr = buf;
13
14 if (buf_len < sizeof(*hdr))
15 return 0;
16
17 return le16_to_cpu(hdr->cmd_id);
18}
19#endif /* __ATH6KL_TRACE_H */
20
21#define _ATH6KL_TRACE_H
22
23/* create empty functions when tracing is disabled */
24#if !defined(CONFIG_ATH6KL_TRACING)
25#undef TRACE_EVENT
26#define TRACE_EVENT(name, proto, ...) \
27static inline void trace_ ## name(proto) {}
28#undef DECLARE_EVENT_CLASS
29#define DECLARE_EVENT_CLASS(...)
30#undef DEFINE_EVENT
31#define DEFINE_EVENT(evt_class, name, proto, ...) \
32static inline void trace_ ## name(proto) {}
33#endif /* !CONFIG_ATH6KL_TRACING || __CHECKER__ */
34
35#undef TRACE_SYSTEM
36#define TRACE_SYSTEM ath6kl
37
38TRACE_EVENT(ath6kl_wmi_cmd,
39 TP_PROTO(void *buf, size_t buf_len),
40
41 TP_ARGS(buf, buf_len),
42
43 TP_STRUCT__entry(
44 __field(unsigned int, id)
45 __field(size_t, buf_len)
46 __dynamic_array(u8, buf, buf_len)
47 ),
48
49 TP_fast_assign(
50 __entry->id = ath6kl_get_wmi_id(buf, buf_len);
51 __entry->buf_len = buf_len;
52 memcpy(__get_dynamic_array(buf), buf, buf_len);
53 ),
54
55 TP_printk(
56 "id %d len %zd",
57 __entry->id, __entry->buf_len
58 )
59);
60
61TRACE_EVENT(ath6kl_wmi_event,
62 TP_PROTO(void *buf, size_t buf_len),
63
64 TP_ARGS(buf, buf_len),
65
66 TP_STRUCT__entry(
67 __field(unsigned int, id)
68 __field(size_t, buf_len)
69 __dynamic_array(u8, buf, buf_len)
70 ),
71
72 TP_fast_assign(
73 __entry->id = ath6kl_get_wmi_id(buf, buf_len);
74 __entry->buf_len = buf_len;
75 memcpy(__get_dynamic_array(buf), buf, buf_len);
76 ),
77
78 TP_printk(
79 "id %d len %zd",
80 __entry->id, __entry->buf_len
81 )
82);
83
84TRACE_EVENT(ath6kl_sdio,
85 TP_PROTO(unsigned int addr, int flags,
86 void *buf, size_t buf_len),
87
88 TP_ARGS(addr, flags, buf, buf_len),
89
90 TP_STRUCT__entry(
91 __field(unsigned int, tx)
92 __field(unsigned int, addr)
93 __field(int, flags)
94 __field(size_t, buf_len)
95 __dynamic_array(u8, buf, buf_len)
96 ),
97
98 TP_fast_assign(
99 __entry->addr = addr;
100 __entry->flags = flags;
101 __entry->buf_len = buf_len;
102 memcpy(__get_dynamic_array(buf), buf, buf_len);
103
104 if (flags & HIF_WRITE)
105 __entry->tx = 1;
106 else
107 __entry->tx = 0;
108 ),
109
110 TP_printk(
111 "%s addr 0x%x flags 0x%x len %zd\n",
112 __entry->tx ? "tx" : "rx",
113 __entry->addr,
114 __entry->flags,
115 __entry->buf_len
116 )
117);
118
119TRACE_EVENT(ath6kl_sdio_scat,
120 TP_PROTO(unsigned int addr, int flags, unsigned int total_len,
121 unsigned int entries, struct hif_scatter_item *list),
122
123 TP_ARGS(addr, flags, total_len, entries, list),
124
125 TP_STRUCT__entry(
126 __field(unsigned int, tx)
127 __field(unsigned int, addr)
128 __field(int, flags)
129 __field(unsigned int, entries)
130 __field(size_t, total_len)
131 __dynamic_array(unsigned int, len_array, entries)
132 __dynamic_array(u8, data, total_len)
133 ),
134
135 TP_fast_assign(
136 unsigned int *len_array;
137 int i, offset = 0;
138 size_t len;
139
140 __entry->addr = addr;
141 __entry->flags = flags;
142 __entry->entries = entries;
143 __entry->total_len = total_len;
144
145 if (flags & HIF_WRITE)
146 __entry->tx = 1;
147 else
148 __entry->tx = 0;
149
150 len_array = __get_dynamic_array(len_array);
151
152 for (i = 0; i < entries; i++) {
153 len = list[i].len;
154
155 memcpy((u8 *) __get_dynamic_array(data) + offset,
156 list[i].buf, len);
157
158 len_array[i] = len;
159 offset += len;
160 }
161 ),
162
163 TP_printk(
164 "%s addr 0x%x flags 0x%x entries %d total_len %zd\n",
165 __entry->tx ? "tx" : "rx",
166 __entry->addr,
167 __entry->flags,
168 __entry->entries,
169 __entry->total_len
170 )
171);
172
173TRACE_EVENT(ath6kl_sdio_irq,
174 TP_PROTO(void *buf, size_t buf_len),
175
176 TP_ARGS(buf, buf_len),
177
178 TP_STRUCT__entry(
179 __field(size_t, buf_len)
180 __dynamic_array(u8, buf, buf_len)
181 ),
182
183 TP_fast_assign(
184 __entry->buf_len = buf_len;
185 memcpy(__get_dynamic_array(buf), buf, buf_len);
186 ),
187
188 TP_printk(
189 "irq len %zd\n", __entry->buf_len
190 )
191);
192
193TRACE_EVENT(ath6kl_htc_rx,
194 TP_PROTO(int status, int endpoint, void *buf,
195 size_t buf_len),
196
197 TP_ARGS(status, endpoint, buf, buf_len),
198
199 TP_STRUCT__entry(
200 __field(int, status)
201 __field(int, endpoint)
202 __field(size_t, buf_len)
203 __dynamic_array(u8, buf, buf_len)
204 ),
205
206 TP_fast_assign(
207 __entry->status = status;
208 __entry->endpoint = endpoint;
209 __entry->buf_len = buf_len;
210 memcpy(__get_dynamic_array(buf), buf, buf_len);
211 ),
212
213 TP_printk(
214 "status %d endpoint %d len %zd\n",
215 __entry->status,
216 __entry->endpoint,
217 __entry->buf_len
218 )
219);
220
221TRACE_EVENT(ath6kl_htc_tx,
222 TP_PROTO(int status, int endpoint, void *buf,
223 size_t buf_len),
224
225 TP_ARGS(status, endpoint, buf, buf_len),
226
227 TP_STRUCT__entry(
228 __field(int, status)
229 __field(int, endpoint)
230 __field(size_t, buf_len)
231 __dynamic_array(u8, buf, buf_len)
232 ),
233
234 TP_fast_assign(
235 __entry->status = status;
236 __entry->endpoint = endpoint;
237 __entry->buf_len = buf_len;
238 memcpy(__get_dynamic_array(buf), buf, buf_len);
239 ),
240
241 TP_printk(
242 "status %d endpoint %d len %zd\n",
243 __entry->status,
244 __entry->endpoint,
245 __entry->buf_len
246 )
247);
248
249#define ATH6KL_MSG_MAX 200
250
251DECLARE_EVENT_CLASS(ath6kl_log_event,
252 TP_PROTO(struct va_format *vaf),
253 TP_ARGS(vaf),
254 TP_STRUCT__entry(
255 __dynamic_array(char, msg, ATH6KL_MSG_MAX)
256 ),
257 TP_fast_assign(
258 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
259 ATH6KL_MSG_MAX,
260 vaf->fmt,
261 *vaf->va) >= ATH6KL_MSG_MAX);
262 ),
263 TP_printk("%s", __get_str(msg))
264);
265
266DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err,
267 TP_PROTO(struct va_format *vaf),
268 TP_ARGS(vaf)
269);
270
271DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn,
272 TP_PROTO(struct va_format *vaf),
273 TP_ARGS(vaf)
274);
275
276DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info,
277 TP_PROTO(struct va_format *vaf),
278 TP_ARGS(vaf)
279);
280
281TRACE_EVENT(ath6kl_log_dbg,
282 TP_PROTO(unsigned int level, struct va_format *vaf),
283 TP_ARGS(level, vaf),
284 TP_STRUCT__entry(
285 __field(unsigned int, level)
286 __dynamic_array(char, msg, ATH6KL_MSG_MAX)
287 ),
288 TP_fast_assign(
289 __entry->level = level;
290 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
291 ATH6KL_MSG_MAX,
292 vaf->fmt,
293 *vaf->va) >= ATH6KL_MSG_MAX);
294 ),
295 TP_printk("%s", __get_str(msg))
296);
297
298TRACE_EVENT(ath6kl_log_dbg_dump,
299 TP_PROTO(const char *msg, const char *prefix,
300 const void *buf, size_t buf_len),
301
302 TP_ARGS(msg, prefix, buf, buf_len),
303
304 TP_STRUCT__entry(
305 __string(msg, msg)
306 __string(prefix, prefix)
307 __field(size_t, buf_len)
308 __dynamic_array(u8, buf, buf_len)
309 ),
310
311 TP_fast_assign(
312 __assign_str(msg, msg);
313 __assign_str(prefix, prefix);
314 __entry->buf_len = buf_len;
315 memcpy(__get_dynamic_array(buf), buf, buf_len);
316 ),
317
318 TP_printk(
319 "%s/%s\n", __get_str(prefix), __get_str(msg)
320 )
321);
322
323#endif /* _ ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ*/
324
325/* we don't want to use include/trace/events */
326#undef TRACE_INCLUDE_PATH
327#define TRACE_INCLUDE_PATH .
328#undef TRACE_INCLUDE_FILE
329#define TRACE_INCLUDE_FILE trace
330
331/* This part must be outside protection */
332#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 78b369286579..ebb24045a8ae 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "debug.h" 21#include "debug.h"
22#include "htc-ops.h" 22#include "htc-ops.h"
23#include "trace.h"
23 24
24/* 25/*
25 * tid - tid_mux0..tid_mux3 26 * tid - tid_mux0..tid_mux3
@@ -288,6 +289,8 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
288 int status = 0; 289 int status = 0;
289 struct ath6kl_cookie *cookie = NULL; 290 struct ath6kl_cookie *cookie = NULL;
290 291
292 trace_ath6kl_wmi_cmd(skb->data, skb->len);
293
291 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) { 294 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
292 dev_kfree_skb(skb); 295 dev_kfree_skb(skb);
293 return -EACCES; 296 return -EACCES;
@@ -1324,7 +1327,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1324 __func__, ar, ept, skb, packet->buf, 1327 __func__, ar, ept, skb, packet->buf,
1325 packet->act_len, status); 1328 packet->act_len, status);
1326 1329
1327 if (status || !(skb->data + HTC_HDR_LENGTH)) { 1330 if (status || packet->act_len < HTC_HDR_LENGTH) {
1328 dev_kfree_skb(skb); 1331 dev_kfree_skb(skb);
1329 return; 1332 return;
1330 } 1333 }
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 5fcd342762de..bed0d337712d 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -856,11 +856,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
856 int ret; 856 int ret;
857 857
858 if (size > 0) { 858 if (size > 0) {
859 buf = kmalloc(size, GFP_KERNEL); 859 buf = kmemdup(data, size, GFP_KERNEL);
860 if (buf == NULL) 860 if (buf == NULL)
861 return -ENOMEM; 861 return -ENOMEM;
862
863 memcpy(buf, data, size);
864 } 862 }
865 863
866 /* note: if successful returns number of bytes transfered */ 864 /* note: if successful returns number of bytes transfered */
@@ -872,8 +870,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
872 size, 1000); 870 size, 1000);
873 871
874 if (ret < 0) { 872 if (ret < 0) {
875 ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", 873 ath6kl_warn("Failed to submit usb control message: %d\n", ret);
876 __func__, ret); 874 kfree(buf);
875 return ret;
877 } 876 }
878 877
879 kfree(buf); 878 kfree(buf);
@@ -903,8 +902,9 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
903 size, 2 * HZ); 902 size, 2 * HZ);
904 903
905 if (ret < 0) { 904 if (ret < 0) {
906 ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", 905 ath6kl_warn("Failed to read usb control message: %d\n", ret);
907 __func__, ret); 906 kfree(buf);
907 return ret;
908 } 908 }
909 909
910 memcpy((u8 *) data, buf, size); 910 memcpy((u8 *) data, buf, size);
@@ -961,8 +961,10 @@ static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
961 ATH6KL_USB_CONTROL_REQ_DIAG_RESP, 961 ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
962 ar_usb->diag_resp_buffer, &resp_len); 962 ar_usb->diag_resp_buffer, &resp_len);
963 963
964 if (ret) 964 if (ret) {
965 ath6kl_warn("diag read32 failed: %d\n", ret);
965 return ret; 966 return ret;
967 }
966 968
967 resp = (struct ath6kl_usb_ctrl_diag_resp_read *) 969 resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
968 ar_usb->diag_resp_buffer; 970 ar_usb->diag_resp_buffer;
@@ -976,6 +978,7 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
976{ 978{
977 struct ath6kl_usb *ar_usb = ar->hif_priv; 979 struct ath6kl_usb *ar_usb = ar->hif_priv;
978 struct ath6kl_usb_ctrl_diag_cmd_write *cmd; 980 struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
981 int ret;
979 982
980 cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer; 983 cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
981 984
@@ -984,12 +987,17 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
984 cmd->address = cpu_to_le32(address); 987 cmd->address = cpu_to_le32(address);
985 cmd->value = data; 988 cmd->value = data;
986 989
987 return ath6kl_usb_ctrl_msg_exchange(ar_usb, 990 ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
988 ATH6KL_USB_CONTROL_REQ_DIAG_CMD, 991 ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
989 (u8 *) cmd, 992 (u8 *) cmd,
990 sizeof(*cmd), 993 sizeof(*cmd),
991 0, NULL, NULL); 994 0, NULL, NULL);
995 if (ret) {
996 ath6kl_warn("diag_write32 failed: %d\n", ret);
997 return ret;
998 }
992 999
1000 return 0;
993} 1001}
994 1002
995static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1003static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
@@ -1001,7 +1009,7 @@ static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
1001 ret = ath6kl_usb_submit_ctrl_in(ar_usb, 1009 ret = ath6kl_usb_submit_ctrl_in(ar_usb,
1002 ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP, 1010 ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
1003 0, 0, buf, len); 1011 0, 0, buf, len);
1004 if (ret != 0) { 1012 if (ret) {
1005 ath6kl_err("Unable to read the bmi data from the device: %d\n", 1013 ath6kl_err("Unable to read the bmi data from the device: %d\n",
1006 ret); 1014 ret);
1007 return ret; 1015 return ret;
@@ -1019,7 +1027,7 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1019 ret = ath6kl_usb_submit_ctrl_out(ar_usb, 1027 ret = ath6kl_usb_submit_ctrl_out(ar_usb,
1020 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD, 1028 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
1021 0, 0, buf, len); 1029 0, 0, buf, len);
1022 if (ret != 0) { 1030 if (ret) {
1023 ath6kl_err("unable to send the bmi data to the device: %d\n", 1031 ath6kl_err("unable to send the bmi data to the device: %d\n",
1024 ret); 1032 ret);
1025 return ret; 1033 return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index d76b5bd81a0d..87aefb4c4c23 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "debug.h" 21#include "debug.h"
22#include "testmode.h" 22#include "testmode.h"
23#include "trace.h"
23#include "../regd.h" 24#include "../regd.h"
24#include "../regd_common.h" 25#include "../regd_common.h"
25 26
@@ -2028,6 +2029,9 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
2028 if (!sband) 2029 if (!sband)
2029 continue; 2030 continue;
2030 2031
2032 if (WARN_ON(band >= ATH6KL_NUM_BANDS))
2033 break;
2034
2031 ratemask = rates[band]; 2035 ratemask = rates[band];
2032 supp_rates = sc->supp_rates[band].rates; 2036 supp_rates = sc->supp_rates[band].rates;
2033 num_rates = 0; 2037 num_rates = 0;
@@ -4086,6 +4090,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
4086 return -EINVAL; 4090 return -EINVAL;
4087 } 4091 }
4088 4092
4093 trace_ath6kl_wmi_event(skb->data, skb->len);
4094
4089 return ath6kl_wmi_proc_events(wmi, skb); 4095 return ath6kl_wmi_proc_events(wmi, skb);
4090} 4096}
4091 4097
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index fd69376ecc83..391da5ad6a99 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -18,6 +18,7 @@
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include "../regd.h" 19#include "../regd.h"
20#include "ar9002_phy.h" 20#include "ar9002_phy.h"
21#include "ar5008_initvals.h"
21 22
22/* All code below is for AR5008, AR9001, AR9002 */ 23/* All code below is for AR5008, AR9001, AR9002 */
23 24
@@ -43,23 +44,16 @@ static const int m2ThreshLowExt_off = 127;
43static const int m1ThreshExt_off = 127; 44static const int m1ThreshExt_off = 127;
44static const int m2ThreshExt_off = 127; 45static const int m2ThreshExt_off = 127;
45 46
47static const struct ar5416IniArray bank0 = STATIC_INI_ARRAY(ar5416Bank0);
48static const struct ar5416IniArray bank1 = STATIC_INI_ARRAY(ar5416Bank1);
49static const struct ar5416IniArray bank2 = STATIC_INI_ARRAY(ar5416Bank2);
50static const struct ar5416IniArray bank3 = STATIC_INI_ARRAY(ar5416Bank3);
51static const struct ar5416IniArray bank7 = STATIC_INI_ARRAY(ar5416Bank7);
46 52
47static void ar5008_rf_bank_setup(u32 *bank, struct ar5416IniArray *array, 53static void ar5008_write_bank6(struct ath_hw *ah, unsigned int *writecnt)
48 int col)
49{
50 int i;
51
52 for (i = 0; i < array->ia_rows; i++)
53 bank[i] = INI_RA(array, i, col);
54}
55
56
57#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) \
58 ar5008_write_rf_array(ah, iniarray, regData, &(regWr))
59
60static void ar5008_write_rf_array(struct ath_hw *ah, struct ar5416IniArray *array,
61 u32 *data, unsigned int *writecnt)
62{ 54{
55 struct ar5416IniArray *array = &ah->iniBank6;
56 u32 *data = ah->analogBank6Data;
63 int r; 57 int r;
64 58
65 ENABLE_REGWRITE_BUFFER(ah); 59 ENABLE_REGWRITE_BUFFER(ah);
@@ -165,7 +159,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
165 ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3); 159 ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
166 160
167 /* write Bank 6 with new params */ 161 /* write Bank 6 with new params */
168 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes); 162 ar5008_write_bank6(ah, &reg_writes);
169} 163}
170 164
171/** 165/**
@@ -469,31 +463,16 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
469 */ 463 */
470static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah) 464static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
471{ 465{
472#define ATH_ALLOC_BANK(bank, size) do { \ 466 int size = ah->iniBank6.ia_rows * sizeof(u32);
473 bank = devm_kzalloc(ah->dev, sizeof(u32) * size, GFP_KERNEL); \
474 if (!bank) \
475 goto error; \
476 } while (0);
477
478 struct ath_common *common = ath9k_hw_common(ah);
479 467
480 if (AR_SREV_9280_20_OR_LATER(ah)) 468 if (AR_SREV_9280_20_OR_LATER(ah))
481 return 0; 469 return 0;
482 470
483 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows); 471 ah->analogBank6Data = devm_kzalloc(ah->dev, size, GFP_KERNEL);
484 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows); 472 if (!ah->analogBank6Data)
485 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows); 473 return -ENOMEM;
486 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
487 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
488 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
489 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
490 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
491 474
492 return 0; 475 return 0;
493#undef ATH_ALLOC_BANK
494error:
495 ath_err(common, "Cannot allocate RF banks\n");
496 return -ENOMEM;
497} 476}
498 477
499 478
@@ -517,6 +496,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
517 u32 ob5GHz = 0, db5GHz = 0; 496 u32 ob5GHz = 0, db5GHz = 0;
518 u32 ob2GHz = 0, db2GHz = 0; 497 u32 ob2GHz = 0, db2GHz = 0;
519 int regWrites = 0; 498 int regWrites = 0;
499 int i;
520 500
521 /* 501 /*
522 * Software does not need to program bank data 502 * Software does not need to program bank data
@@ -529,25 +509,8 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
529 /* Setup rf parameters */ 509 /* Setup rf parameters */
530 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); 510 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
531 511
532 /* Setup Bank 0 Write */ 512 for (i = 0; i < ah->iniBank6.ia_rows; i++)
533 ar5008_rf_bank_setup(ah->analogBank0Data, &ah->iniBank0, 1); 513 ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex);
534
535 /* Setup Bank 1 Write */
536 ar5008_rf_bank_setup(ah->analogBank1Data, &ah->iniBank1, 1);
537
538 /* Setup Bank 2 Write */
539 ar5008_rf_bank_setup(ah->analogBank2Data, &ah->iniBank2, 1);
540
541 /* Setup Bank 6 Write */
542 ar5008_rf_bank_setup(ah->analogBank3Data, &ah->iniBank3,
543 modesIndex);
544 {
545 int i;
546 for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
547 ah->analogBank6Data[i] =
548 INI_RA(&ah->iniBank6TPC, i, modesIndex);
549 }
550 }
551 514
552 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */ 515 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
553 if (eepMinorRev >= 2) { 516 if (eepMinorRev >= 2) {
@@ -568,22 +531,13 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
568 } 531 }
569 } 532 }
570 533
571 /* Setup Bank 7 Setup */
572 ar5008_rf_bank_setup(ah->analogBank7Data, &ah->iniBank7, 1);
573
574 /* Write Analog registers */ 534 /* Write Analog registers */
575 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, 535 REG_WRITE_ARRAY(&bank0, 1, regWrites);
576 regWrites); 536 REG_WRITE_ARRAY(&bank1, 1, regWrites);
577 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data, 537 REG_WRITE_ARRAY(&bank2, 1, regWrites);
578 regWrites); 538 REG_WRITE_ARRAY(&bank3, modesIndex, regWrites);
579 REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data, 539 ar5008_write_bank6(ah, &regWrites);
580 regWrites); 540 REG_WRITE_ARRAY(&bank7, 1, regWrites);
581 REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
582 regWrites);
583 REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
584 regWrites);
585 REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
586 regWrites);
587 541
588 return true; 542 return true;
589} 543}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index c55e5bbafc46..9f589744a9f9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -731,7 +731,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
731 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 731 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
732 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { 732 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
733 ath_dbg(common, CALIBRATE, 733 ath_dbg(common, CALIBRATE,
734 "offset calibration failed to complete in 1ms; noisy environment?\n"); 734 "offset calibration failed to complete in %d ms; noisy environment?\n",
735 AH_WAIT_TIMEOUT / 1000);
735 return false; 736 return false;
736 } 737 }
737 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); 738 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -745,7 +746,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
745 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 746 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
746 0, AH_WAIT_TIMEOUT)) { 747 0, AH_WAIT_TIMEOUT)) {
747 ath_dbg(common, CALIBRATE, 748 ath_dbg(common, CALIBRATE,
748 "offset calibration failed to complete in 1ms; noisy environment?\n"); 749 "offset calibration failed to complete in %d ms; noisy environment?\n",
750 AH_WAIT_TIMEOUT / 1000);
749 return false; 751 return false;
750 } 752 }
751 753
@@ -841,7 +843,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
841 AR_PHY_AGC_CONTROL_CAL, 843 AR_PHY_AGC_CONTROL_CAL,
842 0, AH_WAIT_TIMEOUT)) { 844 0, AH_WAIT_TIMEOUT)) {
843 ath_dbg(common, CALIBRATE, 845 ath_dbg(common, CALIBRATE,
844 "offset calibration failed to complete in 1ms; noisy environment?\n"); 846 "offset calibration failed to complete in %d ms; noisy environment?\n",
847 AH_WAIT_TIMEOUT / 1000);
845 return false; 848 return false;
846 } 849 }
847 850
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f053d978540e..830daa12feb6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -67,12 +67,10 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
67 } else if (AR_SREV_9100_OR_LATER(ah)) { 67 } else if (AR_SREV_9100_OR_LATER(ah)) {
68 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100); 68 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100);
69 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100); 69 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100);
70 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100);
71 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100); 70 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100);
72 } else { 71 } else {
73 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes); 72 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes);
74 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common); 73 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common);
75 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC);
76 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac); 74 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac);
77 } 75 }
78 76
@@ -80,20 +78,11 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
80 /* Common for AR5416, AR913x, AR9160 */ 78 /* Common for AR5416, AR913x, AR9160 */
81 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain); 79 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain);
82 80
83 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0);
84 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1);
85 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2);
86 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3);
87 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7);
88
89 /* Common for AR5416, AR9160 */
90 if (!AR_SREV_9100(ah))
91 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6);
92
93 /* Common for AR913x, AR9160 */ 81 /* Common for AR913x, AR9160 */
94 if (!AR_SREV_5416(ah)) 82 if (!AR_SREV_5416(ah))
95 INIT_INI_ARRAY(&ah->iniBank6TPC, 83 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6TPC_9100);
96 ar5416Bank6TPC_9100); 84 else
85 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6TPC);
97 } 86 }
98 87
99 /* iniAddac needs to be modified for these chips */ 88 /* iniAddac needs to be modified for these chips */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index f76c3ca07a45..639ba7d18ea4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1126,7 +1126,8 @@ skip_tx_iqcal:
1126 ar9003_hw_rtt_disable(ah); 1126 ar9003_hw_rtt_disable(ah);
1127 1127
1128 ath_dbg(common, CALIBRATE, 1128 ath_dbg(common, CALIBRATE,
1129 "offset calibration failed to complete in 1ms; noisy environment?\n"); 1129 "offset calibration failed to complete in %d ms; noisy environment?\n",
1130 AH_WAIT_TIMEOUT / 1000);
1130 return false; 1131 return false;
1131 } 1132 }
1132 1133
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 881e989ea470..e6b92ff265fd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3606,6 +3606,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3606 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3606 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
3607 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); 3607 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
3608 3608
3609 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
3610 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
3611 REG_RMW_FIELD(ah, switch_chain_reg[0],
3612 AR_SWITCH_TABLE_ALL, value);
3613 }
3614
3609 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 3615 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
3610 if ((ah->rxchainmask & BIT(chain)) || 3616 if ((ah->rxchainmask & BIT(chain)) ||
3611 (ah->txchainmask & BIT(chain))) { 3617 (ah->txchainmask & BIT(chain))) {
@@ -3772,6 +3778,17 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3772 AR_PHY_EXT_ATTEN_CTL_2, 3778 AR_PHY_EXT_ATTEN_CTL_2,
3773 }; 3779 };
3774 3780
3781 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
3782 value = ar9003_hw_atten_chain_get(ah, 1, chan);
3783 REG_RMW_FIELD(ah, ext_atten_reg[0],
3784 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
3785
3786 value = ar9003_hw_atten_chain_get_margin(ah, 1, chan);
3787 REG_RMW_FIELD(ah, ext_atten_reg[0],
3788 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
3789 value);
3790 }
3791
3775 /* Test value. if 0 then attenuation is unused. Don't load anything. */ 3792 /* Test value. if 0 then attenuation is unused. Don't load anything. */
3776 for (i = 0; i < 3; i++) { 3793 for (i = 0; i < 3; i++) {
3777 if (ah->txchainmask & BIT(i)) { 3794 if (ah->txchainmask & BIT(i)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index ccc42a71b436..999ab08c34e6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -37,28 +37,28 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
37 /* Addr allmodes */ 37 /* Addr allmodes */
38 {0x00018c00, 0x18253ede}, 38 {0x00018c00, 0x18253ede},
39 {0x00018c04, 0x000801d8}, 39 {0x00018c04, 0x000801d8},
40 {0x00018c08, 0x0003580c}, 40 {0x00018c08, 0x0003780c},
41}; 41};
42 42
43static const u32 ar9462_2p0_baseband_postamble[][5] = { 43static const u32 ar9462_2p0_baseband_postamble[][5] = {
44 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 44 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
45 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d}, 45 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
46 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, 46 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
47 {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, 47 {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
48 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81}, 48 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
49 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 49 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
50 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 50 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
51 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, 51 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
52 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, 52 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e}, 56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
61 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282}, 61 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -82,9 +82,9 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
85 {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000}, 85 {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
86 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa}, 86 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
87 {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00}, 87 {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
88 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, 88 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
89 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, 89 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
90 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, 90 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
@@ -363,14 +363,14 @@ static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
363 /* Addr allmodes */ 363 /* Addr allmodes */
364 {0x00018c00, 0x18213ede}, 364 {0x00018c00, 0x18213ede},
365 {0x00018c04, 0x000801d8}, 365 {0x00018c04, 0x000801d8},
366 {0x00018c08, 0x0003580c}, 366 {0x00018c08, 0x0003780c},
367}; 367};
368 368
369static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = { 369static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
370 /* Addr allmodes */ 370 /* Addr allmodes */
371 {0x00018c00, 0x18212ede}, 371 {0x00018c00, 0x18212ede},
372 {0x00018c04, 0x000801d8}, 372 {0x00018c04, 0x000801d8},
373 {0x00018c08, 0x0003580c}, 373 {0x00018c08, 0x0003780c},
374}; 374};
375 375
376static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = { 376static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
@@ -775,7 +775,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
775 {0x00009fc0, 0x803e4788}, 775 {0x00009fc0, 0x803e4788},
776 {0x00009fc4, 0x0001efb5}, 776 {0x00009fc4, 0x0001efb5},
777 {0x00009fcc, 0x40000014}, 777 {0x00009fcc, 0x40000014},
778 {0x00009fd0, 0x01193b93}, 778 {0x00009fd0, 0x0a193b93},
779 {0x0000a20c, 0x00000000}, 779 {0x0000a20c, 0x00000000},
780 {0x0000a220, 0x00000000}, 780 {0x0000a220, 0x00000000},
781 {0x0000a224, 0x00000000}, 781 {0x0000a224, 0x00000000},
@@ -850,7 +850,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
850 {0x0000a7cc, 0x00000000}, 850 {0x0000a7cc, 0x00000000},
851 {0x0000a7d0, 0x00000000}, 851 {0x0000a7d0, 0x00000000},
852 {0x0000a7d4, 0x00000004}, 852 {0x0000a7d4, 0x00000004},
853 {0x0000a7dc, 0x00000001}, 853 {0x0000a7dc, 0x00000000},
854 {0x0000a7f0, 0x80000000}, 854 {0x0000a7f0, 0x80000000},
855 {0x0000a8d0, 0x004b6a8e}, 855 {0x0000a8d0, 0x004b6a8e},
856 {0x0000a8d4, 0x00000820}, 856 {0x0000a8d4, 0x00000820},
@@ -886,7 +886,7 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
886 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584}, 886 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
887 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800}, 887 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
888 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 888 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
889 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 889 {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
890 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 890 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
891 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 891 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
892 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 892 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
@@ -906,20 +906,20 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
906 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, 906 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
907 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, 907 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
908 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, 908 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
909 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, 909 {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
910 {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, 910 {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
911 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, 911 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
912 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, 912 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
913 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 913 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
914 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 914 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
915 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 915 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
916 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 916 {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
917 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 917 {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
918 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 918 {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
919 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 919 {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
920 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 920 {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
921 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 921 {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
922 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 922 {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1053,7 +1053,6 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1053 {0x00008044, 0x00000000}, 1053 {0x00008044, 0x00000000},
1054 {0x00008048, 0x00000000}, 1054 {0x00008048, 0x00000000},
1055 {0x0000804c, 0xffffffff}, 1055 {0x0000804c, 0xffffffff},
1056 {0x00008050, 0xffffffff},
1057 {0x00008054, 0x00000000}, 1056 {0x00008054, 0x00000000},
1058 {0x00008058, 0x00000000}, 1057 {0x00008058, 0x00000000},
1059 {0x0000805c, 0x000fc78f}, 1058 {0x0000805c, 0x000fc78f},
@@ -1117,9 +1116,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1117 {0x000081f8, 0x00000000}, 1116 {0x000081f8, 0x00000000},
1118 {0x000081fc, 0x00000000}, 1117 {0x000081fc, 0x00000000},
1119 {0x00008240, 0x00100000}, 1118 {0x00008240, 0x00100000},
1120 {0x00008244, 0x0010f424}, 1119 {0x00008244, 0x0010f400},
1121 {0x00008248, 0x00000800}, 1120 {0x00008248, 0x00000800},
1122 {0x0000824c, 0x0001e848}, 1121 {0x0000824c, 0x0001e800},
1123 {0x00008250, 0x00000000}, 1122 {0x00008250, 0x00000000},
1124 {0x00008254, 0x00000000}, 1123 {0x00008254, 0x00000000},
1125 {0x00008258, 0x00000000}, 1124 {0x00008258, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a56b2416e2f9..8a1888d02070 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -234,6 +234,7 @@ struct ath_buf {
234 dma_addr_t bf_daddr; /* physical addr of desc */ 234 dma_addr_t bf_daddr; /* physical addr of desc */
235 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */ 235 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
236 bool bf_stale; 236 bool bf_stale;
237 struct ieee80211_tx_rate rates[4];
237 struct ath_buf_state bf_state; 238 struct ath_buf_state bf_state;
238}; 239};
239 240
@@ -311,6 +312,7 @@ struct ath_rx_edma {
311struct ath_rx { 312struct ath_rx {
312 u8 defant; 313 u8 defant;
313 u8 rxotherant; 314 u8 rxotherant;
315 bool discard_next;
314 u32 *rxlink; 316 u32 *rxlink;
315 u32 num_pkts; 317 u32 num_pkts;
316 unsigned int rxfilter; 318 unsigned int rxfilter;
@@ -657,11 +659,10 @@ enum sc_op_flags {
657struct ath_rate_table; 659struct ath_rate_table;
658 660
659struct ath9k_vif_iter_data { 661struct ath9k_vif_iter_data {
660 const u8 *hw_macaddr; /* phy's hardware address, set 662 u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
661 * before starting iteration for
662 * valid bssid mask.
663 */
664 u8 mask[ETH_ALEN]; /* bssid mask */ 663 u8 mask[ETH_ALEN]; /* bssid mask */
664 bool has_hw_macaddr;
665
665 int naps; /* number of AP vifs */ 666 int naps; /* number of AP vifs */
666 int nmeshes; /* number of mesh vifs */ 667 int nmeshes; /* number of mesh vifs */
667 int nstations; /* number of station vifs */ 668 int nstations; /* number of station vifs */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 5f05c26d1ec4..2ff570f7f8ff 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -79,7 +79,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
79 u8 chainmask = ah->txchainmask; 79 u8 chainmask = ah->txchainmask;
80 u8 rate = 0; 80 u8 rate = 0;
81 81
82 sband = &sc->sbands[common->hw->conf.channel->band]; 82 sband = &sc->sbands[common->hw->conf.chandef.chan->band];
83 rate = sband->bitrates[rateidx].hw_value; 83 rate = sband->bitrates[rateidx].hw_value;
84 if (vif->bss_conf.use_short_preamble) 84 if (vif->bss_conf.use_short_preamble)
85 rate |= sband->bitrates[rateidx].hw_value_short; 85 rate |= sband->bitrates[rateidx].hw_value_short;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 1e8508530e98..7304e7585009 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -208,7 +208,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
208 return true; 208 return true;
209 209
210 ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n", 210 ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
211 currCal->calData->calType, conf->channel->center_freq); 211 currCal->calData->calType, conf->chandef.chan->center_freq);
212 212
213 ah->caldata->CalValid &= ~currCal->calData->calType; 213 ah->caldata->CalValid &= ~currCal->calData->calType;
214 currCal->calState = CAL_WAITING; 214 currCal->calState = CAL_WAITING;
@@ -369,7 +369,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
369 struct ieee80211_channel *c = chan->chan; 369 struct ieee80211_channel *c = chan->chan;
370 struct ath9k_hw_cal_data *caldata = ah->caldata; 370 struct ath9k_hw_cal_data *caldata = ah->caldata;
371 371
372 chan->channelFlags &= (~CHANNEL_CW_INT);
373 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 372 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
374 ath_dbg(common, CALIBRATE, 373 ath_dbg(common, CALIBRATE,
375 "NF did not complete in calibration window\n"); 374 "NF did not complete in calibration window\n");
@@ -384,7 +383,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
384 ath_dbg(common, CALIBRATE, 383 ath_dbg(common, CALIBRATE,
385 "noise floor failed detected; detected %d, threshold %d\n", 384 "noise floor failed detected; detected %d, threshold %d\n",
386 nf, nfThresh); 385 nf, nfThresh);
387 chan->channelFlags |= CHANNEL_CW_INT;
388 } 386 }
389 387
390 if (!caldata) { 388 if (!caldata) {
@@ -410,7 +408,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
410 int i, j; 408 int i, j;
411 409
412 ah->caldata->channel = chan->channel; 410 ah->caldata->channel = chan->channel;
413 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT; 411 ah->caldata->channelFlags = chan->channelFlags;
414 ah->caldata->chanmode = chan->chanmode; 412 ah->caldata->chanmode = chan->chanmode;
415 h = ah->caldata->nfCalHist; 413 h = ah->caldata->nfCalHist;
416 default_nf = ath9k_hw_get_default_nf(ah, chan); 414 default_nf = ath9k_hw_get_default_nf(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 60dcb6c22db9..3d70b8c2bcdd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -33,6 +33,12 @@ struct ar5416IniArray {
33 u32 ia_columns; 33 u32 ia_columns;
34}; 34};
35 35
36#define STATIC_INI_ARRAY(array) { \
37 .ia_array = (u32 *)(array), \
38 .ia_rows = ARRAY_SIZE(array), \
39 .ia_columns = ARRAY_SIZE(array[0]), \
40 }
41
36#define INIT_INI_ARRAY(iniarray, array) do { \ 42#define INIT_INI_ARRAY(iniarray, array) do { \
37 (iniarray)->ia_array = (u32 *)(array); \ 43 (iniarray)->ia_array = (u32 *)(array); \
38 (iniarray)->ia_rows = ARRAY_SIZE(array); \ 44 (iniarray)->ia_rows = ARRAY_SIZE(array); \
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 905f1b313961..344fdde1d7a3 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,20 +27,6 @@ MODULE_AUTHOR("Atheros Communications");
27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards."); 27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
28MODULE_LICENSE("Dual BSD/GPL"); 28MODULE_LICENSE("Dual BSD/GPL");
29 29
30int ath9k_cmn_padpos(__le16 frame_control)
31{
32 int padpos = 24;
33 if (ieee80211_has_a4(frame_control)) {
34 padpos += ETH_ALEN;
35 }
36 if (ieee80211_is_data_qos(frame_control)) {
37 padpos += IEEE80211_QOS_CTL_LEN;
38 }
39
40 return padpos;
41}
42EXPORT_SYMBOL(ath9k_cmn_padpos);
43
44int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb) 30int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
45{ 31{
46 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 32 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -133,13 +119,14 @@ EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
133struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 119struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
134 struct ath_hw *ah) 120 struct ath_hw *ah)
135{ 121{
136 struct ieee80211_channel *curchan = hw->conf.channel; 122 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
137 struct ath9k_channel *channel; 123 struct ath9k_channel *channel;
138 u8 chan_idx; 124 u8 chan_idx;
139 125
140 chan_idx = curchan->hw_value; 126 chan_idx = curchan->hw_value;
141 channel = &ah->channels[chan_idx]; 127 channel = &ah->channels[chan_idx];
142 ath9k_cmn_update_ichannel(channel, curchan, hw->conf.channel_type); 128 ath9k_cmn_update_ichannel(channel, curchan,
129 cfg80211_get_chandef_type(&hw->conf.chandef));
143 130
144 return channel; 131 return channel;
145} 132}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 050ca4a4850d..207d06995b15 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -40,9 +40,8 @@
40 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \ 40 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
41} while (0) 41} while (0)
42#define ATH_EP_RND(x, mul) \ 42#define ATH_EP_RND(x, mul) \
43 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 43 (((x) + ((mul)/2)) / (mul))
44 44
45int ath9k_cmn_padpos(__le16 frame_control);
46int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 45int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
47void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 46void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
48 struct ieee80211_channel *chan, 47 struct ieee80211_channel *chan,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3714b971d18e..e6307b86363a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -537,6 +537,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
537 PR("AMPDUs Completed:", a_completed); 537 PR("AMPDUs Completed:", a_completed);
538 PR("AMPDUs Retried: ", a_retries); 538 PR("AMPDUs Retried: ", a_retries);
539 PR("AMPDUs XRetried: ", a_xretries); 539 PR("AMPDUs XRetried: ", a_xretries);
540 PR("TXERR Filtered: ", txerr_filtered);
540 PR("FIFO Underrun: ", fifo_underrun); 541 PR("FIFO Underrun: ", fifo_underrun);
541 PR("TXOP Exceeded: ", xtxop); 542 PR("TXOP Exceeded: ", xtxop);
542 PR("TXTIMER Expiry: ", timer_exp); 543 PR("TXTIMER Expiry: ", timer_exp);
@@ -756,6 +757,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
756 TX_STAT_INC(qnum, completed); 757 TX_STAT_INC(qnum, completed);
757 } 758 }
758 759
760 if (ts->ts_status & ATH9K_TXERR_FILT)
761 TX_STAT_INC(qnum, txerr_filtered);
759 if (ts->ts_status & ATH9K_TXERR_FIFO) 762 if (ts->ts_status & ATH9K_TXERR_FIFO)
760 TX_STAT_INC(qnum, fifo_underrun); 763 TX_STAT_INC(qnum, fifo_underrun);
761 if (ts->ts_status & ATH9K_TXERR_XTXOP) 764 if (ts->ts_status & ATH9K_TXERR_XTXOP)
@@ -1909,6 +1912,7 @@ static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1909 AMKSTR(d_tx_desc_cfg_err), 1912 AMKSTR(d_tx_desc_cfg_err),
1910 AMKSTR(d_tx_data_underrun), 1913 AMKSTR(d_tx_data_underrun),
1911 AMKSTR(d_tx_delim_underrun), 1914 AMKSTR(d_tx_delim_underrun),
1915 "d_rx_crc_err",
1912 "d_rx_decrypt_crc_err", 1916 "d_rx_decrypt_crc_err",
1913 "d_rx_phy_err", 1917 "d_rx_phy_err",
1914 "d_rx_mic_err", 1918 "d_rx_mic_err",
@@ -1989,6 +1993,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
1989 AWDATA(data_underrun); 1993 AWDATA(data_underrun);
1990 AWDATA(delim_underrun); 1994 AWDATA(delim_underrun);
1991 1995
1996 AWDATA_RX(crc_err);
1992 AWDATA_RX(decrypt_crc_err); 1997 AWDATA_RX(decrypt_crc_err);
1993 AWDATA_RX(phy_err); 1998 AWDATA_RX(phy_err);
1994 AWDATA_RX(mic_err); 1999 AWDATA_RX(mic_err);
@@ -2067,7 +2072,7 @@ int ath9k_init_debug(struct ath_hw *ah)
2067 &fops_modal_eeprom); 2072 &fops_modal_eeprom);
2068 sc->rfs_chan_spec_scan = relay_open("spectral_scan", 2073 sc->rfs_chan_spec_scan = relay_open("spectral_scan",
2069 sc->debug.debugfs_phy, 2074 sc->debug.debugfs_phy,
2070 262144, 4, &rfs_spec_scan_cb, 2075 1024, 256, &rfs_spec_scan_cb,
2071 NULL); 2076 NULL);
2072 debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR, 2077 debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
2073 sc->debug.debugfs_phy, sc, 2078 sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 410d6d8f1aa7..794a7ec83a24 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -142,6 +142,7 @@ struct ath_interrupt_stats {
142 * @a_completed: Total AMPDUs completed 142 * @a_completed: Total AMPDUs completed
143 * @a_retries: No. of AMPDUs retried (SW) 143 * @a_retries: No. of AMPDUs retried (SW)
144 * @a_xretries: No. of AMPDUs dropped due to xretries 144 * @a_xretries: No. of AMPDUs dropped due to xretries
145 * @txerr_filtered: No. of frames with TXERR_FILT flag set.
145 * @fifo_underrun: FIFO underrun occurrences 146 * @fifo_underrun: FIFO underrun occurrences
146 Valid only for: 147 Valid only for:
147 - non-aggregate condition. 148 - non-aggregate condition.
@@ -168,6 +169,7 @@ struct ath_tx_stats {
168 u32 a_completed; 169 u32 a_completed;
169 u32 a_retries; 170 u32 a_retries;
170 u32 a_xretries; 171 u32 a_xretries;
172 u32 txerr_filtered;
171 u32 fifo_underrun; 173 u32 fifo_underrun;
172 u32 xtxop; 174 u32 xtxop;
173 u32 timer_exp; 175 u32 timer_exp;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index ecc81792f2dc..7187d3671512 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -55,12 +55,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
55 u8 rssi; 55 u8 rssi;
56 u16 dur; 56 u16 dur;
57 57
58 ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
59 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
60 ard->pulse_bw_info,
61 ard->pulse_length_pri, ard->rssi,
62 ard->pulse_length_ext, ard->ext_rssi);
63
64 /* 58 /*
65 * Only the last 2 bits of the BW info are relevant, they indicate 59 * Only the last 2 bits of the BW info are relevant, they indicate
66 * which channel the radar was detected in. 60 * which channel the radar was detected in.
@@ -193,9 +187,7 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
193 DFS_STAT_INC(sc, pulses_processed); 187 DFS_STAT_INC(sc, pulses_processed);
194 if (pd != NULL && pd->add_pulse(pd, &pe)) { 188 if (pd != NULL && pd->add_pulse(pd, &pe)) {
195 DFS_STAT_INC(sc, radar_detected); 189 DFS_STAT_INC(sc, radar_detected);
196 /* 190 ieee80211_radar_detected(sc->hw);
197 * TODO: forward radar event to DFS management layer
198 */
199 } 191 }
200 } 192 }
201} 193}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 55d28072adeb..b7611b7bbe43 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -105,6 +105,24 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
105 return count; 105 return count;
106} 106}
107 107
108static ssize_t write_file_simulate_radar(struct file *file,
109 const char __user *user_buf,
110 size_t count, loff_t *ppos)
111{
112 struct ath_softc *sc = file->private_data;
113
114 ieee80211_radar_detected(sc->hw);
115
116 return count;
117}
118
119static const struct file_operations fops_simulate_radar = {
120 .write = write_file_simulate_radar,
121 .open = simple_open,
122 .owner = THIS_MODULE,
123 .llseek = default_llseek,
124};
125
108static const struct file_operations fops_dfs_stats = { 126static const struct file_operations fops_dfs_stats = {
109 .read = read_file_dfs, 127 .read = read_file_dfs,
110 .write = write_file_dfs, 128 .write = write_file_dfs,
@@ -117,4 +135,6 @@ void ath9k_dfs_init_debug(struct ath_softc *sc)
117{ 135{
118 debugfs_create_file("dfs_stats", S_IRUSR, 136 debugfs_create_file("dfs_stats", S_IRUSR,
119 sc->debug.debugfs_phy, sc, &fops_dfs_stats); 137 sc->debug.debugfs_phy, sc, &fops_dfs_stats);
138 debugfs_create_file("dfs_simulate_radar", S_IWUSR,
139 sc->debug.debugfs_phy, sc, &fops_simulate_radar);
120} 140}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 73fe8d6db566..491305c81fce 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -19,6 +19,7 @@
19 19
20#include "dfs_pattern_detector.h" 20#include "dfs_pattern_detector.h"
21#include "dfs_pri_detector.h" 21#include "dfs_pri_detector.h"
22#include "ath9k.h"
22 23
23/* 24/*
24 * tolerated deviation of radar time stamp in usecs on both sides 25 * tolerated deviation of radar time stamp in usecs on both sides
@@ -142,6 +143,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
142{ 143{
143 u32 sz, i; 144 u32 sz, i;
144 struct channel_detector *cd; 145 struct channel_detector *cd;
146 struct ath_common *common = ath9k_hw_common(dpd->ah);
145 147
146 cd = kmalloc(sizeof(*cd), GFP_ATOMIC); 148 cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
147 if (cd == NULL) 149 if (cd == NULL)
@@ -165,7 +167,8 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
165 return cd; 167 return cd;
166 168
167fail: 169fail:
168 pr_err("failed to allocate channel_detector for freq=%d\n", freq); 170 ath_dbg(common, DFS,
171 "failed to allocate channel_detector for freq=%d\n", freq);
169 channel_detector_exit(dpd, cd); 172 channel_detector_exit(dpd, cd);
170 return NULL; 173 return NULL;
171} 174}
@@ -216,34 +219,34 @@ static bool
216dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) 219dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
217{ 220{
218 u32 i; 221 u32 i;
219 bool ts_wraparound;
220 struct channel_detector *cd; 222 struct channel_detector *cd;
221 223
222 if (dpd->region == NL80211_DFS_UNSET) { 224 /*
223 /* 225 * pulses received for a non-supported or un-initialized
224 * pulses received for a non-supported or un-initialized 226 * domain are treated as detected radars for fail-safety
225 * domain are treated as detected radars 227 */
226 */ 228 if (dpd->region == NL80211_DFS_UNSET)
227 return true; 229 return true;
228 }
229 230
230 cd = channel_detector_get(dpd, event->freq); 231 cd = channel_detector_get(dpd, event->freq);
231 if (cd == NULL) 232 if (cd == NULL)
232 return false; 233 return false;
233 234
234 ts_wraparound = (event->ts < dpd->last_pulse_ts);
235 dpd->last_pulse_ts = event->ts; 235 dpd->last_pulse_ts = event->ts;
236 if (ts_wraparound) { 236 /* reset detector on time stamp wraparound, caused by TSF reset */
237 /* 237 if (event->ts < dpd->last_pulse_ts)
238 * reset detector on time stamp wraparound
239 * with monotonic time stamps, this should never happen
240 */
241 pr_warn("DFS: time stamp wraparound detected, resetting\n");
242 dpd_reset(dpd); 238 dpd_reset(dpd);
243 } 239
244 /* do type individual pattern matching */ 240 /* do type individual pattern matching */
245 for (i = 0; i < dpd->num_radar_types; i++) { 241 for (i = 0; i < dpd->num_radar_types; i++) {
246 if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) { 242 struct pri_detector *pd = cd->detectors[i];
243 struct pri_sequence *ps = pd->add_pulse(pd, event);
244 if (ps != NULL) {
245 ath_dbg(ath9k_hw_common(dpd->ah), DFS,
246 "DFS: radar found on freq=%d: id=%d, pri=%d, "
247 "count=%d, count_false=%d\n",
248 event->freq, pd->rs->type_id,
249 ps->pri, ps->count, ps->count_falses);
247 channel_detector_reset(dpd, cd); 250 channel_detector_reset(dpd, cd);
248 return true; 251 return true;
249 } 252 }
@@ -285,9 +288,10 @@ static struct dfs_pattern_detector default_dpd = {
285}; 288};
286 289
287struct dfs_pattern_detector * 290struct dfs_pattern_detector *
288dfs_pattern_detector_init(enum nl80211_dfs_regions region) 291dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
289{ 292{
290 struct dfs_pattern_detector *dpd; 293 struct dfs_pattern_detector *dpd;
294 struct ath_common *common = ath9k_hw_common(ah);
291 295
292 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); 296 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
293 if (dpd == NULL) 297 if (dpd == NULL)
@@ -296,10 +300,11 @@ dfs_pattern_detector_init(enum nl80211_dfs_regions region)
296 *dpd = default_dpd; 300 *dpd = default_dpd;
297 INIT_LIST_HEAD(&dpd->channel_detectors); 301 INIT_LIST_HEAD(&dpd->channel_detectors);
298 302
303 dpd->ah = ah;
299 if (dpd->set_dfs_domain(dpd, region)) 304 if (dpd->set_dfs_domain(dpd, region))
300 return dpd; 305 return dpd;
301 306
302 pr_err("Could not set DFS domain to %d. ", region); 307 ath_dbg(common, DFS,"Could not set DFS domain to %d", region);
303 kfree(dpd); 308 kfree(dpd);
304 return NULL; 309 return NULL;
305} 310}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
index cda52f39f28a..90a5abcc4265 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -80,6 +80,8 @@ struct dfs_pattern_detector {
80 enum nl80211_dfs_regions region; 80 enum nl80211_dfs_regions region;
81 u8 num_radar_types; 81 u8 num_radar_types;
82 u64 last_pulse_ts; 82 u64 last_pulse_ts;
83 /* needed for ath_dbg() */
84 struct ath_hw *ah;
83 85
84 const struct radar_detector_specs *radar_spec; 86 const struct radar_detector_specs *radar_spec;
85 struct list_head channel_detectors; 87 struct list_head channel_detectors;
@@ -92,10 +94,10 @@ struct dfs_pattern_detector {
92 */ 94 */
93#if defined(CONFIG_ATH9K_DFS_CERTIFIED) 95#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
94extern struct dfs_pattern_detector * 96extern struct dfs_pattern_detector *
95dfs_pattern_detector_init(enum nl80211_dfs_regions region); 97dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
96#else 98#else
97static inline struct dfs_pattern_detector * 99static inline struct dfs_pattern_detector *
98dfs_pattern_detector_init(enum nl80211_dfs_regions region) 100dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
99{ 101{
100 return NULL; 102 return NULL;
101} 103}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 5e48c5515b8c..5ba4b6fe37c0 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -23,28 +23,6 @@
23#include "dfs_debug.h" 23#include "dfs_debug.h"
24 24
25/** 25/**
26 * struct pri_sequence - sequence of pulses matching one PRI
27 * @head: list_head
28 * @pri: pulse repetition interval (PRI) in usecs
29 * @dur: duration of sequence in usecs
30 * @count: number of pulses in this sequence
31 * @count_falses: number of not matching pulses in this sequence
32 * @first_ts: time stamp of first pulse in usecs
33 * @last_ts: time stamp of last pulse in usecs
34 * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
35 */
36struct pri_sequence {
37 struct list_head head;
38 u32 pri;
39 u32 dur;
40 u32 count;
41 u32 count_falses;
42 u64 first_ts;
43 u64 last_ts;
44 u64 deadline_ts;
45};
46
47/**
48 * struct pulse_elem - elements in pulse queue 26 * struct pulse_elem - elements in pulse queue
49 * @ts: time stamp in usecs 27 * @ts: time stamp in usecs
50 */ 28 */
@@ -393,8 +371,8 @@ static void pri_detector_exit(struct pri_detector *de)
393 kfree(de); 371 kfree(de);
394} 372}
395 373
396static bool pri_detector_add_pulse(struct pri_detector *de, 374static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
397 struct pulse_event *event) 375 struct pulse_event *event)
398{ 376{
399 u32 max_updated_seq; 377 u32 max_updated_seq;
400 struct pri_sequence *ps; 378 struct pri_sequence *ps;
@@ -403,38 +381,33 @@ static bool pri_detector_add_pulse(struct pri_detector *de,
403 381
404 /* ignore pulses not within width range */ 382 /* ignore pulses not within width range */
405 if ((rs->width_min > event->width) || (rs->width_max < event->width)) 383 if ((rs->width_min > event->width) || (rs->width_max < event->width))
406 return false; 384 return NULL;
407 385
408 if ((ts - de->last_ts) < rs->max_pri_tolerance) 386 if ((ts - de->last_ts) < rs->max_pri_tolerance)
409 /* if delta to last pulse is too short, don't use this pulse */ 387 /* if delta to last pulse is too short, don't use this pulse */
410 return false; 388 return NULL;
411 de->last_ts = ts; 389 de->last_ts = ts;
412 390
413 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts); 391 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
414 392
415 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) { 393 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
416 pr_err("failed to create pulse sequences\n");
417 pri_detector_reset(de, ts); 394 pri_detector_reset(de, ts);
418 return false; 395 return false;
419 } 396 }
420 397
421 ps = pseq_handler_check_detection(de); 398 ps = pseq_handler_check_detection(de);
422 399
423 if (ps != NULL) { 400 if (ps == NULL)
424 pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n", 401 pulse_queue_enqueue(de, ts);
425 ps->pri, ps->count, ps->count_falses); 402
426 pri_detector_reset(de, ts); 403 return ps;
427 return true;
428 }
429 pulse_queue_enqueue(de, ts);
430 return false;
431} 404}
432 405
433struct pri_detector * 406struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
434pri_detector_init(const struct radar_detector_specs *rs)
435{ 407{
436 struct pri_detector *de; 408 struct pri_detector *de;
437 de = kzalloc(sizeof(*de), GFP_KERNEL); 409
410 de = kzalloc(sizeof(*de), GFP_ATOMIC);
438 if (de == NULL) 411 if (de == NULL)
439 return NULL; 412 return NULL;
440 de->exit = pri_detector_exit; 413 de->exit = pri_detector_exit;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
index 81cde9f28e44..723962d1abc6 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
@@ -20,9 +20,31 @@
20#include <linux/list.h> 20#include <linux/list.h>
21 21
22/** 22/**
23 * struct pri_sequence - sequence of pulses matching one PRI
24 * @head: list_head
25 * @pri: pulse repetition interval (PRI) in usecs
26 * @dur: duration of sequence in usecs
27 * @count: number of pulses in this sequence
28 * @count_falses: number of not matching pulses in this sequence
29 * @first_ts: time stamp of first pulse in usecs
30 * @last_ts: time stamp of last pulse in usecs
31 * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
32 */
33struct pri_sequence {
34 struct list_head head;
35 u32 pri;
36 u32 dur;
37 u32 count;
38 u32 count_falses;
39 u64 first_ts;
40 u64 last_ts;
41 u64 deadline_ts;
42};
43
44/**
23 * struct pri_detector - PRI detector element for a dedicated radar type 45 * struct pri_detector - PRI detector element for a dedicated radar type
24 * @exit(): destructor 46 * @exit(): destructor
25 * @add_pulse(): add pulse event, returns true if pattern was detected 47 * @add_pulse(): add pulse event, returns pri_sequence if pattern was detected
26 * @reset(): clear states and reset to given time stamp 48 * @reset(): clear states and reset to given time stamp
27 * @rs: detector specs for this detector element 49 * @rs: detector specs for this detector element
28 * @last_ts: last pulse time stamp considered for this element in usecs 50 * @last_ts: last pulse time stamp considered for this element in usecs
@@ -34,7 +56,8 @@
34 */ 56 */
35struct pri_detector { 57struct pri_detector {
36 void (*exit) (struct pri_detector *de); 58 void (*exit) (struct pri_detector *de);
37 bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e); 59 struct pri_sequence *
60 (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
38 void (*reset) (struct pri_detector *de, u64 ts); 61 void (*reset) (struct pri_detector *de, u64 ts);
39 62
40/* private: internal use only */ 63/* private: internal use only */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index d0ce1f5bba10..f13f458dd656 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -308,7 +308,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
308 while(skb) { 308 while(skb) {
309 hdr = (struct ieee80211_hdr *) skb->data; 309 hdr = (struct ieee80211_hdr *) skb->data;
310 310
311 padpos = ath9k_cmn_padpos(hdr->frame_control); 311 padpos = ieee80211_hdrlen(hdr->frame_control);
312 padsize = padpos & 3; 312 padsize = padpos & 3;
313 if (padsize && skb->len > padpos) { 313 if (padsize && skb->len > padpos) {
314 if (skb_headroom(skb) < padsize) { 314 if (skb_headroom(skb) < padsize) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a8016d70088a..0743a47cef8f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -190,7 +190,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
190{ 190{
191 struct ath_hw *ah = priv->ah; 191 struct ath_hw *ah = priv->ah;
192 struct ath_common *common = ath9k_hw_common(ah); 192 struct ath_common *common = ath9k_hw_common(ah);
193 struct ieee80211_channel *channel = priv->hw->conf.channel; 193 struct ieee80211_channel *channel = priv->hw->conf.chandef.chan;
194 struct ath9k_hw_cal_data *caldata = NULL; 194 struct ath9k_hw_cal_data *caldata = NULL;
195 enum htc_phymode mode; 195 enum htc_phymode mode;
196 __be16 htc_mode; 196 __be16 htc_mode;
@@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
250 struct ath_common *common = ath9k_hw_common(ah); 250 struct ath_common *common = ath9k_hw_common(ah);
251 struct ieee80211_conf *conf = &common->hw->conf; 251 struct ieee80211_conf *conf = &common->hw->conf;
252 bool fastcc; 252 bool fastcc;
253 struct ieee80211_channel *channel = hw->conf.channel; 253 struct ieee80211_channel *channel = hw->conf.chandef.chan;
254 struct ath9k_hw_cal_data *caldata = NULL; 254 struct ath9k_hw_cal_data *caldata = NULL;
255 enum htc_phymode mode; 255 enum htc_phymode mode;
256 __be16 htc_mode; 256 __be16 htc_mode;
@@ -602,7 +602,7 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
602 u32 caps = 0; 602 u32 caps = 0;
603 int i, j; 603 int i, j;
604 604
605 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band]; 605 sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
606 606
607 for (i = 0, j = 0; i < sband->n_bitrates; i++) { 607 for (i = 0, j = 0; i < sband->n_bitrates; i++) {
608 if (sta->supp_rates[sband->band] & BIT(i)) { 608 if (sta->supp_rates[sband->band] & BIT(i)) {
@@ -866,7 +866,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw,
866 hdr = (struct ieee80211_hdr *) skb->data; 866 hdr = (struct ieee80211_hdr *) skb->data;
867 867
868 /* Add the padding after the header if this is not already done */ 868 /* Add the padding after the header if this is not already done */
869 padpos = ath9k_cmn_padpos(hdr->frame_control); 869 padpos = ieee80211_hdrlen(hdr->frame_control);
870 padsize = padpos & 3; 870 padsize = padpos & 3;
871 if (padsize && skb->len > padpos) { 871 if (padsize && skb->len > padpos) {
872 if (skb_headroom(skb) < padsize) { 872 if (skb_headroom(skb) < padsize) {
@@ -904,7 +904,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
904 struct ath9k_htc_priv *priv = hw->priv; 904 struct ath9k_htc_priv *priv = hw->priv;
905 struct ath_hw *ah = priv->ah; 905 struct ath_hw *ah = priv->ah;
906 struct ath_common *common = ath9k_hw_common(ah); 906 struct ath_common *common = ath9k_hw_common(ah);
907 struct ieee80211_channel *curchan = hw->conf.channel; 907 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
908 struct ath9k_channel *init_channel; 908 struct ath9k_channel *init_channel;
909 int ret = 0; 909 int ret = 0;
910 enum htc_phymode mode; 910 enum htc_phymode mode;
@@ -1193,15 +1193,17 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1193 } 1193 }
1194 1194
1195 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) { 1195 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
1196 struct ieee80211_channel *curchan = hw->conf.channel; 1196 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
1197 enum nl80211_channel_type channel_type =
1198 cfg80211_get_chandef_type(&hw->conf.chandef);
1197 int pos = curchan->hw_value; 1199 int pos = curchan->hw_value;
1198 1200
1199 ath_dbg(common, CONFIG, "Set channel: %d MHz\n", 1201 ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
1200 curchan->center_freq); 1202 curchan->center_freq);
1201 1203
1202 ath9k_cmn_update_ichannel(&priv->ah->channels[pos], 1204 ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
1203 hw->conf.channel, 1205 hw->conf.chandef.chan,
1204 hw->conf.channel_type); 1206 channel_type);
1205 1207
1206 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1208 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1207 ath_err(common, "Unable to set channel\n"); 1209 ath_err(common, "Unable to set channel\n");
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index bd8251c1c749..6bd0e92ea2aa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -490,7 +490,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
490 if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI) 490 if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
491 rate->flags |= IEEE80211_TX_RC_SHORT_GI; 491 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
492 } else { 492 } else {
493 if (cur_conf->channel->band == IEEE80211_BAND_5GHZ) 493 if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
494 rate->idx += 4; /* No CCK rates */ 494 rate->idx += 4; /* No CCK rates */
495 } 495 }
496 496
@@ -939,7 +939,7 @@ static void ath9k_process_rate(struct ieee80211_hw *hw,
939 return; 939 return;
940 } 940 }
941 941
942 band = hw->conf.channel->band; 942 band = hw->conf.chandef.chan->band;
943 sband = hw->wiphy->bands[band]; 943 sband = hw->wiphy->bands[band];
944 944
945 for (i = 0; i < sband->n_bitrates; i++) { 945 for (i = 0; i < sband->n_bitrates; i++) {
@@ -966,7 +966,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
966 struct sk_buff *skb = rxbuf->skb; 966 struct sk_buff *skb = rxbuf->skb;
967 struct ath_common *common = ath9k_hw_common(priv->ah); 967 struct ath_common *common = ath9k_hw_common(priv->ah);
968 struct ath_htc_rx_status *rxstatus; 968 struct ath_htc_rx_status *rxstatus;
969 int hdrlen, padpos, padsize; 969 int hdrlen, padsize;
970 int last_rssi = ATH_RSSI_DUMMY_MARKER; 970 int last_rssi = ATH_RSSI_DUMMY_MARKER;
971 __le16 fc; 971 __le16 fc;
972 972
@@ -996,11 +996,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
996 fc = hdr->frame_control; 996 fc = hdr->frame_control;
997 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 997 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
998 998
999 padpos = ath9k_cmn_padpos(fc); 999 padsize = hdrlen & 3;
1000 1000 if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
1001 padsize = padpos & 3; 1001 memmove(skb->data + padsize, skb->data, hdrlen);
1002 if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
1003 memmove(skb->data + padsize, skb->data, padpos);
1004 skb_pull(skb, padsize); 1002 skb_pull(skb, padsize);
1005 } 1003 }
1006 1004
@@ -1082,8 +1080,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1082 } 1080 }
1083 1081
1084 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 1082 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
1085 rx_status->band = hw->conf.channel->band; 1083 rx_status->band = hw->conf.chandef.chan->band;
1086 rx_status->freq = hw->conf.channel->center_freq; 1084 rx_status->freq = hw->conf.chandef.chan->center_freq;
1087 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; 1085 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
1088 rx_status->antenna = rxbuf->rxstatus.rs_antenna; 1086 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
1089 rx_status->flag |= RX_FLAG_MACTIME_END; 1087 rx_status->flag |= RX_FLAG_MACTIME_END;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 07e25260c31d..7f25da8444fe 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -139,7 +139,7 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
139 clockrate = 117; 139 clockrate = 117;
140 else if (!ah->curchan) /* should really check for CCK instead */ 140 else if (!ah->curchan) /* should really check for CCK instead */
141 clockrate = ATH9K_CLOCK_RATE_CCK; 141 clockrate = ATH9K_CLOCK_RATE_CCK;
142 else if (conf->channel->band == IEEE80211_BAND_2GHZ) 142 else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
143 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; 143 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
144 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) 144 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
145 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; 145 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
@@ -1100,7 +1100,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1100 } 1100 }
1101 1101
1102 /* As defined by IEEE 802.11-2007 17.3.8.6 */ 1102 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1103 acktimeout = slottime + sifstime + 3 * ah->coverage_class + ack_offset; 1103 slottime += 3 * ah->coverage_class;
1104 acktimeout = slottime + sifstime + ack_offset;
1104 ctstimeout = acktimeout; 1105 ctstimeout = acktimeout;
1105 1106
1106 /* 1107 /*
@@ -1110,7 +1111,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1110 * BA frames in some implementations, but it has been found to fix ACK 1111 * BA frames in some implementations, but it has been found to fix ACK
1111 * timeout issues in other cases as well. 1112 * timeout issues in other cases as well.
1112 */ 1113 */
1113 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ && 1114 if (conf->chandef.chan &&
1115 conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
1114 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) { 1116 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
1115 acktimeout += 64 - sifstime - ah->slottime; 1117 acktimeout += 64 - sifstime - ah->slottime;
1116 ctstimeout += 48 - sifstime - ah->slottime; 1118 ctstimeout += 48 - sifstime - ah->slottime;
@@ -1669,6 +1671,103 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1669} 1671}
1670EXPORT_SYMBOL(ath9k_hw_check_alive); 1672EXPORT_SYMBOL(ath9k_hw_check_alive);
1671 1673
1674static void ath9k_hw_init_mfp(struct ath_hw *ah)
1675{
1676 /* Setup MFP options for CCMP */
1677 if (AR_SREV_9280_20_OR_LATER(ah)) {
1678 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
1679 * frames when constructing CCMP AAD. */
1680 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
1681 0xc7ff);
1682 ah->sw_mgmt_crypto = false;
1683 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1684 /* Disable hardware crypto for management frames */
1685 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
1686 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
1687 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1688 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
1689 ah->sw_mgmt_crypto = true;
1690 } else {
1691 ah->sw_mgmt_crypto = true;
1692 }
1693}
1694
1695static void ath9k_hw_reset_opmode(struct ath_hw *ah,
1696 u32 macStaId1, u32 saveDefAntenna)
1697{
1698 struct ath_common *common = ath9k_hw_common(ah);
1699
1700 ENABLE_REGWRITE_BUFFER(ah);
1701
1702 REG_RMW(ah, AR_STA_ID1, macStaId1
1703 | AR_STA_ID1_RTS_USE_DEF
1704 | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1705 | ah->sta_id1_defaults,
1706 ~AR_STA_ID1_SADH_MASK);
1707 ath_hw_setbssidmask(common);
1708 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1709 ath9k_hw_write_associd(ah);
1710 REG_WRITE(ah, AR_ISR, ~0);
1711 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1712
1713 REGWRITE_BUFFER_FLUSH(ah);
1714
1715 ath9k_hw_set_operating_mode(ah, ah->opmode);
1716}
1717
1718static void ath9k_hw_init_queues(struct ath_hw *ah)
1719{
1720 int i;
1721
1722 ENABLE_REGWRITE_BUFFER(ah);
1723
1724 for (i = 0; i < AR_NUM_DCU; i++)
1725 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1726
1727 REGWRITE_BUFFER_FLUSH(ah);
1728
1729 ah->intr_txqs = 0;
1730 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1731 ath9k_hw_resettxqueue(ah, i);
1732}
1733
1734/*
1735 * For big endian systems turn on swapping for descriptors
1736 */
1737static void ath9k_hw_init_desc(struct ath_hw *ah)
1738{
1739 struct ath_common *common = ath9k_hw_common(ah);
1740
1741 if (AR_SREV_9100(ah)) {
1742 u32 mask;
1743 mask = REG_READ(ah, AR_CFG);
1744 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1745 ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
1746 mask);
1747 } else {
1748 mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1749 REG_WRITE(ah, AR_CFG, mask);
1750 ath_dbg(common, RESET, "Setting CFG 0x%x\n",
1751 REG_READ(ah, AR_CFG));
1752 }
1753 } else {
1754 if (common->bus_ops->ath_bus_type == ATH_USB) {
1755 /* Configure AR9271 target WLAN */
1756 if (AR_SREV_9271(ah))
1757 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1758 else
1759 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1760 }
1761#ifdef __BIG_ENDIAN
1762 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
1763 AR_SREV_9550(ah))
1764 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1765 else
1766 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1767#endif
1768 }
1769}
1770
1672/* 1771/*
1673 * Fast channel change: 1772 * Fast channel change:
1674 * (Change synthesizer based on channel freq without resetting chip) 1773 * (Change synthesizer based on channel freq without resetting chip)
@@ -1746,7 +1845,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1746 u32 saveDefAntenna; 1845 u32 saveDefAntenna;
1747 u32 macStaId1; 1846 u32 macStaId1;
1748 u64 tsf = 0; 1847 u64 tsf = 0;
1749 int i, r; 1848 int r;
1750 bool start_mci_reset = false; 1849 bool start_mci_reset = false;
1751 bool save_fullsleep = ah->chip_fullsleep; 1850 bool save_fullsleep = ah->chip_fullsleep;
1752 1851
@@ -1763,10 +1862,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1763 ath9k_hw_getnf(ah, ah->curchan); 1862 ath9k_hw_getnf(ah, ah->curchan);
1764 1863
1765 ah->caldata = caldata; 1864 ah->caldata = caldata;
1766 if (caldata && 1865 if (caldata && (chan->channel != caldata->channel ||
1767 (chan->channel != caldata->channel || 1866 chan->channelFlags != caldata->channelFlags)) {
1768 (chan->channelFlags & ~CHANNEL_CW_INT) !=
1769 (caldata->channelFlags & ~CHANNEL_CW_INT))) {
1770 /* Operating channel changed, reset channel calibration data */ 1867 /* Operating channel changed, reset channel calibration data */
1771 memset(caldata, 0, sizeof(*caldata)); 1868 memset(caldata, 0, sizeof(*caldata));
1772 ath9k_init_nfcal_hist_buffer(ah, chan); 1869 ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -1853,22 +1950,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1853 ath9k_hw_settsf64(ah, tsf); 1950 ath9k_hw_settsf64(ah, tsf);
1854 } 1951 }
1855 1952
1856 /* Setup MFP options for CCMP */ 1953 ath9k_hw_init_mfp(ah);
1857 if (AR_SREV_9280_20_OR_LATER(ah)) {
1858 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
1859 * frames when constructing CCMP AAD. */
1860 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
1861 0xc7ff);
1862 ah->sw_mgmt_crypto = false;
1863 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1864 /* Disable hardware crypto for management frames */
1865 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
1866 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
1867 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1868 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
1869 ah->sw_mgmt_crypto = true;
1870 } else
1871 ah->sw_mgmt_crypto = true;
1872 1954
1873 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1955 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1874 ath9k_hw_set_delta_slope(ah, chan); 1956 ath9k_hw_set_delta_slope(ah, chan);
@@ -1876,24 +1958,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1876 ath9k_hw_spur_mitigate_freq(ah, chan); 1958 ath9k_hw_spur_mitigate_freq(ah, chan);
1877 ah->eep_ops->set_board_values(ah, chan); 1959 ah->eep_ops->set_board_values(ah, chan);
1878 1960
1879 ENABLE_REGWRITE_BUFFER(ah); 1961 ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
1880
1881 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
1882 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
1883 | macStaId1
1884 | AR_STA_ID1_RTS_USE_DEF
1885 | (ah->config.
1886 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1887 | ah->sta_id1_defaults);
1888 ath_hw_setbssidmask(common);
1889 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1890 ath9k_hw_write_associd(ah);
1891 REG_WRITE(ah, AR_ISR, ~0);
1892 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1893
1894 REGWRITE_BUFFER_FLUSH(ah);
1895
1896 ath9k_hw_set_operating_mode(ah, ah->opmode);
1897 1962
1898 r = ath9k_hw_rf_set_freq(ah, chan); 1963 r = ath9k_hw_rf_set_freq(ah, chan);
1899 if (r) 1964 if (r)
@@ -1901,17 +1966,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1901 1966
1902 ath9k_hw_set_clockrate(ah); 1967 ath9k_hw_set_clockrate(ah);
1903 1968
1904 ENABLE_REGWRITE_BUFFER(ah); 1969 ath9k_hw_init_queues(ah);
1905
1906 for (i = 0; i < AR_NUM_DCU; i++)
1907 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1908
1909 REGWRITE_BUFFER_FLUSH(ah);
1910
1911 ah->intr_txqs = 0;
1912 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1913 ath9k_hw_resettxqueue(ah, i);
1914
1915 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 1970 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
1916 ath9k_hw_ani_cache_ini_regs(ah); 1971 ath9k_hw_ani_cache_ini_regs(ah);
1917 ath9k_hw_init_qos(ah); 1972 ath9k_hw_init_qos(ah);
@@ -1966,38 +2021,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1966 2021
1967 REGWRITE_BUFFER_FLUSH(ah); 2022 REGWRITE_BUFFER_FLUSH(ah);
1968 2023
1969 /* 2024 ath9k_hw_init_desc(ah);
1970 * For big endian systems turn on swapping for descriptors
1971 */
1972 if (AR_SREV_9100(ah)) {
1973 u32 mask;
1974 mask = REG_READ(ah, AR_CFG);
1975 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1976 ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
1977 mask);
1978 } else {
1979 mask =
1980 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1981 REG_WRITE(ah, AR_CFG, mask);
1982 ath_dbg(common, RESET, "Setting CFG 0x%x\n",
1983 REG_READ(ah, AR_CFG));
1984 }
1985 } else {
1986 if (common->bus_ops->ath_bus_type == ATH_USB) {
1987 /* Configure AR9271 target WLAN */
1988 if (AR_SREV_9271(ah))
1989 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1990 else
1991 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1992 }
1993#ifdef __BIG_ENDIAN
1994 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
1995 AR_SREV_9550(ah))
1996 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1997 else
1998 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1999#endif
2000 }
2001 2025
2002 if (ath9k_hw_btcoex_is_enabled(ah)) 2026 if (ath9k_hw_btcoex_is_enabled(ah))
2003 ath9k_hw_btcoex_enable(ah); 2027 ath9k_hw_btcoex_enable(ah);
@@ -2010,7 +2034,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2010 2034
2011 if (AR_SREV_9300_20_OR_LATER(ah)) { 2035 if (AR_SREV_9300_20_OR_LATER(ah)) {
2012 ar9003_hw_bb_watchdog_config(ah); 2036 ar9003_hw_bb_watchdog_config(ah);
2013
2014 ar9003_hw_disable_phy_restart(ah); 2037 ar9003_hw_disable_phy_restart(ah);
2015 } 2038 }
2016 2039
@@ -2358,8 +2381,11 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
2358{ 2381{
2359 2382
2360 switch (ah->hw_version.macVersion) { 2383 switch (ah->hw_version.macVersion) {
2384 /* for temporary testing DFS with 9280 */
2385 case AR_SREV_VERSION_9280:
2361 /* AR9580 will likely be our first target to get testing on */ 2386 /* AR9580 will likely be our first target to get testing on */
2362 case AR_SREV_VERSION_9580: 2387 case AR_SREV_VERSION_9580:
2388 return true;
2363 default: 2389 default:
2364 return false; 2390 return false;
2365 } 2391 }
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 784e81ccb903..ae3034374bc4 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -363,7 +363,6 @@ enum ath9k_int {
363 ATH9K_INT_NOCARD = 0xffffffff 363 ATH9K_INT_NOCARD = 0xffffffff
364}; 364};
365 365
366#define CHANNEL_CW_INT 0x00002
367#define CHANNEL_CCK 0x00020 366#define CHANNEL_CCK 0x00020
368#define CHANNEL_OFDM 0x00040 367#define CHANNEL_OFDM 0x00040
369#define CHANNEL_2GHZ 0x00080 368#define CHANNEL_2GHZ 0x00080
@@ -848,14 +847,7 @@ struct ath_hw {
848 struct ath_hw_ops ops; 847 struct ath_hw_ops ops;
849 848
850 /* Used to program the radio on non single-chip devices */ 849 /* Used to program the radio on non single-chip devices */
851 u32 *analogBank0Data;
852 u32 *analogBank1Data;
853 u32 *analogBank2Data;
854 u32 *analogBank3Data;
855 u32 *analogBank6Data; 850 u32 *analogBank6Data;
856 u32 *analogBank6TPCData;
857 u32 *analogBank7Data;
858 u32 *bank6Temp;
859 851
860 int coverage_class; 852 int coverage_class;
861 u32 slottime; 853 u32 slottime;
@@ -886,14 +878,8 @@ struct ath_hw {
886 878
887 struct ar5416IniArray iniModes; 879 struct ar5416IniArray iniModes;
888 struct ar5416IniArray iniCommon; 880 struct ar5416IniArray iniCommon;
889 struct ar5416IniArray iniBank0;
890 struct ar5416IniArray iniBB_RfGain; 881 struct ar5416IniArray iniBB_RfGain;
891 struct ar5416IniArray iniBank1;
892 struct ar5416IniArray iniBank2;
893 struct ar5416IniArray iniBank3;
894 struct ar5416IniArray iniBank6; 882 struct ar5416IniArray iniBank6;
895 struct ar5416IniArray iniBank6TPC;
896 struct ar5416IniArray iniBank7;
897 struct ar5416IniArray iniAddac; 883 struct ar5416IniArray iniAddac;
898 struct ar5416IniArray iniPcieSerdes; 884 struct ar5416IniArray iniPcieSerdes;
899#ifdef CONFIG_PM_SLEEP 885#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index af932c9444de..0237b2868961 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -319,6 +319,10 @@ static void ath9k_reg_notifier(struct wiphy *wiphy,
319 ath9k_ps_wakeup(sc); 319 ath9k_ps_wakeup(sc);
320 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false); 320 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
321 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; 321 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
322 /* synchronize DFS detector if regulatory domain changed */
323 if (sc->dfs_detector != NULL)
324 sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
325 request->dfs_region);
322 ath9k_ps_restore(sc); 326 ath9k_ps_restore(sc);
323 } 327 }
324} 328}
@@ -573,7 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
573 atomic_set(&ah->intr_ref_cnt, -1); 577 atomic_set(&ah->intr_ref_cnt, -1);
574 sc->sc_ah = ah; 578 sc->sc_ah = ah;
575 579
576 sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET); 580 sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
577 581
578 if (!pdata) { 582 if (!pdata) {
579 ah->ah_flags |= AH_USE_EEPROM; 583 ah->ah_flags |= AH_USE_EEPROM;
@@ -727,12 +731,28 @@ static const struct ieee80211_iface_limit if_limits[] = {
727 BIT(NL80211_IFTYPE_P2P_GO) }, 731 BIT(NL80211_IFTYPE_P2P_GO) },
728}; 732};
729 733
730static const struct ieee80211_iface_combination if_comb = { 734
731 .limits = if_limits, 735static const struct ieee80211_iface_limit if_dfs_limits[] = {
732 .n_limits = ARRAY_SIZE(if_limits), 736 { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
733 .max_interfaces = 2048, 737};
734 .num_different_channels = 1, 738
735 .beacon_int_infra_match = true, 739static const struct ieee80211_iface_combination if_comb[] = {
740 {
741 .limits = if_limits,
742 .n_limits = ARRAY_SIZE(if_limits),
743 .max_interfaces = 2048,
744 .num_different_channels = 1,
745 .beacon_int_infra_match = true,
746 },
747 {
748 .limits = if_dfs_limits,
749 .n_limits = ARRAY_SIZE(if_dfs_limits),
750 .max_interfaces = 1,
751 .num_different_channels = 1,
752 .beacon_int_infra_match = true,
753 .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) |
754 BIT(NL80211_CHAN_HT20),
755 }
736}; 756};
737 757
738void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 758void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
@@ -746,7 +766,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
746 IEEE80211_HW_SUPPORTS_PS | 766 IEEE80211_HW_SUPPORTS_PS |
747 IEEE80211_HW_PS_NULLFUNC_STACK | 767 IEEE80211_HW_PS_NULLFUNC_STACK |
748 IEEE80211_HW_SPECTRUM_MGMT | 768 IEEE80211_HW_SPECTRUM_MGMT |
749 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 769 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
770 IEEE80211_HW_SUPPORTS_RC_TABLE;
750 771
751 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 772 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
752 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 773 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@@ -763,8 +784,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
763 BIT(NL80211_IFTYPE_ADHOC) | 784 BIT(NL80211_IFTYPE_ADHOC) |
764 BIT(NL80211_IFTYPE_MESH_POINT); 785 BIT(NL80211_IFTYPE_MESH_POINT);
765 786
766 hw->wiphy->iface_combinations = &if_comb; 787 hw->wiphy->iface_combinations = if_comb;
767 hw->wiphy->n_iface_combinations = 1; 788 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
768 789
769 if (AR_SREV_5416(sc->sc_ah)) 790 if (AR_SREV_5416(sc->sc_ah))
770 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 791 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7fdac6c7b3ea..849259b07370 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -214,7 +214,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
214 txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE]; 214 txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
215 215
216 memset(tx_info, 0, sizeof(*tx_info)); 216 memset(tx_info, 0, sizeof(*tx_info));
217 tx_info->band = hw->conf.channel->band; 217 tx_info->band = hw->conf.chandef.chan->band;
218 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK; 218 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
219 tx_info->control.rates[0].idx = 0; 219 tx_info->control.rates[0].idx = 0;
220 tx_info->control.rates[0].count = 1; 220 tx_info->control.rates[0].count = 1;
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 811007ec07a7..498fee04afa0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -615,6 +615,14 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
615 rs->rs_status |= ATH9K_RXERR_DECRYPT; 615 rs->rs_status |= ATH9K_RXERR_DECRYPT;
616 else if (ads.ds_rxstatus8 & AR_MichaelErr) 616 else if (ads.ds_rxstatus8 & AR_MichaelErr)
617 rs->rs_status |= ATH9K_RXERR_MIC; 617 rs->rs_status |= ATH9K_RXERR_MIC;
618 } else {
619 if (ads.ds_rxstatus8 &
620 (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr))
621 rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
622
623 /* Only up to MCS16 supported, everything above is invalid */
624 if (rs->rs_rate >= 0x90)
625 rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
618 } 626 }
619 627
620 if (ads.ds_rxstatus8 & AR_KeyMiss) 628 if (ads.ds_rxstatus8 & AR_KeyMiss)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 1ff817061ebc..5865f92998e1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -183,6 +183,7 @@ struct ath_htc_rx_status {
183#define ATH9K_RXERR_DECRYPT 0x08 183#define ATH9K_RXERR_DECRYPT 0x08
184#define ATH9K_RXERR_MIC 0x10 184#define ATH9K_RXERR_MIC 0x10
185#define ATH9K_RXERR_KEYMISS 0x20 185#define ATH9K_RXERR_KEYMISS 0x20
186#define ATH9K_RXERR_CORRUPT_DESC 0x40
186 187
187#define ATH9K_RX_MORE 0x01 188#define ATH9K_RX_MORE 0x01
188#define ATH9K_RX_MORE_AGGR 0x02 189#define ATH9K_RX_MORE_AGGR 0x02
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 988372d218a4..6963862a1872 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -589,7 +589,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
589 struct ath_softc *sc = hw->priv; 589 struct ath_softc *sc = hw->priv;
590 struct ath_hw *ah = sc->sc_ah; 590 struct ath_hw *ah = sc->sc_ah;
591 struct ath_common *common = ath9k_hw_common(ah); 591 struct ath_common *common = ath9k_hw_common(ah);
592 struct ieee80211_channel *curchan = hw->conf.channel; 592 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
593 struct ath9k_channel *init_channel; 593 struct ath9k_channel *init_channel;
594 int r; 594 int r;
595 595
@@ -839,10 +839,14 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
839 struct ath9k_vif_iter_data *iter_data = data; 839 struct ath9k_vif_iter_data *iter_data = data;
840 int i; 840 int i;
841 841
842 if (iter_data->hw_macaddr) 842 if (iter_data->has_hw_macaddr) {
843 for (i = 0; i < ETH_ALEN; i++) 843 for (i = 0; i < ETH_ALEN; i++)
844 iter_data->mask[i] &= 844 iter_data->mask[i] &=
845 ~(iter_data->hw_macaddr[i] ^ mac[i]); 845 ~(iter_data->hw_macaddr[i] ^ mac[i]);
846 } else {
847 memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
848 iter_data->has_hw_macaddr = true;
849 }
846 850
847 switch (vif->type) { 851 switch (vif->type) {
848 case NL80211_IFTYPE_AP: 852 case NL80211_IFTYPE_AP:
@@ -891,7 +895,6 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
891 * together with the BSSID mask when matching addresses. 895 * together with the BSSID mask when matching addresses.
892 */ 896 */
893 memset(iter_data, 0, sizeof(*iter_data)); 897 memset(iter_data, 0, sizeof(*iter_data));
894 iter_data->hw_macaddr = common->macaddr;
895 memset(&iter_data->mask, 0xff, ETH_ALEN); 898 memset(&iter_data->mask, 0xff, ETH_ALEN);
896 899
897 if (vif) 900 if (vif)
@@ -901,6 +904,8 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
901 ieee80211_iterate_active_interfaces_atomic( 904 ieee80211_iterate_active_interfaces_atomic(
902 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 905 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
903 ath9k_vif_iter, iter_data); 906 ath9k_vif_iter, iter_data);
907
908 memcpy(common->macaddr, iter_data->hw_macaddr, ETH_ALEN);
904} 909}
905 910
906/* Called with sc->mutex held. */ 911/* Called with sc->mutex held. */
@@ -1188,7 +1193,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1188 } 1193 }
1189 1194
1190 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { 1195 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
1191 struct ieee80211_channel *curchan = hw->conf.channel; 1196 struct ieee80211_channel *curchan = hw->conf.chandef.chan;
1197 enum nl80211_channel_type channel_type =
1198 cfg80211_get_chandef_type(&conf->chandef);
1192 int pos = curchan->hw_value; 1199 int pos = curchan->hw_value;
1193 int old_pos = -1; 1200 int old_pos = -1;
1194 unsigned long flags; 1201 unsigned long flags;
@@ -1197,7 +1204,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1197 old_pos = ah->curchan - &ah->channels[0]; 1204 old_pos = ah->curchan - &ah->channels[0];
1198 1205
1199 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n", 1206 ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
1200 curchan->center_freq, conf->channel_type); 1207 curchan->center_freq, channel_type);
1201 1208
1202 /* update survey stats for the old channel before switching */ 1209 /* update survey stats for the old channel before switching */
1203 spin_lock_irqsave(&common->cc_lock, flags); 1210 spin_lock_irqsave(&common->cc_lock, flags);
@@ -1212,7 +1219,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1212 ath9k_hw_getnf(ah, ah->curchan); 1219 ath9k_hw_getnf(ah, ah->curchan);
1213 1220
1214 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], 1221 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1215 curchan, conf->channel_type); 1222 curchan, channel_type);
1216 1223
1217 /* 1224 /*
1218 * If the operating channel changes, change the survey in-use flags 1225 * If the operating channel changes, change the survey in-use flags
@@ -1249,10 +1256,27 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1249 if (old_pos >= 0) 1256 if (old_pos >= 0)
1250 ath_update_survey_nf(sc, old_pos); 1257 ath_update_survey_nf(sc, old_pos);
1251 1258
1252 /* perform spectral scan if requested. */ 1259 /*
1253 if (sc->scanning && sc->spectral_mode == SPECTRAL_CHANSCAN) 1260 * Enable radar pulse detection if on a DFS channel. Spectral
1254 ath9k_spectral_scan_trigger(hw); 1261 * scanning and radar detection can not be used concurrently.
1255 1262 */
1263 if (hw->conf.radar_enabled) {
1264 u32 rxfilter;
1265
1266 /* set HW specific DFS configuration */
1267 ath9k_hw_set_radar_params(ah);
1268 rxfilter = ath9k_hw_getrxfilter(ah);
1269 rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
1270 ATH9K_RX_FILTER_PHYERR;
1271 ath9k_hw_setrxfilter(ah, rxfilter);
1272 ath_dbg(common, DFS, "DFS enabled at freq %d\n",
1273 curchan->center_freq);
1274 } else {
1275 /* perform spectral scan if requested. */
1276 if (sc->scanning &&
1277 sc->spectral_mode == SPECTRAL_CHANSCAN)
1278 ath9k_spectral_scan_trigger(hw);
1279 }
1256 } 1280 }
1257 1281
1258 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1282 if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1749,7 +1773,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
1749 mutex_unlock(&sc->mutex); 1773 mutex_unlock(&sc->mutex);
1750} 1774}
1751 1775
1752static void ath9k_flush(struct ieee80211_hw *hw, bool drop) 1776static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1753{ 1777{
1754 struct ath_softc *sc = hw->priv; 1778 struct ath_softc *sc = hw->priv;
1755 struct ath_hw *ah = sc->sc_ah; 1779 struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 96ac433ba7f6..aa4d368d8d3d 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -814,7 +814,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
814 * So, set fourth rate in series to be same as third one for 814 * So, set fourth rate in series to be same as third one for
815 * above conditions. 815 * above conditions.
816 */ 816 */
817 if ((sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ) && 817 if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
818 (conf_is_ht(&sc->hw->conf))) { 818 (conf_is_ht(&sc->hw->conf))) {
819 u8 dot11rate = rate_table->info[rix].dot11rate; 819 u8 dot11rate = rate_table->info[rix].dot11rate;
820 u8 phy = rate_table->info[rix].phy; 820 u8 phy = rate_table->info[rix].phy;
@@ -1328,7 +1328,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1328 1328
1329 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, 1329 ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
1330 "Operating HT Bandwidth changed to: %d\n", 1330 "Operating HT Bandwidth changed to: %d\n",
1331 sc->hw->conf.channel_type); 1331 cfg80211_get_chandef_type(&sc->hw->conf.chandef));
1332 } 1332 }
1333} 1333}
1334 1334
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ee156e543147..8be2b5d8c155 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -124,13 +124,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
124 124
125 SKB_CB_ATHBUF(skb) = bf; 125 SKB_CB_ATHBUF(skb) = bf;
126 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 126 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
127 skb_queue_tail(&rx_edma->rx_fifo, skb); 127 __skb_queue_tail(&rx_edma->rx_fifo, skb);
128 128
129 return true; 129 return true;
130} 130}
131 131
132static void ath_rx_addbuffer_edma(struct ath_softc *sc, 132static void ath_rx_addbuffer_edma(struct ath_softc *sc,
133 enum ath9k_rx_qtype qtype, int size) 133 enum ath9k_rx_qtype qtype)
134{ 134{
135 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 135 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
136 struct ath_buf *bf, *tbf; 136 struct ath_buf *bf, *tbf;
@@ -155,7 +155,7 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,
155 155
156 rx_edma = &sc->rx.rx_edma[qtype]; 156 rx_edma = &sc->rx.rx_edma[qtype];
157 157
158 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 158 while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
159 bf = SKB_CB_ATHBUF(skb); 159 bf = SKB_CB_ATHBUF(skb);
160 BUG_ON(!bf); 160 BUG_ON(!bf);
161 list_add_tail(&bf->list, &sc->rx.rxbuf); 161 list_add_tail(&bf->list, &sc->rx.rxbuf);
@@ -250,15 +250,9 @@ rx_init_fail:
250static void ath_edma_start_recv(struct ath_softc *sc) 250static void ath_edma_start_recv(struct ath_softc *sc)
251{ 251{
252 ath9k_hw_rxena(sc->sc_ah); 252 ath9k_hw_rxena(sc->sc_ah);
253 253 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
254 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 254 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
255 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
256
257 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
258 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
259
260 ath_opmode_init(sc); 255 ath_opmode_init(sc);
261
262 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 256 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
263} 257}
264 258
@@ -280,49 +274,47 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
280 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 274 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
281 sc->sc_ah->caps.rx_status_len; 275 sc->sc_ah->caps.rx_status_len;
282 276
283 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 277 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
284 return ath_rx_edma_init(sc, nbufs); 278 return ath_rx_edma_init(sc, nbufs);
285 } else {
286 ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
287 common->cachelsz, common->rx_bufsize);
288 279
289 /* Initialize rx descriptors */ 280 ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
281 common->cachelsz, common->rx_bufsize);
290 282
291 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 283 /* Initialize rx descriptors */
292 "rx", nbufs, 1, 0); 284
293 if (error != 0) { 285 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
294 ath_err(common, 286 "rx", nbufs, 1, 0);
295 "failed to allocate rx descriptors: %d\n", 287 if (error != 0) {
296 error); 288 ath_err(common,
289 "failed to allocate rx descriptors: %d\n",
290 error);
291 goto err;
292 }
293
294 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
295 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
296 GFP_KERNEL);
297 if (skb == NULL) {
298 error = -ENOMEM;
297 goto err; 299 goto err;
298 } 300 }
299 301
300 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 302 bf->bf_mpdu = skb;
301 skb = ath_rxbuf_alloc(common, common->rx_bufsize, 303 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
302 GFP_KERNEL); 304 common->rx_bufsize,
303 if (skb == NULL) { 305 DMA_FROM_DEVICE);
304 error = -ENOMEM; 306 if (unlikely(dma_mapping_error(sc->dev,
305 goto err; 307 bf->bf_buf_addr))) {
306 } 308 dev_kfree_skb_any(skb);
307 309 bf->bf_mpdu = NULL;
308 bf->bf_mpdu = skb; 310 bf->bf_buf_addr = 0;
309 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 311 ath_err(common,
310 common->rx_bufsize, 312 "dma_mapping_error() on RX init\n");
311 DMA_FROM_DEVICE); 313 error = -ENOMEM;
312 if (unlikely(dma_mapping_error(sc->dev, 314 goto err;
313 bf->bf_buf_addr))) {
314 dev_kfree_skb_any(skb);
315 bf->bf_mpdu = NULL;
316 bf->bf_buf_addr = 0;
317 ath_err(common,
318 "dma_mapping_error() on RX init\n");
319 error = -ENOMEM;
320 goto err;
321 }
322 } 315 }
323 sc->rx.rxlink = NULL;
324 } 316 }
325 317 sc->rx.rxlink = NULL;
326err: 318err:
327 if (error) 319 if (error)
328 ath_rx_cleanup(sc); 320 ath_rx_cleanup(sc);
@@ -340,17 +332,17 @@ void ath_rx_cleanup(struct ath_softc *sc)
340 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 332 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
341 ath_rx_edma_cleanup(sc); 333 ath_rx_edma_cleanup(sc);
342 return; 334 return;
343 } else { 335 }
344 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 336
345 skb = bf->bf_mpdu; 337 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
346 if (skb) { 338 skb = bf->bf_mpdu;
347 dma_unmap_single(sc->dev, bf->bf_buf_addr, 339 if (skb) {
348 common->rx_bufsize, 340 dma_unmap_single(sc->dev, bf->bf_buf_addr,
349 DMA_FROM_DEVICE); 341 common->rx_bufsize,
350 dev_kfree_skb(skb); 342 DMA_FROM_DEVICE);
351 bf->bf_buf_addr = 0; 343 dev_kfree_skb(skb);
352 bf->bf_mpdu = NULL; 344 bf->bf_buf_addr = 0;
353 } 345 bf->bf_mpdu = NULL;
354 } 346 }
355 } 347 }
356} 348}
@@ -381,6 +373,10 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
381 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 373 rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
382 | ATH9K_RX_FILTER_MCAST; 374 | ATH9K_RX_FILTER_MCAST;
383 375
376 /* if operating on a DFS channel, enable radar pulse detection */
377 if (sc->hw->conf.radar_enabled)
378 rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;
379
384 if (sc->rx.rxfilter & FIF_PROBE_REQ) 380 if (sc->rx.rxfilter & FIF_PROBE_REQ)
385 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 381 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
386 382
@@ -723,6 +719,13 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
723 ret = ath9k_hw_rxprocdesc(ah, tds, &trs); 719 ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
724 if (ret == -EINPROGRESS) 720 if (ret == -EINPROGRESS)
725 return NULL; 721 return NULL;
722
723 /*
724 * mark descriptor as zero-length and set the 'more'
725 * flag to ensure that both buffers get discarded
726 */
727 rs->rs_datalen = 0;
728 rs->rs_more = true;
726 } 729 }
727 730
728 list_del(&bf->list); 731 list_del(&bf->list);
@@ -859,7 +862,7 @@ static int ath9k_process_rate(struct ath_common *common,
859 unsigned int i = 0; 862 unsigned int i = 0;
860 struct ath_softc __maybe_unused *sc = common->priv; 863 struct ath_softc __maybe_unused *sc = common->priv;
861 864
862 band = hw->conf.channel->band; 865 band = hw->conf.chandef.chan->band;
863 sband = hw->wiphy->bands[band]; 866 sband = hw->wiphy->bands[band];
864 867
865 if (rx_stats->rs_rate & 0x80) { 868 if (rx_stats->rs_rate & 0x80) {
@@ -929,14 +932,20 @@ static void ath9k_process_rssi(struct ath_common *common,
929 * up the frame up to let mac80211 handle the actual error case, be it no 932 * up the frame up to let mac80211 handle the actual error case, be it no
930 * decryption key or real decryption error. This let us keep statistics there. 933 * decryption key or real decryption error. This let us keep statistics there.
931 */ 934 */
932static int ath9k_rx_skb_preprocess(struct ath_common *common, 935static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
933 struct ieee80211_hw *hw,
934 struct ieee80211_hdr *hdr, 936 struct ieee80211_hdr *hdr,
935 struct ath_rx_status *rx_stats, 937 struct ath_rx_status *rx_stats,
936 struct ieee80211_rx_status *rx_status, 938 struct ieee80211_rx_status *rx_status,
937 bool *decrypt_error) 939 bool *decrypt_error)
938{ 940{
939 struct ath_hw *ah = common->ah; 941 struct ieee80211_hw *hw = sc->hw;
942 struct ath_hw *ah = sc->sc_ah;
943 struct ath_common *common = ath9k_hw_common(ah);
944 bool discard_current = sc->rx.discard_next;
945
946 sc->rx.discard_next = rx_stats->rs_more;
947 if (discard_current)
948 return -EINVAL;
940 949
941 /* 950 /*
942 * everything but the rate is checked here, the rate check is done 951 * everything but the rate is checked here, the rate check is done
@@ -954,14 +963,15 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
954 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 963 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
955 return -EINVAL; 964 return -EINVAL;
956 965
957 rx_status->band = hw->conf.channel->band; 966 rx_status->band = hw->conf.chandef.chan->band;
958 rx_status->freq = hw->conf.channel->center_freq; 967 rx_status->freq = hw->conf.chandef.chan->center_freq;
959 rx_status->signal = ah->noise + rx_stats->rs_rssi; 968 rx_status->signal = ah->noise + rx_stats->rs_rssi;
960 rx_status->antenna = rx_stats->rs_antenna; 969 rx_status->antenna = rx_stats->rs_antenna;
961 rx_status->flag |= RX_FLAG_MACTIME_END; 970 rx_status->flag |= RX_FLAG_MACTIME_END;
962 if (rx_stats->rs_moreaggr) 971 if (rx_stats->rs_moreaggr)
963 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 972 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
964 973
974 sc->rx.discard_next = false;
965 return 0; 975 return 0;
966} 976}
967 977
@@ -981,7 +991,7 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
981 hdr = (struct ieee80211_hdr *) skb->data; 991 hdr = (struct ieee80211_hdr *) skb->data;
982 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 992 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
983 fc = hdr->frame_control; 993 fc = hdr->frame_control;
984 padpos = ath9k_cmn_padpos(hdr->frame_control); 994 padpos = ieee80211_hdrlen(fc);
985 995
986 /* The MAC header is padded to have 32-bit boundary if the 996 /* The MAC header is padded to have 32-bit boundary if the
987 * packet payload is non-zero. The general calculation for 997 * packet payload is non-zero. The general calculation for
@@ -1162,6 +1172,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1162 u64 tsf = 0; 1172 u64 tsf = 0;
1163 u32 tsf_lower = 0; 1173 u32 tsf_lower = 0;
1164 unsigned long flags; 1174 unsigned long flags;
1175 dma_addr_t new_buf_addr;
1165 1176
1166 if (edma) 1177 if (edma)
1167 dma_type = DMA_BIDIRECTIONAL; 1178 dma_type = DMA_BIDIRECTIONAL;
@@ -1228,6 +1239,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1228 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1239 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1229 rxs->mactime += 0x100000000ULL; 1240 rxs->mactime += 0x100000000ULL;
1230 1241
1242 if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
1243 ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
1244
1231 if (rs.rs_status & ATH9K_RXERR_PHY) { 1245 if (rs.rs_status & ATH9K_RXERR_PHY) {
1232 if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) { 1246 if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
1233 RX_STAT_INC(rx_spectral); 1247 RX_STAT_INC(rx_spectral);
@@ -1235,8 +1249,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1235 } 1249 }
1236 } 1250 }
1237 1251
1238 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1252 retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
1239 rxs, &decrypt_error); 1253 &decrypt_error);
1240 if (retval) 1254 if (retval)
1241 goto requeue_drop_frag; 1255 goto requeue_drop_frag;
1242 1256
@@ -1257,10 +1271,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1257 goto requeue_drop_frag; 1271 goto requeue_drop_frag;
1258 } 1272 }
1259 1273
1274 /* We will now give hardware our shiny new allocated skb */
1275 new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1276 common->rx_bufsize, dma_type);
1277 if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
1278 dev_kfree_skb_any(requeue_skb);
1279 goto requeue_drop_frag;
1280 }
1281
1260 /* Unmap the frame */ 1282 /* Unmap the frame */
1261 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1283 dma_unmap_single(sc->dev, bf->bf_buf_addr,
1262 common->rx_bufsize, 1284 common->rx_bufsize, dma_type);
1263 dma_type); 1285
1286 bf->bf_mpdu = requeue_skb;
1287 bf->bf_buf_addr = new_buf_addr;
1264 1288
1265 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); 1289 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1266 if (ah->caps.rx_status_len) 1290 if (ah->caps.rx_status_len)
@@ -1270,21 +1294,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1270 ath9k_rx_skb_postprocess(common, hdr_skb, &rs, 1294 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1271 rxs, decrypt_error); 1295 rxs, decrypt_error);
1272 1296
1273 /* We will now give hardware our shiny new allocated skb */
1274 bf->bf_mpdu = requeue_skb;
1275 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1276 common->rx_bufsize,
1277 dma_type);
1278 if (unlikely(dma_mapping_error(sc->dev,
1279 bf->bf_buf_addr))) {
1280 dev_kfree_skb_any(requeue_skb);
1281 bf->bf_mpdu = NULL;
1282 bf->bf_buf_addr = 0;
1283 ath_err(common, "dma_mapping_error() on RX\n");
1284 ieee80211_rx(hw, skb);
1285 break;
1286 }
1287
1288 if (rs.rs_more) { 1297 if (rs.rs_more) {
1289 RX_STAT_INC(rx_frags); 1298 RX_STAT_INC(rx_frags);
1290 /* 1299 /*
@@ -1302,6 +1311,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1302 sc->rx.frag = skb; 1311 sc->rx.frag = skb;
1303 goto requeue; 1312 goto requeue;
1304 } 1313 }
1314 if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
1315 goto requeue_drop_frag;
1305 1316
1306 if (sc->rx.frag) { 1317 if (sc->rx.frag) {
1307 int space = skb->len - skb_tailroom(hdr_skb); 1318 int space = skb->len - skb_tailroom(hdr_skb);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 5929850649f0..5c4ab5026dca 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1493,9 +1493,6 @@ enum {
1493#define AR9271_RADIO_RF_RST 0x20 1493#define AR9271_RADIO_RF_RST 0x20
1494#define AR9271_GATE_MAC_CTL 0x4000 1494#define AR9271_GATE_MAC_CTL 0x4000
1495 1495
1496#define AR_STA_ID0 0x8000
1497#define AR_STA_ID1 0x8004
1498#define AR_STA_ID1_SADH_MASK 0x0000FFFF
1499#define AR_STA_ID1_STA_AP 0x00010000 1496#define AR_STA_ID1_STA_AP 0x00010000
1500#define AR_STA_ID1_ADHOC 0x00020000 1497#define AR_STA_ID1_ADHOC 0x00020000
1501#define AR_STA_ID1_PWR_SAV 0x00040000 1498#define AR_STA_ID1_PWR_SAV 0x00040000
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 89a64411b82e..eab0fcb7ded6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -157,6 +157,13 @@ static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
157 seqno << IEEE80211_SEQ_SEQ_SHIFT); 157 seqno << IEEE80211_SEQ_SEQ_SHIFT);
158} 158}
159 159
160static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
161 struct ath_buf *bf)
162{
163 ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates,
164 ARRAY_SIZE(bf->rates));
165}
166
160static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 167static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
161{ 168{
162 struct ath_txq *txq = tid->ac->txq; 169 struct ath_txq *txq = tid->ac->txq;
@@ -189,6 +196,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
189 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 196 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
190 sendbar = true; 197 sendbar = true;
191 } else { 198 } else {
199 ath_set_rates(tid->an->vif, tid->an->sta, bf);
192 ath_tx_send_normal(sc, txq, NULL, skb); 200 ath_tx_send_normal(sc, txq, NULL, skb);
193 } 201 }
194 } 202 }
@@ -407,7 +415,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
407 415
408 tx_info = IEEE80211_SKB_CB(skb); 416 tx_info = IEEE80211_SKB_CB(skb);
409 417
410 memcpy(rates, tx_info->control.rates, sizeof(rates)); 418 memcpy(rates, bf->rates, sizeof(rates));
411 419
412 retries = ts->ts_longretry + 1; 420 retries = ts->ts_longretry + 1;
413 for (i = 0; i < ts->ts_rateindex; i++) 421 for (i = 0; i < ts->ts_rateindex; i++)
@@ -516,8 +524,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
516 * not a holding desc. 524 * not a holding desc.
517 */ 525 */
518 INIT_LIST_HEAD(&bf_head); 526 INIT_LIST_HEAD(&bf_head);
519 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || 527 if (bf_next != NULL || !bf_last->bf_stale)
520 bf_next != NULL || !bf_last->bf_stale)
521 list_move_tail(&bf->list, &bf_head); 528 list_move_tail(&bf->list, &bf_head);
522 529
523 if (!txpending || (tid->state & AGGR_CLEANUP)) { 530 if (!txpending || (tid->state & AGGR_CLEANUP)) {
@@ -537,8 +544,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
537 !txfail); 544 !txfail);
538 } else { 545 } else {
539 /* retry the un-acked ones */ 546 /* retry the un-acked ones */
540 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 547 if (bf->bf_next == NULL && bf_last->bf_stale) {
541 bf->bf_next == NULL && bf_last->bf_stale) {
542 struct ath_buf *tbf; 548 struct ath_buf *tbf;
543 549
544 tbf = ath_clone_txbuf(sc, bf_last); 550 tbf = ath_clone_txbuf(sc, bf_last);
@@ -738,8 +744,6 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
738 bool first_subfrm) 744 bool first_subfrm)
739{ 745{
740#define FIRST_DESC_NDELIMS 60 746#define FIRST_DESC_NDELIMS 60
741 struct sk_buff *skb = bf->bf_mpdu;
742 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
743 u32 nsymbits, nsymbols; 747 u32 nsymbits, nsymbols;
744 u16 minlen; 748 u16 minlen;
745 u8 flags, rix; 749 u8 flags, rix;
@@ -780,8 +784,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
780 if (tid->an->mpdudensity == 0) 784 if (tid->an->mpdudensity == 0)
781 return ndelim; 785 return ndelim;
782 786
783 rix = tx_info->control.rates[0].idx; 787 rix = bf->rates[0].idx;
784 flags = tx_info->control.rates[0].flags; 788 flags = bf->rates[0].flags;
785 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0; 789 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
786 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0; 790 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
787 791
@@ -860,6 +864,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
860 bf_first = bf; 864 bf_first = bf;
861 865
862 if (!rl) { 866 if (!rl) {
867 ath_set_rates(tid->an->vif, tid->an->sta, bf);
863 aggr_limit = ath_lookup_rate(sc, bf, tid); 868 aggr_limit = ath_lookup_rate(sc, bf, tid);
864 rl = 1; 869 rl = 1;
865 } 870 }
@@ -1000,14 +1005,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1000 1005
1001 skb = bf->bf_mpdu; 1006 skb = bf->bf_mpdu;
1002 tx_info = IEEE80211_SKB_CB(skb); 1007 tx_info = IEEE80211_SKB_CB(skb);
1003 rates = tx_info->control.rates; 1008 rates = bf->rates;
1004 hdr = (struct ieee80211_hdr *)skb->data; 1009 hdr = (struct ieee80211_hdr *)skb->data;
1005 1010
1006 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 1011 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1007 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 1012 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
1008 info->rtscts_rate = fi->rtscts_rate; 1013 info->rtscts_rate = fi->rtscts_rate;
1009 1014
1010 for (i = 0; i < 4; i++) { 1015 for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
1011 bool is_40, is_sgi, is_sp; 1016 bool is_40, is_sgi, is_sp;
1012 int phy; 1017 int phy;
1013 1018
@@ -1745,6 +1750,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1745 return; 1750 return;
1746 } 1751 }
1747 1752
1753 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1748 bf->bf_state.bf_type = BUF_AMPDU; 1754 bf->bf_state.bf_type = BUF_AMPDU;
1749 INIT_LIST_HEAD(&bf_head); 1755 INIT_LIST_HEAD(&bf_head);
1750 list_add(&bf->list, &bf_head); 1756 list_add(&bf->list, &bf_head);
@@ -1894,49 +1900,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1894 return bf; 1900 return bf;
1895} 1901}
1896 1902
1897/* FIXME: tx power */
1898static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1899 struct ath_tx_control *txctl)
1900{
1901 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1902 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1903 struct ath_atx_tid *tid = NULL;
1904 struct ath_buf *bf;
1905 u8 tidno;
1906
1907 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
1908 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1909 IEEE80211_QOS_CTL_TID_MASK;
1910 tid = ATH_AN_2_TID(txctl->an, tidno);
1911
1912 WARN_ON(tid->ac->txq != txctl->txq);
1913 }
1914
1915 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
1916 /*
1917 * Try aggregation if it's a unicast data frame
1918 * and the destination is HT capable.
1919 */
1920 ath_tx_send_ampdu(sc, tid, skb, txctl);
1921 } else {
1922 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1923 if (!bf) {
1924 if (txctl->paprd)
1925 dev_kfree_skb_any(skb);
1926 else
1927 ieee80211_free_txskb(sc->hw, skb);
1928 return;
1929 }
1930
1931 bf->bf_state.bfs_paprd = txctl->paprd;
1932
1933 if (txctl->paprd)
1934 bf->bf_state.bfs_paprd_timestamp = jiffies;
1935
1936 ath_tx_send_normal(sc, txctl->txq, tid, skb);
1937 }
1938}
1939
1940/* Upon failure caller should free skb */ 1903/* Upon failure caller should free skb */
1941int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1904int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1942 struct ath_tx_control *txctl) 1905 struct ath_tx_control *txctl)
@@ -1947,8 +1910,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1947 struct ieee80211_vif *vif = info->control.vif; 1910 struct ieee80211_vif *vif = info->control.vif;
1948 struct ath_softc *sc = hw->priv; 1911 struct ath_softc *sc = hw->priv;
1949 struct ath_txq *txq = txctl->txq; 1912 struct ath_txq *txq = txctl->txq;
1913 struct ath_atx_tid *tid = NULL;
1914 struct ath_buf *bf;
1950 int padpos, padsize; 1915 int padpos, padsize;
1951 int frmlen = skb->len + FCS_LEN; 1916 int frmlen = skb->len + FCS_LEN;
1917 u8 tidno;
1952 int q; 1918 int q;
1953 1919
1954 /* NOTE: sta can be NULL according to net/mac80211.h */ 1920 /* NOTE: sta can be NULL according to net/mac80211.h */
@@ -1971,7 +1937,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1971 } 1937 }
1972 1938
1973 /* Add the padding after the header if this is not already done */ 1939 /* Add the padding after the header if this is not already done */
1974 padpos = ath9k_cmn_padpos(hdr->frame_control); 1940 padpos = ieee80211_hdrlen(hdr->frame_control);
1975 padsize = padpos & 3; 1941 padsize = padpos & 3;
1976 if (padsize && skb->len > padpos) { 1942 if (padsize && skb->len > padpos) {
1977 if (skb_headroom(skb) < padsize) 1943 if (skb_headroom(skb) < padsize)
@@ -2004,8 +1970,41 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2004 txq->stopped = true; 1970 txq->stopped = true;
2005 } 1971 }
2006 1972
2007 ath_tx_start_dma(sc, skb, txctl); 1973 if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
1974 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1975 IEEE80211_QOS_CTL_TID_MASK;
1976 tid = ATH_AN_2_TID(txctl->an, tidno);
1977
1978 WARN_ON(tid->ac->txq != txctl->txq);
1979 }
1980
1981 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
1982 /*
1983 * Try aggregation if it's a unicast data frame
1984 * and the destination is HT capable.
1985 */
1986 ath_tx_send_ampdu(sc, tid, skb, txctl);
1987 goto out;
1988 }
2008 1989
1990 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1991 if (!bf) {
1992 if (txctl->paprd)
1993 dev_kfree_skb_any(skb);
1994 else
1995 ieee80211_free_txskb(sc->hw, skb);
1996 goto out;
1997 }
1998
1999 bf->bf_state.bfs_paprd = txctl->paprd;
2000
2001 if (txctl->paprd)
2002 bf->bf_state.bfs_paprd_timestamp = jiffies;
2003
2004 ath_set_rates(vif, sta, bf);
2005 ath_tx_send_normal(sc, txctl->txq, tid, skb);
2006
2007out:
2009 ath_txq_unlock(sc, txq); 2008 ath_txq_unlock(sc, txq);
2010 2009
2011 return 0; 2010 return 0;
@@ -2033,7 +2032,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2033 /* Frame was ACKed */ 2032 /* Frame was ACKed */
2034 tx_info->flags |= IEEE80211_TX_STAT_ACK; 2033 tx_info->flags |= IEEE80211_TX_STAT_ACK;
2035 2034
2036 padpos = ath9k_cmn_padpos(hdr->frame_control); 2035 padpos = ieee80211_hdrlen(hdr->frame_control);
2037 padsize = padpos & 3; 2036 padsize = padpos & 3;
2038 if (padsize && skb->len>padpos+padsize) { 2037 if (padsize && skb->len>padpos+padsize) {
2039 /* 2038 /*
@@ -2264,6 +2263,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2264 struct ath_txq *txq; 2263 struct ath_txq *txq;
2265 struct ath_buf *bf, *lastbf; 2264 struct ath_buf *bf, *lastbf;
2266 struct list_head bf_head; 2265 struct list_head bf_head;
2266 struct list_head *fifo_list;
2267 int status; 2267 int status;
2268 2268
2269 for (;;) { 2269 for (;;) {
@@ -2291,20 +2291,24 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2291 2291
2292 TX_STAT_INC(txq->axq_qnum, txprocdesc); 2292 TX_STAT_INC(txq->axq_qnum, txprocdesc);
2293 2293
2294 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { 2294 fifo_list = &txq->txq_fifo[txq->txq_tailidx];
2295 if (list_empty(fifo_list)) {
2295 ath_txq_unlock(sc, txq); 2296 ath_txq_unlock(sc, txq);
2296 return; 2297 return;
2297 } 2298 }
2298 2299
2299 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx], 2300 bf = list_first_entry(fifo_list, struct ath_buf, list);
2300 struct ath_buf, list); 2301 if (bf->bf_stale) {
2302 list_del(&bf->list);
2303 ath_tx_return_buffer(sc, bf);
2304 bf = list_first_entry(fifo_list, struct ath_buf, list);
2305 }
2306
2301 lastbf = bf->bf_lastbf; 2307 lastbf = bf->bf_lastbf;
2302 2308
2303 INIT_LIST_HEAD(&bf_head); 2309 INIT_LIST_HEAD(&bf_head);
2304 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx], 2310 if (list_is_last(&lastbf->list, fifo_list)) {
2305 &lastbf->list); 2311 list_splice_tail_init(fifo_list, &bf_head);
2306
2307 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2308 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH); 2312 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2309 2313
2310 if (!list_empty(&txq->axq_q)) { 2314 if (!list_empty(&txq->axq_q)) {
@@ -2315,6 +2319,11 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2315 list_splice_tail_init(&txq->axq_q, &bf_q); 2319 list_splice_tail_init(&txq->axq_q, &bf_q);
2316 ath_tx_txqaddbuf(sc, txq, &bf_q, true); 2320 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2317 } 2321 }
2322 } else {
2323 lastbf->bf_stale = true;
2324 if (bf != lastbf)
2325 list_cut_position(&bf_head, fifo_list,
2326 lastbf->list.prev);
2318 } 2327 }
2319 2328
2320 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 2329 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 25599741cd8a..9dce106cd6d4 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -70,12 +70,6 @@
70 70
71static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 }; 71static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 };
72 72
73enum carl9170_rf_init_mode {
74 CARL9170_RFI_NONE,
75 CARL9170_RFI_WARM,
76 CARL9170_RFI_COLD,
77};
78
79#define CARL9170_MAX_RX_BUFFER_SIZE 8192 73#define CARL9170_MAX_RX_BUFFER_SIZE 8192
80 74
81enum carl9170_device_state { 75enum carl9170_device_state {
@@ -599,7 +593,7 @@ int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state);
599 593
600/* PHY / RF */ 594/* PHY / RF */
601int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, 595int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
602 enum nl80211_channel_type bw, enum carl9170_rf_init_mode rfi); 596 enum nl80211_channel_type bw);
603int carl9170_get_noisefloor(struct ar9170 *ar); 597int carl9170_get_noisefloor(struct ar9170 *ar);
604 598
605/* FW */ 599/* FW */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 93fe6003a493..3d70cd277fd7 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -654,8 +654,8 @@ static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
654 goto out; 654 goto out;
655 655
656 case 'P': 656 case 'P':
657 err = carl9170_set_channel(ar, ar->hw->conf.channel, 657 err = carl9170_set_channel(ar, ar->hw->conf.chandef.chan,
658 ar->hw->conf.channel_type, CARL9170_RFI_COLD); 658 cfg80211_get_chandef_type(&ar->hw->conf.chandef));
659 if (err < 0) 659 if (err < 0)
660 count = err; 660 count = err;
661 661
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 24d75ab94f0d..a2f005703c04 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
48 if (conf_is_ht40(&ar->hw->conf)) 48 if (conf_is_ht40(&ar->hw->conf))
49 val = 0x010a; 49 val = 0x010a;
50 else { 50 else {
51 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) 51 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
52 val = 0x105; 52 val = 0x105;
53 else 53 else
54 val = 0x104; 54 val = 0x104;
@@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar)
66 rts_rate = 0x1da; 66 rts_rate = 0x1da;
67 cts_rate = 0x10a; 67 cts_rate = 0x10a;
68 } else { 68 } else {
69 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) { 69 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
70 /* 11 mbit CCK */ 70 /* 11 mbit CCK */
71 rts_rate = 033; 71 rts_rate = 033;
72 cts_rate = 003; 72 cts_rate = 003;
@@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar)
93 return 0; 93 return 0;
94 } 94 }
95 95
96 if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) || 96 if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
97 vif->bss_conf.use_short_slot) 97 vif->bss_conf.use_short_slot)
98 slottime = 9; 98 slottime = 9;
99 99
@@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar)
120 basic |= (vif->bss_conf.basic_rates & 0xff0) << 4; 120 basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
121 rcu_read_unlock(); 121 rcu_read_unlock();
122 122
123 if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) 123 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
124 mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */ 124 mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
125 else 125 else
126 mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */ 126 mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f293b3ff4756..e9010a481dfd 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -929,6 +929,9 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
929 } 929 }
930 930
931 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 931 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
932 enum nl80211_channel_type channel_type =
933 cfg80211_get_chandef_type(&hw->conf.chandef);
934
932 /* adjust slot time for 5 GHz */ 935 /* adjust slot time for 5 GHz */
933 err = carl9170_set_slot_time(ar); 936 err = carl9170_set_slot_time(ar);
934 if (err) 937 if (err)
@@ -938,8 +941,8 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
938 if (err) 941 if (err)
939 goto out; 942 goto out;
940 943
941 err = carl9170_set_channel(ar, hw->conf.channel, 944 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
942 hw->conf.channel_type, CARL9170_RFI_NONE); 945 channel_type);
943 if (err) 946 if (err)
944 goto out; 947 goto out;
945 948
@@ -957,7 +960,7 @@ static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
957 } 960 }
958 961
959 if (changed & IEEE80211_CONF_CHANGE_POWER) { 962 if (changed & IEEE80211_CONF_CHANGE_POWER) {
960 err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel); 963 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
961 if (err) 964 if (err)
962 goto out; 965 goto out;
963 } 966 }
@@ -1703,7 +1706,7 @@ found:
1703 return 0; 1706 return 0;
1704} 1707}
1705 1708
1706static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop) 1709static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1707{ 1710{
1708 struct ar9170 *ar = hw->priv; 1711 struct ar9170 *ar = hw->priv;
1709 unsigned int vid; 1712 unsigned int vid;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index b72c09cf43a4..ab4ee7d39ad3 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1331,7 +1331,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
1331 * CTL_ETSI for 2GHz and CTL_FCC for 5GHz. 1331 * CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
1332 */ 1332 */
1333 ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory, 1333 ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
1334 ar->hw->conf.channel->band); 1334 ar->hw->conf.chandef.chan->band);
1335 1335
1336 /* ctl group not found - either invalid band (NO_CTL) or ww roaming */ 1336 /* ctl group not found - either invalid band (NO_CTL) or ww roaming */
1337 if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL) 1337 if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
@@ -1341,7 +1341,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
1341 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */ 1341 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
1342 return; 1342 return;
1343 1343
1344 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) { 1344 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
1345 modes = mode_list_2ghz; 1345 modes = mode_list_2ghz;
1346 nr_modes = ARRAY_SIZE(mode_list_2ghz); 1346 nr_modes = ARRAY_SIZE(mode_list_2ghz);
1347 } else { 1347 } else {
@@ -1569,16 +1569,14 @@ static enum carl9170_bw nl80211_to_carl(enum nl80211_channel_type type)
1569} 1569}
1570 1570
1571int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, 1571int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1572 enum nl80211_channel_type _bw, 1572 enum nl80211_channel_type _bw)
1573 enum carl9170_rf_init_mode rfi)
1574{ 1573{
1575 const struct carl9170_phy_freq_params *freqpar; 1574 const struct carl9170_phy_freq_params *freqpar;
1576 struct carl9170_rf_init_result rf_res; 1575 struct carl9170_rf_init_result rf_res;
1577 struct carl9170_rf_init rf; 1576 struct carl9170_rf_init rf;
1578 u32 cmd, tmp, offs = 0, new_ht = 0; 1577 u32 tmp, offs = 0, new_ht = 0;
1579 int err; 1578 int err;
1580 enum carl9170_bw bw; 1579 enum carl9170_bw bw;
1581 bool warm_reset;
1582 struct ieee80211_channel *old_channel = NULL; 1580 struct ieee80211_channel *old_channel = NULL;
1583 1581
1584 bw = nl80211_to_carl(_bw); 1582 bw = nl80211_to_carl(_bw);
@@ -1592,51 +1590,27 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1592 /* may be NULL at first setup */ 1590 /* may be NULL at first setup */
1593 if (ar->channel) { 1591 if (ar->channel) {
1594 old_channel = ar->channel; 1592 old_channel = ar->channel;
1595 warm_reset = (old_channel->band != channel->band) ||
1596 (old_channel->center_freq ==
1597 channel->center_freq) ||
1598 (ar->ht_settings != new_ht);
1599
1600 ar->channel = NULL; 1593 ar->channel = NULL;
1601 } else {
1602 warm_reset = true;
1603 } 1594 }
1604 1595
1605 /* HW workaround */ 1596 /* cold reset BB/ADDA */
1606 if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] && 1597 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET,
1607 channel->center_freq <= 2417) 1598 AR9170_PWR_RESET_BB_COLD_RESET);
1608 warm_reset = true; 1599 if (err)
1609 1600 return err;
1610 if (rfi != CARL9170_RFI_NONE || warm_reset) {
1611 u32 val;
1612
1613 if (rfi == CARL9170_RFI_COLD)
1614 val = AR9170_PWR_RESET_BB_COLD_RESET;
1615 else
1616 val = AR9170_PWR_RESET_BB_WARM_RESET;
1617
1618 /* warm/cold reset BB/ADDA */
1619 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, val);
1620 if (err)
1621 return err;
1622
1623 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
1624 if (err)
1625 return err;
1626 1601
1627 err = carl9170_init_phy(ar, channel->band); 1602 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
1628 if (err) 1603 if (err)
1629 return err; 1604 return err;
1630 1605
1631 err = carl9170_init_rf_banks_0_7(ar, 1606 err = carl9170_init_phy(ar, channel->band);
1632 channel->band == IEEE80211_BAND_5GHZ); 1607 if (err)
1633 if (err) 1608 return err;
1634 return err;
1635 1609
1636 cmd = CARL9170_CMD_RF_INIT; 1610 err = carl9170_init_rf_banks_0_7(ar,
1637 } else { 1611 channel->band == IEEE80211_BAND_5GHZ);
1638 cmd = CARL9170_CMD_FREQUENCY; 1612 if (err)
1639 } 1613 return err;
1640 1614
1641 err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL); 1615 err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL);
1642 if (err) 1616 if (err)
@@ -1648,8 +1622,8 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1648 return err; 1622 return err;
1649 1623
1650 err = carl9170_init_rf_bank4_pwr(ar, 1624 err = carl9170_init_rf_bank4_pwr(ar,
1651 channel->band == IEEE80211_BAND_5GHZ, 1625 channel->band == IEEE80211_BAND_5GHZ,
1652 channel->center_freq, bw); 1626 channel->center_freq, bw);
1653 if (err) 1627 if (err)
1654 return err; 1628 return err;
1655 1629
@@ -1703,13 +1677,8 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1703 rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man); 1677 rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man);
1704 rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi); 1678 rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi);
1705 rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi); 1679 rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi);
1706 1680 rf.finiteLoopCount = cpu_to_le32(2000);
1707 if (rfi != CARL9170_RFI_NONE) 1681 err = carl9170_exec_cmd(ar, CARL9170_CMD_RF_INIT, sizeof(rf), &rf,
1708 rf.finiteLoopCount = cpu_to_le32(2000);
1709 else
1710 rf.finiteLoopCount = cpu_to_le32(1000);
1711
1712 err = carl9170_exec_cmd(ar, cmd, sizeof(rf), &rf,
1713 sizeof(rf_res), &rf_res); 1682 sizeof(rf_res), &rf_res);
1714 if (err) 1683 if (err)
1715 return err; 1684 return err;
@@ -1724,9 +1693,8 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1724 old_channel->center_freq : -1, channel->center_freq, 1693 old_channel->center_freq : -1, channel->center_freq,
1725 err); 1694 err);
1726 1695
1727 if ((rfi == CARL9170_RFI_COLD) || (ar->chan_fail > 3)) { 1696 if (ar->chan_fail > 3) {
1728 /* 1697 /* We have tried very hard to change to _another_
1729 * We have tried very hard to change to _another_
1730 * channel and we've failed to do so! 1698 * channel and we've failed to do so!
1731 * Chances are that the PHY/RF is no longer 1699 * Chances are that the PHY/RF is no longer
1732 * operable (due to corruptions/fatal events/bugs?) 1700 * operable (due to corruptions/fatal events/bugs?)
@@ -1736,8 +1704,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1736 return 0; 1704 return 0;
1737 } 1705 }
1738 1706
1739 err = carl9170_set_channel(ar, channel, _bw, 1707 err = carl9170_set_channel(ar, channel, _bw);
1740 CARL9170_RFI_COLD);
1741 if (err) 1708 if (err)
1742 return err; 1709 return err;
1743 } else { 1710 } else {
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 9c0b150d5b8e..c61cafa2665b 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -387,8 +387,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
387 u8 tid; 387 u8 tid;
388 388
389 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || 389 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
390 txinfo->flags & IEEE80211_TX_CTL_INJECTED || 390 txinfo->flags & IEEE80211_TX_CTL_INJECTED)
391 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
392 return; 391 return;
393 392
394 rcu_read_lock(); 393 rcu_read_lock();
@@ -981,30 +980,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
981 980
982 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR, 981 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
983 txc->s.ampdu_settings, factor); 982 txc->s.ampdu_settings, factor);
984
985 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
986 txrate = &info->control.rates[i];
987 if (txrate->idx >= 0) {
988 txc->s.ri[i] =
989 CARL9170_TX_SUPER_RI_AMPDU;
990
991 if (WARN_ON(!(txrate->flags &
992 IEEE80211_TX_RC_MCS))) {
993 /*
994 * Not sure if it's even possible
995 * to aggregate non-ht rates with
996 * this HW.
997 */
998 goto err_out;
999 }
1000 continue;
1001 }
1002
1003 txrate->idx = 0;
1004 txrate->count = ar->hw->max_rate_tries;
1005 }
1006
1007 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1008 } 983 }
1009 984
1010 /* 985 /*
@@ -1012,11 +987,31 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
1012 * taken from mac_control. For all fallback rate, the firmware 987 * taken from mac_control. For all fallback rate, the firmware
1013 * updates the mac_control flags from the rate info field. 988 * updates the mac_control flags from the rate info field.
1014 */ 989 */
1015 for (i = 1; i < CARL9170_TX_MAX_RATES; i++) { 990 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
991 __le32 phy_set;
1016 txrate = &info->control.rates[i]; 992 txrate = &info->control.rates[i];
1017 if (txrate->idx < 0) 993 if (txrate->idx < 0)
1018 break; 994 break;
1019 995
996 phy_set = carl9170_tx_physet(ar, info, txrate);
997 if (i == 0) {
998 /* first rate - part of the hw's frame header */
999 txc->f.phy_control = phy_set;
1000
1001 if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
1002 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1003 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1004 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1005 else if (carl9170_tx_cts_check(ar, txrate))
1006 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1007
1008 } else {
1009 /* fallback rates are stored in the firmware's
1010 * retry rate set array.
1011 */
1012 txc->s.rr[i - 1] = phy_set;
1013 }
1014
1020 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], 1015 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
1021 txrate->count); 1016 txrate->count);
1022 1017
@@ -1027,21 +1022,13 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
1027 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << 1022 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
1028 CARL9170_TX_SUPER_RI_ERP_PROT_S); 1023 CARL9170_TX_SUPER_RI_ERP_PROT_S);
1029 1024
1030 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate); 1025 if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
1026 txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
1031 } 1027 }
1032 1028
1033 txrate = &info->control.rates[0];
1034 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
1035
1036 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1037 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1038 else if (carl9170_tx_cts_check(ar, txrate))
1039 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1040
1041 txc->s.len = cpu_to_le16(skb->len); 1029 txc->s.len = cpu_to_le16(skb->len);
1042 txc->f.length = cpu_to_le16(len + FCS_LEN); 1030 txc->f.length = cpu_to_le16(len + FCS_LEN);
1043 txc->f.mac_control = mac_tmp; 1031 txc->f.mac_control = mac_tmp;
1044 txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
1045 1032
1046 arinfo = (void *)info->rate_driver_data; 1033 arinfo = (void *)info->rate_driver_data;
1047 arinfo->timeout = jiffies; 1034 arinfo->timeout = jiffies;
@@ -1381,9 +1368,9 @@ static void carl9170_tx(struct ar9170 *ar)
1381} 1368}
1382 1369
1383static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, 1370static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1384 struct ieee80211_sta *sta, struct sk_buff *skb) 1371 struct ieee80211_sta *sta, struct sk_buff *skb,
1372 struct ieee80211_tx_info *txinfo)
1385{ 1373{
1386 struct _carl9170_tx_superframe *super = (void *) skb->data;
1387 struct carl9170_sta_info *sta_info; 1374 struct carl9170_sta_info *sta_info;
1388 struct carl9170_sta_tid *agg; 1375 struct carl9170_sta_tid *agg;
1389 struct sk_buff *iter; 1376 struct sk_buff *iter;
@@ -1450,7 +1437,7 @@ err_unlock:
1450 1437
1451err_unlock_rcu: 1438err_unlock_rcu:
1452 rcu_read_unlock(); 1439 rcu_read_unlock();
1453 super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR); 1440 txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
1454 carl9170_tx_status(ar, skb, false); 1441 carl9170_tx_status(ar, skb, false);
1455 ar->tx_dropped++; 1442 ar->tx_dropped++;
1456 return false; 1443 return false;
@@ -1492,7 +1479,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
1492 * sta == NULL checks are redundant in this 1479 * sta == NULL checks are redundant in this
1493 * special case. 1480 * special case.
1494 */ 1481 */
1495 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1482 run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
1496 if (run) 1483 if (run)
1497 carl9170_tx_ampdu(ar); 1484 carl9170_tx_ampdu(ar);
1498 1485
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 39e8a590d7fc..eae9abf540a7 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -118,6 +118,12 @@
118void ath_hw_setbssidmask(struct ath_common *common) 118void ath_hw_setbssidmask(struct ath_common *common)
119{ 119{
120 void *ah = common->ah; 120 void *ah = common->ah;
121 u32 id1;
122
123 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
124 id1 = REG_READ(ah, AR_STA_ID1) & ~AR_STA_ID1_SADH_MASK;
125 id1 |= get_unaligned_le16(common->macaddr + 4);
126 REG_WRITE(ah, AR_STA_ID1, id1);
121 127
122 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask)); 128 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
123 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4)); 129 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 5c54aa43ca2d..1816b4e7dc26 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -45,7 +45,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
45 void *ah = common->ah; 45 void *ah = common->ah;
46 46
47 if (entry >= common->keymax) { 47 if (entry >= common->keymax) {
48 ath_err(common, "keycache entry %u out of range\n", entry); 48 ath_err(common, "keyreset: keycache entry %u out of range\n",
49 entry);
49 return false; 50 return false;
50 } 51 }
51 52
@@ -91,7 +92,8 @@ static bool ath_hw_keysetmac(struct ath_common *common,
91 void *ah = common->ah; 92 void *ah = common->ah;
92 93
93 if (entry >= common->keymax) { 94 if (entry >= common->keymax) {
94 ath_err(common, "keycache entry %u out of range\n", entry); 95 ath_err(common, "keysetmac: keycache entry %u out of range\n",
96 entry);
95 return false; 97 return false;
96 } 98 }
97 99
@@ -133,7 +135,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
133 u32 keyType; 135 u32 keyType;
134 136
135 if (entry >= common->keymax) { 137 if (entry >= common->keymax) {
136 ath_err(common, "keycache entry %u out of range\n", entry); 138 ath_err(common, "set-entry: keycache entry %u out of range\n",
139 entry);
137 return false; 140 return false;
138 } 141 }
139 142
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
index 298e53f3fa48..3ad4c774bd22 100644
--- a/drivers/net/wireless/ath/reg.h
+++ b/drivers/net/wireless/ath/reg.h
@@ -23,6 +23,10 @@
23#define AR_MIBC_CMC 0x00000004 23#define AR_MIBC_CMC 0x00000004
24#define AR_MIBC_MCS 0x00000008 24#define AR_MIBC_MCS 0x00000008
25 25
26#define AR_STA_ID0 0x8000
27#define AR_STA_ID1 0x8004
28#define AR_STA_ID1_SADH_MASK 0x0000ffff
29
26/* 30/*
27 * BSSID mask registers. See ath_hw_set_bssid_mask() 31 * BSSID mask registers. See ath_hw_set_bssid_mask()
28 * for detailed documentation about these registers. 32 * for detailed documentation about these registers.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 9396dc9fe3c5..d288eea0a26a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,5 +9,7 @@ wil6210-objs += wmi.o
9wil6210-objs += interrupt.o 9wil6210-objs += interrupt.o
10wil6210-objs += txrx.o 10wil6210-objs += txrx.o
11 11
12subdir-ccflags-y += -Werror 12ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
13 subdir-ccflags-y += -Werror
14endif
13subdir-ccflags-y += -D__CHECK_ENDIAN__ 15subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9ecc1968262c..c5d4a87abaaf 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -14,16 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/sched.h>
20#include <linux/etherdevice.h>
21#include <linux/wireless.h>
22#include <linux/ieee80211.h>
23#include <linux/slab.h>
24#include <linux/version.h>
25#include <net/cfg80211.h>
26
27#include "wil6210.h" 17#include "wil6210.h"
28#include "wmi.h" 18#include "wmi.h"
29 19
@@ -292,7 +282,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
292 282
293 /* WMI_CONNECT_CMD */ 283 /* WMI_CONNECT_CMD */
294 memset(&conn, 0, sizeof(conn)); 284 memset(&conn, 0, sizeof(conn));
295 switch (bss->capability & 0x03) { 285 switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
296 case WLAN_CAPABILITY_DMG_TYPE_AP: 286 case WLAN_CAPABILITY_DMG_TYPE_AP:
297 conn.network_type = WMI_NETTYPE_INFRA; 287 conn.network_type = WMI_NETTYPE_INFRA;
298 break; 288 break;
@@ -437,17 +427,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
437 if (rc) 427 if (rc)
438 return rc; 428 return rc;
439 429
440 rc = wmi_set_channel(wil, channel->hw_value);
441 if (rc)
442 return rc;
443
444 /* MAC address - pre-requisite for other commands */ 430 /* MAC address - pre-requisite for other commands */
445 wmi_set_mac_address(wil, ndev->dev_addr); 431 wmi_set_mac_address(wil, ndev->dev_addr);
446 432
447 /* IE's */ 433 /* IE's */
448 /* bcon 'head IE's are not relevant for 60g band */ 434 /* bcon 'head IE's are not relevant for 60g band */
449 wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, 435 /*
450 bcon->beacon_ies); 436 * FW do not form regular beacon, so bcon IE's are not set
437 * For the DMG bcon, when it will be supported, bcon IE's will
438 * be reused; add something like:
439 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
440 * bcon->beacon_ies);
441 */
451 wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, 442 wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
452 bcon->proberesp_ies); 443 bcon->proberesp_ies);
453 wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, 444 wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
@@ -455,7 +446,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
455 446
456 wil->secure_pcp = info->privacy; 447 wil->secure_pcp = info->privacy;
457 448
458 rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype); 449 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
450 channel->hw_value);
459 if (rc) 451 if (rc)
460 return rc; 452 return rc;
461 453
@@ -472,11 +464,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
472{ 464{
473 int rc = 0; 465 int rc = 0;
474 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 466 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
475 struct wireless_dev *wdev = ndev->ieee80211_ptr;
476 u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
477 467
478 /* To stop beaconing, set BI to 0 */ 468 rc = wmi_pcp_stop(wil);
479 rc = wmi_set_bcon(wil, 0, wmi_nettype);
480 469
481 return rc; 470 return rc;
482} 471}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
deleted file mode 100644
index e5712f026c47..000000000000
--- a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef WIL_DBG_HEXDUMP_H_
2#define WIL_DBG_HEXDUMP_H_
3
4#include <linux/printk.h>
5#include <linux/dynamic_debug.h>
6
7#if defined(CONFIG_DYNAMIC_DEBUG)
8#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
9 groupsize, buf, len, ascii) \
10 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
11 groupsize, buf, len, ascii)
12
13#else /* defined(CONFIG_DYNAMIC_DEBUG) */
14#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
15 groupsize, buf, len, ascii) \
16 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
17 groupsize, buf, len, ascii)
18#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
19
20#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 65fc9683bfd8..4be07f5e22b9 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -312,14 +312,6 @@ static const struct file_operations fops_memread = {
312 .llseek = seq_lseek, 312 .llseek = seq_lseek,
313}; 313};
314 314
315static int wil_default_open(struct inode *inode, struct file *file)
316{
317 if (inode->i_private)
318 file->private_data = inode->i_private;
319
320 return 0;
321}
322
323static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, 315static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
324 size_t count, loff_t *ppos) 316 size_t count, loff_t *ppos)
325{ 317{
@@ -361,7 +353,7 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
361 353
362static const struct file_operations fops_ioblob = { 354static const struct file_operations fops_ioblob = {
363 .read = wil_read_file_ioblob, 355 .read = wil_read_file_ioblob,
364 .open = wil_default_open, 356 .open = simple_open,
365 .llseek = default_llseek, 357 .llseek = default_llseek,
366}; 358};
367 359
@@ -396,7 +388,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
396 388
397static const struct file_operations fops_reset = { 389static const struct file_operations fops_reset = {
398 .write = wil_write_file_reset, 390 .write = wil_write_file_reset,
399 .open = wil_default_open, 391 .open = simple_open,
400}; 392};
401/*---------Tx descriptor------------*/ 393/*---------Tx descriptor------------*/
402 394
@@ -526,7 +518,50 @@ static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
526static const struct file_operations fops_ssid = { 518static const struct file_operations fops_ssid = {
527 .read = wil_read_file_ssid, 519 .read = wil_read_file_ssid,
528 .write = wil_write_file_ssid, 520 .write = wil_write_file_ssid,
529 .open = wil_default_open, 521 .open = simple_open,
522};
523
524/*---------temp------------*/
525static void print_temp(struct seq_file *s, const char *prefix, u32 t)
526{
527 switch (t) {
528 case 0:
529 case ~(u32)0:
530 seq_printf(s, "%s N/A\n", prefix);
531 break;
532 default:
533 seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
534 break;
535 }
536}
537
538static int wil_temp_debugfs_show(struct seq_file *s, void *data)
539{
540 struct wil6210_priv *wil = s->private;
541 u32 t_m, t_r;
542
543 int rc = wmi_get_temperature(wil, &t_m, &t_r);
544 if (rc) {
545 seq_printf(s, "Failed\n");
546 return 0;
547 }
548
549 print_temp(s, "MAC temperature :", t_m);
550 print_temp(s, "Radio temperature :", t_r);
551
552 return 0;
553}
554
555static int wil_temp_seq_open(struct inode *inode, struct file *file)
556{
557 return single_open(file, wil_temp_debugfs_show, inode->i_private);
558}
559
560static const struct file_operations fops_temp = {
561 .open = wil_temp_seq_open,
562 .release = single_release,
563 .read = seq_read,
564 .llseek = seq_lseek,
530}; 565};
531 566
532/*----------------*/ 567/*----------------*/
@@ -563,6 +598,7 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
563 debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread); 598 debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
564 599
565 debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset); 600 debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
601 debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
566 602
567 wil->rgf_blob.data = (void * __force)wil->csr + 0; 603 wil->rgf_blob.data = (void * __force)wil->csr + 0;
568 wil->rgf_blob.size = 0xa000; 604 wil->rgf_blob.size = 0xa000;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index dc97e7b2609c..e3c1e7684f9c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -240,6 +240,15 @@ static void wil_notify_fw_error(struct wil6210_priv *wil)
240 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 240 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
241} 241}
242 242
243static void wil_cache_mbox_regs(struct wil6210_priv *wil)
244{
245 /* make shadow copy of registers that should not change on run time */
246 wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
247 sizeof(struct wil6210_mbox_ctl));
248 wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
249 wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
250}
251
243static irqreturn_t wil6210_irq_misc(int irq, void *cookie) 252static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
244{ 253{
245 struct wil6210_priv *wil = cookie; 254 struct wil6210_priv *wil = cookie;
@@ -257,14 +266,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
257 wil6210_mask_irq_misc(wil); 266 wil6210_mask_irq_misc(wil);
258 267
259 if (isr & ISR_MISC_FW_ERROR) { 268 if (isr & ISR_MISC_FW_ERROR) {
260 wil_dbg_irq(wil, "IRQ: Firmware error\n"); 269 wil_err(wil, "Firmware error detected\n");
261 clear_bit(wil_status_fwready, &wil->status); 270 clear_bit(wil_status_fwready, &wil->status);
262 wil_notify_fw_error(wil); 271 /*
263 isr &= ~ISR_MISC_FW_ERROR; 272 * do not clear @isr here - we do 2-nd part in thread
273 * there, user space get notified, and it should be done
274 * in non-atomic context
275 */
264 } 276 }
265 277
266 if (isr & ISR_MISC_FW_READY) { 278 if (isr & ISR_MISC_FW_READY) {
267 wil_dbg_irq(wil, "IRQ: FW ready\n"); 279 wil_dbg_irq(wil, "IRQ: FW ready\n");
280 wil_cache_mbox_regs(wil);
281 set_bit(wil_status_reset_done, &wil->status);
268 /** 282 /**
269 * Actual FW ready indicated by the 283 * Actual FW ready indicated by the
270 * WMI_FW_READY_EVENTID 284 * WMI_FW_READY_EVENTID
@@ -289,6 +303,11 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
289 303
290 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); 304 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
291 305
306 if (isr & ISR_MISC_FW_ERROR) {
307 wil_notify_fw_error(wil);
308 isr &= ~ISR_MISC_FW_ERROR;
309 }
310
292 if (isr & ISR_MISC_MBOX_EVT) { 311 if (isr & ISR_MISC_MBOX_EVT) {
293 wil_dbg_irq(wil, "MBOX event\n"); 312 wil_dbg_irq(wil, "MBOX event\n");
294 wmi_recv_cmd(wil); 313 wmi_recv_cmd(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 761c389586d4..a0478e2f6868 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -14,12 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/sched.h>
20#include <linux/ieee80211.h>
21#include <linux/wireless.h>
22#include <linux/slab.h>
23#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
24#include <linux/if_arp.h> 18#include <linux/if_arp.h>
25 19
@@ -109,13 +103,24 @@ static void wil_connect_timer_fn(ulong x)
109 schedule_work(&wil->disconnect_worker); 103 schedule_work(&wil->disconnect_worker);
110} 104}
111 105
112static void wil_cache_mbox_regs(struct wil6210_priv *wil) 106static void wil_connect_worker(struct work_struct *work)
113{ 107{
114 /* make shadow copy of registers that should not change on run time */ 108 int rc;
115 wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, 109 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
116 sizeof(struct wil6210_mbox_ctl)); 110 connect_worker);
117 wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); 111 int cid = wil->pending_connect_cid;
118 wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); 112
113 if (cid < 0) {
114 wil_err(wil, "No connection pending\n");
115 return;
116 }
117
118 wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
119
120 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
121 wil->pending_connect_cid = -1;
122 if (rc == 0)
123 wil_link_on(wil);
119} 124}
120 125
121int wil_priv_init(struct wil6210_priv *wil) 126int wil_priv_init(struct wil6210_priv *wil)
@@ -130,7 +135,7 @@ int wil_priv_init(struct wil6210_priv *wil)
130 wil->pending_connect_cid = -1; 135 wil->pending_connect_cid = -1;
131 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 136 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
132 137
133 INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker); 138 INIT_WORK(&wil->connect_worker, wil_connect_worker);
134 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 139 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
135 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 140 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
136 141
@@ -147,8 +152,6 @@ int wil_priv_init(struct wil6210_priv *wil)
147 return -EAGAIN; 152 return -EAGAIN;
148 } 153 }
149 154
150 wil_cache_mbox_regs(wil);
151
152 return 0; 155 return 0;
153} 156}
154 157
@@ -185,15 +188,11 @@ static void wil_target_reset(struct wil6210_priv *wil)
185 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ 188 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
186 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ 189 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
187 190
188 msleep(100);
189
190 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); 191 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
191 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); 192 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
192 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170); 193 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
193 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00); 194 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
194 195
195 msleep(100);
196
197 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); 196 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
198 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); 197 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
199 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); 198 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
@@ -203,12 +202,6 @@ static void wil_target_reset(struct wil6210_priv *wil)
203 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080); 202 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
204 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 203 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
205 204
206 msleep(2000);
207
208 W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
209
210 msleep(2000);
211
212 wil_dbg_misc(wil, "Reset completed\n"); 205 wil_dbg_misc(wil, "Reset completed\n");
213 206
214#undef W 207#undef W
@@ -265,8 +258,6 @@ int wil_reset(struct wil6210_priv *wil)
265 wil->pending_connect_cid = -1; 258 wil->pending_connect_cid = -1;
266 INIT_COMPLETION(wil->wmi_ready); 259 INIT_COMPLETION(wil->wmi_ready);
267 260
268 wil_cache_mbox_regs(wil);
269
270 /* TODO: release MAC reset */ 261 /* TODO: release MAC reset */
271 wil6210_enable_irq(wil); 262 wil6210_enable_irq(wil);
272 263
@@ -352,9 +343,9 @@ static int __wil_up(struct wil6210_priv *wil)
352 wil_err(wil, "SSID not set\n"); 343 wil_err(wil, "SSID not set\n");
353 return -EINVAL; 344 return -EINVAL;
354 } 345 }
355 wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid); 346 rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
356 if (channel) 347 if (rc)
357 wmi_set_channel(wil, channel->hw_value); 348 return rc;
358 break; 349 break;
359 default: 350 default:
360 break; 351 break;
@@ -364,9 +355,12 @@ static int __wil_up(struct wil6210_priv *wil)
364 wmi_set_mac_address(wil, ndev->dev_addr); 355 wmi_set_mac_address(wil, ndev->dev_addr);
365 356
366 /* Set up beaconing if required. */ 357 /* Set up beaconing if required. */
367 rc = wmi_set_bcon(wil, bi, wmi_nettype); 358 if (bi > 0) {
368 if (rc) 359 rc = wmi_pcp_start(wil, bi, wmi_nettype,
369 return rc; 360 (channel ? channel->hw_value : 0));
361 if (rc)
362 return rc;
363 }
370 364
371 /* Rx VRING. After MAC and beacon */ 365 /* Rx VRING. After MAC and beacon */
372 wil_rx_init(wil); 366 wil_rx_init(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ce2e33dce20..098a8ec6b841 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
20#include <linux/slab.h>
21 18
22#include "wil6210.h" 19#include "wil6210.h"
23 20
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 81c35c6e3832..eb1dc7ad80fb 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/netdevice.h>
21#include <linux/debugfs.h> 18#include <linux/debugfs.h>
22#include <linux/pci.h> 19#include <linux/pci.h>
23#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d1315b442375..797024507c71 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
20#include <linux/hardirq.h>
21#include <net/ieee80211_radiotap.h> 18#include <net/ieee80211_radiotap.h>
22#include <linux/if_arp.h> 19#include <linux/if_arp.h>
23#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
@@ -83,8 +80,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
83 */ 80 */
84 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); 81 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
85 if (!vring->va) { 82 if (!vring->va) {
86 wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
87 vring->size);
88 kfree(vring->ctx); 83 kfree(vring->ctx);
89 vring->ctx = NULL; 84 vring->ctx = NULL;
90 return -ENOMEM; 85 return -ENOMEM;
@@ -196,8 +191,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
196 * - Phy info 191 * - Phy info
197 */ 192 */
198static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, 193static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
199 struct sk_buff *skb, 194 struct sk_buff *skb)
200 volatile struct vring_rx_desc *d)
201{ 195{
202 struct wireless_dev *wdev = wil->wdev; 196 struct wireless_dev *wdev = wil->wdev;
203 struct wil6210_rtap { 197 struct wil6210_rtap {
@@ -221,6 +215,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
221 __le16 vendor_skip; 215 __le16 vendor_skip;
222 u8 vendor_data[0]; 216 u8 vendor_data[0];
223 } __packed; 217 } __packed;
218 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
224 struct wil6210_rtap_vendor *rtap_vendor; 219 struct wil6210_rtap_vendor *rtap_vendor;
225 int rtap_len = sizeof(struct wil6210_rtap); 220 int rtap_len = sizeof(struct wil6210_rtap);
226 int phy_length = 0; /* phy info header size, bytes */ 221 int phy_length = 0; /* phy info header size, bytes */
@@ -317,6 +312,8 @@ static void wil_swap_ethaddr(void *data)
317/** 312/**
318 * reap 1 frame from @swhead 313 * reap 1 frame from @swhead
319 * 314 *
315 * Rx descriptor copied to skb->cb
316 *
320 * Safe to call from IRQ 317 * Safe to call from IRQ
321 */ 318 */
322static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, 319static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
@@ -325,12 +322,15 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
325 struct device *dev = wil_to_dev(wil); 322 struct device *dev = wil_to_dev(wil);
326 struct net_device *ndev = wil_to_ndev(wil); 323 struct net_device *ndev = wil_to_ndev(wil);
327 volatile struct vring_rx_desc *d; 324 volatile struct vring_rx_desc *d;
325 struct vring_rx_desc *d1;
328 struct sk_buff *skb; 326 struct sk_buff *skb;
329 dma_addr_t pa; 327 dma_addr_t pa;
330 unsigned int sz = RX_BUF_LEN; 328 unsigned int sz = RX_BUF_LEN;
331 u8 ftype; 329 u8 ftype;
332 u8 ds_bits; 330 u8 ds_bits;
333 331
332 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
333
334 if (wil_vring_is_empty(vring)) 334 if (wil_vring_is_empty(vring))
335 return NULL; 335 return NULL;
336 336
@@ -345,11 +345,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
345 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 345 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
346 skb_trim(skb, d->dma.length); 346 skb_trim(skb, d->dma.length);
347 347
348 wil->stats.last_mcs_rx = wil_rxdesc_mcs(d); 348 d1 = wil_skb_rxdesc(skb);
349 *d1 = *d;
350
351 wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1);
349 352
350 /* use radiotap header only if required */ 353 /* use radiotap header only if required */
351 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) 354 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
352 wil_rx_add_radiotap_header(wil, skb, d); 355 wil_rx_add_radiotap_header(wil, skb);
353 356
354 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length); 357 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
355 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, 358 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
@@ -365,7 +368,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
365 * Driver should recognize it by frame type, that is found 368 * Driver should recognize it by frame type, that is found
366 * in Rx descriptor. If type is not data, it is 802.11 frame as is 369 * in Rx descriptor. If type is not data, it is 802.11 frame as is
367 */ 370 */
368 ftype = wil_rxdesc_ftype(d) << 2; 371 ftype = wil_rxdesc_ftype(d1) << 2;
369 if (ftype != IEEE80211_FTYPE_DATA) { 372 if (ftype != IEEE80211_FTYPE_DATA) {
370 wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); 373 wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
371 /* TODO: process it */ 374 /* TODO: process it */
@@ -380,7 +383,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
380 return NULL; 383 return NULL;
381 } 384 }
382 385
383 ds_bits = wil_rxdesc_ds_bits(d); 386 ds_bits = wil_rxdesc_ds_bits(d1);
384 if (ds_bits == 1) { 387 if (ds_bits == 1) {
385 /* 388 /*
386 * HW bug - in ToDS mode, i.e. Rx on AP side, 389 * HW bug - in ToDS mode, i.e. Rx on AP side,
@@ -522,6 +525,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
522 .vring_cfg = { 525 .vring_cfg = {
523 .tx_sw_ring = { 526 .tx_sw_ring = {
524 .max_mpdu_size = cpu_to_le16(TX_BUF_LEN), 527 .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
528 .ring_size = cpu_to_le16(size),
525 }, 529 },
526 .ringid = id, 530 .ringid = id,
527 .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4), 531 .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
@@ -553,14 +557,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
553 goto out; 557 goto out;
554 558
555 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 559 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
556 cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
557 560
558 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd), 561 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
559 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); 562 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
560 if (rc) 563 if (rc)
561 goto out_free; 564 goto out_free;
562 565
563 if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) { 566 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
564 wil_err(wil, "Tx config failed, status 0x%02x\n", 567 wil_err(wil, "Tx config failed, status 0x%02x\n",
565 reply.cmd.status); 568 reply.cmd.status);
566 rc = -EINVAL; 569 rc = -EINVAL;
@@ -784,9 +787,14 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
784 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); 787 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
785 788
786 while (!wil_vring_is_empty(vring)) { 789 while (!wil_vring_is_empty(vring)) {
787 volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx; 790 volatile struct vring_tx_desc *d1 =
791 &vring->va[vring->swtail].tx;
792 struct vring_tx_desc dd, *d = &dd;
788 dma_addr_t pa; 793 dma_addr_t pa;
789 struct sk_buff *skb; 794 struct sk_buff *skb;
795
796 dd = *d1;
797
790 if (!(d->dma.status & TX_DMA_STATUS_DU)) 798 if (!(d->dma.status & TX_DMA_STATUS_DU))
791 break; 799 break;
792 800
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 45a61f597c5c..adef12fb2aee 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -339,24 +339,59 @@ union vring_desc {
339 struct vring_rx_desc rx; 339 struct vring_rx_desc rx;
340} __packed; 340} __packed;
341 341
342static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d) 342static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
343{ 343{
344 return WIL_GET_BITS(d->dma.d0, 16, 29); 344 return WIL_GET_BITS(d->mac.d0, 0, 3);
345} 345}
346 346
347static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d) 347static inline int wil_rxdesc_cid(struct vring_rx_desc *d)
348{ 348{
349 return WIL_GET_BITS(d->mac.d1, 21, 24); 349 return WIL_GET_BITS(d->mac.d0, 4, 6);
350} 350}
351 351
352static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d) 352static inline int wil_rxdesc_mid(struct vring_rx_desc *d)
353{ 353{
354 return WIL_GET_BITS(d->mac.d1, 8, 9); 354 return WIL_GET_BITS(d->mac.d0, 8, 9);
355} 355}
356 356
357static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d) 357static inline int wil_rxdesc_ftype(struct vring_rx_desc *d)
358{ 358{
359 return WIL_GET_BITS(d->mac.d0, 10, 11); 359 return WIL_GET_BITS(d->mac.d0, 10, 11);
360} 360}
361 361
362static inline int wil_rxdesc_subtype(struct vring_rx_desc *d)
363{
364 return WIL_GET_BITS(d->mac.d0, 12, 15);
365}
366
367static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
368{
369 return WIL_GET_BITS(d->mac.d0, 16, 27);
370}
371
372static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
373{
374 return WIL_GET_BITS(d->mac.d0, 28, 31);
375}
376
377static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
378{
379 return WIL_GET_BITS(d->mac.d1, 8, 9);
380}
381
382static inline int wil_rxdesc_mcs(struct vring_rx_desc *d)
383{
384 return WIL_GET_BITS(d->mac.d1, 21, 24);
385}
386
387static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
388{
389 return WIL_GET_BITS(d->dma.d0, 16, 29);
390}
391
392static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
393{
394 return (void *)skb->cb;
395}
396
362#endif /* WIL6210_TXRX_H */ 397#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index aea961ff8f08..8f76ecd8a7e5 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,8 +21,6 @@
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <net/cfg80211.h> 22#include <net/cfg80211.h>
23 23
24#include "dbg_hexdump.h"
25
26#define WIL_NAME "wil6210" 24#define WIL_NAME "wil6210"
27 25
28/** 26/**
@@ -188,6 +186,7 @@ enum { /* for wil6210_priv.status */
188 wil_status_fwready = 0, 186 wil_status_fwready = 0,
189 wil_status_fwconnected, 187 wil_status_fwconnected,
190 wil_status_dontscan, 188 wil_status_dontscan,
189 wil_status_reset_done,
191 wil_status_irqen, /* FIXME: interrupts enabled - for debug */ 190 wil_status_irqen, /* FIXME: interrupts enabled - for debug */
192}; 191};
193 192
@@ -210,6 +209,8 @@ struct wil6210_priv {
210 struct wireless_dev *wdev; 209 struct wireless_dev *wdev;
211 void __iomem *csr; 210 void __iomem *csr;
212 ulong status; 211 ulong status;
212 u32 fw_version;
213 u8 n_mids; /* number of additional MIDs as reported by FW */
213 /* profile */ 214 /* profile */
214 u32 monitor_flags; 215 u32 monitor_flags;
215 u32 secure_pcp; /* create secure PCP? */ 216 u32 secure_pcp; /* create secure PCP? */
@@ -227,7 +228,7 @@ struct wil6210_priv {
227 struct workqueue_struct *wmi_wq; /* for deferred calls */ 228 struct workqueue_struct *wmi_wq; /* for deferred calls */
228 struct work_struct wmi_event_worker; 229 struct work_struct wmi_event_worker;
229 struct workqueue_struct *wmi_wq_conn; /* for connect worker */ 230 struct workqueue_struct *wmi_wq_conn; /* for connect worker */
230 struct work_struct wmi_connect_worker; 231 struct work_struct connect_worker;
231 struct work_struct disconnect_worker; 232 struct work_struct disconnect_worker;
232 struct timer_list connect_timer; 233 struct timer_list connect_timer;
233 int pending_connect_cid; 234 int pending_connect_cid;
@@ -277,13 +278,13 @@ struct wil6210_priv {
277 278
278#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ 279#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
279 groupsize, buf, len, ascii) \ 280 groupsize, buf, len, ascii) \
280 wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\ 281 print_hex_dump_debug("DBG[TXRX]" prefix_str,\
281 prefix_type, rowsize, \ 282 prefix_type, rowsize, \
282 groupsize, buf, len, ascii) 283 groupsize, buf, len, ascii)
283 284
284#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \ 285#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
285 groupsize, buf, len, ascii) \ 286 groupsize, buf, len, ascii) \
286 wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\ 287 print_hex_dump_debug("DBG[ WMI]" prefix_str,\
287 prefix_type, rowsize, \ 288 prefix_type, rowsize, \
288 groupsize, buf, len, ascii) 289 groupsize, buf, len, ascii)
289 290
@@ -313,7 +314,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
313void wmi_recv_cmd(struct wil6210_priv *wil); 314void wmi_recv_cmd(struct wil6210_priv *wil);
314int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, 315int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
315 u16 reply_id, void *reply, u8 reply_size, int to_msec); 316 u16 reply_id, void *reply, u8 reply_size, int to_msec);
316void wmi_connect_worker(struct work_struct *work);
317void wmi_event_worker(struct work_struct *work); 317void wmi_event_worker(struct work_struct *work);
318void wmi_event_flush(struct wil6210_priv *wil); 318void wmi_event_flush(struct wil6210_priv *wil);
319int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid); 319int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
@@ -328,6 +328,8 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
328int wmi_echo(struct wil6210_priv *wil); 328int wmi_echo(struct wil6210_priv *wil);
329int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); 329int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
330int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); 330int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
331int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
332int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
331 333
332int wil6210_init_irq(struct wil6210_priv *wil, int irq); 334int wil6210_init_irq(struct wil6210_priv *wil, int irq);
333void wil6210_fini_irq(struct wil6210_priv *wil, int irq); 335void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
@@ -341,7 +343,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev);
341void wil_wdev_free(struct wil6210_priv *wil); 343void wil_wdev_free(struct wil6210_priv *wil);
342 344
343int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); 345int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
344int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype); 346int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
347int wmi_pcp_stop(struct wil6210_priv *wil);
345void wil6210_disconnect(struct wil6210_priv *wil, void *bssid); 348void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
346 349
347int wil_rx_init(struct wil6210_priv *wil); 350int wil_rx_init(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0bb3b76b4b58..45b04e383f9a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -14,9 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/pci.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
21#include <linux/if_arp.h> 18#include <linux/if_arp.h>
22 19
@@ -272,16 +269,18 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
272 struct net_device *ndev = wil_to_ndev(wil); 269 struct net_device *ndev = wil_to_ndev(wil);
273 struct wireless_dev *wdev = wil->wdev; 270 struct wireless_dev *wdev = wil->wdev;
274 struct wmi_ready_event *evt = d; 271 struct wmi_ready_event *evt = d;
275 u32 ver = le32_to_cpu(evt->sw_version); 272 wil->fw_version = le32_to_cpu(evt->sw_version);
273 wil->n_mids = evt->numof_additional_mids;
276 274
277 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac); 275 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
276 evt->mac, wil->n_mids);
278 277
279 if (!is_valid_ether_addr(ndev->dev_addr)) { 278 if (!is_valid_ether_addr(ndev->dev_addr)) {
280 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); 279 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
281 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); 280 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
282 } 281 }
283 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), 282 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
284 "%d", ver); 283 "%d", wil->fw_version);
285} 284}
286 285
287static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, 286static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
@@ -324,17 +323,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
324 323
325 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { 324 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
326 struct cfg80211_bss *bss; 325 struct cfg80211_bss *bss;
327 u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp); 326
328 u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info); 327 bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
329 u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int); 328 d_len, signal, GFP_KERNEL);
330 const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
331 size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
332 u.beacon.variable);
333 wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
334
335 bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
336 tsf, cap, bi, ie_buf, ie_len,
337 signal, GFP_KERNEL);
338 if (bss) { 329 if (bss) {
339 wil_dbg_wmi(wil, "Added BSS %pM\n", 330 wil_dbg_wmi(wil, "Added BSS %pM\n",
340 rx_mgmt_frame->bssid); 331 rx_mgmt_frame->bssid);
@@ -342,6 +333,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
342 } else { 333 } else {
343 wil_err(wil, "cfg80211_inform_bss() failed\n"); 334 wil_err(wil, "cfg80211_inform_bss() failed\n");
344 } 335 }
336 } else {
337 cfg80211_rx_mgmt(wil->wdev, freq, signal,
338 (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
345 } 339 }
346} 340}
347 341
@@ -443,7 +437,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
443 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN); 437 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
444 438
445 wil->pending_connect_cid = evt->cid; 439 wil->pending_connect_cid = evt->cid;
446 queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker); 440 queue_work(wil->wmi_wq_conn, &wil->connect_worker);
447} 441}
448 442
449static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, 443static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -528,6 +522,37 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
528 } 522 }
529} 523}
530 524
525static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
526{
527 struct net_device *ndev = wil_to_ndev(wil);
528 struct wmi_data_port_open_event *evt = d;
529
530 wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
531
532 netif_carrier_on(ndev);
533}
534
535static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
536{
537 struct net_device *ndev = wil_to_ndev(wil);
538 struct wmi_wbe_link_down_event *evt = d;
539
540 wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
541 evt->cid, le32_to_cpu(evt->reason));
542
543 netif_carrier_off(ndev);
544}
545
546static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
547 int len)
548{
549 struct wmi_vring_ba_status_event *evt = d;
550
551 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
552 evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
553 __le16_to_cpu(evt->ba_timeout));
554}
555
531static const struct { 556static const struct {
532 int eventid; 557 int eventid;
533 void (*handler)(struct wil6210_priv *wil, int eventid, 558 void (*handler)(struct wil6210_priv *wil, int eventid,
@@ -541,6 +566,9 @@ static const struct {
541 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, 566 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
542 {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, 567 {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
543 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, 568 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
569 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
570 {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
571 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
544}; 572};
545 573
546/* 574/*
@@ -559,6 +587,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
559 void __iomem *src; 587 void __iomem *src;
560 ulong flags; 588 ulong flags;
561 589
590 if (!test_bit(wil_status_reset_done, &wil->status)) {
591 wil_err(wil, "Reset not completed\n");
592 return;
593 }
594
562 for (;;) { 595 for (;;) {
563 u16 len; 596 u16 len;
564 597
@@ -683,18 +716,39 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
683 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); 716 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
684} 717}
685 718
686int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype) 719int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
687{ 720{
688 struct wmi_bcon_ctrl_cmd cmd = { 721 int rc;
722
723 struct wmi_pcp_start_cmd cmd = {
689 .bcon_interval = cpu_to_le16(bi), 724 .bcon_interval = cpu_to_le16(bi),
690 .network_type = wmi_nettype, 725 .network_type = wmi_nettype,
691 .disable_sec_offload = 1, 726 .disable_sec_offload = 1,
727 .channel = chan,
692 }; 728 };
729 struct {
730 struct wil6210_mbox_hdr_wmi wmi;
731 struct wmi_pcp_started_event evt;
732 } __packed reply;
693 733
694 if (!wil->secure_pcp) 734 if (!wil->secure_pcp)
695 cmd.disable_sec = 1; 735 cmd.disable_sec = 1;
696 736
697 return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd)); 737 rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
738 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
739 if (rc)
740 return rc;
741
742 if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
743 rc = -EINVAL;
744
745 return rc;
746}
747
748int wmi_pcp_stop(struct wil6210_priv *wil)
749{
750 return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
751 WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
698} 752}
699 753
700int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) 754int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
@@ -765,6 +819,16 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
765 return 0; 819 return 0;
766} 820}
767 821
822int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
823{
824 struct wmi_p2p_cfg_cmd cmd = {
825 .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
826 .channel = channel - 1,
827 };
828
829 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
830}
831
768int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) 832int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
769{ 833{
770 struct wmi_eapol_tx_cmd *cmd; 834 struct wmi_eapol_tx_cmd *cmd;
@@ -843,7 +907,7 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
843 /* BUG: FW API define ieLen as u8. Will fix FW */ 907 /* BUG: FW API define ieLen as u8. Will fix FW */
844 cmd->ie_len = cpu_to_le16(ie_len); 908 cmd->ie_len = cpu_to_le16(ie_len);
845 memcpy(cmd->ie_info, ie, ie_len); 909 memcpy(cmd->ie_info, ie, ie_len);
846 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, &cmd, len); 910 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
847 kfree(cmd); 911 kfree(cmd);
848 912
849 return rc; 913 return rc;
@@ -898,6 +962,31 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
898 return rc; 962 return rc;
899} 963}
900 964
965int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
966{
967 int rc;
968 struct wmi_temp_sense_cmd cmd = {
969 .measure_marlon_m_en = cpu_to_le32(!!t_m),
970 .measure_marlon_r_en = cpu_to_le32(!!t_r),
971 };
972 struct {
973 struct wil6210_mbox_hdr_wmi wmi;
974 struct wmi_temp_sense_done_event evt;
975 } __packed reply;
976
977 rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
978 WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
979 if (rc)
980 return rc;
981
982 if (t_m)
983 *t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
984 if (t_r)
985 *t_r = le32_to_cpu(reply.evt.marlon_r_t1000);
986
987 return 0;
988}
989
901void wmi_event_flush(struct wil6210_priv *wil) 990void wmi_event_flush(struct wil6210_priv *wil)
902{ 991{
903 struct pending_wmi_event *evt, *t; 992 struct pending_wmi_event *evt, *t;
@@ -997,24 +1086,3 @@ void wmi_event_worker(struct work_struct *work)
997 kfree(evt); 1086 kfree(evt);
998 } 1087 }
999} 1088}
1000
1001void wmi_connect_worker(struct work_struct *work)
1002{
1003 int rc;
1004 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
1005 wmi_connect_worker);
1006
1007 if (wil->pending_connect_cid < 0) {
1008 wil_err(wil, "No connection pending\n");
1009 return;
1010 }
1011
1012 wil_dbg_wmi(wil, "Configure for connection CID %d\n",
1013 wil->pending_connect_cid);
1014
1015 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
1016 wil->pending_connect_cid, 0);
1017 wil->pending_connect_cid = -1;
1018 if (rc == 0)
1019 wil_link_on(wil);
1020}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 3bbf87572b07..50b8528394f4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -36,6 +36,7 @@
36enum wmi_command_id { 36enum wmi_command_id {
37 WMI_CONNECT_CMDID = 0x0001, 37 WMI_CONNECT_CMDID = 0x0001,
38 WMI_DISCONNECT_CMDID = 0x0003, 38 WMI_DISCONNECT_CMDID = 0x0003,
39 WMI_DISCONNECT_STA_CMDID = 0x0004,
39 WMI_START_SCAN_CMDID = 0x0007, 40 WMI_START_SCAN_CMDID = 0x0007,
40 WMI_SET_BSS_FILTER_CMDID = 0x0009, 41 WMI_SET_BSS_FILTER_CMDID = 0x0009,
41 WMI_SET_PROBED_SSID_CMDID = 0x000a, 42 WMI_SET_PROBED_SSID_CMDID = 0x000a,
@@ -44,7 +45,6 @@ enum wmi_command_id {
44 WMI_ADD_CIPHER_KEY_CMDID = 0x0016, 45 WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
45 WMI_DELETE_CIPHER_KEY_CMDID = 0x0017, 46 WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
46 WMI_SET_APPIE_CMDID = 0x003f, 47 WMI_SET_APPIE_CMDID = 0x003f,
47 WMI_GET_APPIE_CMDID = 0x0040,
48 WMI_SET_WSC_STATUS_CMDID = 0x0041, 48 WMI_SET_WSC_STATUS_CMDID = 0x0041,
49 WMI_PXMT_RANGE_CFG_CMDID = 0x0042, 49 WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
50 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, 50 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
@@ -55,11 +55,11 @@ enum wmi_command_id {
55 WMI_DEEP_ECHO_CMDID = 0x0804, 55 WMI_DEEP_ECHO_CMDID = 0x0804,
56 WMI_CONFIG_MAC_CMDID = 0x0805, 56 WMI_CONFIG_MAC_CMDID = 0x0805,
57 WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806, 57 WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
58 WMI_ADD_STATION_CMDID = 0x0807,
59 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808, 58 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
60 WMI_PHY_GET_STATISTICS_CMDID = 0x0809, 59 WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
61 WMI_FS_TUNE_CMDID = 0x080a, 60 WMI_FS_TUNE_CMDID = 0x080a,
62 WMI_CORR_MEASURE_CMDID = 0x080b, 61 WMI_CORR_MEASURE_CMDID = 0x080b,
62 WMI_READ_RSSI_CMDID = 0x080c,
63 WMI_TEMP_SENSE_CMDID = 0x080e, 63 WMI_TEMP_SENSE_CMDID = 0x080e,
64 WMI_DC_CALIB_CMDID = 0x080f, 64 WMI_DC_CALIB_CMDID = 0x080f,
65 WMI_SEND_TONE_CMDID = 0x0810, 65 WMI_SEND_TONE_CMDID = 0x0810,
@@ -75,9 +75,9 @@ enum wmi_command_id {
75 MAC_IO_STATIC_PARAMS_CMDID = 0x081b, 75 MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
76 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c, 76 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
77 WMI_SILENT_RSSI_CALIB_CMDID = 0x081d, 77 WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
78 WMI_RF_RX_TEST_CMDID = 0x081e,
78 WMI_CFG_RX_CHAIN_CMDID = 0x0820, 79 WMI_CFG_RX_CHAIN_CMDID = 0x0820,
79 WMI_VRING_CFG_CMDID = 0x0821, 80 WMI_VRING_CFG_CMDID = 0x0821,
80 WMI_RX_ON_CMDID = 0x0822,
81 WMI_VRING_BA_EN_CMDID = 0x0823, 81 WMI_VRING_BA_EN_CMDID = 0x0823,
82 WMI_VRING_BA_DIS_CMDID = 0x0824, 82 WMI_VRING_BA_DIS_CMDID = 0x0824,
83 WMI_RCP_ADDBA_RESP_CMDID = 0x0825, 83 WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -87,7 +87,6 @@ enum wmi_command_id {
87 WMI_SET_PCP_CHANNEL_CMDID = 0x0829, 87 WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
88 WMI_GET_PCP_CHANNEL_CMDID = 0x082a, 88 WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
89 WMI_SW_TX_REQ_CMDID = 0x082b, 89 WMI_SW_TX_REQ_CMDID = 0x082b,
90 WMI_RX_OFF_CMDID = 0x082c,
91 WMI_READ_MAC_RXQ_CMDID = 0x0830, 90 WMI_READ_MAC_RXQ_CMDID = 0x0830,
92 WMI_READ_MAC_TXQ_CMDID = 0x0831, 91 WMI_READ_MAC_TXQ_CMDID = 0x0831,
93 WMI_WRITE_MAC_RXQ_CMDID = 0x0832, 92 WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
@@ -112,6 +111,18 @@ enum wmi_command_id {
112 WMI_FLASH_READ_CMDID = 0x0902, 111 WMI_FLASH_READ_CMDID = 0x0902,
113 WMI_FLASH_WRITE_CMDID = 0x0903, 112 WMI_FLASH_WRITE_CMDID = 0x0903,
114 WMI_SECURITY_UNIT_TEST_CMDID = 0x0904, 113 WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
114 /*P2P*/
115 WMI_P2P_CFG_CMDID = 0x0910,
116 WMI_PORT_ALLOCATE_CMDID = 0x0911,
117 WMI_PORT_DELETE_CMDID = 0x0912,
118 WMI_POWER_MGMT_CFG_CMDID = 0x0913,
119 WMI_START_LISTEN_CMDID = 0x0914,
120 WMI_START_SEARCH_CMDID = 0x0915,
121 WMI_DISCOVERY_START_CMDID = 0x0916,
122 WMI_DISCOVERY_STOP_CMDID = 0x0917,
123 WMI_PCP_START_CMDID = 0x0918,
124 WMI_PCP_STOP_CMDID = 0x0919,
125 WMI_GET_PCP_FACTOR_CMDID = 0x091b,
115 126
116 WMI_SET_MAC_ADDRESS_CMDID = 0xf003, 127 WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
117 WMI_ABORT_SCAN_CMDID = 0xf007, 128 WMI_ABORT_SCAN_CMDID = 0xf007,
@@ -132,18 +143,6 @@ enum wmi_command_id {
132 */ 143 */
133 144
134/* 145/*
135 * Frame Types
136 */
137enum wmi_mgmt_frame_type {
138 WMI_FRAME_BEACON = 0,
139 WMI_FRAME_PROBE_REQ = 1,
140 WMI_FRAME_PROBE_RESP = 2,
141 WMI_FRAME_ASSOC_REQ = 3,
142 WMI_FRAME_ASSOC_RESP = 4,
143 WMI_NUM_MGMT_FRAME,
144};
145
146/*
147 * WMI_CONNECT_CMDID 146 * WMI_CONNECT_CMDID
148 */ 147 */
149enum wmi_network_type { 148enum wmi_network_type {
@@ -184,7 +183,7 @@ enum wmi_crypto_type {
184enum wmi_connect_ctrl_flag_bits { 183enum wmi_connect_ctrl_flag_bits {
185 WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, 184 WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
186 WMI_CONNECT_SEND_REASSOC = 0x0002, 185 WMI_CONNECT_SEND_REASSOC = 0x0002,
187 WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004, 186 WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
188 WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008, 187 WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
189 WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010, 188 WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
190 WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020, 189 WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
@@ -212,6 +211,13 @@ struct wmi_connect_cmd {
212 u8 reserved1[2]; 211 u8 reserved1[2];
213} __packed; 212} __packed;
214 213
214/*
215 * WMI_DISCONNECT_STA_CMDID
216 */
217struct wmi_disconnect_sta_cmd {
218 u8 dst_mac[WMI_MAC_LEN];
219 __le16 disconnect_reason;
220} __packed;
215 221
216/* 222/*
217 * WMI_RECONNECT_CMDID 223 * WMI_RECONNECT_CMDID
@@ -289,10 +295,12 @@ struct wmi_delete_cipher_key_cmd {
289enum wmi_scan_type { 295enum wmi_scan_type {
290 WMI_LONG_SCAN = 0, 296 WMI_LONG_SCAN = 0,
291 WMI_SHORT_SCAN = 1, 297 WMI_SHORT_SCAN = 1,
298 WMI_PBC_SCAN = 2,
292}; 299};
293 300
294struct wmi_start_scan_cmd { 301struct wmi_start_scan_cmd {
295 u8 reserved[8]; 302 u8 reserved[8];
303
296 __le32 home_dwell_time; /* Max duration in the home channel(ms) */ 304 __le32 home_dwell_time; /* Max duration in the home channel(ms) */
297 __le32 force_scan_interval; /* Time interval between scans (ms)*/ 305 __le32 force_scan_interval; /* Time interval between scans (ms)*/
298 u8 scan_type; /* wmi_scan_type */ 306 u8 scan_type; /* wmi_scan_type */
@@ -309,7 +317,7 @@ struct wmi_start_scan_cmd {
309/* 317/*
310 * WMI_SET_PROBED_SSID_CMDID 318 * WMI_SET_PROBED_SSID_CMDID
311 */ 319 */
312#define MAX_PROBED_SSID_INDEX (15) 320#define MAX_PROBED_SSID_INDEX (3)
313 321
314enum wmi_ssid_flag { 322enum wmi_ssid_flag {
315 WMI_SSID_FLAG_DISABLE = 0, /* disables entry */ 323 WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
@@ -328,6 +336,20 @@ struct wmi_probed_ssid_cmd {
328 * WMI_SET_APPIE_CMDID 336 * WMI_SET_APPIE_CMDID
329 * Add Application specified IE to a management frame 337 * Add Application specified IE to a management frame
330 */ 338 */
339#define WMI_MAX_IE_LEN (1024)
340
341/*
342 * Frame Types
343 */
344enum wmi_mgmt_frame_type {
345 WMI_FRAME_BEACON = 0,
346 WMI_FRAME_PROBE_REQ = 1,
347 WMI_FRAME_PROBE_RESP = 2,
348 WMI_FRAME_ASSOC_REQ = 3,
349 WMI_FRAME_ASSOC_RESP = 4,
350 WMI_NUM_MGMT_FRAME,
351};
352
331struct wmi_set_appie_cmd { 353struct wmi_set_appie_cmd {
332 u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ 354 u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
333 u8 reserved; 355 u8 reserved;
@@ -335,13 +357,18 @@ struct wmi_set_appie_cmd {
335 u8 ie_info[0]; 357 u8 ie_info[0];
336} __packed; 358} __packed;
337 359
338#define WMI_MAX_IE_LEN (1024)
339 360
361/*
362 * WMI_PXMT_RANGE_CFG_CMDID
363 */
340struct wmi_pxmt_range_cfg_cmd { 364struct wmi_pxmt_range_cfg_cmd {
341 u8 dst_mac[WMI_MAC_LEN]; 365 u8 dst_mac[WMI_MAC_LEN];
342 __le16 range; 366 __le16 range;
343} __packed; 367} __packed;
344 368
369/*
370 * WMI_PXMT_SNR2_RANGE_CFG_CMDID
371 */
345struct wmi_pxmt_snr2_range_cfg_cmd { 372struct wmi_pxmt_snr2_range_cfg_cmd {
346 s8 snr2range_arr[WMI_PROX_RANGE_NUM-1]; 373 s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
347} __packed; 374} __packed;
@@ -359,6 +386,23 @@ struct wmi_rf_mgmt_cmd {
359 __le32 rf_mgmt_type; 386 __le32 rf_mgmt_type;
360} __packed; 387} __packed;
361 388
389
390/*
391 * WMI_RF_RX_TEST_CMDID
392 */
393struct wmi_rf_rx_test_cmd {
394 __le32 sector;
395} __packed;
396
397/*
398 * WMI_CORR_MEASURE_CMDID
399 */
400struct wmi_corr_measure_cmd {
401 s32 freq_mhz;
402 __le32 length_samples;
403 __le32 iterations;
404} __packed;
405
362/* 406/*
363 * WMI_SET_SSID_CMDID 407 * WMI_SET_SSID_CMDID
364 */ 408 */
@@ -388,6 +432,74 @@ struct wmi_bcon_ctrl_cmd {
388 u8 disable_sec; 432 u8 disable_sec;
389} __packed; 433} __packed;
390 434
435
436/******* P2P ***********/
437
438/*
439 * WMI_PORT_ALLOCATE_CMDID
440 */
441enum wmi_port_role {
442 WMI_PORT_STA = 0,
443 WMI_PORT_PCP = 1,
444 WMI_PORT_AP = 2,
445 WMI_PORT_P2P_DEV = 3,
446 WMI_PORT_P2P_CLIENT = 4,
447 WMI_PORT_P2P_GO = 5,
448};
449
450struct wmi_port_allocate_cmd {
451 u8 mac[WMI_MAC_LEN];
452 u8 port_role;
453 u8 midid;
454} __packed;
455
456/*
457 * WMI_PORT_DELETE_CMDID
458 */
459struct wmi_delete_port_cmd {
460 u8 mid;
461 u8 reserved[3];
462} __packed;
463
464/*
465 * WMI_P2P_CFG_CMDID
466 */
467enum wmi_discovery_mode {
468 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
469 WMI_DISCOVERY_MODE_OFFLOAD = 1,
470};
471
472struct wmi_p2p_cfg_cmd {
473 u8 discovery_mode; /* wmi_discovery_mode */
474 u8 channel;
475 __le16 bcon_interval; /* base to listen/search duration calculation */
476} __packed;
477
478/*
479 * WMI_POWER_MGMT_CFG_CMDID
480 */
481enum wmi_power_source_type {
482 WMI_POWER_SOURCE_BATTERY = 0,
483 WMI_POWER_SOURCE_OTHER = 1,
484};
485
486struct wmi_power_mgmt_cfg_cmd {
487 u8 power_source; /* wmi_power_source_type */
488 u8 reserved[3];
489} __packed;
490
491/*
492 * WMI_PCP_START_CMDID
493 */
494struct wmi_pcp_start_cmd {
495 __le16 bcon_interval;
496 u8 reserved0[10];
497 u8 network_type;
498 u8 channel;
499 u8 disable_sec_offload;
500 u8 disable_sec;
501} __packed;
502
391/* 503/*
392 * WMI_SW_TX_REQ_CMDID 504 * WMI_SW_TX_REQ_CMDID
393 */ 505 */
@@ -435,16 +547,17 @@ enum wmi_vring_cfg_schd_params_priority {
435 WMI_SCH_PRIO_HIGH = 1, 547 WMI_SCH_PRIO_HIGH = 1,
436}; 548};
437 549
550#define CIDXTID_CID_POS (0)
551#define CIDXTID_CID_LEN (4)
552#define CIDXTID_CID_MSK (0xF)
553#define CIDXTID_TID_POS (4)
554#define CIDXTID_TID_LEN (4)
555#define CIDXTID_TID_MSK (0xF0)
556
438struct wmi_vring_cfg { 557struct wmi_vring_cfg {
439 struct wmi_sw_ring_cfg tx_sw_ring; 558 struct wmi_sw_ring_cfg tx_sw_ring;
440 u8 ringid; /* 0-23 vrings */ 559 u8 ringid; /* 0-23 vrings */
441 560
442 #define CIDXTID_CID_POS (0)
443 #define CIDXTID_CID_LEN (4)
444 #define CIDXTID_CID_MSK (0xF)
445 #define CIDXTID_TID_POS (4)
446 #define CIDXTID_TID_LEN (4)
447 #define CIDXTID_TID_MSK (0xF0)
448 u8 cidxtid; 561 u8 cidxtid;
449 562
450 u8 encap_trans_type; 563 u8 encap_trans_type;
@@ -501,8 +614,14 @@ struct wmi_vring_ba_dis_cmd {
501 */ 614 */
502struct wmi_notify_req_cmd { 615struct wmi_notify_req_cmd {
503 u8 cid; 616 u8 cid;
504 u8 reserved[3]; 617 u8 year;
618 u8 month;
619 u8 day;
505 __le32 interval_usec; 620 __le32 interval_usec;
621 u8 hour;
622 u8 minute;
623 u8 second;
624 u8 miliseconds;
506} __packed; 625} __packed;
507 626
508/* 627/*
@@ -548,6 +667,11 @@ enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
548 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2, 667 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
549}; 668};
550 669
670enum wmi_cfg_rx_chain_cmd_reorder_type {
671 WMI_RX_HW_REORDER = 0,
672 WMI_RX_SW_REORDER = 1,
673};
674
551struct wmi_cfg_rx_chain_cmd { 675struct wmi_cfg_rx_chain_cmd {
552 __le32 action; 676 __le32 action;
553 struct wmi_sw_ring_cfg rx_sw_ring; 677 struct wmi_sw_ring_cfg rx_sw_ring;
@@ -596,7 +720,8 @@ struct wmi_cfg_rx_chain_cmd {
596 __le16 wb_thrsh; 720 __le16 wb_thrsh;
597 __le32 itr_value; 721 __le32 itr_value;
598 __le16 host_thrsh; 722 __le16 host_thrsh;
599 u8 reserved[2]; 723 u8 reorder_type;
724 u8 reserved;
600 struct wmi_sniffer_cfg sniffer_cfg; 725 struct wmi_sniffer_cfg sniffer_cfg;
601} __packed; 726} __packed;
602 727
@@ -604,15 +729,7 @@ struct wmi_cfg_rx_chain_cmd {
604 * WMI_RCP_ADDBA_RESP_CMDID 729 * WMI_RCP_ADDBA_RESP_CMDID
605 */ 730 */
606struct wmi_rcp_addba_resp_cmd { 731struct wmi_rcp_addba_resp_cmd {
607
608 #define CIDXTID_CID_POS (0)
609 #define CIDXTID_CID_LEN (4)
610 #define CIDXTID_CID_MSK (0xF)
611 #define CIDXTID_TID_POS (4)
612 #define CIDXTID_TID_LEN (4)
613 #define CIDXTID_TID_MSK (0xF0)
614 u8 cidxtid; 732 u8 cidxtid;
615
616 u8 dialog_token; 733 u8 dialog_token;
617 __le16 status_code; 734 __le16 status_code;
618 __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */ 735 __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
@@ -623,15 +740,7 @@ struct wmi_rcp_addba_resp_cmd {
623 * WMI_RCP_DELBA_CMDID 740 * WMI_RCP_DELBA_CMDID
624 */ 741 */
625struct wmi_rcp_delba_cmd { 742struct wmi_rcp_delba_cmd {
626
627 #define CIDXTID_CID_POS (0)
628 #define CIDXTID_CID_LEN (4)
629 #define CIDXTID_CID_MSK (0xF)
630 #define CIDXTID_TID_POS (4)
631 #define CIDXTID_TID_LEN (4)
632 #define CIDXTID_TID_MSK (0xF0)
633 u8 cidxtid; 743 u8 cidxtid;
634
635 u8 reserved; 744 u8 reserved;
636 __le16 reason; 745 __le16 reason;
637} __packed; 746} __packed;
@@ -640,15 +749,7 @@ struct wmi_rcp_delba_cmd {
640 * WMI_RCP_ADDBA_REQ_CMDID 749 * WMI_RCP_ADDBA_REQ_CMDID
641 */ 750 */
642struct wmi_rcp_addba_req_cmd { 751struct wmi_rcp_addba_req_cmd {
643
644 #define CIDXTID_CID_POS (0)
645 #define CIDXTID_CID_LEN (4)
646 #define CIDXTID_CID_MSK (0xF)
647 #define CIDXTID_TID_POS (4)
648 #define CIDXTID_TID_LEN (4)
649 #define CIDXTID_TID_MSK (0xF0)
650 u8 cidxtid; 752 u8 cidxtid;
651
652 u8 dialog_token; 753 u8 dialog_token;
653 /* ieee80211_ba_parameterset field as it received */ 754 /* ieee80211_ba_parameterset field as it received */
654 __le16 ba_param_set; 755 __le16 ba_param_set;
@@ -665,7 +766,6 @@ struct wmi_set_mac_address_cmd {
665 u8 reserved[2]; 766 u8 reserved[2];
666} __packed; 767} __packed;
667 768
668
669/* 769/*
670* WMI_EAPOL_TX_CMDID 770* WMI_EAPOL_TX_CMDID
671*/ 771*/
@@ -692,6 +792,17 @@ struct wmi_echo_cmd {
692} __packed; 792} __packed;
693 793
694/* 794/*
795 * WMI_TEMP_SENSE_CMDID
796 *
797 * Measure MAC and radio temperatures
798 */
799struct wmi_temp_sense_cmd {
800 __le32 measure_marlon_m_en;
801 __le32 measure_marlon_r_en;
802} __packed;
803
804
805/*
695 * WMI Events 806 * WMI Events
696 */ 807 */
697 808
@@ -699,7 +810,6 @@ struct wmi_echo_cmd {
699 * List of Events (target to host) 810 * List of Events (target to host)
700 */ 811 */
701enum wmi_event_id { 812enum wmi_event_id {
702 WMI_IMM_RSP_EVENTID = 0x0000,
703 WMI_READY_EVENTID = 0x1001, 813 WMI_READY_EVENTID = 0x1001,
704 WMI_CONNECT_EVENTID = 0x1002, 814 WMI_CONNECT_EVENTID = 0x1002,
705 WMI_DISCONNECT_EVENTID = 0x1003, 815 WMI_DISCONNECT_EVENTID = 0x1003,
@@ -709,13 +819,9 @@ enum wmi_event_id {
709 WMI_FW_READY_EVENTID = 0x1801, 819 WMI_FW_READY_EVENTID = 0x1801,
710 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200, 820 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
711 WMI_ECHO_RSP_EVENTID = 0x1803, 821 WMI_ECHO_RSP_EVENTID = 0x1803,
712 WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
713 WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
714 WMI_ADD_STATION_DONE_EVENTID = 0x1807,
715 WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
716 WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
717 WMI_FS_TUNE_DONE_EVENTID = 0x180a, 822 WMI_FS_TUNE_DONE_EVENTID = 0x180a,
718 WMI_CORR_MEASURE_DONE_EVENTID = 0x180b, 823 WMI_CORR_MEASURE_EVENTID = 0x180b,
824 WMI_READ_RSSI_EVENTID = 0x180c,
719 WMI_TEMP_SENSE_DONE_EVENTID = 0x180e, 825 WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
720 WMI_DC_CALIB_DONE_EVENTID = 0x180f, 826 WMI_DC_CALIB_DONE_EVENTID = 0x180f,
721 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, 827 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
@@ -727,10 +833,9 @@ enum wmi_event_id {
727 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, 833 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
728 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, 834 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
729 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d, 835 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
730 836 WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
731 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, 837 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
732 WMI_VRING_CFG_DONE_EVENTID = 0x1821, 838 WMI_VRING_CFG_DONE_EVENTID = 0x1821,
733 WMI_RX_ON_DONE_EVENTID = 0x1822,
734 WMI_BA_STATUS_EVENTID = 0x1823, 839 WMI_BA_STATUS_EVENTID = 0x1823,
735 WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, 840 WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
736 WMI_ADDBA_RESP_SENT_EVENTID = 0x1825, 841 WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
@@ -738,7 +843,6 @@ enum wmi_event_id {
738 WMI_GET_SSID_EVENTID = 0x1828, 843 WMI_GET_SSID_EVENTID = 0x1828,
739 WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, 844 WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
740 WMI_SW_TX_COMPLETE_EVENTID = 0x182b, 845 WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
741 WMI_RX_OFF_DONE_EVENTID = 0x182c,
742 846
743 WMI_READ_MAC_RXQ_EVENTID = 0x1830, 847 WMI_READ_MAC_RXQ_EVENTID = 0x1830,
744 WMI_READ_MAC_TXQ_EVENTID = 0x1831, 848 WMI_READ_MAC_TXQ_EVENTID = 0x1831,
@@ -765,7 +869,16 @@ enum wmi_event_id {
765 WMI_UNIT_TEST_EVENTID = 0x1900, 869 WMI_UNIT_TEST_EVENTID = 0x1900,
766 WMI_FLASH_READ_DONE_EVENTID = 0x1902, 870 WMI_FLASH_READ_DONE_EVENTID = 0x1902,
767 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, 871 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
768 872 /*P2P*/
873 WMI_PORT_ALLOCATED_EVENTID = 0x1911,
874 WMI_PORT_DELETED_EVENTID = 0x1912,
875 WMI_LISTEN_STARTED_EVENTID = 0x1914,
876 WMI_SEARCH_STARTED_EVENTID = 0x1915,
877 WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
878 WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
879 WMI_PCP_STARTED_EVENTID = 0x1918,
880 WMI_PCP_STOPPED_EVENTID = 0x1919,
881 WMI_PCP_FACTOR_EVENTID = 0x191a,
769 WMI_SET_CHANNEL_EVENTID = 0x9000, 882 WMI_SET_CHANNEL_EVENTID = 0x9000,
770 WMI_ASSOC_REQ_EVENTID = 0x9001, 883 WMI_ASSOC_REQ_EVENTID = 0x9001,
771 WMI_EAPOL_RX_EVENTID = 0x9002, 884 WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -777,6 +890,12 @@ enum wmi_event_id {
777 * Events data structures 890 * Events data structures
778 */ 891 */
779 892
893
894enum wmi_fw_status {
895 WMI_FW_STATUS_SUCCESS,
896 WMI_FW_STATUS_FAILURE,
897};
898
780/* 899/*
781 * WMI_RF_MGMT_STATUS_EVENTID 900 * WMI_RF_MGMT_STATUS_EVENTID
782 */ 901 */
@@ -857,7 +976,7 @@ struct wmi_ready_event {
857 __le32 abi_version; 976 __le32 abi_version;
858 u8 mac[WMI_MAC_LEN]; 977 u8 mac[WMI_MAC_LEN];
859 u8 phy_capability; /* enum wmi_phy_capability */ 978 u8 phy_capability; /* enum wmi_phy_capability */
860 u8 reserved; 979 u8 numof_additional_mids;
861} __packed; 980} __packed;
862 981
863/* 982/*
@@ -876,6 +995,8 @@ struct wmi_notify_req_done_event {
876 __le16 other_rx_sector; 995 __le16 other_rx_sector;
877 __le16 other_tx_sector; 996 __le16 other_tx_sector;
878 __le16 range; 997 __le16 range;
998 u8 sqi;
999 u8 reserved[3];
879} __packed; 1000} __packed;
880 1001
881/* 1002/*
@@ -951,27 +1072,15 @@ struct wmi_vring_ba_status_event {
951 * WMI_DELBA_EVENTID 1072 * WMI_DELBA_EVENTID
952 */ 1073 */
953struct wmi_delba_event { 1074struct wmi_delba_event {
954
955 #define CIDXTID_CID_POS (0)
956 #define CIDXTID_CID_LEN (4)
957 #define CIDXTID_CID_MSK (0xF)
958 #define CIDXTID_TID_POS (4)
959 #define CIDXTID_TID_LEN (4)
960 #define CIDXTID_TID_MSK (0xF0)
961 u8 cidxtid; 1075 u8 cidxtid;
962
963 u8 from_initiator; 1076 u8 from_initiator;
964 __le16 reason; 1077 __le16 reason;
965} __packed; 1078} __packed;
966 1079
1080
967/* 1081/*
968 * WMI_VRING_CFG_DONE_EVENTID 1082 * WMI_VRING_CFG_DONE_EVENTID
969 */ 1083 */
970enum wmi_vring_cfg_done_event_status {
971 WMI_VRING_CFG_SUCCESS = 0,
972 WMI_VRING_CFG_FAILURE = 1,
973};
974
975struct wmi_vring_cfg_done_event { 1084struct wmi_vring_cfg_done_event {
976 u8 ringid; 1085 u8 ringid;
977 u8 status; 1086 u8 status;
@@ -982,21 +1091,8 @@ struct wmi_vring_cfg_done_event {
982/* 1091/*
983 * WMI_ADDBA_RESP_SENT_EVENTID 1092 * WMI_ADDBA_RESP_SENT_EVENTID
984 */ 1093 */
985enum wmi_rcp_addba_resp_sent_event_status {
986 WMI_ADDBA_SUCCESS = 0,
987 WMI_ADDBA_FAIL = 1,
988};
989
990struct wmi_rcp_addba_resp_sent_event { 1094struct wmi_rcp_addba_resp_sent_event {
991
992 #define CIDXTID_CID_POS (0)
993 #define CIDXTID_CID_LEN (4)
994 #define CIDXTID_CID_MSK (0xF)
995 #define CIDXTID_TID_POS (4)
996 #define CIDXTID_TID_LEN (4)
997 #define CIDXTID_TID_MSK (0xF0)
998 u8 cidxtid; 1095 u8 cidxtid;
999
1000 u8 reserved; 1096 u8 reserved;
1001 __le16 status; 1097 __le16 status;
1002} __packed; 1098} __packed;
@@ -1005,15 +1101,7 @@ struct wmi_rcp_addba_resp_sent_event {
1005 * WMI_RCP_ADDBA_REQ_EVENTID 1101 * WMI_RCP_ADDBA_REQ_EVENTID
1006 */ 1102 */
1007struct wmi_rcp_addba_req_event { 1103struct wmi_rcp_addba_req_event {
1008
1009 #define CIDXTID_CID_POS (0)
1010 #define CIDXTID_CID_LEN (4)
1011 #define CIDXTID_CID_MSK (0xF)
1012 #define CIDXTID_TID_POS (4)
1013 #define CIDXTID_TID_LEN (4)
1014 #define CIDXTID_TID_MSK (0xF0)
1015 u8 cidxtid; 1104 u8 cidxtid;
1016
1017 u8 dialog_token; 1105 u8 dialog_token;
1018 __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */ 1106 __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */
1019 __le16 ba_timeout; 1107 __le16 ba_timeout;
@@ -1055,6 +1143,7 @@ struct wmi_data_port_open_event {
1055 u8 reserved[3]; 1143 u8 reserved[3];
1056} __packed; 1144} __packed;
1057 1145
1146
1058/* 1147/*
1059 * WMI_GET_PCP_CHANNEL_EVENTID 1148 * WMI_GET_PCP_CHANNEL_EVENTID
1060 */ 1149 */
@@ -1063,6 +1152,54 @@ struct wmi_get_pcp_channel_event {
1063 u8 reserved[3]; 1152 u8 reserved[3];
1064} __packed; 1153} __packed;
1065 1154
1155
1156/*
1157* WMI_PORT_ALLOCATED_EVENTID
1158*/
1159struct wmi_port_allocated_event {
1160 u8 status; /* wmi_fw_status */
1161 u8 reserved[3];
1162} __packed;
1163
1164/*
1165* WMI_PORT_DELETED_EVENTID
1166*/
1167struct wmi_port_deleted_event {
1168 u8 status; /* wmi_fw_status */
1169 u8 reserved[3];
1170} __packed;
1171
1172/*
1173 * WMI_LISTEN_STARTED_EVENTID
1174 */
1175struct wmi_listen_started_event {
1176 u8 status; /* wmi_fw_status */
1177 u8 reserved[3];
1178} __packed;
1179
1180/*
1181 * WMI_SEARCH_STARTED_EVENTID
1182 */
1183struct wmi_search_started_event {
1184 u8 status; /* wmi_fw_status */
1185 u8 reserved[3];
1186} __packed;
1187
1188/*
1189 * WMI_PCP_STARTED_EVENTID
1190 */
1191struct wmi_pcp_started_event {
1192 u8 status; /* wmi_fw_status */
1193 u8 reserved[3];
1194} __packed;
1195
1196/*
1197 * WMI_PCP_FACTOR_EVENTID
1198 */
1199struct wmi_pcp_factor_event {
1200 __le32 pcp_factor;
1201} __packed;
1202
1066/* 1203/*
1067 * WMI_SW_TX_COMPLETE_EVENTID 1204 * WMI_SW_TX_COMPLETE_EVENTID
1068 */ 1205 */
@@ -1078,6 +1215,23 @@ struct wmi_sw_tx_complete_event {
1078} __packed; 1215} __packed;
1079 1216
1080/* 1217/*
1218 * WMI_CORR_MEASURE_EVENTID
1219 */
1220struct wmi_corr_measure_event {
1221 s32 i;
1222 s32 q;
1223 s32 image_i;
1224 s32 image_q;
1225} __packed;
1226
1227/*
1228 * WMI_READ_RSSI_EVENTID
1229 */
1230struct wmi_read_rssi_event {
1231 __le32 ina_rssi_adc_dbm;
1232} __packed;
1233
1234/*
1081 * WMI_GET_SSID_EVENTID 1235 * WMI_GET_SSID_EVENTID
1082 */ 1236 */
1083struct wmi_get_ssid_event { 1237struct wmi_get_ssid_event {
@@ -1091,7 +1245,8 @@ struct wmi_get_ssid_event {
1091struct wmi_rx_mgmt_info { 1245struct wmi_rx_mgmt_info {
1092 u8 mcs; 1246 u8 mcs;
1093 s8 snr; 1247 s8 snr;
1094 __le16 range; 1248 u8 range;
1249 u8 sqi;
1095 __le16 stype; 1250 __le16 stype;
1096 __le16 status; 1251 __le16 status;
1097 __le32 len; 1252 __le32 len;
@@ -1113,4 +1268,14 @@ struct wmi_echo_event {
1113 __le32 echoed_value; 1268 __le32 echoed_value;
1114} __packed; 1269} __packed;
1115 1270
1271/*
1272 * WMI_TEMP_SENSE_DONE_EVENTID
1273 *
1274 * Measure MAC and radio temperatures
1275 */
1276struct wmi_temp_sense_done_event {
1277 __le32 marlon_m_t1000;
1278 __le32 marlon_r_t1000;
1279} __packed;
1280
1116#endif /* __WILOCITY_WMI_H__ */ 1281#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 287c6b670a36..078e6f3477a9 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -131,7 +131,7 @@ config B43_PHY_LP
131 131
132config B43_PHY_HT 132config B43_PHY_HT
133 bool "Support for HT-PHY (high throughput) devices" 133 bool "Support for HT-PHY (high throughput) devices"
134 depends on B43 134 depends on B43 && B43_BCMA
135 ---help--- 135 ---help---
136 Support for the HT-PHY. 136 Support for the HT-PHY.
137 137
@@ -166,8 +166,8 @@ config B43_DEBUG
166 Broadcom 43xx debugging. 166 Broadcom 43xx debugging.
167 167
168 This adds additional runtime sanity checks and statistics to the driver. 168 This adds additional runtime sanity checks and statistics to the driver.
169 These checks and statistics might me expensive and hurt runtime performance 169 These checks and statistics might be expensive and hurt the runtime
170 of your system. 170 performance of your system.
171 This also adds the b43 debugfs interface. 171 This also adds the b43 debugfs interface.
172 172
173 Do not enable this, unless you are debugging the driver. 173 Do not enable this, unless you are debugging the driver.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 10e288d470e7..7f3d461f7e8d 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -285,7 +285,9 @@ enum {
285#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */ 285#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */
286#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */ 286#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */
287/* SHM_SHARED beacon/AP variables */ 287/* SHM_SHARED beacon/AP variables */
288#define B43_SHM_SH_BT_BASE0 0x0068 /* Beacon template base 0 */
288#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ 289#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */
290#define B43_SHM_SH_BT_BASE1 0x0468 /* Beacon template base 1 */
289#define B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ 291#define B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */
290#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ 292#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */
291#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */ 293#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */
@@ -473,6 +475,12 @@ enum {
473#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */ 475#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */
474#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */ 476#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */
475 477
478/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
479#define B43_BCMA_CLKCTLST_80211_PLL_REQ 0x00000100
480#define B43_BCMA_CLKCTLST_PHY_PLL_REQ 0x00000200
481#define B43_BCMA_CLKCTLST_80211_PLL_ST 0x01000000
482#define B43_BCMA_CLKCTLST_PHY_PLL_ST 0x02000000
483
476/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */ 484/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */
477#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */ 485#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */
478#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */ 486#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */
@@ -972,7 +980,7 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
972 */ 980 */
973static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) 981static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
974{ 982{
975 return wl->hw->conf.channel->band; 983 return wl->hw->conf.chandef.chan->band;
976} 984}
977 985
978static inline int b43_bus_may_powerdown(struct b43_wldev *wldev) 986static inline int b43_bus_may_powerdown(struct b43_wldev *wldev)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 122146943bf2..523355b87659 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -419,8 +419,6 @@ static inline
419 419
420static int alloc_ringmemory(struct b43_dmaring *ring) 420static int alloc_ringmemory(struct b43_dmaring *ring)
421{ 421{
422 gfp_t flags = GFP_KERNEL;
423
424 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K 422 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
425 * alignment and 8K buffers for 64-bit DMA with 8K alignment. 423 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
426 * In practice we could use smaller buffers for the latter, but the 424 * In practice we could use smaller buffers for the latter, but the
@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
435 433
436 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
437 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
438 flags); 436 GFP_KERNEL | __GFP_ZERO);
439 if (!ring->descbase) { 437 if (!ring->descbase)
440 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
441 return -ENOMEM; 438 return -ENOMEM;
442 }
443 memset(ring->descbase, 0, ring_mem_size);
444 439
445 return 0; 440 return 0;
446} 441}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 05682736e466..d377f77d30b5 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1189,10 +1189,15 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
1189 1189
1190static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode) 1190static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1191{ 1191{
1192 u32 req = B43_BCMA_CLKCTLST_80211_PLL_REQ |
1193 B43_BCMA_CLKCTLST_PHY_PLL_REQ;
1194 u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
1195 B43_BCMA_CLKCTLST_PHY_PLL_ST;
1196
1192 b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN); 1197 b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
1193 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST); 1198 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
1194 b43_bcma_phy_reset(dev); 1199 b43_bcma_phy_reset(dev);
1195 bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true); 1200 bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
1196} 1201}
1197#endif 1202#endif
1198 1203
@@ -1305,17 +1310,19 @@ static u32 b43_jssi_read(struct b43_wldev *dev)
1305{ 1310{
1306 u32 val = 0; 1311 u32 val = 0;
1307 1312
1308 val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); 1313 val = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_JSSI1);
1309 val <<= 16; 1314 val <<= 16;
1310 val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); 1315 val |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_JSSI0);
1311 1316
1312 return val; 1317 return val;
1313} 1318}
1314 1319
1315static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) 1320static void b43_jssi_write(struct b43_wldev *dev, u32 jssi)
1316{ 1321{
1317 b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); 1322 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_JSSI0,
1318 b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); 1323 (jssi & 0x0000FFFF));
1324 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_JSSI1,
1325 (jssi & 0xFFFF0000) >> 16);
1319} 1326}
1320 1327
1321static void b43_generate_noise_sample(struct b43_wldev *dev) 1328static void b43_generate_noise_sample(struct b43_wldev *dev)
@@ -1618,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
1618 1625
1619 if (wl->beacon0_uploaded) 1626 if (wl->beacon0_uploaded)
1620 return; 1627 return;
1621 b43_write_beacon_template(dev, 0x68, 0x18); 1628 b43_write_beacon_template(dev, B43_SHM_SH_BT_BASE0, B43_SHM_SH_BTL0);
1622 wl->beacon0_uploaded = true; 1629 wl->beacon0_uploaded = true;
1623} 1630}
1624 1631
@@ -1628,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
1628 1635
1629 if (wl->beacon1_uploaded) 1636 if (wl->beacon1_uploaded)
1630 return; 1637 return;
1631 b43_write_beacon_template(dev, 0x468, 0x1A); 1638 b43_write_beacon_template(dev, B43_SHM_SH_BT_BASE1, B43_SHM_SH_BTL1);
1632 wl->beacon1_uploaded = true; 1639 wl->beacon1_uploaded = true;
1633} 1640}
1634 1641
@@ -2775,9 +2782,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
2775 switch (dev->dev->bus_type) { 2782 switch (dev->dev->bus_type) {
2776#ifdef CONFIG_B43_BCMA 2783#ifdef CONFIG_B43_BCMA
2777 case B43_BUS_BCMA: 2784 case B43_BUS_BCMA:
2778 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, 2785 bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, mask, set);
2779 (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
2780 BCMA_CC_GPIOCTL) & ~mask) | set);
2781 break; 2786 break;
2782#endif 2787#endif
2783#ifdef CONFIG_B43_SSB 2788#ifdef CONFIG_B43_SSB
@@ -2802,8 +2807,7 @@ static void b43_gpio_cleanup(struct b43_wldev *dev)
2802 switch (dev->dev->bus_type) { 2807 switch (dev->dev->bus_type) {
2803#ifdef CONFIG_B43_BCMA 2808#ifdef CONFIG_B43_BCMA
2804 case B43_BUS_BCMA: 2809 case B43_BUS_BCMA:
2805 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, 2810 bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, ~0, 0);
2806 0);
2807 break; 2811 break;
2808#endif 2812#endif
2809#ifdef CONFIG_B43_SSB 2813#ifdef CONFIG_B43_SSB
@@ -3111,7 +3115,7 @@ static int b43_chip_init(struct b43_wldev *dev)
3111 3115
3112 /* Probe Response Timeout value */ 3116 /* Probe Response Timeout value */
3113 /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ 3117 /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
3114 b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000); 3118 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 0);
3115 3119
3116 /* Initially set the wireless operation mode. */ 3120 /* Initially set the wireless operation mode. */
3117 b43_adjust_opmode(dev); 3121 b43_adjust_opmode(dev);
@@ -3848,7 +3852,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3848 dev = wl->current_dev; 3852 dev = wl->current_dev;
3849 3853
3850 /* Switch the band (if necessary). This might change the active core. */ 3854 /* Switch the band (if necessary). This might change the active core. */
3851 err = b43_switch_band(wl, conf->channel); 3855 err = b43_switch_band(wl, conf->chandef.chan);
3852 if (err) 3856 if (err)
3853 goto out_unlock_mutex; 3857 goto out_unlock_mutex;
3854 3858
@@ -3878,8 +3882,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3878 3882
3879 /* Switch to the requested channel. 3883 /* Switch to the requested channel.
3880 * The firmware takes care of races with the TX handler. */ 3884 * The firmware takes care of races with the TX handler. */
3881 if (conf->channel->hw_value != phy->channel) 3885 if (conf->chandef.chan->hw_value != phy->channel)
3882 b43_switch_channel(dev, conf->channel->hw_value); 3886 b43_switch_channel(dev, conf->chandef.chan->hw_value);
3883 3887
3884 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR); 3888 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
3885 3889
@@ -5002,7 +5006,7 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
5002 if (idx != 0) 5006 if (idx != 0)
5003 return -ENOENT; 5007 return -ENOENT;
5004 5008
5005 survey->channel = conf->channel; 5009 survey->channel = conf->chandef.chan;
5006 survey->filled = SURVEY_INFO_NOISE_DBM; 5010 survey->filled = SURVEY_INFO_NOISE_DBM;
5007 survey->noise = dev->stats.link_noise; 5011 survey->noise = dev->stats.link_noise;
5008 5012
diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c
index 7416c5e9154d..5d6833f18498 100644
--- a/drivers/net/wireless/b43/phy_ht.c
+++ b/drivers/net/wireless/b43/phy_ht.c
@@ -30,6 +30,17 @@
30#include "radio_2059.h" 30#include "radio_2059.h"
31#include "main.h" 31#include "main.h"
32 32
33/* Force values to keep compatibility with wl */
34enum ht_rssi_type {
35 HT_RSSI_W1 = 0,
36 HT_RSSI_W2 = 1,
37 HT_RSSI_NB = 2,
38 HT_RSSI_IQ = 3,
39 HT_RSSI_TSSI_2G = 4,
40 HT_RSSI_TSSI_5G = 5,
41 HT_RSSI_TBD = 6,
42};
43
33/************************************************** 44/**************************************************
34 * Radio 2059. 45 * Radio 2059.
35 **************************************************/ 46 **************************************************/
@@ -37,8 +48,9 @@
37static void b43_radio_2059_channel_setup(struct b43_wldev *dev, 48static void b43_radio_2059_channel_setup(struct b43_wldev *dev,
38 const struct b43_phy_ht_channeltab_e_radio2059 *e) 49 const struct b43_phy_ht_channeltab_e_radio2059 *e)
39{ 50{
40 u8 i; 51 static const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3, };
41 u16 routing; 52 u16 r;
53 int core;
42 54
43 b43_radio_write(dev, 0x16, e->radio_syn16); 55 b43_radio_write(dev, 0x16, e->radio_syn16);
44 b43_radio_write(dev, 0x17, e->radio_syn17); 56 b43_radio_write(dev, 0x17, e->radio_syn17);
@@ -53,25 +65,17 @@ static void b43_radio_2059_channel_setup(struct b43_wldev *dev,
53 b43_radio_write(dev, 0x41, e->radio_syn41); 65 b43_radio_write(dev, 0x41, e->radio_syn41);
54 b43_radio_write(dev, 0x43, e->radio_syn43); 66 b43_radio_write(dev, 0x43, e->radio_syn43);
55 b43_radio_write(dev, 0x47, e->radio_syn47); 67 b43_radio_write(dev, 0x47, e->radio_syn47);
56 b43_radio_write(dev, 0x4a, e->radio_syn4a); 68
57 b43_radio_write(dev, 0x58, e->radio_syn58); 69 for (core = 0; core < 3; core++) {
58 b43_radio_write(dev, 0x5a, e->radio_syn5a); 70 r = routing[core];
59 b43_radio_write(dev, 0x6a, e->radio_syn6a); 71 b43_radio_write(dev, r | 0x4a, e->radio_rxtx4a);
60 b43_radio_write(dev, 0x6d, e->radio_syn6d); 72 b43_radio_write(dev, r | 0x58, e->radio_rxtx58);
61 b43_radio_write(dev, 0x6e, e->radio_syn6e); 73 b43_radio_write(dev, r | 0x5a, e->radio_rxtx5a);
62 b43_radio_write(dev, 0x92, e->radio_syn92); 74 b43_radio_write(dev, r | 0x6a, e->radio_rxtx6a);
63 b43_radio_write(dev, 0x98, e->radio_syn98); 75 b43_radio_write(dev, r | 0x6d, e->radio_rxtx6d);
64 76 b43_radio_write(dev, r | 0x6e, e->radio_rxtx6e);
65 for (i = 0; i < 2; i++) { 77 b43_radio_write(dev, r | 0x92, e->radio_rxtx92);
66 routing = i ? R2059_RXRX1 : R2059_TXRX0; 78 b43_radio_write(dev, r | 0x98, e->radio_rxtx98);
67 b43_radio_write(dev, routing | 0x4a, e->radio_rxtx4a);
68 b43_radio_write(dev, routing | 0x58, e->radio_rxtx58);
69 b43_radio_write(dev, routing | 0x5a, e->radio_rxtx5a);
70 b43_radio_write(dev, routing | 0x6a, e->radio_rxtx6a);
71 b43_radio_write(dev, routing | 0x6d, e->radio_rxtx6d);
72 b43_radio_write(dev, routing | 0x6e, e->radio_rxtx6e);
73 b43_radio_write(dev, routing | 0x92, e->radio_rxtx92);
74 b43_radio_write(dev, routing | 0x98, e->radio_rxtx98);
75 } 79 }
76 80
77 udelay(50); 81 udelay(50);
@@ -87,7 +91,7 @@ static void b43_radio_2059_channel_setup(struct b43_wldev *dev,
87 91
88static void b43_radio_2059_init(struct b43_wldev *dev) 92static void b43_radio_2059_init(struct b43_wldev *dev)
89{ 93{
90 const u16 routing[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1 }; 94 const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3 };
91 const u16 radio_values[3][2] = { 95 const u16 radio_values[3][2] = {
92 { 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 }, 96 { 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
93 }; 97 };
@@ -106,17 +110,17 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
106 b43_radio_mask(dev, 0xc0, ~0x0080); 110 b43_radio_mask(dev, 0xc0, ~0x0080);
107 111
108 if (1) { /* FIXME */ 112 if (1) { /* FIXME */
109 b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x1); 113 b43_radio_set(dev, R2059_C3 | 0x4, 0x1);
110 udelay(10); 114 udelay(10);
111 b43_radio_set(dev, R2059_RXRX1 | 0x0BF, 0x1); 115 b43_radio_set(dev, R2059_C3 | 0x0BF, 0x1);
112 b43_radio_maskset(dev, R2059_RXRX1 | 0x19B, 0x3, 0x2); 116 b43_radio_maskset(dev, R2059_C3 | 0x19B, 0x3, 0x2);
113 117
114 b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x2); 118 b43_radio_set(dev, R2059_C3 | 0x4, 0x2);
115 udelay(100); 119 udelay(100);
116 b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x2); 120 b43_radio_mask(dev, R2059_C3 | 0x4, ~0x2);
117 121
118 for (i = 0; i < 10000; i++) { 122 for (i = 0; i < 10000; i++) {
119 if (b43_radio_read(dev, R2059_RXRX1 | 0x145) & 1) { 123 if (b43_radio_read(dev, R2059_C3 | 0x145) & 1) {
120 i = 0; 124 i = 0;
121 break; 125 break;
122 } 126 }
@@ -125,7 +129,7 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
125 if (i) 129 if (i)
126 b43err(dev->wl, "radio 0x945 timeout\n"); 130 b43err(dev->wl, "radio 0x945 timeout\n");
127 131
128 b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x1); 132 b43_radio_mask(dev, R2059_C3 | 0x4, ~0x1);
129 b43_radio_set(dev, 0xa, 0x60); 133 b43_radio_set(dev, 0xa, 0x60);
130 134
131 for (i = 0; i < 3; i++) { 135 for (i = 0; i < 3; i++) {
@@ -154,9 +158,84 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
154} 158}
155 159
156/************************************************** 160/**************************************************
161 * RF
162 **************************************************/
163
164static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
165{
166 u8 i;
167
168 u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
169 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
170
171 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
172 for (i = 0; i < 200; i++) {
173 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
174 i = 0;
175 break;
176 }
177 msleep(1);
178 }
179 if (i)
180 b43err(dev->wl, "Forcing RF sequence timeout\n");
181
182 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
183}
184
185static void b43_phy_ht_pa_override(struct b43_wldev *dev, bool enable)
186{
187 struct b43_phy_ht *htphy = dev->phy.ht;
188 static const u16 regs[3] = { B43_PHY_HT_RF_CTL_INT_C1,
189 B43_PHY_HT_RF_CTL_INT_C2,
190 B43_PHY_HT_RF_CTL_INT_C3 };
191 int i;
192
193 if (enable) {
194 for (i = 0; i < 3; i++)
195 b43_phy_write(dev, regs[i], htphy->rf_ctl_int_save[i]);
196 } else {
197 for (i = 0; i < 3; i++)
198 htphy->rf_ctl_int_save[i] = b43_phy_read(dev, regs[i]);
199 /* TODO: Does 5GHz band use different value (not 0x0400)? */
200 for (i = 0; i < 3; i++)
201 b43_phy_write(dev, regs[i], 0x0400);
202 }
203}
204
205/**************************************************
157 * Various PHY ops 206 * Various PHY ops
158 **************************************************/ 207 **************************************************/
159 208
209static u16 b43_phy_ht_classifier(struct b43_wldev *dev, u16 mask, u16 val)
210{
211 u16 tmp;
212 u16 allowed = B43_PHY_HT_CLASS_CTL_CCK_EN |
213 B43_PHY_HT_CLASS_CTL_OFDM_EN |
214 B43_PHY_HT_CLASS_CTL_WAITED_EN;
215
216 tmp = b43_phy_read(dev, B43_PHY_HT_CLASS_CTL);
217 tmp &= allowed;
218 tmp &= ~mask;
219 tmp |= (val & mask);
220 b43_phy_maskset(dev, B43_PHY_HT_CLASS_CTL, ~allowed, tmp);
221
222 return tmp;
223}
224
225static void b43_phy_ht_reset_cca(struct b43_wldev *dev)
226{
227 u16 bbcfg;
228
229 b43_phy_force_clock(dev, true);
230 bbcfg = b43_phy_read(dev, B43_PHY_HT_BBCFG);
231 b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg | B43_PHY_HT_BBCFG_RSTCCA);
232 udelay(1);
233 b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg & ~B43_PHY_HT_BBCFG_RSTCCA);
234 b43_phy_force_clock(dev, false);
235
236 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
237}
238
160static void b43_phy_ht_zero_extg(struct b43_wldev *dev) 239static void b43_phy_ht_zero_extg(struct b43_wldev *dev)
161{ 240{
162 u8 i, j; 241 u8 i, j;
@@ -176,10 +255,10 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
176{ 255{
177 u8 i; 256 u8 i;
178 257
179 const u16 ctl_regs[3][2] = { 258 static const u16 ctl_regs[3][2] = {
180 { B43_PHY_HT_AFE_CTL1, B43_PHY_HT_AFE_CTL2 }, 259 { B43_PHY_HT_AFE_C1_OVER, B43_PHY_HT_AFE_C1 },
181 { B43_PHY_HT_AFE_CTL3, B43_PHY_HT_AFE_CTL4 }, 260 { B43_PHY_HT_AFE_C2_OVER, B43_PHY_HT_AFE_C2 },
182 { B43_PHY_HT_AFE_CTL5, B43_PHY_HT_AFE_CTL6}, 261 { B43_PHY_HT_AFE_C3_OVER, B43_PHY_HT_AFE_C3},
183 }; 262 };
184 263
185 for (i = 0; i < 3; i++) { 264 for (i = 0; i < 3; i++) {
@@ -193,27 +272,6 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
193 } 272 }
194} 273}
195 274
196static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
197{
198 u8 i;
199
200 u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
201 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
202
203 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
204 for (i = 0; i < 200; i++) {
205 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
206 i = 0;
207 break;
208 }
209 msleep(1);
210 }
211 if (i)
212 b43err(dev->wl, "Forcing RF sequence timeout\n");
213
214 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
215}
216
217static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) 275static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
218{ 276{
219 clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES); 277 clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES);
@@ -240,15 +298,456 @@ static void b43_phy_ht_bphy_init(struct b43_wldev *dev)
240} 298}
241 299
242/************************************************** 300/**************************************************
301 * Samples
302 **************************************************/
303
304static void b43_phy_ht_stop_playback(struct b43_wldev *dev)
305{
306 struct b43_phy_ht *phy_ht = dev->phy.ht;
307 u16 tmp;
308 int i;
309
310 tmp = b43_phy_read(dev, B43_PHY_HT_SAMP_STAT);
311 if (tmp & 0x1)
312 b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, B43_PHY_HT_SAMP_CMD_STOP);
313 else if (tmp & 0x2)
314 b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, 0x7FFF);
315
316 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0x0004);
317
318 for (i = 0; i < 3; i++) {
319 if (phy_ht->bb_mult_save[i] >= 0) {
320 b43_httab_write(dev, B43_HTTAB16(13, 0x63 + i * 4),
321 phy_ht->bb_mult_save[i]);
322 b43_httab_write(dev, B43_HTTAB16(13, 0x67 + i * 4),
323 phy_ht->bb_mult_save[i]);
324 }
325 }
326}
327
328static u16 b43_phy_ht_load_samples(struct b43_wldev *dev)
329{
330 int i;
331 u16 len = 20 << 3;
332
333 b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, 0x4400);
334
335 for (i = 0; i < len; i++) {
336 b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, 0);
337 b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, 0);
338 }
339
340 return len;
341}
342
343static void b43_phy_ht_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
344 u16 wait)
345{
346 struct b43_phy_ht *phy_ht = dev->phy.ht;
347 u16 save_seq_mode;
348 int i;
349
350 for (i = 0; i < 3; i++) {
351 if (phy_ht->bb_mult_save[i] < 0)
352 phy_ht->bb_mult_save[i] = b43_httab_read(dev, B43_HTTAB16(13, 0x63 + i * 4));
353 }
354
355 b43_phy_write(dev, B43_PHY_HT_SAMP_DEP_CNT, samps - 1);
356 if (loops != 0xFFFF)
357 loops--;
358 b43_phy_write(dev, B43_PHY_HT_SAMP_LOOP_CNT, loops);
359 b43_phy_write(dev, B43_PHY_HT_SAMP_WAIT_CNT, wait);
360
361 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
362 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE,
363 B43_PHY_HT_RF_SEQ_MODE_CA_OVER);
364
365 /* TODO: find out mask bits! Do we need more function arguments? */
366 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
367 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
368 b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, ~0);
369 b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, 0x1);
370
371 for (i = 0; i < 100; i++) {
372 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & 1)) {
373 i = 0;
374 break;
375 }
376 udelay(10);
377 }
378 if (i)
379 b43err(dev->wl, "run samples timeout\n");
380
381 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
382}
383
384static void b43_phy_ht_tx_tone(struct b43_wldev *dev)
385{
386 u16 samp;
387
388 samp = b43_phy_ht_load_samples(dev);
389 b43_phy_ht_run_samples(dev, samp, 0xFFFF, 0);
390}
391
392/**************************************************
393 * RSSI
394 **************************************************/
395
396static void b43_phy_ht_rssi_select(struct b43_wldev *dev, u8 core_sel,
397 enum ht_rssi_type rssi_type)
398{
399 static const u16 ctl_regs[3][2] = {
400 { B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER, },
401 { B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER, },
402 { B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER, },
403 };
404 static const u16 radio_r[] = { R2059_C1, R2059_C2, R2059_C3, };
405 int core;
406
407 if (core_sel == 0) {
408 b43err(dev->wl, "RSSI selection for core off not implemented yet\n");
409 } else {
410 for (core = 0; core < 3; core++) {
411 /* Check if caller requested a one specific core */
412 if ((core_sel == 1 && core != 0) ||
413 (core_sel == 2 && core != 1) ||
414 (core_sel == 3 && core != 2))
415 continue;
416
417 switch (rssi_type) {
418 case HT_RSSI_TSSI_2G:
419 b43_phy_set(dev, ctl_regs[core][0], 0x3 << 8);
420 b43_phy_set(dev, ctl_regs[core][0], 0x3 << 10);
421 b43_phy_set(dev, ctl_regs[core][1], 0x1 << 9);
422 b43_phy_set(dev, ctl_regs[core][1], 0x1 << 10);
423
424 b43_radio_set(dev, R2059_C3 | 0xbf, 0x1);
425 b43_radio_write(dev, radio_r[core] | 0x159,
426 0x11);
427 break;
428 default:
429 b43err(dev->wl, "RSSI selection for type %d not implemented yet\n",
430 rssi_type);
431 }
432 }
433 }
434}
435
436static void b43_phy_ht_poll_rssi(struct b43_wldev *dev, enum ht_rssi_type type,
437 s32 *buf, u8 nsamp)
438{
439 u16 phy_regs_values[12];
440 static const u16 phy_regs_to_save[] = {
441 B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER,
442 0x848, 0x841,
443 B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER,
444 0x868, 0x861,
445 B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER,
446 0x888, 0x881,
447 };
448 u16 tmp[3];
449 int i;
450
451 for (i = 0; i < 12; i++)
452 phy_regs_values[i] = b43_phy_read(dev, phy_regs_to_save[i]);
453
454 b43_phy_ht_rssi_select(dev, 5, type);
455
456 for (i = 0; i < 6; i++)
457 buf[i] = 0;
458
459 for (i = 0; i < nsamp; i++) {
460 tmp[0] = b43_phy_read(dev, B43_PHY_HT_RSSI_C1);
461 tmp[1] = b43_phy_read(dev, B43_PHY_HT_RSSI_C2);
462 tmp[2] = b43_phy_read(dev, B43_PHY_HT_RSSI_C3);
463
464 buf[0] += ((s8)((tmp[0] & 0x3F) << 2)) >> 2;
465 buf[1] += ((s8)(((tmp[0] >> 8) & 0x3F) << 2)) >> 2;
466 buf[2] += ((s8)((tmp[1] & 0x3F) << 2)) >> 2;
467 buf[3] += ((s8)(((tmp[1] >> 8) & 0x3F) << 2)) >> 2;
468 buf[4] += ((s8)((tmp[2] & 0x3F) << 2)) >> 2;
469 buf[5] += ((s8)(((tmp[2] >> 8) & 0x3F) << 2)) >> 2;
470 }
471
472 for (i = 0; i < 12; i++)
473 b43_phy_write(dev, phy_regs_to_save[i], phy_regs_values[i]);
474}
475
476/**************************************************
477 * Tx/Rx
478 **************************************************/
479
480static void b43_phy_ht_tx_power_fix(struct b43_wldev *dev)
481{
482 int i;
483
484 for (i = 0; i < 3; i++) {
485 u16 mask;
486 u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
487
488 if (0) /* FIXME */
489 mask = 0x2 << (i * 4);
490 else
491 mask = 0;
492 b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
493
494 b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
495 b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
496 tmp & 0xFF);
497 b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
498 tmp & 0xFF);
499 }
500}
501
502static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
503{
504 struct b43_phy_ht *phy_ht = dev->phy.ht;
505 u16 en_bits = B43_PHY_HT_TXPCTL_CMD_C1_COEFF |
506 B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN |
507 B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN;
508 static const u16 cmd_regs[3] = { B43_PHY_HT_TXPCTL_CMD_C1,
509 B43_PHY_HT_TXPCTL_CMD_C2,
510 B43_PHY_HT_TXPCTL_CMD_C3 };
511 static const u16 status_regs[3] = { B43_PHY_HT_TX_PCTL_STATUS_C1,
512 B43_PHY_HT_TX_PCTL_STATUS_C2,
513 B43_PHY_HT_TX_PCTL_STATUS_C3 };
514 int i;
515
516 if (!enable) {
517 if (b43_phy_read(dev, B43_PHY_HT_TXPCTL_CMD_C1) & en_bits) {
518 /* We disable enabled TX pwr ctl, save it's state */
519 for (i = 0; i < 3; i++)
520 phy_ht->tx_pwr_idx[i] =
521 b43_phy_read(dev, status_regs[i]);
522 }
523 b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
524 } else {
525 b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
526
527 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
528 for (i = 0; i < 3; i++)
529 b43_phy_write(dev, cmd_regs[i], 0x32);
530 }
531
532 for (i = 0; i < 3; i++)
533 if (phy_ht->tx_pwr_idx[i] <=
534 B43_PHY_HT_TXPCTL_CMD_C1_INIT)
535 b43_phy_write(dev, cmd_regs[i],
536 phy_ht->tx_pwr_idx[i]);
537 }
538
539 phy_ht->tx_pwr_ctl = enable;
540}
541
542static void b43_phy_ht_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
543{
544 struct b43_phy_ht *phy_ht = dev->phy.ht;
545 static const u16 base[] = { 0x840, 0x860, 0x880 };
546 u16 save_regs[3][3];
547 s32 rssi_buf[6];
548 int core;
549
550 for (core = 0; core < 3; core++) {
551 save_regs[core][1] = b43_phy_read(dev, base[core] + 6);
552 save_regs[core][2] = b43_phy_read(dev, base[core] + 7);
553 save_regs[core][0] = b43_phy_read(dev, base[core] + 0);
554
555 b43_phy_write(dev, base[core] + 6, 0);
556 b43_phy_mask(dev, base[core] + 7, ~0xF); /* 0xF? Or just 0x6? */
557 b43_phy_set(dev, base[core] + 0, 0x0400);
558 b43_phy_set(dev, base[core] + 0, 0x1000);
559 }
560
561 b43_phy_ht_tx_tone(dev);
562 udelay(20);
563 b43_phy_ht_poll_rssi(dev, HT_RSSI_TSSI_2G, rssi_buf, 1);
564 b43_phy_ht_stop_playback(dev);
565 b43_phy_ht_reset_cca(dev);
566
567 phy_ht->idle_tssi[0] = rssi_buf[0] & 0xff;
568 phy_ht->idle_tssi[1] = rssi_buf[2] & 0xff;
569 phy_ht->idle_tssi[2] = rssi_buf[4] & 0xff;
570
571 for (core = 0; core < 3; core++) {
572 b43_phy_write(dev, base[core] + 0, save_regs[core][0]);
573 b43_phy_write(dev, base[core] + 6, save_regs[core][1]);
574 b43_phy_write(dev, base[core] + 7, save_regs[core][2]);
575 }
576}
577
578static void b43_phy_ht_tssi_setup(struct b43_wldev *dev)
579{
580 static const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3, };
581 int core;
582
583 /* 0x159 is probably TX_SSI_MUX or TSSIG (by comparing to N-PHY) */
584 for (core = 0; core < 3; core++) {
585 b43_radio_set(dev, 0x8bf, 0x1);
586 b43_radio_write(dev, routing[core] | 0x0159, 0x0011);
587 }
588}
589
590static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
591{
592 struct b43_phy_ht *phy_ht = dev->phy.ht;
593 struct ssb_sprom *sprom = dev->dev->bus_sprom;
594
595 u8 *idle = phy_ht->idle_tssi;
596 u8 target[3];
597 s16 a1[3], b0[3], b1[3];
598
599 u16 freq = dev->phy.channel_freq;
600 int i, c;
601
602 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
603 for (c = 0; c < 3; c++) {
604 target[c] = sprom->core_pwr_info[c].maxpwr_2g;
605 a1[c] = sprom->core_pwr_info[c].pa_2g[0];
606 b0[c] = sprom->core_pwr_info[c].pa_2g[1];
607 b1[c] = sprom->core_pwr_info[c].pa_2g[2];
608 }
609 } else if (freq >= 4900 && freq < 5100) {
610 for (c = 0; c < 3; c++) {
611 target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
612 a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
613 b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
614 b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
615 }
616 } else if (freq >= 5100 && freq < 5500) {
617 for (c = 0; c < 3; c++) {
618 target[c] = sprom->core_pwr_info[c].maxpwr_5g;
619 a1[c] = sprom->core_pwr_info[c].pa_5g[0];
620 b0[c] = sprom->core_pwr_info[c].pa_5g[1];
621 b1[c] = sprom->core_pwr_info[c].pa_5g[2];
622 }
623 } else if (freq >= 5500) {
624 for (c = 0; c < 3; c++) {
625 target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
626 a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
627 b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
628 b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
629 }
630 } else {
631 target[0] = target[1] = target[2] = 52;
632 a1[0] = a1[1] = a1[2] = -424;
633 b0[0] = b0[1] = b0[2] = 5612;
634 b1[0] = b1[1] = b1[2] = -1393;
635 }
636
637 b43_phy_set(dev, B43_PHY_HT_TSSIMODE, B43_PHY_HT_TSSIMODE_EN);
638 b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1,
639 ~B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN & 0xFFFF);
640
641 /* TODO: Does it depend on sprom->fem.ghz2.tssipos? */
642 b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI, 0x4000);
643
644 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1,
645 ~B43_PHY_HT_TXPCTL_CMD_C1_INIT, 0x19);
646 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C2,
647 ~B43_PHY_HT_TXPCTL_CMD_C2_INIT, 0x19);
648 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C3,
649 ~B43_PHY_HT_TXPCTL_CMD_C3_INIT, 0x19);
650
651 b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
652 B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF);
653
654 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
655 ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C1,
656 idle[0] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT);
657 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
658 ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C2,
659 idle[1] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT);
660 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI2,
661 ~B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3,
662 idle[2] << B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT);
663
664 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_TSSID,
665 0xf0);
666 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_NPTIL2,
667 0x3 << B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT);
668#if 0
669 /* TODO: what to mask/set? */
670 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x800, 0)
671 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x400, 0)
672#endif
673
674 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
675 ~B43_PHY_HT_TXPCTL_TARG_PWR_C1,
676 target[0] << B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT);
677 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
678 ~B43_PHY_HT_TXPCTL_TARG_PWR_C2 & 0xFFFF,
679 target[1] << B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT);
680 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR2,
681 ~B43_PHY_HT_TXPCTL_TARG_PWR2_C3,
682 target[2] << B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT);
683
684 for (c = 0; c < 3; c++) {
685 s32 num, den, pwr;
686 u32 regval[64];
687
688 for (i = 0; i < 64; i++) {
689 num = 8 * (16 * b0[c] + b1[c] * i);
690 den = 32768 + a1[c] * i;
691 pwr = max((4 * num + den / 2) / den, -8);
692 regval[i] = pwr;
693 }
694 b43_httab_write_bulk(dev, B43_HTTAB16(26 + c, 0), 64, regval);
695 }
696}
697
698/**************************************************
243 * Channel switching ops. 699 * Channel switching ops.
244 **************************************************/ 700 **************************************************/
245 701
702static void b43_phy_ht_spur_avoid(struct b43_wldev *dev,
703 struct ieee80211_channel *new_channel)
704{
705 struct bcma_device *core = dev->dev->bdev;
706 int spuravoid = 0;
707 u16 tmp;
708
709 /* Check for 13 and 14 is just a guess, we don't have enough logs. */
710 if (new_channel->hw_value == 13 || new_channel->hw_value == 14)
711 spuravoid = 1;
712 bcma_core_pll_ctl(core, B43_BCMA_CLKCTLST_PHY_PLL_REQ, 0, false);
713 bcma_pmu_spuravoid_pllupdate(&core->bus->drv_cc, spuravoid);
714 bcma_core_pll_ctl(core,
715 B43_BCMA_CLKCTLST_80211_PLL_REQ |
716 B43_BCMA_CLKCTLST_PHY_PLL_REQ,
717 B43_BCMA_CLKCTLST_80211_PLL_ST |
718 B43_BCMA_CLKCTLST_PHY_PLL_ST, false);
719
720 /* Values has been taken from wlc_bmac_switch_macfreq comments */
721 switch (spuravoid) {
722 case 2: /* 126MHz */
723 tmp = 0x2082;
724 break;
725 case 1: /* 123MHz */
726 tmp = 0x5341;
727 break;
728 default: /* 120MHz */
729 tmp = 0x8889;
730 }
731
732 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, tmp);
733 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
734
735 /* TODO: reset PLL */
736
737 if (spuravoid)
738 b43_phy_set(dev, B43_PHY_HT_BBCFG, B43_PHY_HT_BBCFG_RSTRX);
739 else
740 b43_phy_mask(dev, B43_PHY_HT_BBCFG,
741 ~B43_PHY_HT_BBCFG_RSTRX & 0xFFFF);
742
743 b43_phy_ht_reset_cca(dev);
744}
745
246static void b43_phy_ht_channel_setup(struct b43_wldev *dev, 746static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
247 const struct b43_phy_ht_channeltab_e_phy *e, 747 const struct b43_phy_ht_channeltab_e_phy *e,
248 struct ieee80211_channel *new_channel) 748 struct ieee80211_channel *new_channel)
249{ 749{
250 bool old_band_5ghz; 750 bool old_band_5ghz;
251 u8 i;
252 751
253 old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */ 752 old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */
254 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { 753 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
@@ -264,25 +763,20 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
264 b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5); 763 b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5);
265 b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6); 764 b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6);
266 765
267 /* TODO: some ops on PHY regs 0x0B0 and 0xC0A */ 766 if (new_channel->hw_value == 14) {
268 767 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN, 0);
269 /* TODO: separated function? */ 768 b43_phy_set(dev, B43_PHY_HT_TEST, 0x0800);
270 for (i = 0; i < 3; i++) { 769 } else {
271 u16 mask; 770 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
272 u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8)); 771 B43_PHY_HT_CLASS_CTL_OFDM_EN);
772 if (new_channel->band == IEEE80211_BAND_2GHZ)
773 b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
774 }
273 775
274 if (0) /* FIXME */ 776 if (1) /* TODO: On N it's for early devices only, what about HT? */
275 mask = 0x2 << (i * 4); 777 b43_phy_ht_tx_power_fix(dev);
276 else
277 mask = 0;
278 b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
279 778
280 b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16); 779 b43_phy_ht_spur_avoid(dev, new_channel);
281 b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
282 tmp & 0xFF);
283 b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
284 tmp & 0xFF);
285 }
286 780
287 b43_phy_write(dev, 0x017e, 0x3830); 781 b43_phy_write(dev, 0x017e, 0x3830);
288} 782}
@@ -337,14 +831,29 @@ static void b43_phy_ht_op_prepare_structs(struct b43_wldev *dev)
337{ 831{
338 struct b43_phy *phy = &dev->phy; 832 struct b43_phy *phy = &dev->phy;
339 struct b43_phy_ht *phy_ht = phy->ht; 833 struct b43_phy_ht *phy_ht = phy->ht;
834 int i;
340 835
341 memset(phy_ht, 0, sizeof(*phy_ht)); 836 memset(phy_ht, 0, sizeof(*phy_ht));
837
838 phy_ht->tx_pwr_ctl = true;
839 for (i = 0; i < 3; i++)
840 phy_ht->tx_pwr_idx[i] = B43_PHY_HT_TXPCTL_CMD_C1_INIT + 1;
841
842 for (i = 0; i < 3; i++)
843 phy_ht->bb_mult_save[i] = -1;
342} 844}
343 845
344static int b43_phy_ht_op_init(struct b43_wldev *dev) 846static int b43_phy_ht_op_init(struct b43_wldev *dev)
345{ 847{
848 struct b43_phy_ht *phy_ht = dev->phy.ht;
346 u16 tmp; 849 u16 tmp;
347 u16 clip_state[3]; 850 u16 clip_state[3];
851 bool saved_tx_pwr_ctl;
852
853 if (dev->dev->bus_type != B43_BUS_BCMA) {
854 b43err(dev->wl, "HT-PHY is supported only on BCMA bus!\n");
855 return -EOPNOTSUPP;
856 }
348 857
349 b43_phy_ht_tables_init(dev); 858 b43_phy_ht_tables_init(dev);
350 859
@@ -357,9 +866,9 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
357 866
358 b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3); 867 b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3);
359 868
360 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0); 869 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0);
361 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0); 870 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0);
362 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0); 871 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0);
363 872
364 b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20); 873 b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20);
365 b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20); 874 b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20);
@@ -371,8 +880,11 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
371 if (0) /* TODO: condition */ 880 if (0) /* TODO: condition */
372 ; /* TODO: PHY op on reg 0x217 */ 881 ; /* TODO: PHY op on reg 0x217 */
373 882
374 b43_phy_read(dev, 0xb0); /* TODO: what for? */ 883 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
375 b43_phy_set(dev, 0xb0, 0x1); 884 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
885 else
886 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
887 B43_PHY_HT_CLASS_CTL_CCK_EN);
376 888
377 b43_phy_set(dev, 0xb1, 0x91); 889 b43_phy_set(dev, 0xb1, 0x91);
378 b43_phy_write(dev, 0x32f, 0x0003); 890 b43_phy_write(dev, 0x32f, 0x0003);
@@ -448,12 +960,13 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
448 960
449 b43_mac_phy_clock_set(dev, true); 961 b43_mac_phy_clock_set(dev, true);
450 962
963 b43_phy_ht_pa_override(dev, false);
451 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX); 964 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX);
452 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX); 965 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
453 966 b43_phy_ht_pa_override(dev, true);
454 /* TODO: PHY op on reg 0xb0 */
455 967
456 /* TODO: Should we restore it? Or store it in global PHY info? */ 968 /* TODO: Should we restore it? Or store it in global PHY info? */
969 b43_phy_ht_classifier(dev, 0, 0);
457 b43_phy_ht_read_clip_detection(dev, clip_state); 970 b43_phy_ht_read_clip_detection(dev, clip_state);
458 971
459 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 972 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -462,6 +975,14 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
462 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0), 975 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
463 B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late); 976 B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late);
464 977
978 saved_tx_pwr_ctl = phy_ht->tx_pwr_ctl;
979 b43_phy_ht_tx_power_fix(dev);
980 b43_phy_ht_tx_power_ctl(dev, false);
981 b43_phy_ht_tx_power_ctl_idle_tssi(dev);
982 b43_phy_ht_tx_power_ctl_setup(dev);
983 b43_phy_ht_tssi_setup(dev);
984 b43_phy_ht_tx_power_ctl(dev, saved_tx_pwr_ctl);
985
465 return 0; 986 return 0;
466} 987}
467 988
@@ -506,27 +1027,28 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
506static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on) 1027static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on)
507{ 1028{
508 if (on) { 1029 if (on) {
509 b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00cd); 1030 b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00cd);
510 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x0000); 1031 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x0000);
511 b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00cd); 1032 b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00cd);
512 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x0000); 1033 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x0000);
513 b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00cd); 1034 b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00cd);
514 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x0000); 1035 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x0000);
515 } else { 1036 } else {
516 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x07ff); 1037 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x07ff);
517 b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00fd); 1038 b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00fd);
518 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x07ff); 1039 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x07ff);
519 b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00fd); 1040 b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00fd);
520 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x07ff); 1041 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x07ff);
521 b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00fd); 1042 b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00fd);
522 } 1043 }
523} 1044}
524 1045
525static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev, 1046static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
526 unsigned int new_channel) 1047 unsigned int new_channel)
527{ 1048{
528 struct ieee80211_channel *channel = dev->wl->hw->conf.channel; 1049 struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan;
529 enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type; 1050 enum nl80211_channel_type channel_type =
1051 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
530 1052
531 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 1053 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
532 if ((new_channel < 1) || (new_channel > 14)) 1054 if ((new_channel < 1) || (new_channel > 14))
diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h
index 6544c4293b34..6cae370d1018 100644
--- a/drivers/net/wireless/b43/phy_ht.h
+++ b/drivers/net/wireless/b43/phy_ht.h
@@ -12,18 +12,65 @@
12#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */ 12#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */
13#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */ 13#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */
14#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */ 14#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */
15#define B43_PHY_HT_CLASS_CTL 0x0B0 /* Classifier control */
16#define B43_PHY_HT_CLASS_CTL_CCK_EN 0x0001 /* CCK enable */
17#define B43_PHY_HT_CLASS_CTL_OFDM_EN 0x0002 /* OFDM enable */
18#define B43_PHY_HT_CLASS_CTL_WAITED_EN 0x0004 /* Waited enable */
19#define B43_PHY_HT_IQLOCAL_CMDGCTL 0x0C2 /* I/Q LO cal command G control */
20#define B43_PHY_HT_SAMP_CMD 0x0C3 /* Sample command */
21#define B43_PHY_HT_SAMP_CMD_STOP 0x0002 /* Stop */
22#define B43_PHY_HT_SAMP_LOOP_CNT 0x0C4 /* Sample loop count */
23#define B43_PHY_HT_SAMP_WAIT_CNT 0x0C5 /* Sample wait count */
24#define B43_PHY_HT_SAMP_DEP_CNT 0x0C6 /* Sample depth count */
25#define B43_PHY_HT_SAMP_STAT 0x0C7 /* Sample status */
26#define B43_PHY_HT_EST_PWR_C1 0x118
27#define B43_PHY_HT_EST_PWR_C2 0x119
28#define B43_PHY_HT_EST_PWR_C3 0x11A
29#define B43_PHY_HT_TSSIMODE 0x122 /* TSSI mode */
30#define B43_PHY_HT_TSSIMODE_EN 0x0001 /* TSSI enable */
31#define B43_PHY_HT_TSSIMODE_PDEN 0x0002 /* Power det enable */
15#define B43_PHY_HT_BW1 0x1CE 32#define B43_PHY_HT_BW1 0x1CE
16#define B43_PHY_HT_BW2 0x1CF 33#define B43_PHY_HT_BW2 0x1CF
17#define B43_PHY_HT_BW3 0x1D0 34#define B43_PHY_HT_BW3 0x1D0
18#define B43_PHY_HT_BW4 0x1D1 35#define B43_PHY_HT_BW4 0x1D1
19#define B43_PHY_HT_BW5 0x1D2 36#define B43_PHY_HT_BW5 0x1D2
20#define B43_PHY_HT_BW6 0x1D3 37#define B43_PHY_HT_BW6 0x1D3
38#define B43_PHY_HT_TXPCTL_CMD_C1 0x1E7 /* TX power control command */
39#define B43_PHY_HT_TXPCTL_CMD_C1_INIT 0x007F /* Init */
40#define B43_PHY_HT_TXPCTL_CMD_C1_COEFF 0x2000 /* Power control coefficients */
41#define B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN 0x4000 /* Hardware TX power control enable */
42#define B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN 0x8000 /* TX power control enable */
43#define B43_PHY_HT_TXPCTL_N 0x1E8 /* TX power control N num */
44#define B43_PHY_HT_TXPCTL_N_TSSID 0x00FF /* N TSSI delay */
45#define B43_PHY_HT_TXPCTL_N_TSSID_SHIFT 0
46#define B43_PHY_HT_TXPCTL_N_NPTIL2 0x0700 /* N PT integer log2 */
47#define B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT 8
48#define B43_PHY_HT_TXPCTL_IDLE_TSSI 0x1E9 /* TX power control idle TSSI */
49#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1 0x003F
50#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT 0
51#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2 0x3F00
52#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT 8
53#define B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF 0x8000 /* Raw TSSI offset bin format */
54#define B43_PHY_HT_TXPCTL_TARG_PWR 0x1EA /* TX power control target power */
55#define B43_PHY_HT_TXPCTL_TARG_PWR_C1 0x00FF /* Power 0 */
56#define B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT 0
57#define B43_PHY_HT_TXPCTL_TARG_PWR_C2 0xFF00 /* Power 1 */
58#define B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT 8
59#define B43_PHY_HT_TX_PCTL_STATUS_C1 0x1ED
60#define B43_PHY_HT_TX_PCTL_STATUS_C2 0x1EE
61#define B43_PHY_HT_TXPCTL_CMD_C2 0x222
62#define B43_PHY_HT_TXPCTL_CMD_C2_INIT 0x007F
63#define B43_PHY_HT_RSSI_C1 0x219
64#define B43_PHY_HT_RSSI_C2 0x21A
65#define B43_PHY_HT_RSSI_C3 0x21B
21 66
22#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E) 67#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E)
23#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E) 68#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E)
24#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E) 69#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E)
25 70
26#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000) 71#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000)
72#define B43_PHY_HT_RF_SEQ_MODE_CA_OVER 0x0001 /* Core active override */
73#define B43_PHY_HT_RF_SEQ_MODE_TR_OVER 0x0002 /* Trigger override */
27#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003) 74#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003)
28#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */ 75#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */
29#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */ 76#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */
@@ -36,12 +83,28 @@
36 83
37#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010) 84#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010)
38 85
39#define B43_PHY_HT_AFE_CTL1 B43_PHY_EXTG(0x110) 86#define B43_PHY_HT_RF_CTL_INT_C1 B43_PHY_EXTG(0x04c)
40#define B43_PHY_HT_AFE_CTL2 B43_PHY_EXTG(0x111) 87#define B43_PHY_HT_RF_CTL_INT_C2 B43_PHY_EXTG(0x06c)
41#define B43_PHY_HT_AFE_CTL3 B43_PHY_EXTG(0x114) 88#define B43_PHY_HT_RF_CTL_INT_C3 B43_PHY_EXTG(0x08c)
42#define B43_PHY_HT_AFE_CTL4 B43_PHY_EXTG(0x115) 89
43#define B43_PHY_HT_AFE_CTL5 B43_PHY_EXTG(0x118) 90#define B43_PHY_HT_AFE_C1_OVER B43_PHY_EXTG(0x110)
44#define B43_PHY_HT_AFE_CTL6 B43_PHY_EXTG(0x119) 91#define B43_PHY_HT_AFE_C1 B43_PHY_EXTG(0x111)
92#define B43_PHY_HT_AFE_C2_OVER B43_PHY_EXTG(0x114)
93#define B43_PHY_HT_AFE_C2 B43_PHY_EXTG(0x115)
94#define B43_PHY_HT_AFE_C3_OVER B43_PHY_EXTG(0x118)
95#define B43_PHY_HT_AFE_C3 B43_PHY_EXTG(0x119)
96
97#define B43_PHY_HT_TXPCTL_CMD_C3 B43_PHY_EXTG(0x164)
98#define B43_PHY_HT_TXPCTL_CMD_C3_INIT 0x007F
99#define B43_PHY_HT_TXPCTL_IDLE_TSSI2 B43_PHY_EXTG(0x165) /* TX power control idle TSSI */
100#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3 0x003F
101#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT 0
102#define B43_PHY_HT_TXPCTL_TARG_PWR2 B43_PHY_EXTG(0x166) /* TX power control target power */
103#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3 0x00FF
104#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT 0
105#define B43_PHY_HT_TX_PCTL_STATUS_C3 B43_PHY_EXTG(0x169)
106
107#define B43_PHY_HT_TEST B43_PHY_N_BMODE(0x00A)
45 108
46 109
47/* Values for PHY registers used on channel switching */ 110/* Values for PHY registers used on channel switching */
@@ -56,6 +119,14 @@ struct b43_phy_ht_channeltab_e_phy {
56 119
57 120
58struct b43_phy_ht { 121struct b43_phy_ht {
122 u16 rf_ctl_int_save[3];
123
124 bool tx_pwr_ctl;
125 u8 tx_pwr_idx[3];
126
127 s32 bb_mult_save[3];
128
129 u8 idle_tssi[3];
59}; 130};
60 131
61 132
diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c
index a13e28ef6246..0bafa3b17035 100644
--- a/drivers/net/wireless/b43/phy_lcn.c
+++ b/drivers/net/wireless/b43/phy_lcn.c
@@ -808,8 +808,9 @@ static void b43_phy_lcn_op_switch_analog(struct b43_wldev *dev, bool on)
808static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev, 808static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
809 unsigned int new_channel) 809 unsigned int new_channel)
810{ 810{
811 struct ieee80211_channel *channel = dev->wl->hw->conf.channel; 811 struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan;
812 enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type; 812 enum nl80211_channel_type channel_type =
813 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
813 814
814 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 815 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
815 if ((new_channel < 1) || (new_channel > 14)) 816 if ((new_channel < 1) || (new_channel > 14))
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3ae28561f7a4..92190dacf689 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -104,14 +104,8 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
104 maxpwr = sprom->maxpwr_bg; 104 maxpwr = sprom->maxpwr_bg;
105 lpphy->max_tx_pwr_med_band = maxpwr; 105 lpphy->max_tx_pwr_med_band = maxpwr;
106 cckpo = sprom->cck2gpo; 106 cckpo = sprom->cck2gpo;
107 /*
108 * We don't read SPROM's opo as specs say. On rev8 SPROMs
109 * opo == ofdm2gpo and we don't know any SSB with LP-PHY
110 * and SPROM rev below 8.
111 */
112 B43_WARN_ON(sprom->revision < 8);
113 ofdmpo = sprom->ofdm2gpo;
114 if (cckpo) { 107 if (cckpo) {
108 ofdmpo = sprom->ofdm2gpo;
115 for (i = 0; i < 4; i++) { 109 for (i = 0; i < 4; i++) {
116 lpphy->tx_max_rate[i] = 110 lpphy->tx_max_rate[i] =
117 maxpwr - (ofdmpo & 0xF) * 2; 111 maxpwr - (ofdmpo & 0xF) * 2;
@@ -124,11 +118,11 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
124 ofdmpo >>= 4; 118 ofdmpo >>= 4;
125 } 119 }
126 } else { 120 } else {
127 ofdmpo &= 0xFF; 121 u8 opo = sprom->opo;
128 for (i = 0; i < 4; i++) 122 for (i = 0; i < 4; i++)
129 lpphy->tx_max_rate[i] = maxpwr; 123 lpphy->tx_max_rate[i] = maxpwr;
130 for (i = 4; i < 15; i++) 124 for (i = 4; i < 15; i++)
131 lpphy->tx_max_rate[i] = maxpwr - ofdmpo; 125 lpphy->tx_max_rate[i] = maxpwr - opo;
132 } 126 }
133 } else { /* 5GHz */ 127 } else { /* 5GHz */
134 lpphy->tx_isolation_low_band = sprom->tri5gl; 128 lpphy->tx_isolation_low_band = sprom->tri5gl;
@@ -287,8 +281,8 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
287 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A); 281 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
288 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00); 282 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
289 } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ || 283 } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
290 (dev->dev->board_type == 0x048A) || ((dev->phy.rev == 0) && 284 (dev->dev->board_type == SSB_BOARD_BU4312) ||
291 (sprom->boardflags_lo & B43_BFL_FEM))) { 285 (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
292 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001); 286 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
293 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400); 287 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400);
294 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001); 288 b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b70f220bc4b3..7c970d3ae358 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -69,14 +69,27 @@ enum b43_nphy_rf_sequence {
69 B43_RFSEQ_UPDATE_GAINU, 69 B43_RFSEQ_UPDATE_GAINU,
70}; 70};
71 71
72enum b43_nphy_rssi_type { 72enum n_intc_override {
73 B43_NPHY_RSSI_X = 0, 73 N_INTC_OVERRIDE_OFF = 0,
74 B43_NPHY_RSSI_Y, 74 N_INTC_OVERRIDE_TRSW = 1,
75 B43_NPHY_RSSI_Z, 75 N_INTC_OVERRIDE_PA = 2,
76 B43_NPHY_RSSI_PWRDET, 76 N_INTC_OVERRIDE_EXT_LNA_PU = 3,
77 B43_NPHY_RSSI_TSSI_I, 77 N_INTC_OVERRIDE_EXT_LNA_GAIN = 4,
78 B43_NPHY_RSSI_TSSI_Q, 78};
79 B43_NPHY_RSSI_TBD, 79
80enum n_rssi_type {
81 N_RSSI_W1 = 0,
82 N_RSSI_W2,
83 N_RSSI_NB,
84 N_RSSI_IQ,
85 N_RSSI_TSSI_2G,
86 N_RSSI_TSSI_5G,
87 N_RSSI_TBD,
88};
89
90enum n_rail_type {
91 N_RAIL_I = 0,
92 N_RAIL_Q = 1,
80}; 93};
81 94
82static inline bool b43_nphy_ipa(struct b43_wldev *dev) 95static inline bool b43_nphy_ipa(struct b43_wldev *dev)
@@ -94,7 +107,7 @@ static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
94} 107}
95 108
96/************************************************** 109/**************************************************
97 * RF (just without b43_nphy_rf_control_intc_override) 110 * RF (just without b43_nphy_rf_ctl_intc_override)
98 **************************************************/ 111 **************************************************/
99 112
100/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ 113/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
@@ -128,9 +141,9 @@ ok:
128} 141}
129 142
130/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ 143/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
131static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field, 144static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
132 u16 value, u8 core, bool off, 145 u16 value, u8 core, bool off,
133 u8 override) 146 u8 override)
134{ 147{
135 const struct nphy_rf_control_override_rev7 *e; 148 const struct nphy_rf_control_override_rev7 *e;
136 u16 en_addrs[3][2] = { 149 u16 en_addrs[3][2] = {
@@ -168,8 +181,8 @@ static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
168} 181}
169 182
170/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ 183/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
171static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, 184static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
172 u16 value, u8 core, bool off) 185 u16 value, u8 core, bool off)
173{ 186{
174 int i; 187 int i;
175 u8 index = fls(field); 188 u8 index = fls(field);
@@ -244,14 +257,14 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
244} 257}
245 258
246/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ 259/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
247static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, 260static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
248 u16 value, u8 core) 261 enum n_intc_override intc_override,
262 u16 value, u8 core)
249{ 263{
250 u8 i, j; 264 u8 i, j;
251 u16 reg, tmp, val; 265 u16 reg, tmp, val;
252 266
253 B43_WARN_ON(dev->phy.rev < 3); 267 B43_WARN_ON(dev->phy.rev < 3);
254 B43_WARN_ON(field > 4);
255 268
256 for (i = 0; i < 2; i++) { 269 for (i = 0; i < 2; i++) {
257 if ((core == 1 && i == 1) || (core == 2 && !i)) 270 if ((core == 1 && i == 1) || (core == 2 && !i))
@@ -261,12 +274,12 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
261 B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; 274 B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
262 b43_phy_set(dev, reg, 0x400); 275 b43_phy_set(dev, reg, 0x400);
263 276
264 switch (field) { 277 switch (intc_override) {
265 case 0: 278 case N_INTC_OVERRIDE_OFF:
266 b43_phy_write(dev, reg, 0); 279 b43_phy_write(dev, reg, 0);
267 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 280 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
268 break; 281 break;
269 case 1: 282 case N_INTC_OVERRIDE_TRSW:
270 if (!i) { 283 if (!i) {
271 b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, 284 b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
272 0xFC3F, (value << 6)); 285 0xFC3F, (value << 6));
@@ -307,7 +320,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
307 0xFFFE); 320 0xFFFE);
308 } 321 }
309 break; 322 break;
310 case 2: 323 case N_INTC_OVERRIDE_PA:
311 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 324 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
312 tmp = 0x0020; 325 tmp = 0x0020;
313 val = value << 5; 326 val = value << 5;
@@ -317,7 +330,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
317 } 330 }
318 b43_phy_maskset(dev, reg, ~tmp, val); 331 b43_phy_maskset(dev, reg, ~tmp, val);
319 break; 332 break;
320 case 3: 333 case N_INTC_OVERRIDE_EXT_LNA_PU:
321 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 334 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
322 tmp = 0x0001; 335 tmp = 0x0001;
323 val = value; 336 val = value;
@@ -327,7 +340,7 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
327 } 340 }
328 b43_phy_maskset(dev, reg, ~tmp, val); 341 b43_phy_maskset(dev, reg, ~tmp, val);
329 break; 342 break;
330 case 4: 343 case N_INTC_OVERRIDE_EXT_LNA_GAIN:
331 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 344 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
332 tmp = 0x0002; 345 tmp = 0x0002;
333 val = value << 1; 346 val = value << 1;
@@ -1011,7 +1024,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
1011 1024
1012 if (sprom->revision < 4) 1025 if (sprom->revision < 4)
1013 workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM 1026 workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM
1014 && dev->dev->board_type == 0x46D 1027 && dev->dev->board_type == SSB_BOARD_CB2_4321
1015 && dev->dev->board_rev >= 0x41); 1028 && dev->dev->board_rev >= 0x41);
1016 else 1029 else
1017 workaround = 1030 workaround =
@@ -1207,8 +1220,9 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
1207 1220
1208/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ 1221/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
1209static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, 1222static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
1210 s8 offset, u8 core, u8 rail, 1223 s8 offset, u8 core,
1211 enum b43_nphy_rssi_type type) 1224 enum n_rail_type rail,
1225 enum n_rssi_type rssi_type)
1212{ 1226{
1213 u16 tmp; 1227 u16 tmp;
1214 bool core1or5 = (core == 1) || (core == 5); 1228 bool core1or5 = (core == 1) || (core == 5);
@@ -1217,63 +1231,74 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
1217 offset = clamp_val(offset, -32, 31); 1231 offset = clamp_val(offset, -32, 31);
1218 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); 1232 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
1219 1233
1220 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) 1234 switch (rssi_type) {
1221 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); 1235 case N_RSSI_NB:
1222 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) 1236 if (core1or5 && rail == N_RAIL_I)
1223 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); 1237 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
1224 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z)) 1238 if (core1or5 && rail == N_RAIL_Q)
1225 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); 1239 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
1226 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z)) 1240 if (core2or5 && rail == N_RAIL_I)
1227 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); 1241 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
1228 1242 if (core2or5 && rail == N_RAIL_Q)
1229 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) 1243 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
1230 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); 1244 break;
1231 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) 1245 case N_RSSI_W1:
1232 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); 1246 if (core1or5 && rail == N_RAIL_I)
1233 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X)) 1247 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
1234 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); 1248 if (core1or5 && rail == N_RAIL_Q)
1235 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X)) 1249 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
1236 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); 1250 if (core2or5 && rail == N_RAIL_I)
1237 1251 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
1238 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) 1252 if (core2or5 && rail == N_RAIL_Q)
1239 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); 1253 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
1240 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) 1254 break;
1241 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); 1255 case N_RSSI_W2:
1242 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y)) 1256 if (core1or5 && rail == N_RAIL_I)
1243 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); 1257 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
1244 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y)) 1258 if (core1or5 && rail == N_RAIL_Q)
1245 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); 1259 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
1246 1260 if (core2or5 && rail == N_RAIL_I)
1247 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) 1261 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
1248 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); 1262 if (core2or5 && rail == N_RAIL_Q)
1249 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) 1263 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
1250 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); 1264 break;
1251 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD)) 1265 case N_RSSI_TBD:
1252 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); 1266 if (core1or5 && rail == N_RAIL_I)
1253 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD)) 1267 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
1254 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); 1268 if (core1or5 && rail == N_RAIL_Q)
1255 1269 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
1256 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) 1270 if (core2or5 && rail == N_RAIL_I)
1257 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); 1271 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
1258 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) 1272 if (core2or5 && rail == N_RAIL_Q)
1259 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); 1273 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
1260 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET)) 1274 break;
1261 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); 1275 case N_RSSI_IQ:
1262 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET)) 1276 if (core1or5 && rail == N_RAIL_I)
1263 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); 1277 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
1264 1278 if (core1or5 && rail == N_RAIL_Q)
1265 if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I)) 1279 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
1266 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); 1280 if (core2or5 && rail == N_RAIL_I)
1267 if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I)) 1281 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
1268 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); 1282 if (core2or5 && rail == N_RAIL_Q)
1269 1283 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
1270 if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q)) 1284 break;
1271 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); 1285 case N_RSSI_TSSI_2G:
1272 if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q)) 1286 if (core1or5)
1273 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); 1287 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
1288 if (core2or5)
1289 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
1290 break;
1291 case N_RSSI_TSSI_5G:
1292 if (core1or5)
1293 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
1294 if (core2or5)
1295 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
1296 break;
1297 }
1274} 1298}
1275 1299
1276static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) 1300static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
1301 enum n_rssi_type rssi_type)
1277{ 1302{
1278 u8 i; 1303 u8 i;
1279 u16 reg, val; 1304 u16 reg, val;
@@ -1296,7 +1321,9 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1296 B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; 1321 B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
1297 b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); 1322 b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);
1298 1323
1299 if (type < 3) { 1324 if (rssi_type == N_RSSI_W1 ||
1325 rssi_type == N_RSSI_W2 ||
1326 rssi_type == N_RSSI_NB) {
1300 reg = (i == 0) ? 1327 reg = (i == 0) ?
1301 B43_NPHY_AFECTL_C1 : 1328 B43_NPHY_AFECTL_C1 :
1302 B43_NPHY_AFECTL_C2; 1329 B43_NPHY_AFECTL_C2;
@@ -1307,9 +1334,9 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1307 B43_NPHY_RFCTL_LUT_TRSW_UP2; 1334 B43_NPHY_RFCTL_LUT_TRSW_UP2;
1308 b43_phy_maskset(dev, reg, 0xFFC3, 0); 1335 b43_phy_maskset(dev, reg, 0xFFC3, 0);
1309 1336
1310 if (type == 0) 1337 if (rssi_type == N_RSSI_W1)
1311 val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; 1338 val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
1312 else if (type == 1) 1339 else if (rssi_type == N_RSSI_W2)
1313 val = 16; 1340 val = 16;
1314 else 1341 else
1315 val = 32; 1342 val = 32;
@@ -1320,9 +1347,9 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1320 B43_NPHY_TXF_40CO_B32S1; 1347 B43_NPHY_TXF_40CO_B32S1;
1321 b43_phy_set(dev, reg, 0x0020); 1348 b43_phy_set(dev, reg, 0x0020);
1322 } else { 1349 } else {
1323 if (type == 6) 1350 if (rssi_type == N_RSSI_TBD)
1324 val = 0x0100; 1351 val = 0x0100;
1325 else if (type == 3) 1352 else if (rssi_type == N_RSSI_IQ)
1326 val = 0x0200; 1353 val = 0x0200;
1327 else 1354 else
1328 val = 0x0300; 1355 val = 0x0300;
@@ -1334,7 +1361,8 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1334 b43_phy_maskset(dev, reg, 0xFCFF, val); 1361 b43_phy_maskset(dev, reg, 0xFCFF, val);
1335 b43_phy_maskset(dev, reg, 0xF3FF, val << 2); 1362 b43_phy_maskset(dev, reg, 0xF3FF, val << 2);
1336 1363
1337 if (type != 3 && type != 6) { 1364 if (rssi_type != N_RSSI_IQ &&
1365 rssi_type != N_RSSI_TBD) {
1338 enum ieee80211_band band = 1366 enum ieee80211_band band =
1339 b43_current_band(dev->wl); 1367 b43_current_band(dev->wl);
1340 1368
@@ -1344,7 +1372,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1344 val = 0x11; 1372 val = 0x11;
1345 reg = (i == 0) ? 0x2000 : 0x3000; 1373 reg = (i == 0) ? 0x2000 : 0x3000;
1346 reg |= B2055_PADDRV; 1374 reg |= B2055_PADDRV;
1347 b43_radio_write16(dev, reg, val); 1375 b43_radio_write(dev, reg, val);
1348 1376
1349 reg = (i == 0) ? 1377 reg = (i == 0) ?
1350 B43_NPHY_AFECTL_OVER1 : 1378 B43_NPHY_AFECTL_OVER1 :
@@ -1356,33 +1384,43 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1356 } 1384 }
1357} 1385}
1358 1386
1359static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) 1387static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code,
1388 enum n_rssi_type rssi_type)
1360{ 1389{
1361 u16 val; 1390 u16 val;
1391 bool rssi_w1_w2_nb = false;
1362 1392
1363 if (type < 3) 1393 switch (rssi_type) {
1394 case N_RSSI_W1:
1395 case N_RSSI_W2:
1396 case N_RSSI_NB:
1364 val = 0; 1397 val = 0;
1365 else if (type == 6) 1398 rssi_w1_w2_nb = true;
1399 break;
1400 case N_RSSI_TBD:
1366 val = 1; 1401 val = 1;
1367 else if (type == 3) 1402 break;
1403 case N_RSSI_IQ:
1368 val = 2; 1404 val = 2;
1369 else 1405 break;
1406 default:
1370 val = 3; 1407 val = 3;
1408 }
1371 1409
1372 val = (val << 12) | (val << 14); 1410 val = (val << 12) | (val << 14);
1373 b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); 1411 b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
1374 b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); 1412 b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
1375 1413
1376 if (type < 3) { 1414 if (rssi_w1_w2_nb) {
1377 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, 1415 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
1378 (type + 1) << 4); 1416 (rssi_type + 1) << 4);
1379 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, 1417 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
1380 (type + 1) << 4); 1418 (rssi_type + 1) << 4);
1381 } 1419 }
1382 1420
1383 if (code == 0) { 1421 if (code == 0) {
1384 b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000); 1422 b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
1385 if (type < 3) { 1423 if (rssi_w1_w2_nb) {
1386 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, 1424 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
1387 ~(B43_NPHY_RFCTL_CMD_RXEN | 1425 ~(B43_NPHY_RFCTL_CMD_RXEN |
1388 B43_NPHY_RFCTL_CMD_CORESEL)); 1426 B43_NPHY_RFCTL_CMD_CORESEL));
@@ -1398,7 +1436,7 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1398 } 1436 }
1399 } else { 1437 } else {
1400 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000); 1438 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
1401 if (type < 3) { 1439 if (rssi_w1_w2_nb) {
1402 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 1440 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
1403 ~(B43_NPHY_RFCTL_CMD_RXEN | 1441 ~(B43_NPHY_RFCTL_CMD_RXEN |
1404 B43_NPHY_RFCTL_CMD_CORESEL), 1442 B43_NPHY_RFCTL_CMD_CORESEL),
@@ -1418,7 +1456,8 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1418} 1456}
1419 1457
1420/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ 1458/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
1421static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) 1459static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
1460 enum n_rssi_type type)
1422{ 1461{
1423 if (dev->phy.rev >= 3) 1462 if (dev->phy.rev >= 3)
1424 b43_nphy_rev3_rssi_select(dev, code, type); 1463 b43_nphy_rev3_rssi_select(dev, code, type);
@@ -1427,11 +1466,12 @@ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1427} 1466}
1428 1467
1429/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ 1468/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
1430static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) 1469static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev,
1470 enum n_rssi_type rssi_type, u8 *buf)
1431{ 1471{
1432 int i; 1472 int i;
1433 for (i = 0; i < 2; i++) { 1473 for (i = 0; i < 2; i++) {
1434 if (type == 2) { 1474 if (rssi_type == N_RSSI_NB) {
1435 if (i == 0) { 1475 if (i == 0) {
1436 b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, 1476 b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
1437 0xFC, buf[0]); 1477 0xFC, buf[0]);
@@ -1455,8 +1495,8 @@ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
1455} 1495}
1456 1496
1457/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ 1497/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
1458static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, 1498static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
1459 u8 nsamp) 1499 s32 *buf, u8 nsamp)
1460{ 1500{
1461 int i; 1501 int i;
1462 int out; 1502 int out;
@@ -1487,7 +1527,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
1487 save_regs_phy[8] = 0; 1527 save_regs_phy[8] = 0;
1488 } 1528 }
1489 1529
1490 b43_nphy_rssi_select(dev, 5, type); 1530 b43_nphy_rssi_select(dev, 5, rssi_type);
1491 1531
1492 if (dev->phy.rev < 2) { 1532 if (dev->phy.rev < 2) {
1493 save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); 1533 save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
@@ -1574,7 +1614,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1574 1614
1575 u16 r; /* routing */ 1615 u16 r; /* routing */
1576 u8 rx_core_state; 1616 u8 rx_core_state;
1577 u8 core, i, j; 1617 int core, i, j, vcm;
1578 1618
1579 class = b43_nphy_classifier(dev, 0, 0); 1619 class = b43_nphy_classifier(dev, 0, 0);
1580 b43_nphy_classifier(dev, 7, 4); 1620 b43_nphy_classifier(dev, 7, 4);
@@ -1586,19 +1626,19 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1586 for (i = 0; i < ARRAY_SIZE(regs_to_store); i++) 1626 for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
1587 saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]); 1627 saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
1588 1628
1589 b43_nphy_rf_control_intc_override(dev, 0, 0, 7); 1629 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
1590 b43_nphy_rf_control_intc_override(dev, 1, 1, 7); 1630 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
1591 b43_nphy_rf_control_override(dev, 0x1, 0, 0, false); 1631 b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
1592 b43_nphy_rf_control_override(dev, 0x2, 1, 0, false); 1632 b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
1593 b43_nphy_rf_control_override(dev, 0x80, 1, 0, false); 1633 b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
1594 b43_nphy_rf_control_override(dev, 0x40, 1, 0, false); 1634 b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
1595 1635
1596 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 1636 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
1597 b43_nphy_rf_control_override(dev, 0x20, 0, 0, false); 1637 b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
1598 b43_nphy_rf_control_override(dev, 0x10, 1, 0, false); 1638 b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
1599 } else { 1639 } else {
1600 b43_nphy_rf_control_override(dev, 0x10, 0, 0, false); 1640 b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
1601 b43_nphy_rf_control_override(dev, 0x20, 1, 0, false); 1641 b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
1602 } 1642 }
1603 1643
1604 rx_core_state = b43_nphy_get_rx_core_state(dev); 1644 rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1606,35 +1646,44 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1606 if (!(rx_core_state & (1 << core))) 1646 if (!(rx_core_state & (1 << core)))
1607 continue; 1647 continue;
1608 r = core ? B2056_RX1 : B2056_RX0; 1648 r = core ? B2056_RX1 : B2056_RX0;
1609 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, 2); 1649 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, N_RAIL_I,
1610 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, 2); 1650 N_RSSI_NB);
1611 for (i = 0; i < 8; i++) { 1651 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, N_RAIL_Q,
1652 N_RSSI_NB);
1653
1654 /* Grab RSSI results for every possible VCM */
1655 for (vcm = 0; vcm < 8; vcm++) {
1612 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, 1656 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
1613 i << 2); 1657 vcm << 2);
1614 b43_nphy_poll_rssi(dev, 2, results[i], 8); 1658 b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
1615 } 1659 }
1660
1661 /* Find out which VCM got the best results */
1616 for (i = 0; i < 4; i += 2) { 1662 for (i = 0; i < 4; i += 2) {
1617 s32 curr; 1663 s32 currd;
1618 s32 mind = 0x100000; 1664 s32 mind = 0x100000;
1619 s32 minpoll = 249; 1665 s32 minpoll = 249;
1620 u8 minvcm = 0; 1666 u8 minvcm = 0;
1621 if (2 * core != i) 1667 if (2 * core != i)
1622 continue; 1668 continue;
1623 for (j = 0; j < 8; j++) { 1669 for (vcm = 0; vcm < 8; vcm++) {
1624 curr = results[j][i] * results[j][i] + 1670 currd = results[vcm][i] * results[vcm][i] +
1625 results[j][i + 1] * results[j][i]; 1671 results[vcm][i + 1] * results[vcm][i];
1626 if (curr < mind) { 1672 if (currd < mind) {
1627 mind = curr; 1673 mind = currd;
1628 minvcm = j; 1674 minvcm = vcm;
1629 } 1675 }
1630 if (results[j][i] < minpoll) 1676 if (results[vcm][i] < minpoll)
1631 minpoll = results[j][i]; 1677 minpoll = results[vcm][i];
1632 } 1678 }
1633 vcm_final = minvcm; 1679 vcm_final = minvcm;
1634 results_min[i] = minpoll; 1680 results_min[i] = minpoll;
1635 } 1681 }
1682
1683 /* Select the best VCM */
1636 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3, 1684 b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
1637 vcm_final << 2); 1685 vcm_final << 2);
1686
1638 for (i = 0; i < 4; i++) { 1687 for (i = 0; i < 4; i++) {
1639 if (core != i / 2) 1688 if (core != i / 2)
1640 continue; 1689 continue;
@@ -1647,16 +1696,19 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1647 offset[i] = -32; 1696 offset[i] = -32;
1648 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1697 b43_nphy_scale_offset_rssi(dev, 0, offset[i],
1649 (i / 2 == 0) ? 1 : 2, 1698 (i / 2 == 0) ? 1 : 2,
1650 (i % 2 == 0) ? 0 : 1, 1699 (i % 2 == 0) ? N_RAIL_I : N_RAIL_Q,
1651 2); 1700 N_RSSI_NB);
1652 } 1701 }
1653 } 1702 }
1703
1654 for (core = 0; core < 2; core++) { 1704 for (core = 0; core < 2; core++) {
1655 if (!(rx_core_state & (1 << core))) 1705 if (!(rx_core_state & (1 << core)))
1656 continue; 1706 continue;
1657 for (i = 0; i < 2; i++) { 1707 for (i = 0; i < 2; i++) {
1658 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 0, i); 1708 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1,
1659 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i); 1709 N_RAIL_I, i);
1710 b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1,
1711 N_RAIL_Q, i);
1660 b43_nphy_poll_rssi(dev, i, poll_results, 8); 1712 b43_nphy_poll_rssi(dev, i, poll_results, 8);
1661 for (j = 0; j < 4; j++) { 1713 for (j = 0; j < 4; j++) {
1662 if (j / 2 == core) { 1714 if (j / 2 == core) {
@@ -1696,8 +1748,13 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1696 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; 1748 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
1697 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; 1749 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
1698 } 1750 }
1699 rssical_radio_regs[0] = b43_radio_read(dev, 0x602B); 1751 if (dev->phy.rev >= 7) {
1700 rssical_radio_regs[0] = b43_radio_read(dev, 0x702B); 1752 } else {
1753 rssical_radio_regs[0] = b43_radio_read(dev, B2056_RX0 |
1754 B2056_RX_RSSI_MISC);
1755 rssical_radio_regs[1] = b43_radio_read(dev, B2056_RX1 |
1756 B2056_RX_RSSI_MISC);
1757 }
1701 rssical_phy_regs[0] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Z); 1758 rssical_phy_regs[0] = b43_phy_read(dev, B43_NPHY_RSSIMC_0I_RSSI_Z);
1702 rssical_phy_regs[1] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z); 1759 rssical_phy_regs[1] = b43_phy_read(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z);
1703 rssical_phy_regs[2] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Z); 1760 rssical_phy_regs[2] = b43_phy_read(dev, B43_NPHY_RSSIMC_1I_RSSI_Z);
@@ -1723,9 +1780,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1723} 1780}
1724 1781
1725/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ 1782/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
1726static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) 1783static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
1727{ 1784{
1728 int i, j; 1785 int i, j, vcm;
1729 u8 state[4]; 1786 u8 state[4];
1730 u8 code, val; 1787 u8 code, val;
1731 u16 class, override; 1788 u16 class, override;
@@ -1743,10 +1800,10 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1743 s32 results[4][4] = { }; 1800 s32 results[4][4] = { };
1744 s32 miniq[4][2] = { }; 1801 s32 miniq[4][2] = { };
1745 1802
1746 if (type == 2) { 1803 if (type == N_RSSI_NB) {
1747 code = 0; 1804 code = 0;
1748 val = 6; 1805 val = 6;
1749 } else if (type < 2) { 1806 } else if (type == N_RSSI_W1 || type == N_RSSI_W2) {
1750 code = 25; 1807 code = 25;
1751 val = 4; 1808 val = 4;
1752 } else { 1809 } else {
@@ -1765,63 +1822,63 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1765 override = 0x110; 1822 override = 0x110;
1766 1823
1767 regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); 1824 regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
1768 regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); 1825 regs_save_radio[0] = b43_radio_read(dev, B2055_C1_PD_RXTX);
1769 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); 1826 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
1770 b43_radio_write16(dev, B2055_C1_PD_RXTX, val); 1827 b43_radio_write(dev, B2055_C1_PD_RXTX, val);
1771 1828
1772 regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); 1829 regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
1773 regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); 1830 regs_save_radio[1] = b43_radio_read(dev, B2055_C2_PD_RXTX);
1774 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); 1831 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
1775 b43_radio_write16(dev, B2055_C2_PD_RXTX, val); 1832 b43_radio_write(dev, B2055_C2_PD_RXTX, val);
1776 1833
1777 state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; 1834 state[0] = b43_radio_read(dev, B2055_C1_PD_RSSIMISC) & 0x07;
1778 state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; 1835 state[1] = b43_radio_read(dev, B2055_C2_PD_RSSIMISC) & 0x07;
1779 b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8); 1836 b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
1780 b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); 1837 b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
1781 state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; 1838 state[2] = b43_radio_read(dev, B2055_C1_SP_RSSI) & 0x07;
1782 state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; 1839 state[3] = b43_radio_read(dev, B2055_C2_SP_RSSI) & 0x07;
1783 1840
1784 b43_nphy_rssi_select(dev, 5, type); 1841 b43_nphy_rssi_select(dev, 5, type);
1785 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); 1842 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, N_RAIL_I, type);
1786 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type); 1843 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, N_RAIL_Q, type);
1787 1844
1788 for (i = 0; i < 4; i++) { 1845 for (vcm = 0; vcm < 4; vcm++) {
1789 u8 tmp[4]; 1846 u8 tmp[4];
1790 for (j = 0; j < 4; j++) 1847 for (j = 0; j < 4; j++)
1791 tmp[j] = i; 1848 tmp[j] = vcm;
1792 if (type != 1) 1849 if (type != N_RSSI_W2)
1793 b43_nphy_set_rssi_2055_vcm(dev, type, tmp); 1850 b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
1794 b43_nphy_poll_rssi(dev, type, results[i], 8); 1851 b43_nphy_poll_rssi(dev, type, results[vcm], 8);
1795 if (type < 2) 1852 if (type == N_RSSI_W1 || type == N_RSSI_W2)
1796 for (j = 0; j < 2; j++) 1853 for (j = 0; j < 2; j++)
1797 miniq[i][j] = min(results[i][2 * j], 1854 miniq[vcm][j] = min(results[vcm][2 * j],
1798 results[i][2 * j + 1]); 1855 results[vcm][2 * j + 1]);
1799 } 1856 }
1800 1857
1801 for (i = 0; i < 4; i++) { 1858 for (i = 0; i < 4; i++) {
1802 s32 mind = 0x100000; 1859 s32 mind = 0x100000;
1803 u8 minvcm = 0; 1860 u8 minvcm = 0;
1804 s32 minpoll = 249; 1861 s32 minpoll = 249;
1805 s32 curr; 1862 s32 currd;
1806 for (j = 0; j < 4; j++) { 1863 for (vcm = 0; vcm < 4; vcm++) {
1807 if (type == 2) 1864 if (type == N_RSSI_NB)
1808 curr = abs(results[j][i]); 1865 currd = abs(results[vcm][i] - code * 8);
1809 else 1866 else
1810 curr = abs(miniq[j][i / 2] - code * 8); 1867 currd = abs(miniq[vcm][i / 2] - code * 8);
1811 1868
1812 if (curr < mind) { 1869 if (currd < mind) {
1813 mind = curr; 1870 mind = currd;
1814 minvcm = j; 1871 minvcm = vcm;
1815 } 1872 }
1816 1873
1817 if (results[j][i] < minpoll) 1874 if (results[vcm][i] < minpoll)
1818 minpoll = results[j][i]; 1875 minpoll = results[vcm][i];
1819 } 1876 }
1820 results_min[i] = minpoll; 1877 results_min[i] = minpoll;
1821 vcm_final[i] = minvcm; 1878 vcm_final[i] = minvcm;
1822 } 1879 }
1823 1880
1824 if (type != 1) 1881 if (type != N_RSSI_W2)
1825 b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); 1882 b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
1826 1883
1827 for (i = 0; i < 4; i++) { 1884 for (i = 0; i < 4; i++) {
@@ -1836,7 +1893,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1836 offset[i] = code - 32; 1893 offset[i] = code - 32;
1837 1894
1838 core = (i / 2) ? 2 : 1; 1895 core = (i / 2) ? 2 : 1;
1839 rail = (i % 2) ? 1 : 0; 1896 rail = (i % 2) ? N_RAIL_Q : N_RAIL_I;
1840 1897
1841 b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail, 1898 b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail,
1842 type); 1899 type);
@@ -1847,37 +1904,37 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1847 1904
1848 switch (state[2]) { 1905 switch (state[2]) {
1849 case 1: 1906 case 1:
1850 b43_nphy_rssi_select(dev, 1, 2); 1907 b43_nphy_rssi_select(dev, 1, N_RSSI_NB);
1851 break; 1908 break;
1852 case 4: 1909 case 4:
1853 b43_nphy_rssi_select(dev, 1, 0); 1910 b43_nphy_rssi_select(dev, 1, N_RSSI_W1);
1854 break; 1911 break;
1855 case 2: 1912 case 2:
1856 b43_nphy_rssi_select(dev, 1, 1); 1913 b43_nphy_rssi_select(dev, 1, N_RSSI_W2);
1857 break; 1914 break;
1858 default: 1915 default:
1859 b43_nphy_rssi_select(dev, 1, 1); 1916 b43_nphy_rssi_select(dev, 1, N_RSSI_W2);
1860 break; 1917 break;
1861 } 1918 }
1862 1919
1863 switch (state[3]) { 1920 switch (state[3]) {
1864 case 1: 1921 case 1:
1865 b43_nphy_rssi_select(dev, 2, 2); 1922 b43_nphy_rssi_select(dev, 2, N_RSSI_NB);
1866 break; 1923 break;
1867 case 4: 1924 case 4:
1868 b43_nphy_rssi_select(dev, 2, 0); 1925 b43_nphy_rssi_select(dev, 2, N_RSSI_W1);
1869 break; 1926 break;
1870 default: 1927 default:
1871 b43_nphy_rssi_select(dev, 2, 1); 1928 b43_nphy_rssi_select(dev, 2, N_RSSI_W2);
1872 break; 1929 break;
1873 } 1930 }
1874 1931
1875 b43_nphy_rssi_select(dev, 0, type); 1932 b43_nphy_rssi_select(dev, 0, type);
1876 1933
1877 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); 1934 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
1878 b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); 1935 b43_radio_write(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
1879 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); 1936 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
1880 b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]); 1937 b43_radio_write(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
1881 1938
1882 b43_nphy_classifier(dev, 7, class); 1939 b43_nphy_classifier(dev, 7, class);
1883 b43_nphy_write_clip_detection(dev, clip_state); 1940 b43_nphy_write_clip_detection(dev, clip_state);
@@ -1895,9 +1952,9 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
1895 if (dev->phy.rev >= 3) { 1952 if (dev->phy.rev >= 3) {
1896 b43_nphy_rev3_rssi_cal(dev); 1953 b43_nphy_rev3_rssi_cal(dev);
1897 } else { 1954 } else {
1898 b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z); 1955 b43_nphy_rev2_rssi_cal(dev, N_RSSI_NB);
1899 b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X); 1956 b43_nphy_rev2_rssi_cal(dev, N_RSSI_W1);
1900 b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y); 1957 b43_nphy_rev2_rssi_cal(dev, N_RSSI_W2);
1901 } 1958 }
1902} 1959}
1903 1960
@@ -1930,10 +1987,8 @@ static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
1930 b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); 1987 b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
1931 1988
1932 /* Set Clip 2 detect */ 1989 /* Set Clip 2 detect */
1933 b43_phy_set(dev, B43_NPHY_C1_CGAINI, 1990 b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
1934 B43_NPHY_C1_CGAINI_CL2DETECT); 1991 b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);
1935 b43_phy_set(dev, B43_NPHY_C2_CGAINI,
1936 B43_NPHY_C2_CGAINI_CL2DETECT);
1937 1992
1938 b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, 1993 b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
1939 0x17); 1994 0x17);
@@ -1967,22 +2022,22 @@ static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
1967 b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); 2022 b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
1968 b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); 2023 b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
1969 2024
1970 b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); 2025 b43_phy_write(dev, B43_NPHY_REV3_C1_INITGAIN_A, e->init_gain);
1971 b43_phy_write(dev, 0x2A7, e->init_gain); 2026 b43_phy_write(dev, B43_NPHY_REV3_C2_INITGAIN_A, e->init_gain);
2027
1972 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, 2028 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
1973 e->rfseq_init); 2029 e->rfseq_init);
1974 2030
1975 /* TODO: check defines. Do not match variables names */ 2031 b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_HIGAIN_A, e->cliphi_gain);
1976 b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); 2032 b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_HIGAIN_A, e->cliphi_gain);
1977 b43_phy_write(dev, 0x2A9, e->cliphi_gain); 2033 b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_MEDGAIN_A, e->clipmd_gain);
1978 b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); 2034 b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_MEDGAIN_A, e->clipmd_gain);
1979 b43_phy_write(dev, 0x2AB, e->clipmd_gain); 2035 b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_LOGAIN_A, e->cliplo_gain);
1980 b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); 2036 b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_LOGAIN_A, e->cliplo_gain);
1981 b43_phy_write(dev, 0x2AD, e->cliplo_gain); 2037
1982 2038 b43_phy_maskset(dev, B43_NPHY_CRSMINPOWER0, 0xFF00, e->crsmin);
1983 b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); 2039 b43_phy_maskset(dev, B43_NPHY_CRSMINPOWERL0, 0xFF00, e->crsminl);
1984 b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); 2040 b43_phy_maskset(dev, B43_NPHY_CRSMINPOWERU0, 0xFF00, e->crsminu);
1985 b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
1986 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); 2041 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
1987 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); 2042 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
1988 b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, 2043 b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
@@ -2164,8 +2219,8 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2164 b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000); 2219 b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
2165 } 2220 }
2166 if (phy->rev <= 8) { 2221 if (phy->rev <= 8) {
2167 b43_phy_write(dev, 0x23F, 0x1B0); 2222 b43_phy_write(dev, B43_NPHY_FORCEFRONT0, 0x1B0);
2168 b43_phy_write(dev, 0x240, 0x1B0); 2223 b43_phy_write(dev, B43_NPHY_FORCEFRONT1, 0x1B0);
2169 } 2224 }
2170 if (phy->rev >= 8) 2225 if (phy->rev >= 8)
2171 b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72); 2226 b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
@@ -2182,8 +2237,8 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2182 b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa, 2237 b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
2183 rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa)); 2238 rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
2184 2239
2185 b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000); 2240 b43_phy_maskset(dev, B43_NPHY_EPS_OVERRIDEI_0, 0x3FFF, 0x4000);
2186 b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000); 2241 b43_phy_maskset(dev, B43_NPHY_EPS_OVERRIDEI_1, 0x3FFF, 0x4000);
2187 2242
2188 lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154); 2243 lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
2189 lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159); 2244 lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
@@ -2260,11 +2315,11 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
2260 b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16), 2315 b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
2261 rx2tx_lut_40_11n); 2316 rx2tx_lut_40_11n);
2262 } 2317 }
2263 b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2); 2318 b43_nphy_rf_ctl_override_rev7(dev, 16, 1, 3, false, 2);
2264 } 2319 }
2265 b43_phy_write(dev, 0x32F, 0x3); 2320 b43_phy_write(dev, 0x32F, 0x3);
2266 if (phy->radio_rev == 4 || phy->radio_rev == 6) 2321 if (phy->radio_rev == 4 || phy->radio_rev == 6)
2267 b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0); 2322 b43_nphy_rf_ctl_override_rev7(dev, 4, 1, 3, false, 0);
2268 2323
2269 if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) { 2324 if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
2270 if (sprom->revision && 2325 if (sprom->revision &&
@@ -2450,8 +2505,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2450 u16 tmp16; 2505 u16 tmp16;
2451 u32 tmp32; 2506 u32 tmp32;
2452 2507
2453 b43_phy_write(dev, 0x23f, 0x1f8); 2508 b43_phy_write(dev, B43_NPHY_FORCEFRONT0, 0x1f8);
2454 b43_phy_write(dev, 0x240, 0x1f8); 2509 b43_phy_write(dev, B43_NPHY_FORCEFRONT1, 0x1f8);
2455 2510
2456 tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); 2511 tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
2457 tmp32 &= 0xffffff; 2512 tmp32 &= 0xffffff;
@@ -2464,8 +2519,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2464 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD); 2519 b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
2465 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020); 2520 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
2466 2521
2467 b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C); 2522 b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_LOGAIN_B, 0x000C);
2468 b43_phy_write(dev, 0x2AE, 0x000C); 2523 b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_LOGAIN_B, 0x000C);
2469 2524
2470 /* TX to RX */ 2525 /* TX to RX */
2471 b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 2526 b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
@@ -2490,7 +2545,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2490 0x2 : 0x9C40; 2545 0x2 : 0x9C40;
2491 b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); 2546 b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
2492 2547
2493 b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); 2548 b43_phy_maskset(dev, B43_NPHY_SGILTRNOFFSET, 0xF0FF, 0x0700);
2494 2549
2495 if (!dev->phy.is_40mhz) { 2550 if (!dev->phy.is_40mhz) {
2496 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); 2551 b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
@@ -2542,18 +2597,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
2542 } 2597 }
2543 2598
2544 /* Dropped probably-always-true condition */ 2599 /* Dropped probably-always-true condition */
2545 b43_phy_write(dev, 0x224, 0x03eb); 2600 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
2546 b43_phy_write(dev, 0x225, 0x03eb); 2601 b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
2547 b43_phy_write(dev, 0x226, 0x0341); 2602 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
2548 b43_phy_write(dev, 0x227, 0x0341); 2603 b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
2549 b43_phy_write(dev, 0x228, 0x042b); 2604 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
2550 b43_phy_write(dev, 0x229, 0x042b); 2605 b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
2551 b43_phy_write(dev, 0x22a, 0x0381); 2606 b43_phy_write(dev, B43_NPHY_ED_CRS20LDEASSERTTHRESH0, 0x0381);
2552 b43_phy_write(dev, 0x22b, 0x0381); 2607 b43_phy_write(dev, B43_NPHY_ED_CRS20LDEASSERTTHRESH1, 0x0381);
2553 b43_phy_write(dev, 0x22c, 0x042b); 2608 b43_phy_write(dev, B43_NPHY_ED_CRS20UASSERTTHRESH0, 0x042b);
2554 b43_phy_write(dev, 0x22d, 0x042b); 2609 b43_phy_write(dev, B43_NPHY_ED_CRS20UASSERTTHRESH1, 0x042b);
2555 b43_phy_write(dev, 0x22e, 0x0381); 2610 b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH0, 0x0381);
2556 b43_phy_write(dev, 0x22f, 0x0381); 2611 b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH1, 0x0381);
2557 2612
2558 if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK) 2613 if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
2559 ; /* TODO: 0x0080000000000000 HF */ 2614 ; /* TODO: 0x0080000000000000 HF */
@@ -2572,7 +2627,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
2572 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 2627 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
2573 2628
2574 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD || 2629 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
2575 dev->dev->board_type == 0x8B) { 2630 dev->dev->board_type == BCMA_BOARD_TYPE_BCM943224M93) {
2576 delays1[0] = 0x1; 2631 delays1[0] = 0x1;
2577 delays1[5] = 0x14; 2632 delays1[5] = 0x14;
2578 } 2633 }
@@ -2789,10 +2844,6 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
2789 * Tx and Rx 2844 * Tx and Rx
2790 **************************************************/ 2845 **************************************************/
2791 2846
2792void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
2793{//TODO
2794}
2795
2796static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) 2847static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
2797{//TODO 2848{//TODO
2798} 2849}
@@ -3124,21 +3175,21 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
3124 b43_nphy_ipa_internal_tssi_setup(dev); 3175 b43_nphy_ipa_internal_tssi_setup(dev);
3125 3176
3126 if (phy->rev >= 7) 3177 if (phy->rev >= 7)
3127 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0); 3178 b43_nphy_rf_ctl_override_rev7(dev, 0x2000, 0, 3, false, 0);
3128 else if (phy->rev >= 3) 3179 else if (phy->rev >= 3)
3129 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); 3180 b43_nphy_rf_ctl_override(dev, 0x2000, 0, 3, false);
3130 3181
3131 b43_nphy_stop_playback(dev); 3182 b43_nphy_stop_playback(dev);
3132 b43_nphy_tx_tone(dev, 0xFA0, 0, false, false); 3183 b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
3133 udelay(20); 3184 udelay(20);
3134 tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1); 3185 tmp = b43_nphy_poll_rssi(dev, N_RSSI_TSSI_2G, rssi, 1);
3135 b43_nphy_stop_playback(dev); 3186 b43_nphy_stop_playback(dev);
3136 b43_nphy_rssi_select(dev, 0, 0); 3187 b43_nphy_rssi_select(dev, 0, N_RSSI_W1);
3137 3188
3138 if (phy->rev >= 7) 3189 if (phy->rev >= 7)
3139 b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0); 3190 b43_nphy_rf_ctl_override_rev7(dev, 0x2000, 0, 3, true, 0);
3140 else if (phy->rev >= 3) 3191 else if (phy->rev >= 3)
3141 b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); 3192 b43_nphy_rf_ctl_override(dev, 0x2000, 0, 3, true);
3142 3193
3143 if (phy->rev >= 3) { 3194 if (phy->rev >= 3) {
3144 nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF; 3195 nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
@@ -3577,8 +3628,8 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
3577 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); 3628 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
3578 } 3629 }
3579 3630
3580 b43_nphy_rf_control_intc_override(dev, 2, 0, 3); 3631 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_PA, 0, 3);
3581 b43_nphy_rf_control_override(dev, 8, 0, 3, false); 3632 b43_nphy_rf_ctl_override(dev, 8, 0, 3, false);
3582 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 3633 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
3583 3634
3584 if (core == 0) { 3635 if (core == 0) {
@@ -3588,8 +3639,10 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
3588 rxval = 4; 3639 rxval = 4;
3589 txval = 2; 3640 txval = 2;
3590 } 3641 }
3591 b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); 3642 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, rxval,
3592 b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); 3643 core + 1);
3644 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, txval,
3645 2 - core);
3593} 3646}
3594#endif 3647#endif
3595 3648
@@ -3851,9 +3904,13 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
3851 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; 3904 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
3852 } 3905 }
3853 3906
3854 /* TODO use some definitions */ 3907 if (dev->phy.rev >= 7) {
3855 b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]); 3908 } else {
3856 b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]); 3909 b43_radio_maskset(dev, B2056_RX0 | B2056_RX_RSSI_MISC, 0xE3,
3910 rssical_radio_regs[0]);
3911 b43_radio_maskset(dev, B2056_RX1 | B2056_RX_RSSI_MISC, 0xE3,
3912 rssical_radio_regs[1]);
3913 }
3857 3914
3858 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]); 3915 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
3859 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]); 3916 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
@@ -3884,75 +3941,75 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
3884 tmp = (i == 0) ? 0x2000 : 0x3000; 3941 tmp = (i == 0) ? 0x2000 : 0x3000;
3885 offset = i * 11; 3942 offset = i * 11;
3886 3943
3887 save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL); 3944 save[offset + 0] = b43_radio_read(dev, B2055_CAL_RVARCTL);
3888 save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL); 3945 save[offset + 1] = b43_radio_read(dev, B2055_CAL_LPOCTL);
3889 save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS); 3946 save[offset + 2] = b43_radio_read(dev, B2055_CAL_TS);
3890 save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS); 3947 save[offset + 3] = b43_radio_read(dev, B2055_CAL_RCCALRTS);
3891 save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS); 3948 save[offset + 4] = b43_radio_read(dev, B2055_CAL_RCALRTS);
3892 save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV); 3949 save[offset + 5] = b43_radio_read(dev, B2055_PADDRV);
3893 save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1); 3950 save[offset + 6] = b43_radio_read(dev, B2055_XOCTL1);
3894 save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2); 3951 save[offset + 7] = b43_radio_read(dev, B2055_XOCTL2);
3895 save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL); 3952 save[offset + 8] = b43_radio_read(dev, B2055_XOREGUL);
3896 save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC); 3953 save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
3897 save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1); 3954 save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
3898 3955
3899 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 3956 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
3900 b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A); 3957 b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
3901 b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); 3958 b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
3902 b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); 3959 b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
3903 b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); 3960 b43_radio_write(dev, tmp | B2055_CAL_RCCALRTS, 0);
3904 b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); 3961 b43_radio_write(dev, tmp | B2055_CAL_RCALRTS, 0);
3905 if (nphy->ipa5g_on) { 3962 if (nphy->ipa5g_on) {
3906 b43_radio_write16(dev, tmp | B2055_PADDRV, 4); 3963 b43_radio_write(dev, tmp | B2055_PADDRV, 4);
3907 b43_radio_write16(dev, tmp | B2055_XOCTL1, 1); 3964 b43_radio_write(dev, tmp | B2055_XOCTL1, 1);
3908 } else { 3965 } else {
3909 b43_radio_write16(dev, tmp | B2055_PADDRV, 0); 3966 b43_radio_write(dev, tmp | B2055_PADDRV, 0);
3910 b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F); 3967 b43_radio_write(dev, tmp | B2055_XOCTL1, 0x2F);
3911 } 3968 }
3912 b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); 3969 b43_radio_write(dev, tmp | B2055_XOCTL2, 0);
3913 } else { 3970 } else {
3914 b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06); 3971 b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x06);
3915 b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); 3972 b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
3916 b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); 3973 b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
3917 b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); 3974 b43_radio_write(dev, tmp | B2055_CAL_RCCALRTS, 0);
3918 b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); 3975 b43_radio_write(dev, tmp | B2055_CAL_RCALRTS, 0);
3919 b43_radio_write16(dev, tmp | B2055_XOCTL1, 0); 3976 b43_radio_write(dev, tmp | B2055_XOCTL1, 0);
3920 if (nphy->ipa2g_on) { 3977 if (nphy->ipa2g_on) {
3921 b43_radio_write16(dev, tmp | B2055_PADDRV, 6); 3978 b43_radio_write(dev, tmp | B2055_PADDRV, 6);
3922 b43_radio_write16(dev, tmp | B2055_XOCTL2, 3979 b43_radio_write(dev, tmp | B2055_XOCTL2,
3923 (dev->phy.rev < 5) ? 0x11 : 0x01); 3980 (dev->phy.rev < 5) ? 0x11 : 0x01);
3924 } else { 3981 } else {
3925 b43_radio_write16(dev, tmp | B2055_PADDRV, 0); 3982 b43_radio_write(dev, tmp | B2055_PADDRV, 0);
3926 b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); 3983 b43_radio_write(dev, tmp | B2055_XOCTL2, 0);
3927 } 3984 }
3928 } 3985 }
3929 b43_radio_write16(dev, tmp | B2055_XOREGUL, 0); 3986 b43_radio_write(dev, tmp | B2055_XOREGUL, 0);
3930 b43_radio_write16(dev, tmp | B2055_XOMISC, 0); 3987 b43_radio_write(dev, tmp | B2055_XOMISC, 0);
3931 b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0); 3988 b43_radio_write(dev, tmp | B2055_PLL_LFC1, 0);
3932 } 3989 }
3933 } else { 3990 } else {
3934 save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1); 3991 save[0] = b43_radio_read(dev, B2055_C1_TX_RF_IQCAL1);
3935 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29); 3992 b43_radio_write(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
3936 3993
3937 save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2); 3994 save[1] = b43_radio_read(dev, B2055_C1_TX_RF_IQCAL2);
3938 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54); 3995 b43_radio_write(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
3939 3996
3940 save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1); 3997 save[2] = b43_radio_read(dev, B2055_C2_TX_RF_IQCAL1);
3941 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29); 3998 b43_radio_write(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
3942 3999
3943 save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2); 4000 save[3] = b43_radio_read(dev, B2055_C2_TX_RF_IQCAL2);
3944 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54); 4001 b43_radio_write(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
3945 4002
3946 save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX); 4003 save[3] = b43_radio_read(dev, B2055_C1_PWRDET_RXTX);
3947 save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX); 4004 save[4] = b43_radio_read(dev, B2055_C2_PWRDET_RXTX);
3948 4005
3949 if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) & 4006 if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
3950 B43_NPHY_BANDCTL_5GHZ)) { 4007 B43_NPHY_BANDCTL_5GHZ)) {
3951 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04); 4008 b43_radio_write(dev, B2055_C1_PWRDET_RXTX, 0x04);
3952 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04); 4009 b43_radio_write(dev, B2055_C2_PWRDET_RXTX, 0x04);
3953 } else { 4010 } else {
3954 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20); 4011 b43_radio_write(dev, B2055_C1_PWRDET_RXTX, 0x20);
3955 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20); 4012 b43_radio_write(dev, B2055_C2_PWRDET_RXTX, 0x20);
3956 } 4013 }
3957 4014
3958 if (dev->phy.rev < 2) { 4015 if (dev->phy.rev < 2) {
@@ -4148,9 +4205,9 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
4148 regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); 4205 regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
4149 regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); 4206 regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
4150 4207
4151 b43_nphy_rf_control_intc_override(dev, 2, 1, 3); 4208 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_PA, 1, 3);
4152 b43_nphy_rf_control_intc_override(dev, 1, 2, 1); 4209 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 2, 1);
4153 b43_nphy_rf_control_intc_override(dev, 1, 8, 2); 4210 b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 8, 2);
4154 4211
4155 regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); 4212 regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
4156 regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); 4213 regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
@@ -4683,7 +4740,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
4683 4740
4684 tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) | 4741 tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
4685 (cur_lna << 2)); 4742 (cur_lna << 2));
4686 b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3, 4743 b43_nphy_rf_ctl_override(dev, 0x400, tmp[0], 3,
4687 false); 4744 false);
4688 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 4745 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
4689 b43_nphy_stop_playback(dev); 4746 b43_nphy_stop_playback(dev);
@@ -4732,7 +4789,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
4732 break; 4789 break;
4733 } 4790 }
4734 4791
4735 b43_nphy_rf_control_override(dev, 0x400, 0, 3, true); 4792 b43_nphy_rf_ctl_override(dev, 0x400, 0, 3, true);
4736 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 4793 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
4737 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save); 4794 b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
4738 4795
@@ -4801,18 +4858,6 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
4801 * N-PHY init 4858 * N-PHY init
4802 **************************************************/ 4859 **************************************************/
4803 4860
4804/*
4805 * Upload the N-PHY tables.
4806 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
4807 */
4808static void b43_nphy_tables_init(struct b43_wldev *dev)
4809{
4810 if (dev->phy.rev < 3)
4811 b43_nphy_rev0_1_2_tables_init(dev);
4812 else
4813 b43_nphy_rev3plus_tables_init(dev);
4814}
4815
4816/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ 4861/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
4817static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) 4862static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
4818{ 4863{
@@ -4892,7 +4937,7 @@ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
4892} 4937}
4893 4938
4894/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */ 4939/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
4895int b43_phy_initn(struct b43_wldev *dev) 4940static int b43_phy_initn(struct b43_wldev *dev)
4896{ 4941{
4897 struct ssb_sprom *sprom = dev->dev->bus_sprom; 4942 struct ssb_sprom *sprom = dev->dev->bus_sprom;
4898 struct b43_phy *phy = &dev->phy; 4943 struct b43_phy *phy = &dev->phy;
@@ -4962,7 +5007,7 @@ int b43_phy_initn(struct b43_wldev *dev)
4962 5007
4963 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD || 5008 if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
4964 (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE && 5009 (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
4965 dev->dev->board_type == 0x8B)) 5010 dev->dev->board_type == BCMA_BOARD_TYPE_BCM943224M93))
4966 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0); 5011 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
4967 else 5012 else
4968 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8); 5013 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
@@ -5104,63 +5149,11 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev,
5104/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ 5149/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
5105static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) 5150static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
5106{ 5151{
5107 struct bcma_drv_cc __maybe_unused *cc;
5108 u32 __maybe_unused pmu_ctl;
5109
5110 switch (dev->dev->bus_type) { 5152 switch (dev->dev->bus_type) {
5111#ifdef CONFIG_B43_BCMA 5153#ifdef CONFIG_B43_BCMA
5112 case B43_BUS_BCMA: 5154 case B43_BUS_BCMA:
5113 cc = &dev->dev->bdev->bus->drv_cc; 5155 bcma_pmu_spuravoid_pllupdate(&dev->dev->bdev->bus->drv_cc,
5114 if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) { 5156 avoid);
5115 if (avoid) {
5116 bcma_chipco_pll_write(cc, 0x0, 0x11500010);
5117 bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
5118 bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
5119 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
5120 bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
5121 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
5122 } else {
5123 bcma_chipco_pll_write(cc, 0x0, 0x11100010);
5124 bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
5125 bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
5126 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
5127 bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
5128 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
5129 }
5130 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
5131 } else if (dev->dev->chip_id == 0x4716) {
5132 if (avoid) {
5133 bcma_chipco_pll_write(cc, 0x0, 0x11500060);
5134 bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
5135 bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
5136 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
5137 bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
5138 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
5139 } else {
5140 bcma_chipco_pll_write(cc, 0x0, 0x11100060);
5141 bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
5142 bcma_chipco_pll_write(cc, 0x2, 0x03000000);
5143 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
5144 bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
5145 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
5146 }
5147 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
5148 BCMA_CC_PMU_CTL_NOILPONW;
5149 } else if (dev->dev->chip_id == 0x4322 ||
5150 dev->dev->chip_id == 0x4340 ||
5151 dev->dev->chip_id == 0x4341) {
5152 bcma_chipco_pll_write(cc, 0x0, 0x11100070);
5153 bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
5154 bcma_chipco_pll_write(cc, 0x5, 0x88888854);
5155 if (avoid)
5156 bcma_chipco_pll_write(cc, 0x2, 0x05201828);
5157 else
5158 bcma_chipco_pll_write(cc, 0x2, 0x05001828);
5159 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
5160 } else {
5161 return;
5162 }
5163 bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
5164 break; 5157 break;
5165#endif 5158#endif
5166#ifdef CONFIG_B43_SSB 5159#ifdef CONFIG_B43_SSB
@@ -5531,8 +5524,9 @@ static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
5531static int b43_nphy_op_switch_channel(struct b43_wldev *dev, 5524static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
5532 unsigned int new_channel) 5525 unsigned int new_channel)
5533{ 5526{
5534 struct ieee80211_channel *channel = dev->wl->hw->conf.channel; 5527 struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan;
5535 enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type; 5528 enum nl80211_channel_type channel_type =
5529 cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
5536 5530
5537 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 5531 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
5538 if ((new_channel < 1) || (new_channel > 14)) 5532 if ((new_channel < 1) || (new_channel > 14))
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 092c0140c249..9a5b6bc27d24 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -54,10 +54,15 @@
54#define B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT 7 54#define B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT 7
55#define B43_NPHY_C1_INITGAIN_TRRX 0x1000 /* TR RX index */ 55#define B43_NPHY_C1_INITGAIN_TRRX 0x1000 /* TR RX index */
56#define B43_NPHY_C1_INITGAIN_TRTX 0x2000 /* TR TX index */ 56#define B43_NPHY_C1_INITGAIN_TRTX 0x2000 /* TR TX index */
57#define B43_NPHY_REV3_C1_INITGAIN_A B43_PHY_N(0x020)
57#define B43_NPHY_C1_CLIP1_HIGAIN B43_PHY_N(0x021) /* Core 1 clip1 high gain code */ 58#define B43_NPHY_C1_CLIP1_HIGAIN B43_PHY_N(0x021) /* Core 1 clip1 high gain code */
59#define B43_NPHY_REV3_C1_INITGAIN_B B43_PHY_N(0x021)
58#define B43_NPHY_C1_CLIP1_MEDGAIN B43_PHY_N(0x022) /* Core 1 clip1 medium gain code */ 60#define B43_NPHY_C1_CLIP1_MEDGAIN B43_PHY_N(0x022) /* Core 1 clip1 medium gain code */
61#define B43_NPHY_REV3_C1_CLIP_HIGAIN_A B43_PHY_N(0x022)
59#define B43_NPHY_C1_CLIP1_LOGAIN B43_PHY_N(0x023) /* Core 1 clip1 low gain code */ 62#define B43_NPHY_C1_CLIP1_LOGAIN B43_PHY_N(0x023) /* Core 1 clip1 low gain code */
63#define B43_NPHY_REV3_C1_CLIP_HIGAIN_B B43_PHY_N(0x023)
60#define B43_NPHY_C1_CLIP2_GAIN B43_PHY_N(0x024) /* Core 1 clip2 gain code */ 64#define B43_NPHY_C1_CLIP2_GAIN B43_PHY_N(0x024) /* Core 1 clip2 gain code */
65#define B43_NPHY_REV3_C1_CLIP_MEDGAIN_A B43_PHY_N(0x024)
61#define B43_NPHY_C1_FILTERGAIN B43_PHY_N(0x025) /* Core 1 filter gain */ 66#define B43_NPHY_C1_FILTERGAIN B43_PHY_N(0x025) /* Core 1 filter gain */
62#define B43_NPHY_C1_LPF_QHPF_BW B43_PHY_N(0x026) /* Core 1 LPF Q HP F bandwidth */ 67#define B43_NPHY_C1_LPF_QHPF_BW B43_PHY_N(0x026) /* Core 1 LPF Q HP F bandwidth */
63#define B43_NPHY_C1_CLIPWBTHRES B43_PHY_N(0x027) /* Core 1 clip wideband threshold */ 68#define B43_NPHY_C1_CLIPWBTHRES B43_PHY_N(0x027) /* Core 1 clip wideband threshold */
@@ -107,10 +112,15 @@
107#define B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT 7 112#define B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT 7
108#define B43_NPHY_C2_INITGAIN_TRRX 0x1000 /* TR RX index */ 113#define B43_NPHY_C2_INITGAIN_TRRX 0x1000 /* TR RX index */
109#define B43_NPHY_C2_INITGAIN_TRTX 0x2000 /* TR TX index */ 114#define B43_NPHY_C2_INITGAIN_TRTX 0x2000 /* TR TX index */
115#define B43_NPHY_REV3_C1_CLIP_MEDGAIN_B B43_PHY_N(0x036)
110#define B43_NPHY_C2_CLIP1_HIGAIN B43_PHY_N(0x037) /* Core 2 clip1 high gain code */ 116#define B43_NPHY_C2_CLIP1_HIGAIN B43_PHY_N(0x037) /* Core 2 clip1 high gain code */
117#define B43_NPHY_REV3_C1_CLIP_LOGAIN_A B43_PHY_N(0x037)
111#define B43_NPHY_C2_CLIP1_MEDGAIN B43_PHY_N(0x038) /* Core 2 clip1 medium gain code */ 118#define B43_NPHY_C2_CLIP1_MEDGAIN B43_PHY_N(0x038) /* Core 2 clip1 medium gain code */
119#define B43_NPHY_REV3_C1_CLIP_LOGAIN_B B43_PHY_N(0x038)
112#define B43_NPHY_C2_CLIP1_LOGAIN B43_PHY_N(0x039) /* Core 2 clip1 low gain code */ 120#define B43_NPHY_C2_CLIP1_LOGAIN B43_PHY_N(0x039) /* Core 2 clip1 low gain code */
121#define B43_NPHY_REV3_C1_CLIP2_GAIN_A B43_PHY_N(0x039)
113#define B43_NPHY_C2_CLIP2_GAIN B43_PHY_N(0x03A) /* Core 2 clip2 gain code */ 122#define B43_NPHY_C2_CLIP2_GAIN B43_PHY_N(0x03A) /* Core 2 clip2 gain code */
123#define B43_NPHY_REV3_C1_CLIP2_GAIN_B B43_PHY_N(0x03A)
114#define B43_NPHY_C2_FILTERGAIN B43_PHY_N(0x03B) /* Core 2 filter gain */ 124#define B43_NPHY_C2_FILTERGAIN B43_PHY_N(0x03B) /* Core 2 filter gain */
115#define B43_NPHY_C2_LPF_QHPF_BW B43_PHY_N(0x03C) /* Core 2 LPF Q HP F bandwidth */ 125#define B43_NPHY_C2_LPF_QHPF_BW B43_PHY_N(0x03C) /* Core 2 LPF Q HP F bandwidth */
116#define B43_NPHY_C2_CLIPWBTHRES B43_PHY_N(0x03D) /* Core 2 clip wideband threshold */ 126#define B43_NPHY_C2_CLIPWBTHRES B43_PHY_N(0x03D) /* Core 2 clip wideband threshold */
@@ -706,10 +716,146 @@
706#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power control init */ 716#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power control init */
707#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */ 717#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
708#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0 718#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
719#define B43_NPHY_ED_CRSEN B43_PHY_N(0x223)
720#define B43_NPHY_ED_CRS40ASSERTTHRESH0 B43_PHY_N(0x224)
721#define B43_NPHY_ED_CRS40ASSERTTHRESH1 B43_PHY_N(0x225)
722#define B43_NPHY_ED_CRS40DEASSERTTHRESH0 B43_PHY_N(0x226)
723#define B43_NPHY_ED_CRS40DEASSERTTHRESH1 B43_PHY_N(0x227)
724#define B43_NPHY_ED_CRS20LASSERTTHRESH0 B43_PHY_N(0x228)
725#define B43_NPHY_ED_CRS20LASSERTTHRESH1 B43_PHY_N(0x229)
726#define B43_NPHY_ED_CRS20LDEASSERTTHRESH0 B43_PHY_N(0x22A)
727#define B43_NPHY_ED_CRS20LDEASSERTTHRESH1 B43_PHY_N(0x22B)
728#define B43_NPHY_ED_CRS20UASSERTTHRESH0 B43_PHY_N(0x22C)
729#define B43_NPHY_ED_CRS20UASSERTTHRESH1 B43_PHY_N(0x22D)
730#define B43_NPHY_ED_CRS20UDEASSERTTHRESH0 B43_PHY_N(0x22E)
731#define B43_NPHY_ED_CRS20UDEASSERTTHRESH1 B43_PHY_N(0x22F)
732#define B43_NPHY_ED_CRS B43_PHY_N(0x230)
733#define B43_NPHY_TIMEOUTEN B43_PHY_N(0x231)
734#define B43_NPHY_OFDMPAYDECODETIMEOUTLEN B43_PHY_N(0x232)
735#define B43_NPHY_CCKPAYDECODETIMEOUTLEN B43_PHY_N(0x233)
736#define B43_NPHY_NONPAYDECODETIMEOUTLEN B43_PHY_N(0x234)
737#define B43_NPHY_TIMEOUTSTATUS B43_PHY_N(0x235)
738#define B43_NPHY_RFCTRLCORE0GPIO0 B43_PHY_N(0x236)
739#define B43_NPHY_RFCTRLCORE0GPIO1 B43_PHY_N(0x237)
740#define B43_NPHY_RFCTRLCORE0GPIO2 B43_PHY_N(0x238)
741#define B43_NPHY_RFCTRLCORE0GPIO3 B43_PHY_N(0x239)
742#define B43_NPHY_RFCTRLCORE1GPIO0 B43_PHY_N(0x23A)
743#define B43_NPHY_RFCTRLCORE1GPIO1 B43_PHY_N(0x23B)
744#define B43_NPHY_RFCTRLCORE1GPIO2 B43_PHY_N(0x23C)
745#define B43_NPHY_RFCTRLCORE1GPIO3 B43_PHY_N(0x23D)
746#define B43_NPHY_BPHYTESTCONTROL B43_PHY_N(0x23E)
747/* REV3+ */
748#define B43_NPHY_FORCEFRONT0 B43_PHY_N(0x23F)
749#define B43_NPHY_FORCEFRONT1 B43_PHY_N(0x240)
750#define B43_NPHY_NORMVARHYSTTH B43_PHY_N(0x241)
751#define B43_NPHY_TXCCKERROR B43_PHY_N(0x242)
752#define B43_NPHY_AFESEQINITDACGAIN B43_PHY_N(0x243)
753#define B43_NPHY_TXANTSWLUT B43_PHY_N(0x244)
754#define B43_NPHY_CORECONFIG B43_PHY_N(0x245)
755#define B43_NPHY_ANTENNADIVDWELLTIME B43_PHY_N(0x246)
756#define B43_NPHY_ANTENNACCKDIVDWELLTIME B43_PHY_N(0x247)
757#define B43_NPHY_ANTENNADIVBACKOFFGAIN B43_PHY_N(0x248)
758#define B43_NPHY_ANTENNADIVMINGAIN B43_PHY_N(0x249)
759#define B43_NPHY_BRDSEL_NORMVARHYSTTH B43_PHY_N(0x24A)
760#define B43_NPHY_RXANTSWITCHCTRL B43_PHY_N(0x24B)
761#define B43_NPHY_ENERGYDROPTIMEOUTLEN2 B43_PHY_N(0x24C)
762#define B43_NPHY_ML_LOG_TXEVM0 B43_PHY_N(0x250)
763#define B43_NPHY_ML_LOG_TXEVM1 B43_PHY_N(0x251)
764#define B43_NPHY_ML_LOG_TXEVM2 B43_PHY_N(0x252)
765#define B43_NPHY_ML_LOG_TXEVM3 B43_PHY_N(0x253)
766#define B43_NPHY_ML_LOG_TXEVM4 B43_PHY_N(0x254)
767#define B43_NPHY_ML_LOG_TXEVM5 B43_PHY_N(0x255)
768#define B43_NPHY_ML_LOG_TXEVM6 B43_PHY_N(0x256)
769#define B43_NPHY_ML_LOG_TXEVM7 B43_PHY_N(0x257)
770#define B43_NPHY_ML_SCALE_TWEAK B43_PHY_N(0x258)
771#define B43_NPHY_MLUA B43_PHY_N(0x259)
772#define B43_NPHY_ZFUA B43_PHY_N(0x25A)
773#define B43_NPHY_CHANUPSYM01 B43_PHY_N(0x25B)
774#define B43_NPHY_CHANUPSYM2 B43_PHY_N(0x25C)
775#define B43_NPHY_RXSTRNFILT20NUM00 B43_PHY_N(0x25D)
776#define B43_NPHY_RXSTRNFILT20NUM01 B43_PHY_N(0x25E)
777#define B43_NPHY_RXSTRNFILT20NUM02 B43_PHY_N(0x25F)
778#define B43_NPHY_RXSTRNFILT20DEN00 B43_PHY_N(0x260)
779#define B43_NPHY_RXSTRNFILT20DEN01 B43_PHY_N(0x261)
780#define B43_NPHY_RXSTRNFILT20NUM10 B43_PHY_N(0x262)
781#define B43_NPHY_RXSTRNFILT20NUM11 B43_PHY_N(0x263)
782#define B43_NPHY_RXSTRNFILT20NUM12 B43_PHY_N(0x264)
783#define B43_NPHY_RXSTRNFILT20DEN10 B43_PHY_N(0x265)
784#define B43_NPHY_RXSTRNFILT20DEN11 B43_PHY_N(0x266)
785#define B43_NPHY_RXSTRNFILT40NUM00 B43_PHY_N(0x267)
786#define B43_NPHY_RXSTRNFILT40NUM01 B43_PHY_N(0x268)
787#define B43_NPHY_RXSTRNFILT40NUM02 B43_PHY_N(0x269)
788#define B43_NPHY_RXSTRNFILT40DEN00 B43_PHY_N(0x26A)
789#define B43_NPHY_RXSTRNFILT40DEN01 B43_PHY_N(0x26B)
790#define B43_NPHY_RXSTRNFILT40NUM10 B43_PHY_N(0x26C)
791#define B43_NPHY_RXSTRNFILT40NUM11 B43_PHY_N(0x26D)
792#define B43_NPHY_RXSTRNFILT40NUM12 B43_PHY_N(0x26E)
793#define B43_NPHY_RXSTRNFILT40DEN10 B43_PHY_N(0x26F)
794#define B43_NPHY_RXSTRNFILT40DEN11 B43_PHY_N(0x270)
795#define B43_NPHY_CRSHIGHPOWTHRESHOLD1 B43_PHY_N(0x271)
796#define B43_NPHY_CRSHIGHPOWTHRESHOLD2 B43_PHY_N(0x272)
797#define B43_NPHY_CRSHIGHLOWPOWTHRESHOLD B43_PHY_N(0x273)
798#define B43_NPHY_CRSHIGHPOWTHRESHOLD1L B43_PHY_N(0x274)
799#define B43_NPHY_CRSHIGHPOWTHRESHOLD2L B43_PHY_N(0x275)
800#define B43_NPHY_CRSHIGHLOWPOWTHRESHOLDL B43_PHY_N(0x276)
801#define B43_NPHY_CRSHIGHPOWTHRESHOLD1U B43_PHY_N(0x277)
802#define B43_NPHY_CRSHIGHPOWTHRESHOLD2U B43_PHY_N(0x278)
803#define B43_NPHY_CRSHIGHLOWPOWTHRESHOLDU B43_PHY_N(0x279)
804#define B43_NPHY_CRSACIDETECTTHRESH B43_PHY_N(0x27A)
805#define B43_NPHY_CRSACIDETECTTHRESHL B43_PHY_N(0x27B)
806#define B43_NPHY_CRSACIDETECTTHRESHU B43_PHY_N(0x27C)
807#define B43_NPHY_CRSMINPOWER0 B43_PHY_N(0x27D)
808#define B43_NPHY_CRSMINPOWER1 B43_PHY_N(0x27E)
809#define B43_NPHY_CRSMINPOWER2 B43_PHY_N(0x27F)
810#define B43_NPHY_CRSMINPOWERL0 B43_PHY_N(0x280)
811#define B43_NPHY_CRSMINPOWERL1 B43_PHY_N(0x281)
812#define B43_NPHY_CRSMINPOWERL2 B43_PHY_N(0x282)
813#define B43_NPHY_CRSMINPOWERU0 B43_PHY_N(0x283)
814#define B43_NPHY_CRSMINPOWERU1 B43_PHY_N(0x284)
815#define B43_NPHY_CRSMINPOWERU2 B43_PHY_N(0x285)
816#define B43_NPHY_STRPARAM B43_PHY_N(0x286)
817#define B43_NPHY_STRPARAML B43_PHY_N(0x287)
818#define B43_NPHY_STRPARAMU B43_PHY_N(0x288)
819#define B43_NPHY_BPHYCRSMINPOWER0 B43_PHY_N(0x289)
820#define B43_NPHY_BPHYCRSMINPOWER1 B43_PHY_N(0x28A)
821#define B43_NPHY_BPHYCRSMINPOWER2 B43_PHY_N(0x28B)
822#define B43_NPHY_BPHYFILTDEN0COEF B43_PHY_N(0x28C)
823#define B43_NPHY_BPHYFILTDEN1COEF B43_PHY_N(0x28D)
824#define B43_NPHY_BPHYFILTDEN2COEF B43_PHY_N(0x28E)
825#define B43_NPHY_BPHYFILTNUM0COEF B43_PHY_N(0x28F)
826#define B43_NPHY_BPHYFILTNUM1COEF B43_PHY_N(0x290)
827#define B43_NPHY_BPHYFILTNUM2COEF B43_PHY_N(0x291)
828#define B43_NPHY_BPHYFILTNUM01COEF2 B43_PHY_N(0x292)
829#define B43_NPHY_BPHYFILTBYPASS B43_PHY_N(0x293)
830#define B43_NPHY_SGILTRNOFFSET B43_PHY_N(0x294)
831#define B43_NPHY_RADAR_T2_MIN B43_PHY_N(0x295)
832#define B43_NPHY_TXPWRCTRLDAMPING B43_PHY_N(0x296)
709#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */ 833#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
710#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */ 834#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
835#define B43_NPHY_EPS_OVERRIDEI_0 B43_PHY_N(0x299)
836#define B43_NPHY_EPS_OVERRIDEQ_0 B43_PHY_N(0x29A)
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */ 837#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */ 838#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
839#define B43_NPHY_EPS_OVERRIDEI_1 B43_PHY_N(0x29D)
840#define B43_NPHY_EPS_OVERRIDEQ_1 B43_PHY_N(0x29E)
841#define B43_NPHY_PAPD_CAL_ADDRESS B43_PHY_N(0x29F)
842#define B43_NPHY_PAPD_CAL_YREFEPSILON B43_PHY_N(0x2A0)
843#define B43_NPHY_PAPD_CAL_SETTLE B43_PHY_N(0x2A1)
844#define B43_NPHY_PAPD_CAL_CORRELATE B43_PHY_N(0x2A2)
845#define B43_NPHY_PAPD_CAL_SHIFTS0 B43_PHY_N(0x2A3)
846#define B43_NPHY_PAPD_CAL_SHIFTS1 B43_PHY_N(0x2A4)
847#define B43_NPHY_SAMPLE_START_ADDR B43_PHY_N(0x2A5)
848#define B43_NPHY_RADAR_ADC_TO_DBM B43_PHY_N(0x2A6)
849#define B43_NPHY_REV3_C2_INITGAIN_A B43_PHY_N(0x2A7)
850#define B43_NPHY_REV3_C2_INITGAIN_B B43_PHY_N(0x2A8)
851#define B43_NPHY_REV3_C2_CLIP_HIGAIN_A B43_PHY_N(0x2A9)
852#define B43_NPHY_REV3_C2_CLIP_HIGAIN_B B43_PHY_N(0x2AA)
853#define B43_NPHY_REV3_C2_CLIP_MEDGAIN_A B43_PHY_N(0x2AB)
854#define B43_NPHY_REV3_C2_CLIP_MEDGAIN_B B43_PHY_N(0x2AC)
855#define B43_NPHY_REV3_C2_CLIP_LOGAIN_A B43_PHY_N(0x2AD)
856#define B43_NPHY_REV3_C2_CLIP_LOGAIN_B B43_PHY_N(0x2AE)
857#define B43_NPHY_REV3_C2_CLIP2_GAIN_A B43_PHY_N(0x2AF)
858#define B43_NPHY_REV3_C2_CLIP2_GAIN_B B43_PHY_N(0x2B0)
713 859
714#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */ 860#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
715#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A) 861#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index ce037fb6789a..b4fd9345d673 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -2980,7 +2980,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
2980 .rx = prefix##_rx, \ 2980 .rx = prefix##_rx, \
2981 .rx_length = ARRAY_SIZE(prefix##_rx) 2981 .rx_length = ARRAY_SIZE(prefix##_rx)
2982 2982
2983struct b2056_inittabs_pts b2056_inittabs[] = { 2983static const struct b2056_inittabs_pts b2056_inittabs[] = {
2984 [3] = { INITTABSPTS(b2056_inittab_rev3) }, 2984 [3] = { INITTABSPTS(b2056_inittab_rev3) },
2985 [4] = { INITTABSPTS(b2056_inittab_rev4) }, 2985 [4] = { INITTABSPTS(b2056_inittab_rev4) },
2986 [5] = { INITTABSPTS(b2056_inittab_rev5) }, 2986 [5] = { INITTABSPTS(b2056_inittab_rev5) },
@@ -9035,7 +9035,7 @@ static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
9035void b2056_upload_inittabs(struct b43_wldev *dev, 9035void b2056_upload_inittabs(struct b43_wldev *dev,
9036 bool ghz5, bool ignore_uploadflag) 9036 bool ghz5, bool ignore_uploadflag)
9037{ 9037{
9038 struct b2056_inittabs_pts *pts; 9038 const struct b2056_inittabs_pts *pts;
9039 9039
9040 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) { 9040 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
9041 B43_WARN_ON(1); 9041 B43_WARN_ON(1);
@@ -9057,7 +9057,7 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
9057 9057
9058void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5) 9058void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
9059{ 9059{
9060 struct b2056_inittabs_pts *pts; 9060 const struct b2056_inittabs_pts *pts;
9061 const struct b2056_inittab_entry *e; 9061 const struct b2056_inittab_entry *e;
9062 9062
9063 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) { 9063 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
diff --git a/drivers/net/wireless/b43/radio_2059.c b/drivers/net/wireless/b43/radio_2059.c
index d4ce8a12ff9a..38e31d857e3e 100644
--- a/drivers/net/wireless/b43/radio_2059.c
+++ b/drivers/net/wireless/b43/radio_2059.c
@@ -27,7 +27,7 @@
27 27
28#define RADIOREGS(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \ 28#define RADIOREGS(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
29 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \ 29 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
30 r20, r21, r22, r23, r24, r25, r26, r27, r28) \ 30 r20) \
31 .radio_syn16 = r00, \ 31 .radio_syn16 = r00, \
32 .radio_syn17 = r01, \ 32 .radio_syn17 = r01, \
33 .radio_syn22 = r02, \ 33 .radio_syn22 = r02, \
@@ -41,22 +41,14 @@
41 .radio_syn41 = r10, \ 41 .radio_syn41 = r10, \
42 .radio_syn43 = r11, \ 42 .radio_syn43 = r11, \
43 .radio_syn47 = r12, \ 43 .radio_syn47 = r12, \
44 .radio_syn4a = r13, \ 44 .radio_rxtx4a = r13, \
45 .radio_syn58 = r14, \ 45 .radio_rxtx58 = r14, \
46 .radio_syn5a = r15, \ 46 .radio_rxtx5a = r15, \
47 .radio_syn6a = r16, \ 47 .radio_rxtx6a = r16, \
48 .radio_syn6d = r17, \ 48 .radio_rxtx6d = r17, \
49 .radio_syn6e = r18, \ 49 .radio_rxtx6e = r18, \
50 .radio_syn92 = r19, \ 50 .radio_rxtx92 = r19, \
51 .radio_syn98 = r20, \ 51 .radio_rxtx98 = r20
52 .radio_rxtx4a = r21, \
53 .radio_rxtx58 = r22, \
54 .radio_rxtx5a = r23, \
55 .radio_rxtx6a = r24, \
56 .radio_rxtx6d = r25, \
57 .radio_rxtx6e = r26, \
58 .radio_rxtx92 = r27, \
59 .radio_rxtx98 = r28
60 52
61#define PHYREGS(r0, r1, r2, r3, r4, r5) \ 53#define PHYREGS(r0, r1, r2, r3, r4, r5) \
62 .phy_regs.bw1 = r0, \ 54 .phy_regs.bw1 = r0, \
@@ -70,91 +62,78 @@ static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radi
70 { .freq = 2412, 62 { .freq = 2412,
71 RADIOREGS(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, 63 RADIOREGS(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c,
72 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03, 64 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03,
73 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
74 0x00, 0x00, 0x00, 0xf0, 0x00), 65 0x00, 0x00, 0x00, 0xf0, 0x00),
75 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443), 66 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
76 }, 67 },
77 { .freq = 2417, 68 { .freq = 2417,
78 RADIOREGS(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71, 69 RADIOREGS(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71,
79 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03, 70 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03,
80 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
81 0x00, 0x00, 0x00, 0xf0, 0x00), 71 0x00, 0x00, 0x00, 0xf0, 0x00),
82 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441), 72 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
83 }, 73 },
84 { .freq = 2422, 74 { .freq = 2422,
85 RADIOREGS(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76, 75 RADIOREGS(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76,
86 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03, 76 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03,
87 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
88 0x00, 0x00, 0x00, 0xf0, 0x00), 77 0x00, 0x00, 0x00, 0xf0, 0x00),
89 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f), 78 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
90 }, 79 },
91 { .freq = 2427, 80 { .freq = 2427,
92 RADIOREGS(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b, 81 RADIOREGS(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b,
93 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03, 82 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03,
94 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
95 0x00, 0x00, 0x00, 0xf0, 0x00), 83 0x00, 0x00, 0x00, 0xf0, 0x00),
96 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d), 84 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
97 }, 85 },
98 { .freq = 2432, 86 { .freq = 2432,
99 RADIOREGS(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80, 87 RADIOREGS(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80,
100 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03, 88 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03,
101 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
102 0x00, 0x00, 0x00, 0xf0, 0x00), 89 0x00, 0x00, 0x00, 0xf0, 0x00),
103 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a), 90 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
104 }, 91 },
105 { .freq = 2437, 92 { .freq = 2437,
106 RADIOREGS(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85, 93 RADIOREGS(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85,
107 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03, 94 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03,
108 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
109 0x00, 0x00, 0x00, 0xf0, 0x00), 95 0x00, 0x00, 0x00, 0xf0, 0x00),
110 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438), 96 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
111 }, 97 },
112 { .freq = 2442, 98 { .freq = 2442,
113 RADIOREGS(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a, 99 RADIOREGS(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a,
114 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, 100 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
115 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
116 0x00, 0x00, 0x00, 0xf0, 0x00), 101 0x00, 0x00, 0x00, 0xf0, 0x00),
117 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436), 102 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
118 }, 103 },
119 { .freq = 2447, 104 { .freq = 2447,
120 RADIOREGS(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f, 105 RADIOREGS(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f,
121 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, 106 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
122 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
123 0x00, 0x00, 0x00, 0xf0, 0x00), 107 0x00, 0x00, 0x00, 0xf0, 0x00),
124 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434), 108 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
125 }, 109 },
126 { .freq = 2452, 110 { .freq = 2452,
127 RADIOREGS(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94, 111 RADIOREGS(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94,
128 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, 112 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
129 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
130 0x00, 0x00, 0x00, 0xf0, 0x00), 113 0x00, 0x00, 0x00, 0xf0, 0x00),
131 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431), 114 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
132 }, 115 },
133 { .freq = 2457, 116 { .freq = 2457,
134 RADIOREGS(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99, 117 RADIOREGS(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99,
135 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03, 118 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03,
136 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
137 0x00, 0x00, 0x00, 0xf0, 0x00), 119 0x00, 0x00, 0x00, 0xf0, 0x00),
138 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f), 120 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
139 }, 121 },
140 { .freq = 2462, 122 { .freq = 2462,
141 RADIOREGS(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e, 123 RADIOREGS(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e,
142 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03, 124 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03,
143 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
144 0x00, 0x00, 0x00, 0xf0, 0x00), 125 0x00, 0x00, 0x00, 0xf0, 0x00),
145 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d), 126 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
146 }, 127 },
147 { .freq = 2467, 128 { .freq = 2467,
148 RADIOREGS(0x6c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa3, 129 RADIOREGS(0x6c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa3,
149 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03, 130 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03,
150 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
151 0x00, 0x00, 0x00, 0xf0, 0x00), 131 0x00, 0x00, 0x00, 0xf0, 0x00),
152 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b), 132 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
153 }, 133 },
154 { .freq = 2472, 134 { .freq = 2472,
155 RADIOREGS(0x70, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa8, 135 RADIOREGS(0x70, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa8,
156 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03, 136 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03,
157 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
158 0x00, 0x00, 0x00, 0xf0, 0x00), 137 0x00, 0x00, 0x00, 0xf0, 0x00),
159 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429), 138 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
160 }, 139 },
diff --git a/drivers/net/wireless/b43/radio_2059.h b/drivers/net/wireless/b43/radio_2059.h
index e4d69e55e9fe..40a82d7f510c 100644
--- a/drivers/net/wireless/b43/radio_2059.h
+++ b/drivers/net/wireless/b43/radio_2059.h
@@ -5,9 +5,9 @@
5 5
6#include "phy_ht.h" 6#include "phy_ht.h"
7 7
8#define R2059_SYN 0x000 8#define R2059_C1 0x000
9#define R2059_TXRX0 0x400 9#define R2059_C2 0x400
10#define R2059_RXRX1 0x800 10#define R2059_C3 0x800
11#define R2059_ALL 0xC00 11#define R2059_ALL 0xC00
12 12
13/* Values for various registers uploaded on channel switching */ 13/* Values for various registers uploaded on channel switching */
@@ -28,14 +28,6 @@ struct b43_phy_ht_channeltab_e_radio2059 {
28 u8 radio_syn41; 28 u8 radio_syn41;
29 u8 radio_syn43; 29 u8 radio_syn43;
30 u8 radio_syn47; 30 u8 radio_syn47;
31 u8 radio_syn4a;
32 u8 radio_syn58;
33 u8 radio_syn5a;
34 u8 radio_syn6a;
35 u8 radio_syn6d;
36 u8 radio_syn6e;
37 u8 radio_syn92;
38 u8 radio_syn98;
39 u8 radio_rxtx4a; 31 u8 radio_rxtx4a;
40 u8 radio_rxtx58; 32 u8 radio_rxtx58;
41 u8 radio_rxtx5a; 33 u8 radio_rxtx5a;
diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/b43/sdio.h
index fb633094403a..1e93926f388f 100644
--- a/drivers/net/wireless/b43/sdio.h
+++ b/drivers/net/wireless/b43/sdio.h
@@ -25,12 +25,12 @@ void b43_sdio_exit(void);
25#else /* CONFIG_B43_SDIO */ 25#else /* CONFIG_B43_SDIO */
26 26
27 27
28int b43_sdio_request_irq(struct b43_wldev *dev, 28static inline int b43_sdio_request_irq(struct b43_wldev *dev,
29 void (*handler)(struct b43_wldev *dev)) 29 void (*handler)(struct b43_wldev *dev))
30{ 30{
31 return -ENODEV; 31 return -ENODEV;
32} 32}
33void b43_sdio_free_irq(struct b43_wldev *dev) 33static inline void b43_sdio_free_irq(struct b43_wldev *dev)
34{ 34{
35} 35}
36static inline int b43_sdio_init(void) 36static inline int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index aaca60c6f575..94c755fdda14 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2174,7 +2174,7 @@ static const u16 b43_ntab_loftlt1_r3[] = {
2174/* volatile tables, PHY revision >= 3 */ 2174/* volatile tables, PHY revision >= 3 */
2175 2175
2176/* indexed by antswctl2g */ 2176/* indexed by antswctl2g */
2177static const u16 b43_ntab_antswctl2g_r3[4][32] = { 2177static const u16 b43_ntab_antswctl_r3[4][32] = {
2178 { 2178 {
2179 0x0082, 0x0082, 0x0211, 0x0222, 0x0328, 2179 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
2180 0x0000, 0x0000, 0x0000, 0x0144, 0x0000, 2180 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
@@ -2800,7 +2800,7 @@ static const struct nphy_rf_control_override_rev7
2800 { 0x0010, 0x344, 0x345, 0x0010, 4 }, 2800 { 0x0010, 0x344, 0x345, 0x0010, 4 },
2801}; 2801};
2802 2802
2803struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { 2803static struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
2804 { 10, 14, 19, 27 }, 2804 { 10, 14, 19, 27 },
2805 { -5, 6, 10, 15 }, 2805 { -5, 6, 10, 15 },
2806 { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, 2806 { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
@@ -2811,7 +2811,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
2811 0x18, 0x18, 0x18, 2811 0x18, 0x18, 0x18,
2812 0x01D0, 0x5, 2812 0x01D0, 0x5,
2813}; 2813};
2814struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { 2814static struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = {
2815 { /* 2GHz */ 2815 { /* 2GHz */
2816 { /* PHY rev 3 */ 2816 { /* PHY rev 3 */
2817 { 7, 11, 16, 23 }, 2817 { 7, 11, 16, 23 },
@@ -3095,9 +3095,55 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
3095} 3095}
3096 3096
3097#define ntab_upload(dev, offset, data) do { \ 3097#define ntab_upload(dev, offset, data) do { \
3098 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \ 3098 b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
3099 } while (0) 3099 } while (0)
3100void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) 3100
3101static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
3102{
3103 struct ssb_sprom *sprom = dev->dev->bus_sprom;
3104 u8 antswlut;
3105
3106 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
3107 antswlut = sprom->fem.ghz5.antswlut;
3108 else
3109 antswlut = sprom->fem.ghz2.antswlut;
3110
3111 /* Static tables */
3112 ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
3113 ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
3114 ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
3115 ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
3116 ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
3117 ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
3118 ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
3119 ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
3120 ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
3121 ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
3122 ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
3123 ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
3124 ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
3125 ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
3126 ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
3127 ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
3128 ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
3129 ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
3130 ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
3131 ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
3132 ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
3133 ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
3134 ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
3135 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
3136 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
3137
3138 /* Volatile tables */
3139 if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3))
3140 ntab_upload(dev, B43_NTAB_ANT_SW_CTL_R3,
3141 b43_ntab_antswctl_r3[antswlut]);
3142 else
3143 B43_WARN_ON(1);
3144}
3145
3146static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
3101{ 3147{
3102 /* Static tables */ 3148 /* Static tables */
3103 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); 3149 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
@@ -3130,48 +3176,13 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
3130 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); 3176 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
3131} 3177}
3132 3178
3133#define ntab_upload_r3(dev, offset, data) do { \ 3179/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
3134 b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \ 3180void b43_nphy_tables_init(struct b43_wldev *dev)
3135 } while (0)
3136void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
3137{ 3181{
3138 struct ssb_sprom *sprom = dev->dev->bus_sprom; 3182 if (dev->phy.rev >= 3)
3139 3183 b43_nphy_tables_init_rev3(dev);
3140 /* Static tables */
3141 ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
3142 ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
3143 ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
3144 ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
3145 ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
3146 ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
3147 ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
3148 ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
3149 ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
3150 ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
3151 ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
3152 ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
3153 ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
3154 ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
3155 ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
3156 ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
3157 b43_ntab_estimatepowerlt0_r3);
3158 ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
3159 b43_ntab_estimatepowerlt1_r3);
3160 ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
3161 ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
3162 ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
3163 ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
3164 ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
3165 ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
3166 ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
3167 ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
3168
3169 /* Volatile tables */
3170 if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
3171 ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
3172 b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
3173 else 3184 else
3174 B43_WARN_ON(1); 3185 b43_nphy_tables_init_rev0(dev);
3175} 3186}
3176 3187
3177/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ 3188/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index c600700ceedc..9ff33adcff89 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -115,22 +115,22 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
115#define B43_NTAB_NOISEVAR11_SIZE 256 115#define B43_NTAB_NOISEVAR11_SIZE 256
116#define B43_NTAB_C0_ESTPLT B43_NTAB8 (0x1A, 0x000) /* Estimate Power Lookup Table Core 0 */ 116#define B43_NTAB_C0_ESTPLT B43_NTAB8 (0x1A, 0x000) /* Estimate Power Lookup Table Core 0 */
117#define B43_NTAB_C0_ESTPLT_SIZE 64 117#define B43_NTAB_C0_ESTPLT_SIZE 64
118#define B43_NTAB_C1_ESTPLT B43_NTAB8 (0x1B, 0x000) /* Estimate Power Lookup Table Core 1 */
119#define B43_NTAB_C1_ESTPLT_SIZE 64
120#define B43_NTAB_C0_ADJPLT B43_NTAB8 (0x1A, 0x040) /* Adjust Power Lookup Table Core 0 */ 118#define B43_NTAB_C0_ADJPLT B43_NTAB8 (0x1A, 0x040) /* Adjust Power Lookup Table Core 0 */
121#define B43_NTAB_C0_ADJPLT_SIZE 128 119#define B43_NTAB_C0_ADJPLT_SIZE 128
122#define B43_NTAB_C1_ADJPLT B43_NTAB8 (0x1B, 0x040) /* Adjust Power Lookup Table Core 1 */
123#define B43_NTAB_C1_ADJPLT_SIZE 128
124#define B43_NTAB_C0_GAINCTL B43_NTAB32(0x1A, 0x0C0) /* Gain Control Lookup Table Core 0 */ 120#define B43_NTAB_C0_GAINCTL B43_NTAB32(0x1A, 0x0C0) /* Gain Control Lookup Table Core 0 */
125#define B43_NTAB_C0_GAINCTL_SIZE 128 121#define B43_NTAB_C0_GAINCTL_SIZE 128
126#define B43_NTAB_C1_GAINCTL B43_NTAB32(0x1B, 0x0C0) /* Gain Control Lookup Table Core 1 */
127#define B43_NTAB_C1_GAINCTL_SIZE 128
128#define B43_NTAB_C0_IQLT B43_NTAB32(0x1A, 0x140) /* IQ Lookup Table Core 0 */ 122#define B43_NTAB_C0_IQLT B43_NTAB32(0x1A, 0x140) /* IQ Lookup Table Core 0 */
129#define B43_NTAB_C0_IQLT_SIZE 128 123#define B43_NTAB_C0_IQLT_SIZE 128
130#define B43_NTAB_C1_IQLT B43_NTAB32(0x1B, 0x140) /* IQ Lookup Table Core 1 */
131#define B43_NTAB_C1_IQLT_SIZE 128
132#define B43_NTAB_C0_LOFEEDTH B43_NTAB16(0x1A, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 0 */ 124#define B43_NTAB_C0_LOFEEDTH B43_NTAB16(0x1A, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 0 */
133#define B43_NTAB_C0_LOFEEDTH_SIZE 128 125#define B43_NTAB_C0_LOFEEDTH_SIZE 128
126#define B43_NTAB_C1_ESTPLT B43_NTAB8 (0x1B, 0x000) /* Estimate Power Lookup Table Core 1 */
127#define B43_NTAB_C1_ESTPLT_SIZE 64
128#define B43_NTAB_C1_ADJPLT B43_NTAB8 (0x1B, 0x040) /* Adjust Power Lookup Table Core 1 */
129#define B43_NTAB_C1_ADJPLT_SIZE 128
130#define B43_NTAB_C1_GAINCTL B43_NTAB32(0x1B, 0x0C0) /* Gain Control Lookup Table Core 1 */
131#define B43_NTAB_C1_GAINCTL_SIZE 128
132#define B43_NTAB_C1_IQLT B43_NTAB32(0x1B, 0x140) /* IQ Lookup Table Core 1 */
133#define B43_NTAB_C1_IQLT_SIZE 128
134#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 134#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
135#define B43_NTAB_C1_LOFEEDTH_SIZE 128 135#define B43_NTAB_C1_LOFEEDTH_SIZE 128
136 136
@@ -154,15 +154,17 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
154#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */ 154#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
155#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */ 155#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
156#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */ 156#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
157#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
158#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */ 157#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
159#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
160#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ 158#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
161#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
162#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ 159#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
163#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
164#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */ 160#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */
161#define B43_NTAB_C0_PAPD_COMP_R3 B43_NTAB16(26, 576)
162#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
163#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
164#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
165#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
165#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */ 166#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
167#define B43_NTAB_C1_PAPD_COMP_R3 B43_NTAB16(27, 576)
166 168
167#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18 169#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
168#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18 170#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
@@ -182,8 +184,7 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
182void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, 184void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
183 unsigned int nr_elements, const void *_data); 185 unsigned int nr_elements, const void *_data);
184 186
185void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev); 187void b43_nphy_tables_init(struct b43_wldev *dev);
186void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
187 188
188const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev); 189const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev);
189 190
diff --git a/drivers/net/wireless/b43/tables_phy_lcn.c b/drivers/net/wireless/b43/tables_phy_lcn.c
index 5176363cadf2..e347b8d80ea4 100644
--- a/drivers/net/wireless/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/b43/tables_phy_lcn.c
@@ -313,7 +313,7 @@ static const u32 b43_lcntab_0x18[] = {
313 * TX gain. 313 * TX gain.
314 **************************************************/ 314 **************************************************/
315 315
316const struct b43_lcntab_tx_gain_tbl_entry 316static const struct b43_lcntab_tx_gain_tbl_entry
317 b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0[B43_LCNTAB_TX_GAIN_SIZE] = { 317 b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0[B43_LCNTAB_TX_GAIN_SIZE] = {
318 { 0x03, 0x00, 0x1f, 0x0, 0x48 }, 318 { 0x03, 0x00, 0x1f, 0x0, 0x48 },
319 { 0x03, 0x00, 0x1f, 0x0, 0x46 }, 319 { 0x03, 0x00, 0x1f, 0x0, 0x46 },
@@ -449,7 +449,7 @@ const struct b43_lcntab_tx_gain_tbl_entry
449 * SW control. 449 * SW control.
450 **************************************************/ 450 **************************************************/
451 451
452const u16 b43_lcntab_sw_ctl_4313_epa_rev0[] = { 452static const u16 b43_lcntab_sw_ctl_4313_epa_rev0[] = {
453 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 453 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
454 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 454 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001,
455 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 455 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008,
@@ -631,7 +631,7 @@ static void b43_phy_lcn_upload_static_tables(struct b43_wldev *dev)
631 lcntab_upload(dev, B43_LCNTAB32(0x18, 0), b43_lcntab_0x18); 631 lcntab_upload(dev, B43_LCNTAB32(0x18, 0), b43_lcntab_0x18);
632} 632}
633 633
634void b43_phy_lcn_load_tx_gain_tab(struct b43_wldev *dev, 634static void b43_phy_lcn_load_tx_gain_tab(struct b43_wldev *dev,
635 const struct b43_lcntab_tx_gain_tbl_entry *gain_table) 635 const struct b43_lcntab_tx_gain_tbl_entry *gain_table)
636{ 636{
637 u32 i; 637 u32 i;
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 2d3c6644f82d..faeafe219c57 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -334,13 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), 336 &(ring->dmabase),
337 GFP_KERNEL); 337 GFP_KERNEL | __GFP_ZERO);
338 if (!ring->descbase) { 338 if (!ring->descbase)
339 b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
340 " failed\n");
341 return -ENOMEM; 339 return -ENOMEM;
342 }
343 memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
344 340
345 return 0; 341 return 0;
346} 342}
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8c3f70e1a013..572668821862 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2720,7 +2720,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2720 goto out_unlock_mutex; 2720 goto out_unlock_mutex;
2721 2721
2722 /* Switch the PHY mode (if necessary). */ 2722 /* Switch the PHY mode (if necessary). */
2723 switch (conf->channel->band) { 2723 switch (conf->chandef.chan->band) {
2724 case IEEE80211_BAND_2GHZ: 2724 case IEEE80211_BAND_2GHZ:
2725 if (phy->type == B43legacy_PHYTYPE_B) 2725 if (phy->type == B43legacy_PHYTYPE_B)
2726 new_phymode = B43legacy_PHYMODE_B; 2726 new_phymode = B43legacy_PHYMODE_B;
@@ -2748,8 +2748,9 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2748 2748
2749 /* Switch to the requested channel. 2749 /* Switch to the requested channel.
2750 * The firmware takes care of races with the TX handler. */ 2750 * The firmware takes care of races with the TX handler. */
2751 if (conf->channel->hw_value != phy->channel) 2751 if (conf->chandef.chan->hw_value != phy->channel)
2752 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0); 2752 b43legacy_radio_selectchannel(dev, conf->chandef.chan->hw_value,
2753 0);
2753 2754
2754 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR); 2755 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
2755 2756
@@ -3558,7 +3559,7 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
3558 if (idx != 0) 3559 if (idx != 0)
3559 return -ENOENT; 3560 return -ENOENT;
3560 3561
3561 survey->channel = conf->channel; 3562 survey->channel = conf->chandef.chan;
3562 survey->filled = SURVEY_INFO_NOISE_DBM; 3563 survey->filled = SURVEY_INFO_NOISE_DBM;
3563 survey->noise = dev->stats.link_noise; 3564 survey->noise = dev->stats.link_noise;
3564 3565
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 1d92d874ebb6..fc8a0fa6d3b2 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -12,8 +12,9 @@ config BRCMSMAC
12 select CORDIC 12 select CORDIC
13 ---help--- 13 ---help---
14 This module adds support for PCIe wireless adapters based on Broadcom 14 This module adds support for PCIe wireless adapters based on Broadcom
15 IEEE802.11n SoftMAC chipsets. If you choose to build a module, it'll 15 IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will
16 be called brcmsmac.ko. 16 be available if you select BCMA_DRIVER_GPIO. If you choose to build a
17 module, the driver will be called brcmsmac.ko.
17 18
18config BRCMFMAC 19config BRCMFMAC
19 tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver" 20 tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
@@ -36,15 +37,6 @@ config BRCMFMAC_SDIO
36 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to 37 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
37 use the driver for a SDIO wireless card. 38 use the driver for a SDIO wireless card.
38 39
39config BRCMFMAC_SDIO_OOB
40 bool "Out of band interrupt support for SDIO interface chipset"
41 depends on BRCMFMAC_SDIO
42 ---help---
43 This option enables out-of-band interrupt support for Broadcom
44 SDIO Wifi chipset using fullmac in order to gain better
45 performance and deep sleep wake up capability on certain
46 platforms. Say N if you are unsure.
47
48config BRCMFMAC_USB 40config BRCMFMAC_USB
49 bool "USB bus interface support for FullMAC driver" 41 bool "USB bus interface support for FullMAC driver"
50 depends on USB 42 depends on USB
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 756e19fc2795..8e9b1221b32c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,10 +26,12 @@ brcmfmac-objs += \
26 wl_cfg80211.o \ 26 wl_cfg80211.o \
27 fwil.o \ 27 fwil.o \
28 fweh.o \ 28 fweh.o \
29 fwsignal.o \
29 p2p.o \ 30 p2p.o \
30 dhd_cdc.o \ 31 dhd_cdc.o \
31 dhd_common.o \ 32 dhd_common.o \
32 dhd_linux.o 33 dhd_linux.o \
34 btcoex.o
33brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ 35brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
34 dhd_sdio.o \ 36 dhd_sdio.o \
35 bcmsdh.o \ 37 bcmsdh.o \
@@ -39,3 +41,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
39 usb.o 41 usb.o
40brcmfmac-$(CONFIG_BRCMDBG) += \ 42brcmfmac-$(CONFIG_BRCMDBG) += \
41 dhd_dbg.o 43 dhd_dbg.o
44brcmfmac-$(CONFIG_BRCM_TRACING) += \
45 tracepoint.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 11fd1c735589..4891e3df2058 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -25,6 +25,7 @@
25#include <linux/mmc/sdio.h> 25#include <linux/mmc/sdio.h>
26#include <linux/mmc/sdio_func.h> 26#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
28#include <linux/platform_data/brcmfmac-sdio.h>
28 29
29#include <defs.h> 30#include <defs.h>
30#include <brcm_hw_ids.h> 31#include <brcm_hw_ids.h>
@@ -37,16 +38,15 @@
37 38
38#define SDIOH_API_ACCESS_RETRY_LIMIT 2 39#define SDIOH_API_ACCESS_RETRY_LIMIT 2
39 40
40#ifdef CONFIG_BRCMFMAC_SDIO_OOB 41
41static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id) 42static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
42{ 43{
43 struct brcmf_bus *bus_if = dev_get_drvdata(dev_id); 44 struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
44 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 45 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
45 46
46 brcmf_dbg(INTR, "oob intr triggered\n"); 47 brcmf_dbg(INTR, "OOB intr triggered\n");
47 48
48 /* 49 /* out-of-band interrupt is level-triggered which won't
49 * out-of-band interrupt is level-triggered which won't
50 * be cleared until dpc 50 * be cleared until dpc
51 */ 51 */
52 if (sdiodev->irq_en) { 52 if (sdiodev->irq_en) {
@@ -59,72 +59,12 @@ static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
59 return IRQ_HANDLED; 59 return IRQ_HANDLED;
60} 60}
61 61
62int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) 62static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
63{
64 int ret = 0;
65 u8 data;
66 unsigned long flags;
67
68 brcmf_dbg(TRACE, "Entering: irq %d\n", sdiodev->irq);
69
70 ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
71 sdiodev->irq_flags, "brcmf_oob_intr",
72 &sdiodev->func[1]->dev);
73 if (ret != 0)
74 return ret;
75 spin_lock_init(&sdiodev->irq_en_lock);
76 spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
77 sdiodev->irq_en = true;
78 spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
79
80 ret = enable_irq_wake(sdiodev->irq);
81 if (ret != 0)
82 return ret;
83 sdiodev->irq_wake = true;
84
85 sdio_claim_host(sdiodev->func[1]);
86
87 /* must configure SDIO_CCCR_IENx to enable irq */
88 data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
91
92 /* redirect, configure and enable io for interrupt signal */
93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
94 if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
95 data |= SDIO_SEPINT_ACT_HI;
96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
97
98 sdio_release_host(sdiodev->func[1]);
99
100 return 0;
101}
102
103int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
104{
105 brcmf_dbg(TRACE, "Entering\n");
106
107 sdio_claim_host(sdiodev->func[1]);
108 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
109 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
110 sdio_release_host(sdiodev->func[1]);
111
112 if (sdiodev->irq_wake) {
113 disable_irq_wake(sdiodev->irq);
114 sdiodev->irq_wake = false;
115 }
116 free_irq(sdiodev->irq, &sdiodev->func[1]->dev);
117 sdiodev->irq_en = false;
118
119 return 0;
120}
121#else /* CONFIG_BRCMFMAC_SDIO_OOB */
122static void brcmf_sdio_irqhandler(struct sdio_func *func)
123{ 63{
124 struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev); 64 struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
125 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 65 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
126 66
127 brcmf_dbg(INTR, "ib intr triggered\n"); 67 brcmf_dbg(INTR, "IB intr triggered\n");
128 68
129 brcmf_sdbrcm_isr(sdiodev->bus); 69 brcmf_sdbrcm_isr(sdiodev->bus);
130} 70}
@@ -136,28 +76,89 @@ static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
136 76
137int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) 77int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
138{ 78{
139 brcmf_dbg(TRACE, "Entering\n"); 79 int ret = 0;
80 u8 data;
81 unsigned long flags;
140 82
141 sdio_claim_host(sdiodev->func[1]); 83 if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
142 sdio_claim_irq(sdiodev->func[1], brcmf_sdio_irqhandler); 84 brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
143 sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler); 85 sdiodev->pdata->oob_irq_nr);
144 sdio_release_host(sdiodev->func[1]); 86 ret = request_irq(sdiodev->pdata->oob_irq_nr,
87 brcmf_sdio_oob_irqhandler,
88 sdiodev->pdata->oob_irq_flags,
89 "brcmf_oob_intr",
90 &sdiodev->func[1]->dev);
91 if (ret != 0) {
92 brcmf_err("request_irq failed %d\n", ret);
93 return ret;
94 }
95 sdiodev->oob_irq_requested = true;
96 spin_lock_init(&sdiodev->irq_en_lock);
97 spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
98 sdiodev->irq_en = true;
99 spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
100
101 ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
102 if (ret != 0) {
103 brcmf_err("enable_irq_wake failed %d\n", ret);
104 return ret;
105 }
106 sdiodev->irq_wake = true;
107
108 sdio_claim_host(sdiodev->func[1]);
109
110 /* must configure SDIO_CCCR_IENx to enable irq */
111 data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
112 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
113 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
114
115 /* redirect, configure and enable io for interrupt signal */
116 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
117 if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
118 data |= SDIO_SEPINT_ACT_HI;
119 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
120
121 sdio_release_host(sdiodev->func[1]);
122 } else {
123 brcmf_dbg(SDIO, "Entering\n");
124 sdio_claim_host(sdiodev->func[1]);
125 sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
126 sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
127 sdio_release_host(sdiodev->func[1]);
128 }
145 129
146 return 0; 130 return 0;
147} 131}
148 132
149int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev) 133int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
150{ 134{
151 brcmf_dbg(TRACE, "Entering\n"); 135 brcmf_dbg(SDIO, "Entering\n");
152 136
153 sdio_claim_host(sdiodev->func[1]); 137 if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
154 sdio_release_irq(sdiodev->func[2]); 138 sdio_claim_host(sdiodev->func[1]);
155 sdio_release_irq(sdiodev->func[1]); 139 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
156 sdio_release_host(sdiodev->func[1]); 140 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
141 sdio_release_host(sdiodev->func[1]);
142
143 if (sdiodev->oob_irq_requested) {
144 sdiodev->oob_irq_requested = false;
145 if (sdiodev->irq_wake) {
146 disable_irq_wake(sdiodev->pdata->oob_irq_nr);
147 sdiodev->irq_wake = false;
148 }
149 free_irq(sdiodev->pdata->oob_irq_nr,
150 &sdiodev->func[1]->dev);
151 sdiodev->irq_en = false;
152 }
153 } else {
154 sdio_claim_host(sdiodev->func[1]);
155 sdio_release_irq(sdiodev->func[2]);
156 sdio_release_irq(sdiodev->func[1]);
157 sdio_release_host(sdiodev->func[1]);
158 }
157 159
158 return 0; 160 return 0;
159} 161}
160#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
161 162
162int 163int
163brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) 164brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
@@ -253,9 +254,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
253 u8 data; 254 u8 data;
254 int retval; 255 int retval;
255 256
256 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 257 brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
257 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 258 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
258 brcmf_dbg(INFO, "data:0x%02x\n", data); 259 brcmf_dbg(SDIO, "data:0x%02x\n", data);
259 260
260 if (ret) 261 if (ret)
261 *ret = retval; 262 *ret = retval;
@@ -268,9 +269,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
268 u32 data; 269 u32 data;
269 int retval; 270 int retval;
270 271
271 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 272 brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
272 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 273 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
273 brcmf_dbg(INFO, "data:0x%08x\n", data); 274 brcmf_dbg(SDIO, "data:0x%08x\n", data);
274 275
275 if (ret) 276 if (ret)
276 *ret = retval; 277 *ret = retval;
@@ -283,7 +284,7 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
283{ 284{
284 int retval; 285 int retval;
285 286
286 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data); 287 brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
287 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 288 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
288 289
289 if (ret) 290 if (ret)
@@ -295,7 +296,7 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
295{ 296{
296 int retval; 297 int retval;
297 298
298 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data); 299 brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
299 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 300 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
300 301
301 if (ret) 302 if (ret)
@@ -358,7 +359,7 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
358 uint width; 359 uint width;
359 int err = 0; 360 int err = 0;
360 361
361 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 362 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
362 fn, addr, pkt->len); 363 fn, addr, pkt->len);
363 364
364 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 365 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
@@ -381,7 +382,7 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
381 uint width; 382 uint width;
382 int err = 0; 383 int err = 0;
383 384
384 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 385 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
385 fn, addr, pktq->qlen); 386 fn, addr, pktq->qlen);
386 387
387 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 388 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
@@ -428,7 +429,7 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
428 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; 429 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
429 int err = 0; 430 int err = 0;
430 431
431 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 432 brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
432 fn, addr, pkt->len); 433 fn, addr, pkt->len);
433 434
434 /* Async not implemented yet */ 435 /* Async not implemented yet */
@@ -457,48 +458,92 @@ done:
457 return err; 458 return err;
458} 459}
459 460
460int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, 461int
461 u8 *buf, uint nbytes) 462brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
463 u8 *data, uint size)
462{ 464{
463 struct sk_buff *mypkt; 465 int bcmerror = 0;
464 bool write = rw ? SDIOH_WRITE : SDIOH_READ; 466 struct sk_buff *pkt;
465 int err; 467 u32 sdaddr;
468 uint dsize;
469
470 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
471 pkt = dev_alloc_skb(dsize);
472 if (!pkt) {
473 brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
474 return -EIO;
475 }
476 pkt->priority = 0;
466 477
467 addr &= SBSDIO_SB_OFT_ADDR_MASK; 478 /* Determine initial transfer parameters */
468 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 479 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
480 if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
481 dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
482 else
483 dsize = size;
469 484
470 mypkt = brcmu_pkt_buf_get_skb(nbytes); 485 sdio_claim_host(sdiodev->func[1]);
471 if (!mypkt) { 486
472 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n", 487 /* Do the transfer(s) */
473 nbytes); 488 while (size) {
474 return -EIO; 489 /* Set the backplane window to include the start address */
490 bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
491 if (bcmerror)
492 break;
493
494 brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
495 write ? "write" : "read", dsize,
496 sdaddr, address & SBSDIO_SBWINDOW_MASK);
497
498 sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
499 sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
500
501 skb_put(pkt, dsize);
502 if (write)
503 memcpy(pkt->data, data, dsize);
504 bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
505 write, SDIO_FUNC_1,
506 sdaddr, pkt);
507 if (bcmerror) {
508 brcmf_err("membytes transfer failed\n");
509 break;
510 }
511 if (!write)
512 memcpy(data, pkt->data, dsize);
513 skb_trim(pkt, dsize);
514
515 /* Adjust for next transfer (if any) */
516 size -= dsize;
517 if (size) {
518 data += dsize;
519 address += dsize;
520 sdaddr = 0;
521 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
522 }
475 } 523 }
476 524
477 /* For a write, copy the buffer data into the packet. */ 525 dev_kfree_skb(pkt);
478 if (write)
479 memcpy(mypkt->data, buf, nbytes);
480 526
481 err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write, 527 /* Return the window to backplane enumeration space for core access */
482 SDIO_FUNC_1, addr, mypkt); 528 if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
529 brcmf_err("FAILED to set window back to 0x%x\n",
530 sdiodev->sbwad);
483 531
484 /* For a read, copy the packet data back to the buffer. */ 532 sdio_release_host(sdiodev->func[1]);
485 if (!err && !write)
486 memcpy(buf, mypkt->data, nbytes);
487 533
488 brcmu_pkt_buf_free_skb(mypkt); 534 return bcmerror;
489 return err;
490} 535}
491 536
492int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn) 537int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
493{ 538{
494 char t_func = (char)fn; 539 char t_func = (char)fn;
495 brcmf_dbg(TRACE, "Enter\n"); 540 brcmf_dbg(SDIO, "Enter\n");
496 541
497 /* issue abort cmd52 command through F0 */ 542 /* issue abort cmd52 command through F0 */
498 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0, 543 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
499 SDIO_CCCR_ABORT, &t_func); 544 SDIO_CCCR_ABORT, &t_func);
500 545
501 brcmf_dbg(TRACE, "Exit\n"); 546 brcmf_dbg(SDIO, "Exit\n");
502 return 0; 547 return 0;
503} 548}
504 549
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index d92d373733d7..44fa0cdbf97b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -26,6 +26,7 @@
26#include <linux/sched.h> /* request_irq() */ 26#include <linux/sched.h> /* request_irq() */
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/platform_data/brcmfmac-sdio.h>
29#include <net/cfg80211.h> 30#include <net/cfg80211.h>
30 31
31#include <defs.h> 32#include <defs.h>
@@ -40,32 +41,30 @@
40 41
41#define DMA_ALIGN_MASK 0x03 42#define DMA_ALIGN_MASK 0x03
42 43
44#define SDIO_DEVICE_ID_BROADCOM_43143 43143
43#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 45#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
44#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 46#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
45#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 47#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
46#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 48#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
49#define SDIO_DEVICE_ID_BROADCOM_4335 0x4335
47 50
48#define SDIO_FUNC1_BLOCKSIZE 64 51#define SDIO_FUNC1_BLOCKSIZE 64
49#define SDIO_FUNC2_BLOCKSIZE 512 52#define SDIO_FUNC2_BLOCKSIZE 512
50 53
51/* devices we support, null terminated */ 54/* devices we support, null terminated */
52static const struct sdio_device_id brcmf_sdmmc_ids[] = { 55static const struct sdio_device_id brcmf_sdmmc_ids[] = {
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
53 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)}, 57 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
54 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 58 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
55 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)}, 59 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
56 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)}, 60 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
61 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
57 { /* end: all zeroes */ }, 62 { /* end: all zeroes */ },
58}; 63};
59MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 64MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
60 65
61#ifdef CONFIG_BRCMFMAC_SDIO_OOB 66static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
62static struct list_head oobirq_lh; 67
63struct brcmf_sdio_oobirq {
64 unsigned int irq;
65 unsigned long flags;
66 struct list_head list;
67};
68#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
69 68
70static bool 69static bool
71brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) 70brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
@@ -139,7 +138,7 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
139{ 138{
140 int err_ret; 139 int err_ret;
141 140
142 brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr); 141 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
143 142
144 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait); 143 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
145 if (brcmf_pm_resume_error(sdiodev)) 144 if (brcmf_pm_resume_error(sdiodev))
@@ -179,7 +178,7 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
179 return -EINVAL; 178 return -EINVAL;
180 } 179 }
181 180
182 brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", 181 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
183 rw, func, addr, nbytes); 182 rw, func, addr, nbytes);
184 183
185 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait); 184 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
@@ -252,7 +251,7 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
252 251
253 struct sk_buff *pkt; 252 struct sk_buff *pkt;
254 253
255 brcmf_dbg(TRACE, "Enter\n"); 254 brcmf_dbg(SDIO, "Enter\n");
256 255
257 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait); 256 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
258 if (brcmf_pm_resume_error(sdiodev)) 257 if (brcmf_pm_resume_error(sdiodev))
@@ -270,7 +269,7 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
270 write ? "TX" : "RX", pkt, SGCount, addr, 269 write ? "TX" : "RX", pkt, SGCount, addr,
271 pkt_len, err_ret); 270 pkt_len, err_ret);
272 } else { 271 } else {
273 brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n", 272 brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
274 write ? "TX" : "RX", pkt, SGCount, addr, 273 write ? "TX" : "RX", pkt, SGCount, addr,
275 pkt_len); 274 pkt_len);
276 } 275 }
@@ -280,7 +279,7 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
280 SGCount++; 279 SGCount++;
281 } 280 }
282 281
283 brcmf_dbg(TRACE, "Exit\n"); 282 brcmf_dbg(SDIO, "Exit\n");
284 return err_ret; 283 return err_ret;
285} 284}
286 285
@@ -295,7 +294,7 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
295 uint pkt_len; 294 uint pkt_len;
296 bool fifo = (fix_inc == SDIOH_DATA_FIX); 295 bool fifo = (fix_inc == SDIOH_DATA_FIX);
297 296
298 brcmf_dbg(TRACE, "Enter\n"); 297 brcmf_dbg(SDIO, "Enter\n");
299 298
300 if (pkt == NULL) 299 if (pkt == NULL)
301 return -EINVAL; 300 return -EINVAL;
@@ -314,7 +313,7 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
314 brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", 313 brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
315 write ? "TX" : "RX", pkt, addr, pkt_len, status); 314 write ? "TX" : "RX", pkt, addr, pkt_len, status);
316 } else { 315 } else {
317 brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n", 316 brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n",
318 write ? "TX" : "RX", pkt, addr, pkt_len); 317 write ? "TX" : "RX", pkt, addr, pkt_len);
319 } 318 }
320 319
@@ -350,12 +349,12 @@ static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
350 u32 fbraddr; 349 u32 fbraddr;
351 u8 func; 350 u8 func;
352 351
353 brcmf_dbg(TRACE, "\n"); 352 brcmf_dbg(SDIO, "\n");
354 353
355 /* Get the Card's common CIS address */ 354 /* Get the Card's common CIS address */
356 sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev, 355 sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
357 SDIO_CCCR_CIS); 356 SDIO_CCCR_CIS);
358 brcmf_dbg(INFO, "Card's Common CIS Ptr = 0x%x\n", 357 brcmf_dbg(SDIO, "Card's Common CIS Ptr = 0x%x\n",
359 sdiodev->func_cis_ptr[0]); 358 sdiodev->func_cis_ptr[0]);
360 359
361 /* Get the Card's function CIS (for each function) */ 360 /* Get the Card's function CIS (for each function) */
@@ -363,7 +362,7 @@ static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
363 func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { 362 func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
364 sdiodev->func_cis_ptr[func] = 363 sdiodev->func_cis_ptr[func] =
365 brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr); 364 brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
366 brcmf_dbg(INFO, "Function %d CIS Ptr = 0x%x\n", 365 brcmf_dbg(SDIO, "Function %d CIS Ptr = 0x%x\n",
367 func, sdiodev->func_cis_ptr[func]); 366 func, sdiodev->func_cis_ptr[func]);
368 } 367 }
369 368
@@ -382,7 +381,7 @@ int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
382{ 381{
383 int err_ret = 0; 382 int err_ret = 0;
384 383
385 brcmf_dbg(TRACE, "\n"); 384 brcmf_dbg(SDIO, "\n");
386 385
387 sdiodev->num_funcs = 2; 386 sdiodev->num_funcs = 2;
388 387
@@ -404,13 +403,13 @@ int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
404 403
405out: 404out:
406 sdio_release_host(sdiodev->func[1]); 405 sdio_release_host(sdiodev->func[1]);
407 brcmf_dbg(TRACE, "Done\n"); 406 brcmf_dbg(SDIO, "Done\n");
408 return err_ret; 407 return err_ret;
409} 408}
410 409
411void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev) 410void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
412{ 411{
413 brcmf_dbg(TRACE, "\n"); 412 brcmf_dbg(SDIO, "\n");
414 413
415 /* Disable Function 2 */ 414 /* Disable Function 2 */
416 sdio_claim_host(sdiodev->func[2]); 415 sdio_claim_host(sdiodev->func[2]);
@@ -424,33 +423,6 @@ void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
424 423
425} 424}
426 425
427#ifdef CONFIG_BRCMFMAC_SDIO_OOB
428static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
429{
430 struct brcmf_sdio_oobirq *oobirq_entry;
431
432 if (list_empty(&oobirq_lh)) {
433 brcmf_err("no valid oob irq resource\n");
434 return -ENXIO;
435 }
436
437 oobirq_entry = list_first_entry(&oobirq_lh, struct brcmf_sdio_oobirq,
438 list);
439
440 sdiodev->irq = oobirq_entry->irq;
441 sdiodev->irq_flags = oobirq_entry->flags;
442 list_del(&oobirq_entry->list);
443 kfree(oobirq_entry);
444
445 return 0;
446}
447#else
448static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
449{
450 return 0;
451}
452#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
453
454static int brcmf_ops_sdio_probe(struct sdio_func *func, 426static int brcmf_ops_sdio_probe(struct sdio_func *func,
455 const struct sdio_device_id *id) 427 const struct sdio_device_id *id)
456{ 428{
@@ -458,11 +430,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
458 struct brcmf_sdio_dev *sdiodev; 430 struct brcmf_sdio_dev *sdiodev;
459 struct brcmf_bus *bus_if; 431 struct brcmf_bus *bus_if;
460 432
461 brcmf_dbg(TRACE, "Enter\n"); 433 brcmf_dbg(SDIO, "Enter\n");
462 brcmf_dbg(TRACE, "Class=%x\n", func->class); 434 brcmf_dbg(SDIO, "Class=%x\n", func->class);
463 brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor); 435 brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
464 brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device); 436 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
465 brcmf_dbg(TRACE, "Function#: %d\n", func->num); 437 brcmf_dbg(SDIO, "Function#: %d\n", func->num);
466 438
467 /* Consume func num 1 but dont do anything with it. */ 439 /* Consume func num 1 but dont do anything with it. */
468 if (func->num == 1) 440 if (func->num == 1)
@@ -491,23 +463,21 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
491 dev_set_drvdata(&func->dev, bus_if); 463 dev_set_drvdata(&func->dev, bus_if);
492 dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); 464 dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
493 sdiodev->dev = &sdiodev->func[1]->dev; 465 sdiodev->dev = &sdiodev->func[1]->dev;
466 sdiodev->pdata = brcmfmac_sdio_pdata;
494 467
495 atomic_set(&sdiodev->suspend, false); 468 atomic_set(&sdiodev->suspend, false);
496 init_waitqueue_head(&sdiodev->request_byte_wait); 469 init_waitqueue_head(&sdiodev->request_byte_wait);
497 init_waitqueue_head(&sdiodev->request_word_wait); 470 init_waitqueue_head(&sdiodev->request_word_wait);
498 init_waitqueue_head(&sdiodev->request_chain_wait); 471 init_waitqueue_head(&sdiodev->request_chain_wait);
499 init_waitqueue_head(&sdiodev->request_buffer_wait); 472 init_waitqueue_head(&sdiodev->request_buffer_wait);
500 err = brcmf_sdio_getintrcfg(sdiodev);
501 if (err)
502 goto fail;
503 473
504 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); 474 brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
505 err = brcmf_sdio_probe(sdiodev); 475 err = brcmf_sdio_probe(sdiodev);
506 if (err) { 476 if (err) {
507 brcmf_err("F2 error, probe failed %d...\n", err); 477 brcmf_err("F2 error, probe failed %d...\n", err);
508 goto fail; 478 goto fail;
509 } 479 }
510 brcmf_dbg(TRACE, "F2 init completed...\n"); 480 brcmf_dbg(SDIO, "F2 init completed...\n");
511 return 0; 481 return 0;
512 482
513fail: 483fail:
@@ -523,10 +493,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
523 struct brcmf_bus *bus_if; 493 struct brcmf_bus *bus_if;
524 struct brcmf_sdio_dev *sdiodev; 494 struct brcmf_sdio_dev *sdiodev;
525 495
526 brcmf_dbg(TRACE, "Enter\n"); 496 brcmf_dbg(SDIO, "Enter\n");
527 brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor); 497 brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
528 brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device); 498 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
529 brcmf_dbg(TRACE, "Function: %d\n", func->num); 499 brcmf_dbg(SDIO, "Function: %d\n", func->num);
530 500
531 if (func->num != 1 && func->num != 2) 501 if (func->num != 1 && func->num != 2)
532 return; 502 return;
@@ -543,7 +513,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
543 kfree(sdiodev); 513 kfree(sdiodev);
544 } 514 }
545 515
546 brcmf_dbg(TRACE, "Exit\n"); 516 brcmf_dbg(SDIO, "Exit\n");
547} 517}
548 518
549#ifdef CONFIG_PM_SLEEP 519#ifdef CONFIG_PM_SLEEP
@@ -554,7 +524,7 @@ static int brcmf_sdio_suspend(struct device *dev)
554 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 524 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
555 int ret = 0; 525 int ret = 0;
556 526
557 brcmf_dbg(TRACE, "\n"); 527 brcmf_dbg(SDIO, "\n");
558 528
559 atomic_set(&sdiodev->suspend, true); 529 atomic_set(&sdiodev->suspend, true);
560 530
@@ -594,7 +564,7 @@ static const struct dev_pm_ops brcmf_sdio_pm_ops = {
594static struct sdio_driver brcmf_sdmmc_driver = { 564static struct sdio_driver brcmf_sdmmc_driver = {
595 .probe = brcmf_ops_sdio_probe, 565 .probe = brcmf_ops_sdio_probe,
596 .remove = brcmf_ops_sdio_remove, 566 .remove = brcmf_ops_sdio_remove,
597 .name = "brcmfmac", 567 .name = BRCMFMAC_SDIO_PDATA_NAME,
598 .id_table = brcmf_sdmmc_ids, 568 .id_table = brcmf_sdmmc_ids,
599#ifdef CONFIG_PM_SLEEP 569#ifdef CONFIG_PM_SLEEP
600 .drv = { 570 .drv = {
@@ -603,83 +573,65 @@ static struct sdio_driver brcmf_sdmmc_driver = {
603#endif /* CONFIG_PM_SLEEP */ 573#endif /* CONFIG_PM_SLEEP */
604}; 574};
605 575
606#ifdef CONFIG_BRCMFMAC_SDIO_OOB
607static int brcmf_sdio_pd_probe(struct platform_device *pdev) 576static int brcmf_sdio_pd_probe(struct platform_device *pdev)
608{ 577{
609 struct resource *res; 578 int ret;
610 struct brcmf_sdio_oobirq *oobirq_entry;
611 int i, ret;
612 579
613 INIT_LIST_HEAD(&oobirq_lh); 580 brcmf_dbg(SDIO, "Enter\n");
614 581
615 for (i = 0; ; i++) { 582 brcmfmac_sdio_pdata = pdev->dev.platform_data;
616 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
617 if (!res)
618 break;
619 583
620 oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), 584 if (brcmfmac_sdio_pdata->power_on)
621 GFP_KERNEL); 585 brcmfmac_sdio_pdata->power_on();
622 if (!oobirq_entry)
623 return -ENOMEM;
624 oobirq_entry->irq = res->start;
625 oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK;
626 list_add_tail(&oobirq_entry->list, &oobirq_lh);
627 }
628 if (i == 0)
629 return -ENXIO;
630 586
631 ret = sdio_register_driver(&brcmf_sdmmc_driver); 587 ret = sdio_register_driver(&brcmf_sdmmc_driver);
632
633 if (ret) 588 if (ret)
634 brcmf_err("sdio_register_driver failed: %d\n", ret); 589 brcmf_err("sdio_register_driver failed: %d\n", ret);
635 590
636 return ret; 591 return ret;
637} 592}
638 593
639static struct platform_driver brcmf_sdio_pd = { 594static int brcmf_sdio_pd_remove(struct platform_device *pdev)
640 .probe = brcmf_sdio_pd_probe,
641 .driver = {
642 .name = "brcmf_sdio_pd"
643 }
644};
645
646void brcmf_sdio_exit(void)
647{ 595{
648 brcmf_dbg(TRACE, "Enter\n"); 596 brcmf_dbg(SDIO, "Enter\n");
597
598 if (brcmfmac_sdio_pdata->power_off)
599 brcmfmac_sdio_pdata->power_off();
649 600
650 sdio_unregister_driver(&brcmf_sdmmc_driver); 601 sdio_unregister_driver(&brcmf_sdmmc_driver);
651 602
652 platform_driver_unregister(&brcmf_sdio_pd); 603 return 0;
653} 604}
654 605
655void brcmf_sdio_init(void) 606static struct platform_driver brcmf_sdio_pd = {
656{ 607 .remove = brcmf_sdio_pd_remove,
657 int ret; 608 .driver = {
658 609 .name = BRCMFMAC_SDIO_PDATA_NAME
659 brcmf_dbg(TRACE, "Enter\n"); 610 }
660 611};
661 ret = platform_driver_register(&brcmf_sdio_pd);
662 612
663 if (ret)
664 brcmf_err("platform_driver_register failed: %d\n", ret);
665}
666#else
667void brcmf_sdio_exit(void) 613void brcmf_sdio_exit(void)
668{ 614{
669 brcmf_dbg(TRACE, "Enter\n"); 615 brcmf_dbg(SDIO, "Enter\n");
670 616
671 sdio_unregister_driver(&brcmf_sdmmc_driver); 617 if (brcmfmac_sdio_pdata)
618 platform_driver_unregister(&brcmf_sdio_pd);
619 else
620 sdio_unregister_driver(&brcmf_sdmmc_driver);
672} 621}
673 622
674void brcmf_sdio_init(void) 623void brcmf_sdio_init(void)
675{ 624{
676 int ret; 625 int ret;
677 626
678 brcmf_dbg(TRACE, "Enter\n"); 627 brcmf_dbg(SDIO, "Enter\n");
679 628
680 ret = sdio_register_driver(&brcmf_sdmmc_driver); 629 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
630 if (ret == -ENODEV) {
631 brcmf_dbg(SDIO, "No platform data available, registering without.\n");
632 ret = sdio_register_driver(&brcmf_sdmmc_driver);
633 }
681 634
682 if (ret) 635 if (ret)
683 brcmf_err("sdio_register_driver failed: %d\n", ret); 636 brcmf_err("driver registration failed: %d\n", ret);
684} 637}
685#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
new file mode 100644
index 000000000000..0cb591b050b3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
@@ -0,0 +1,497 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/slab.h>
17#include <linux/netdevice.h>
18#include <net/cfg80211.h>
19
20#include <brcmu_wifi.h>
21#include <brcmu_utils.h>
22#include <defs.h>
23#include <dhd.h>
24#include <dhd_dbg.h>
25#include "fwil.h"
26#include "fwil_types.h"
27#include "btcoex.h"
28#include "p2p.h"
29#include "wl_cfg80211.h"
30
/* T1 start SCO/eSCO priority suppression: grace window granted to BT
 * before wifi priority is boosted (milliseconds; fed to msecs_to_jiffies
 * by the DHCP state machine).
 */
#define BRCMF_BTCOEX_OPPR_WIN_TIME	2000

/* BT registers values during DHCP (override values written to btc_params
 * registers while the DHCP boost session is active).
 */
#define BRCMF_BT_DHCP_REG50		0x8022
#define BRCMF_BT_DHCP_REG51		0
#define BRCMF_BT_DHCP_REG64		0
#define BRCMF_BT_DHCP_REG65		0
#define BRCMF_BT_DHCP_REG71		0
#define BRCMF_BT_DHCP_REG66		0x2710
#define BRCMF_BT_DHCP_REG41		0x33
#define BRCMF_BT_DHCP_REG68		0x190

/* number of btc_params 27 samples taken when probing for SCO/eSCO */
#define BRCMF_BT_SCO_SAMPLES		12
46
/**
 * enum brcmf_btcoex_state - BT coex DHCP state machine states
 * @BRCMF_BT_DHCP_IDLE: DHCP is idle
 * @BRCMF_BT_DHCP_START: DHCP started, wait before
 *	boosting wifi priority
 * @BRCMF_BT_DHCP_OPPR_WIN: graceful DHCP opportunity ended,
 *	boost wifi priority
 * @BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT: wifi priority boost end,
 *	restore defaults
 */
enum brcmf_btcoex_state {
	BRCMF_BT_DHCP_IDLE,
	BRCMF_BT_DHCP_START,
	BRCMF_BT_DHCP_OPPR_WIN,
	BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT
};
63
/**
 * struct brcmf_btcoex_info - BT coex related information
 * @vif: interface for which request was done.
 * @timer: timer for DHCP state machine
 * @timeout: configured timeout.
 * @timer_on: DHCP timer active
 * @dhcp_done: DHCP finished before T1/T2 timer expiration
 * @bt_state: DHCP state machine state
 * @work: DHCP state machine work
 * @cfg: driver private data for cfg80211 interface
 * @reg66: saved value of btc_params 66
 * @reg41: saved value of btc_params 41
 * @reg68: saved value of btc_params 68
 * @saved_regs_part1: flag indicating regs 66,41,68
 *	have been saved
 * @reg50: saved value of btc_params 50
 * @reg51: saved value of btc_params 51
 * @reg64: saved value of btc_params 64
 * @reg65: saved value of btc_params 65
 * @reg71: saved value of btc_params 71
 * @saved_regs_part2: flag indicating regs 50,51,64,65,71
 *	have been saved
 */
struct brcmf_btcoex_info {
	struct brcmf_cfg80211_vif *vif;
	struct timer_list timer;
	u16 timeout;
	bool timer_on;
	bool dhcp_done;
	enum brcmf_btcoex_state bt_state;
	struct work_struct work;
	struct brcmf_cfg80211_info *cfg;
	u32 reg66;
	u32 reg41;
	u32 reg68;
	bool saved_regs_part1;
	u32 reg50;
	u32 reg51;
	u32 reg64;
	u32 reg65;
	u32 reg71;
	bool saved_regs_part2;
};
106
107/**
108 * brcmf_btcoex_params_write() - write btc_params firmware variable
109 * @ifp: interface
110 * @addr: btc_params register number
111 * @data: data to write
112 */
113static s32 brcmf_btcoex_params_write(struct brcmf_if *ifp, u32 addr, u32 data)
114{
115 struct {
116 __le32 addr;
117 __le32 data;
118 } reg_write;
119
120 reg_write.addr = cpu_to_le32(addr);
121 reg_write.data = cpu_to_le32(data);
122 return brcmf_fil_iovar_data_set(ifp, "btc_params",
123 &reg_write, sizeof(reg_write));
124}
125
/**
 * brcmf_btcoex_params_read() - read btc_params firmware variable
 * @ifp: interface
 * @addr: btc_params register number
 * @data: read data; on entry the buffer carries @addr to select the
 *	register, on successful return it holds the register value.
 */
static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
{
	/* iovar in/out share one buffer: load the register number in;
	 * presumably the firmware replaces it with the register value —
	 * see brcmf_fil_iovar_int_get() for the exact contract.
	 */
	*data = addr;

	return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
}
138
/**
 * brcmf_btcoex_boost_wifi() - control BT SCO/eSCO parameters
 * @btci: BT coex info
 * @trump_sco:
 *	true - set SCO/eSCO parameters for compatibility
 *		during DHCP window
 *	false - restore saved parameter values
 *
 * Enhanced BT COEX settings for eSCO compatibility during DHCP window.
 * Saves btc_params 50,51,64,65,71 the first time wifi is boosted and
 * writes them back on restore; logs an error when asked to restore with
 * nothing saved.
 */
static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
				    bool trump_sco)
{
	struct brcmf_if *ifp = btci->cfg->pub->iflist[0];

	if (trump_sco && !btci->saved_regs_part2) {
		/* this should reduce eSCO aggressive
		 * retransmit w/o breaking it
		 */

		/* save current */
		brcmf_dbg(TRACE, "new SCO/eSCO coex algo {save & override}\n");
		brcmf_btcoex_params_read(ifp, 50, &btci->reg50);
		brcmf_btcoex_params_read(ifp, 51, &btci->reg51);
		brcmf_btcoex_params_read(ifp, 64, &btci->reg64);
		brcmf_btcoex_params_read(ifp, 65, &btci->reg65);
		brcmf_btcoex_params_read(ifp, 71, &btci->reg71);

		btci->saved_regs_part2 = true;
		brcmf_dbg(TRACE,
			  "saved bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
			  btci->reg50, btci->reg51, btci->reg64,
			  btci->reg65, btci->reg71);

		/* pacify the eSco */
		brcmf_btcoex_params_write(ifp, 50, BRCMF_BT_DHCP_REG50);
		brcmf_btcoex_params_write(ifp, 51, BRCMF_BT_DHCP_REG51);
		brcmf_btcoex_params_write(ifp, 64, BRCMF_BT_DHCP_REG64);
		brcmf_btcoex_params_write(ifp, 65, BRCMF_BT_DHCP_REG65);
		brcmf_btcoex_params_write(ifp, 71, BRCMF_BT_DHCP_REG71);

	} else if (btci->saved_regs_part2) {
		/* restore previously saved bt params */
		brcmf_dbg(TRACE, "Do new SCO/eSCO coex algo {restore}\n");
		brcmf_btcoex_params_write(ifp, 50, btci->reg50);
		brcmf_btcoex_params_write(ifp, 51, btci->reg51);
		brcmf_btcoex_params_write(ifp, 64, btci->reg64);
		brcmf_btcoex_params_write(ifp, 65, btci->reg65);
		brcmf_btcoex_params_write(ifp, 71, btci->reg71);

		brcmf_dbg(TRACE,
			  "restored bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
			  btci->reg50, btci->reg51, btci->reg64,
			  btci->reg65, btci->reg71);

		btci->saved_regs_part2 = false;
	} else {
		brcmf_err("attempted to restore not saved BTCOEX params\n");
	}
}
199
200/**
201 * brcmf_btcoex_is_sco_active() - check if SCO/eSCO is active
202 * @ifp: interface
203 *
204 * return: true if SCO/eSCO session is active
205 */
206static bool brcmf_btcoex_is_sco_active(struct brcmf_if *ifp)
207{
208 int ioc_res = 0;
209 bool res = false;
210 int sco_id_cnt = 0;
211 u32 param27;
212 int i;
213
214 for (i = 0; i < BRCMF_BT_SCO_SAMPLES; i++) {
215 ioc_res = brcmf_btcoex_params_read(ifp, 27, &param27);
216
217 if (ioc_res < 0) {
218 brcmf_err("ioc read btc params error\n");
219 break;
220 }
221
222 brcmf_dbg(TRACE, "sample[%d], btc_params 27:%x\n", i, param27);
223
224 if ((param27 & 0x6) == 2) { /* count both sco & esco */
225 sco_id_cnt++;
226 }
227
228 if (sco_id_cnt > 2) {
229 brcmf_dbg(TRACE,
230 "sco/esco detected, pkt id_cnt:%d samples:%d\n",
231 sco_id_cnt, i);
232 res = true;
233 break;
234 }
235 }
236 brcmf_dbg(TRACE, "exit: result=%d\n", res);
237 return res;
238}
239
/**
 * btcmf_btcoex_save_part1() - save first step parameters.
 * @btci: BT coex info; registers are read via @btci->vif->ifp.
 *
 * Reads btc_params registers 66, 41 and 68 from firmware and caches them
 * so brcmf_btcoex_restore_part1() can write them back later. Does nothing
 * when a saved copy is already held.
 *
 * NOTE(review): the name is misspelled ("btcmf" instead of "brcmf");
 * renaming would also require changing its caller, so it is kept as-is.
 */
static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
{
	struct brcmf_if *ifp = btci->vif->ifp;

	if (!btci->saved_regs_part1) {
		/* Retrieve and save original reg value */
		brcmf_btcoex_params_read(ifp, 66, &btci->reg66);
		brcmf_btcoex_params_read(ifp, 41, &btci->reg41);
		brcmf_btcoex_params_read(ifp, 68, &btci->reg68);
		btci->saved_regs_part1 = true;
		brcmf_dbg(TRACE,
			  "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n",
			  btci->reg66, btci->reg41,
			  btci->reg68);
	}
}
259
260/**
261 * brcmf_btcoex_restore_part1() - restore first step parameters.
262 */
263static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
264{
265 struct brcmf_if *ifp;
266
267 if (btci->saved_regs_part1) {
268 btci->saved_regs_part1 = false;
269 ifp = btci->vif->ifp;
270 brcmf_btcoex_params_write(ifp, 66, btci->reg66);
271 brcmf_btcoex_params_write(ifp, 41, btci->reg41);
272 brcmf_btcoex_params_write(ifp, 68, btci->reg68);
273 brcmf_dbg(TRACE,
274 "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n",
275 btci->reg66, btci->reg41,
276 btci->reg68);
277 }
278}
279
/**
 * brcmf_btcoex_timerfunc() - BT coex timer callback
 * @data: pointer to struct brcmf_btcoex_info, installed via timer.data
 *	in brcmf_btcoex_attach().
 *
 * Fires when the current DHCP state-machine interval expires; clears the
 * timer-active flag and defers state handling to the work handler.
 */
static void brcmf_btcoex_timerfunc(ulong data)
{
	struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
	brcmf_dbg(TRACE, "enter\n");

	bt_local->timer_on = false;
	schedule_work(&bt_local->work);
}
291
292/**
293 * brcmf_btcoex_handler() - BT coex state machine work handler
294 * @work: work
295 */
296static void brcmf_btcoex_handler(struct work_struct *work)
297{
298 struct brcmf_btcoex_info *btci;
299 btci = container_of(work, struct brcmf_btcoex_info, work);
300 if (btci->timer_on) {
301 btci->timer_on = false;
302 del_timer_sync(&btci->timer);
303 }
304
305 switch (btci->bt_state) {
306 case BRCMF_BT_DHCP_START:
307 /* DHCP started provide OPPORTUNITY window
308 to get DHCP address
309 */
310 brcmf_dbg(TRACE, "DHCP started\n");
311 btci->bt_state = BRCMF_BT_DHCP_OPPR_WIN;
312 if (btci->timeout < BRCMF_BTCOEX_OPPR_WIN_TIME) {
313 mod_timer(&btci->timer, btci->timer.expires);
314 } else {
315 btci->timeout -= BRCMF_BTCOEX_OPPR_WIN_TIME;
316 mod_timer(&btci->timer,
317 jiffies +
318 msecs_to_jiffies(BRCMF_BTCOEX_OPPR_WIN_TIME));
319 }
320 btci->timer_on = true;
321 break;
322
323 case BRCMF_BT_DHCP_OPPR_WIN:
324 if (btci->dhcp_done) {
325 brcmf_dbg(TRACE, "DHCP done before T1 expiration\n");
326 goto idle;
327 }
328
329 /* DHCP is not over yet, start lowering BT priority */
330 brcmf_dbg(TRACE, "DHCP T1:%d expired\n",
331 BRCMF_BTCOEX_OPPR_WIN_TIME);
332 brcmf_btcoex_boost_wifi(btci, true);
333
334 btci->bt_state = BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT;
335 mod_timer(&btci->timer,
336 jiffies + msecs_to_jiffies(btci->timeout));
337 btci->timer_on = true;
338 break;
339
340 case BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT:
341 if (btci->dhcp_done)
342 brcmf_dbg(TRACE, "DHCP done before T2 expiration\n");
343 else
344 brcmf_dbg(TRACE, "DHCP T2:%d expired\n",
345 BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT);
346
347 goto idle;
348
349 default:
350 brcmf_err("invalid state=%d !!!\n", btci->bt_state);
351 goto idle;
352 }
353
354 return;
355
356idle:
357 btci->bt_state = BRCMF_BT_DHCP_IDLE;
358 btci->timer_on = false;
359 brcmf_btcoex_boost_wifi(btci, false);
360 cfg80211_crit_proto_stopped(&btci->vif->wdev, GFP_KERNEL);
361 brcmf_btcoex_restore_part1(btci);
362 btci->vif = NULL;
363}
364
365/**
366 * brcmf_btcoex_attach() - initialize BT coex data
367 * @cfg: driver private cfg80211 data
368 *
369 * return: 0 on success
370 */
371int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
372{
373 struct brcmf_btcoex_info *btci = NULL;
374 brcmf_dbg(TRACE, "enter\n");
375
376 btci = kmalloc(sizeof(struct brcmf_btcoex_info), GFP_KERNEL);
377 if (!btci)
378 return -ENOMEM;
379
380 btci->bt_state = BRCMF_BT_DHCP_IDLE;
381
382 /* Set up timer for BT */
383 btci->timer_on = false;
384 btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
385 init_timer(&btci->timer);
386 btci->timer.data = (ulong)btci;
387 btci->timer.function = brcmf_btcoex_timerfunc;
388 btci->cfg = cfg;
389 btci->saved_regs_part1 = false;
390 btci->saved_regs_part2 = false;
391
392 INIT_WORK(&btci->work, brcmf_btcoex_handler);
393
394 cfg->btcoex = btci;
395 return 0;
396}
397
398/**
399 * brcmf_btcoex_detach - clean BT coex data
400 * @cfg: driver private cfg80211 data
401 */
402void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg)
403{
404 brcmf_dbg(TRACE, "enter\n");
405
406 if (!cfg->btcoex)
407 return;
408
409 if (cfg->btcoex->timer_on) {
410 cfg->btcoex->timer_on = false;
411 del_timer_sync(&cfg->btcoex->timer);
412 }
413
414 cancel_work_sync(&cfg->btcoex->work);
415
416 brcmf_btcoex_boost_wifi(cfg->btcoex, false);
417 brcmf_btcoex_restore_part1(cfg->btcoex);
418
419 kfree(cfg->btcoex);
420 cfg->btcoex = NULL;
421}
422
/**
 * brcmf_btcoex_dhcp_start() - start the DHCP priority-boost session.
 * @btci: BT coex info; the caller (brcmf_btcoex_set_mode()) must already
 *	have set @btci->vif and @btci->timeout.
 *
 * Saves btc_params 66,41,68, writes the DHCP override values, and kicks
 * the state machine work in the BRCMF_BT_DHCP_START state.
 */
static void brcmf_btcoex_dhcp_start(struct brcmf_btcoex_info *btci)
{
	struct brcmf_if *ifp = btci->vif->ifp;

	btcmf_btcoex_save_part1(btci);
	/* set new regs values */
	brcmf_btcoex_params_write(ifp, 66, BRCMF_BT_DHCP_REG66);
	brcmf_btcoex_params_write(ifp, 41, BRCMF_BT_DHCP_REG41);
	brcmf_btcoex_params_write(ifp, 68, BRCMF_BT_DHCP_REG68);
	btci->dhcp_done = false;
	btci->bt_state = BRCMF_BT_DHCP_START;
	schedule_work(&btci->work);
	brcmf_dbg(TRACE, "enable BT DHCP Timer\n");
}
437
438static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci)
439{
440 /* Stop any bt timer because DHCP session is done */
441 btci->dhcp_done = true;
442 if (btci->timer_on) {
443 brcmf_dbg(TRACE, "disable BT DHCP Timer\n");
444 btci->timer_on = false;
445 del_timer_sync(&btci->timer);
446
447 /* schedule worker if transition to IDLE is needed */
448 if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
449 brcmf_dbg(TRACE, "bt_state:%d\n",
450 btci->bt_state);
451 schedule_work(&btci->work);
452 }
453 } else {
454 /* Restore original values */
455 brcmf_btcoex_restore_part1(btci);
456 }
457}
458
/**
 * brcmf_btcoex_set_mode - set BT coex mode
 * @vif: interface the DHCP session runs on.
 * @mode: Wifi-Bluetooth coexistence mode; BRCMF_BTCOEX_DISABLED starts
 *	the DHCP boost session (only when SCO/eSCO is active),
 *	BRCMF_BTCOEX_ENABLED ends it.
 * @duration: boost duration used as the DHCP timeout (milliseconds,
 *	judging by the msecs_to_jiffies() use in the state machine).
 *
 * return: 0 on success, -EBUSY when a DHCP session is already running.
 */
int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
			  enum brcmf_btcoex_mode mode, u16 duration)
{
	struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy);
	struct brcmf_btcoex_info *btci = cfg->btcoex;
	struct brcmf_if *ifp = cfg->pub->iflist[0];

	switch (mode) {
	case BRCMF_BTCOEX_DISABLED:
		brcmf_dbg(TRACE, "DHCP session starts\n");
		if (btci->bt_state != BRCMF_BT_DHCP_IDLE)
			return -EBUSY;
		/* Start BT timer only for SCO connection */
		if (brcmf_btcoex_is_sco_active(ifp)) {
			btci->timeout = duration;
			btci->vif = vif;
			brcmf_btcoex_dhcp_start(btci);
		}
		break;

	case BRCMF_BTCOEX_ENABLED:
		brcmf_dbg(TRACE, "DHCP session ends\n");
		if (btci->bt_state != BRCMF_BT_DHCP_IDLE &&
		    vif == btci->vif) {
			brcmf_btcoex_dhcp_end(btci);
		}
		break;
	default:
		brcmf_dbg(TRACE, "Unknown mode, ignored\n");
	}
	return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h
new file mode 100644
index 000000000000..19647c68aa9e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
#ifndef WL_BTCOEX_H_
#define WL_BTCOEX_H_

/**
 * enum brcmf_btcoex_mode - Wifi-Bluetooth coexistence mode.
 * @BRCMF_BTCOEX_DISABLED: coex disabled for a DHCP session; starts the
 *	wifi priority-boost state machine (see brcmf_btcoex_set_mode()).
 * @BRCMF_BTCOEX_ENABLED: coex (re-)enabled; ends the boost session.
 */
enum brcmf_btcoex_mode {
	BRCMF_BTCOEX_DISABLED,
	BRCMF_BTCOEX_ENABLED
};

/* allocate and initialize BT coex state on @cfg; returns 0 on success */
int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg);
/* stop the state machine and free the BT coex state held by @cfg */
void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg);
/* start/stop the DHCP boost session for @vif; @duration is the timeout */
int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
			  enum brcmf_btcoex_mode mode, u16 duration);

#endif /* WL_BTCOEX_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index ef6f23be6d32..28db9cf39672 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -28,6 +28,7 @@
28/******************************************************************************* 28/*******************************************************************************
29 * IO codes that are interpreted by dongle firmware 29 * IO codes that are interpreted by dongle firmware
30 ******************************************************************************/ 30 ******************************************************************************/
31#define BRCMF_C_GET_VERSION 1
31#define BRCMF_C_UP 2 32#define BRCMF_C_UP 2
32#define BRCMF_C_DOWN 3 33#define BRCMF_C_DOWN 3
33#define BRCMF_C_SET_PROMISC 10 34#define BRCMF_C_SET_PROMISC 10
@@ -72,6 +73,7 @@
72#define BRCMF_C_SET_WSEC 134 73#define BRCMF_C_SET_WSEC 134
73#define BRCMF_C_GET_PHY_NOISE 135 74#define BRCMF_C_GET_PHY_NOISE 135
74#define BRCMF_C_GET_BSS_INFO 136 75#define BRCMF_C_GET_BSS_INFO 136
76#define BRCMF_C_GET_BANDLIST 140
75#define BRCMF_C_SET_SCB_TIMEOUT 158 77#define BRCMF_C_SET_SCB_TIMEOUT 158
76#define BRCMF_C_GET_PHYLIST 180 78#define BRCMF_C_GET_PHYLIST 180
77#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185 79#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
@@ -475,6 +477,11 @@ struct brcmf_sta_info_le {
475 __le32 rx_decrypt_failures; /* # of packet decrypted failed */ 477 __le32 rx_decrypt_failures; /* # of packet decrypted failed */
476}; 478};
477 479
480struct brcmf_chanspec_list {
481 __le32 count; /* # of entries */
482 __le32 element[1]; /* variable length uint32 list */
483};
484
478/* 485/*
479 * WLC_E_PROBRESP_MSG 486 * WLC_E_PROBRESP_MSG
480 * WLC_E_P2P_PROBREQ_MSG 487 * WLC_E_P2P_PROBREQ_MSG
@@ -501,6 +508,7 @@ struct brcmf_dcmd {
501/* Forward decls for struct brcmf_pub (see below) */ 508/* Forward decls for struct brcmf_pub (see below) */
502struct brcmf_proto; /* device communication protocol info */ 509struct brcmf_proto; /* device communication protocol info */
503struct brcmf_cfg80211_dev; /* cfg80211 device info */ 510struct brcmf_cfg80211_dev; /* cfg80211 device info */
511struct brcmf_fws_info; /* firmware signalling info */
504 512
505/* Common structure for module and instance linkage */ 513/* Common structure for module and instance linkage */
506struct brcmf_pub { 514struct brcmf_pub {
@@ -527,6 +535,10 @@ struct brcmf_pub {
527 unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; 535 unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
528 536
529 struct brcmf_fweh_info fweh; 537 struct brcmf_fweh_info fweh;
538
539 bool fw_signals;
540 struct brcmf_fws_info *fws;
541 spinlock_t fws_spinlock;
530#ifdef DEBUG 542#ifdef DEBUG
531 struct dentry *dbgfs_dir; 543 struct dentry *dbgfs_dir;
532#endif 544#endif
@@ -537,10 +549,25 @@ struct brcmf_if_event {
537 u8 action; 549 u8 action;
538 u8 flags; 550 u8 flags;
539 u8 bssidx; 551 u8 bssidx;
552 u8 role;
540}; 553};
541 554
542/* forward declaration */ 555/* forward declarations */
543struct brcmf_cfg80211_vif; 556struct brcmf_cfg80211_vif;
557struct brcmf_fws_mac_descriptor;
558
559/**
560 * enum brcmf_netif_stop_reason - reason for stopping netif queue.
561 *
562 * @BRCMF_NETIF_STOP_REASON_FWS_FC:
563 * netif stopped due to firmware signalling flow control.
564 * @BRCMF_NETIF_STOP_REASON_BLOCK_BUS:
565 * netif stopped due to bus blocking.
566 */
567enum brcmf_netif_stop_reason {
568 BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
569 BRCMF_NETIF_STOP_REASON_BLOCK_BUS = 2
570};
544 571
545/** 572/**
546 * struct brcmf_if - interface control information. 573 * struct brcmf_if - interface control information.
@@ -549,9 +576,13 @@ struct brcmf_cfg80211_vif;
549 * @vif: points to cfg80211 specific interface information. 576 * @vif: points to cfg80211 specific interface information.
550 * @ndev: associated network device. 577 * @ndev: associated network device.
551 * @stats: interface specific network statistics. 578 * @stats: interface specific network statistics.
579 * @setmacaddr_work: worker object for setting mac address.
580 * @multicast_work: worker object for multicast provisioning.
581 * @fws_desc: interface specific firmware-signalling descriptor.
552 * @ifidx: interface index in device firmware. 582 * @ifidx: interface index in device firmware.
553 * @bssidx: index of bss associated with this interface. 583 * @bssidx: index of bss associated with this interface.
554 * @mac_addr: assigned mac address. 584 * @mac_addr: assigned mac address.
585 * @netif_stop: bitmap indicates reason why netif queues are stopped.
555 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. 586 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
556 * @pend_8021x_wait: used for signalling change in count. 587 * @pend_8021x_wait: used for signalling change in count.
557 */ 588 */
@@ -562,9 +593,11 @@ struct brcmf_if {
562 struct net_device_stats stats; 593 struct net_device_stats stats;
563 struct work_struct setmacaddr_work; 594 struct work_struct setmacaddr_work;
564 struct work_struct multicast_work; 595 struct work_struct multicast_work;
596 struct brcmf_fws_mac_descriptor *fws_desc;
565 int ifidx; 597 int ifidx;
566 s32 bssidx; 598 s32 bssidx;
567 u8 mac_addr[ETH_ALEN]; 599 u8 mac_addr[ETH_ALEN];
600 u8 netif_stop;
568 atomic_t pend_8021x_cnt; 601 atomic_t pend_8021x_cnt;
569 wait_queue_head_t pend_8021x_wait; 602 wait_queue_head_t pend_8021x_wait;
570}; 603};
@@ -582,13 +615,17 @@ extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
582 void *buf, uint len); 615 void *buf, uint len);
583 616
584/* Remove any protocol-specific data header. */ 617/* Remove any protocol-specific data header. */
585extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, 618extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
586 struct sk_buff *rxp); 619 struct sk_buff *rxp);
587 620
588extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); 621extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
589extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, 622extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
590 s32 ifidx, char *name, u8 *mac_addr); 623 s32 ifidx, char *name, u8 *mac_addr);
591extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx); 624extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
625void brcmf_txflowblock_if(struct brcmf_if *ifp,
626 enum brcmf_netif_stop_reason reason, bool state);
592extern u32 brcmf_get_chip_info(struct brcmf_if *ifp); 627extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
628extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
629 bool success);
593 630
594#endif /* _BRCMF_H_ */ 631#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index ad25c3408b59..080395f49fa5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -39,10 +39,12 @@ struct brcmf_bus_dcmd {
39 * @txdata: send a data frame to the dongle (callee disposes skb). 39 * @txdata: send a data frame to the dongle (callee disposes skb).
40 * @txctl: transmit a control request message to dongle. 40 * @txctl: transmit a control request message to dongle.
41 * @rxctl: receive a control response message from dongle. 41 * @rxctl: receive a control response message from dongle.
42 * @gettxq: obtain a reference of bus transmit queue (optional).
42 * 43 *
43 * This structure provides an abstract interface towards the 44 * This structure provides an abstract interface towards the
44 * bus specific driver. For control messages to common driver 45 * bus specific driver. For control messages to common driver
45 * will assure there is only one active transaction. 46 * will assure there is only one active transaction. Unless
47 * indicated otherwise these callbacks are mandatory.
46 */ 48 */
47struct brcmf_bus_ops { 49struct brcmf_bus_ops {
48 int (*init)(struct device *dev); 50 int (*init)(struct device *dev);
@@ -50,6 +52,7 @@ struct brcmf_bus_ops {
50 int (*txdata)(struct device *dev, struct sk_buff *skb); 52 int (*txdata)(struct device *dev, struct sk_buff *skb);
51 int (*txctl)(struct device *dev, unsigned char *msg, uint len); 53 int (*txctl)(struct device *dev, unsigned char *msg, uint len);
52 int (*rxctl)(struct device *dev, unsigned char *msg, uint len); 54 int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
55 struct pktq * (*gettxq)(struct device *dev);
53}; 56};
54 57
55/** 58/**
@@ -115,6 +118,14 @@ int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
115 return bus->ops->rxctl(bus->dev, msg, len); 118 return bus->ops->rxctl(bus->dev, msg, len);
116} 119}
117 120
121static inline
122struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
123{
124 if (!bus->ops->gettxq)
125 return ERR_PTR(-ENOENT);
126
127 return bus->ops->gettxq(bus->dev);
128}
118/* 129/*
119 * interface functions from common layer 130 * interface functions from common layer
120 */ 131 */
@@ -134,7 +145,7 @@ extern void brcmf_dev_reset(struct device *dev);
134/* Indication from bus module to change flow-control state */ 145/* Indication from bus module to change flow-control state */
135extern void brcmf_txflowblock(struct device *dev, bool state); 146extern void brcmf_txflowblock(struct device *dev, bool state);
136 147
137/* Notify tx completion */ 148/* Notify the bus has transferred the tx packet to firmware */
138extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, 149extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
139 bool success); 150 bool success);
140 151
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a2354d951dd7..59c77aa3b959 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -28,6 +28,7 @@
28#include "dhd.h" 28#include "dhd.h"
29#include "dhd_proto.h" 29#include "dhd_proto.h"
30#include "dhd_bus.h" 30#include "dhd_bus.h"
31#include "fwsignal.h"
31#include "dhd_dbg.h" 32#include "dhd_dbg.h"
32 33
33struct brcmf_proto_cdc_dcmd { 34struct brcmf_proto_cdc_dcmd {
@@ -71,13 +72,26 @@ struct brcmf_proto_cdc_dcmd {
71 ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \ 72 ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
72 ((idx) << BDC_FLAG2_IF_SHIFT))) 73 ((idx) << BDC_FLAG2_IF_SHIFT)))
73 74
75/**
76 * struct brcmf_proto_bdc_header - BDC header format
77 *
78 * @flags: flags contain protocol and checksum info.
79 * @priority: 802.1d priority and USB flow control info (bit 4:7).
80 * @flags2: additional flags containing dongle interface index.
81 * @data_offset: start of packet data. header is following by firmware signals.
82 */
74struct brcmf_proto_bdc_header { 83struct brcmf_proto_bdc_header {
75 u8 flags; 84 u8 flags;
76 u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ 85 u8 priority;
77 u8 flags2; 86 u8 flags2;
78 u8 data_offset; 87 u8 data_offset;
79}; 88};
80 89
90/*
91 * maximum length of firmware signal data between
92 * the BDC header and packet data in the tx path.
93 */
94#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
81 95
82#define RETRIES 2 /* # of retries to retrieve matching dcmd response */ 96#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
83#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE 97#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE
@@ -258,7 +272,7 @@ static void pkt_set_sum_good(struct sk_buff *skb, bool x)
258 skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 272 skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
259} 273}
260 274
261void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, 275void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
262 struct sk_buff *pktbuf) 276 struct sk_buff *pktbuf)
263{ 277{
264 struct brcmf_proto_bdc_header *h; 278 struct brcmf_proto_bdc_header *h;
@@ -266,7 +280,6 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
266 brcmf_dbg(CDC, "Enter\n"); 280 brcmf_dbg(CDC, "Enter\n");
267 281
268 /* Push BDC header used to convey priority for buses that don't */ 282 /* Push BDC header used to convey priority for buses that don't */
269
270 skb_push(pktbuf, BDC_HEADER_LEN); 283 skb_push(pktbuf, BDC_HEADER_LEN);
271 284
272 h = (struct brcmf_proto_bdc_header *)(pktbuf->data); 285 h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
@@ -277,11 +290,11 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
277 290
278 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); 291 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
279 h->flags2 = 0; 292 h->flags2 = 0;
280 h->data_offset = 0; 293 h->data_offset = offset;
281 BDC_SET_IF_IDX(h, ifidx); 294 BDC_SET_IF_IDX(h, ifidx);
282} 295}
283 296
284int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, 297int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
285 struct sk_buff *pktbuf) 298 struct sk_buff *pktbuf)
286{ 299{
287 struct brcmf_proto_bdc_header *h; 300 struct brcmf_proto_bdc_header *h;
@@ -290,8 +303,8 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
290 303
291 /* Pop BDC header used to convey priority for buses that don't */ 304 /* Pop BDC header used to convey priority for buses that don't */
292 305
293 if (pktbuf->len < BDC_HEADER_LEN) { 306 if (pktbuf->len <= BDC_HEADER_LEN) {
294 brcmf_err("rx data too short (%d < %d)\n", 307 brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
295 pktbuf->len, BDC_HEADER_LEN); 308 pktbuf->len, BDC_HEADER_LEN);
296 return -EBADE; 309 return -EBADE;
297 } 310 }
@@ -328,7 +341,10 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
328 pktbuf->priority = h->priority & BDC_PRIORITY_MASK; 341 pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
329 342
330 skb_pull(pktbuf, BDC_HEADER_LEN); 343 skb_pull(pktbuf, BDC_HEADER_LEN);
331 skb_pull(pktbuf, h->data_offset << 2); 344 if (do_fws)
345 brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
346 else
347 skb_pull(pktbuf, h->data_offset << 2);
332 348
333 if (pktbuf->len == 0) 349 if (pktbuf->len == 0)
334 return -ENODATA; 350 return -ENODATA;
@@ -350,7 +366,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
350 } 366 }
351 367
352 drvr->prot = cdc; 368 drvr->prot = cdc;
353 drvr->hdrlen += BDC_HEADER_LEN; 369 drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
354 drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN + 370 drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
355 sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN; 371 sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
356 return 0; 372 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4544342a0428..be0787cab24f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -24,6 +24,7 @@
24#include "dhd_proto.h" 24#include "dhd_proto.h"
25#include "dhd_dbg.h" 25#include "dhd_dbg.h"
26#include "fwil.h" 26#include "fwil.h"
27#include "tracepoint.h"
27 28
28#define PKTFILTER_BUF_SIZE 128 29#define PKTFILTER_BUF_SIZE 128
29#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ 30#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
@@ -373,3 +374,35 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
373done: 374done:
374 return err; 375 return err;
375} 376}
377
378#ifdef CONFIG_BRCM_TRACING
379void __brcmf_err(const char *func, const char *fmt, ...)
380{
381 struct va_format vaf = {
382 .fmt = fmt,
383 };
384 va_list args;
385
386 va_start(args, fmt);
387 vaf.va = &args;
388 pr_err("%s: %pV", func, &vaf);
389 trace_brcmf_err(func, &vaf);
390 va_end(args);
391}
392#endif
393#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
394void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
395{
396 struct va_format vaf = {
397 .fmt = fmt,
398 };
399 va_list args;
400
401 va_start(args, fmt);
402 vaf.va = &args;
403 if (brcmf_msg_level & level)
404 pr_debug("%s %pV", func, &vaf);
405 trace_brcmf_dbg(level, func, &vaf);
406 va_end(args);
407}
408#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 57671eddf79d..202869cd0932 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,6 +22,7 @@
22#include "dhd.h" 22#include "dhd.h"
23#include "dhd_bus.h" 23#include "dhd_bus.h"
24#include "dhd_dbg.h" 24#include "dhd_dbg.h"
25#include "tracepoint.h"
25 26
26static struct dentry *root_folder; 27static struct dentry *root_folder;
27 28
@@ -123,3 +124,82 @@ void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
123 debugfs_create_file("counters", S_IRUGO, dentry, 124 debugfs_create_file("counters", S_IRUGO, dentry,
124 sdcnt, &brcmf_debugfs_sdio_counter_ops); 125 sdcnt, &brcmf_debugfs_sdio_counter_ops);
125} 126}
127
128static
129ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
130 size_t count, loff_t *ppos)
131{
132 struct brcmf_fws_stats *fwstats = f->private_data;
133 char buf[650];
134 int res;
135
136 /* only allow read from start */
137 if (*ppos > 0)
138 return 0;
139
140 res = scnprintf(buf, sizeof(buf),
141 "header_pulls: %u\n"
142 "header_only_pkt: %u\n"
143 "tlv_parse_failed: %u\n"
144 "tlv_invalid_type: %u\n"
145 "mac_update_fails: %u\n"
146 "ps_update_fails: %u\n"
147 "if_update_fails: %u\n"
148 "pkt2bus: %u\n"
149 "generic_error: %u\n"
150 "rollback_success: %u\n"
151 "rollback_failed: %u\n"
152 "delayq_full: %u\n"
153 "supprq_full: %u\n"
154 "txs_indicate: %u\n"
155 "txs_discard: %u\n"
156 "txs_suppr_core: %u\n"
157 "txs_suppr_ps: %u\n"
158 "txs_tossed: %u\n"
159 "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
160 "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
161 fwstats->header_pulls,
162 fwstats->header_only_pkt,
163 fwstats->tlv_parse_failed,
164 fwstats->tlv_invalid_type,
165 fwstats->mac_update_failed,
166 fwstats->mac_ps_update_failed,
167 fwstats->if_update_failed,
168 fwstats->pkt2bus,
169 fwstats->generic_error,
170 fwstats->rollback_success,
171 fwstats->rollback_failed,
172 fwstats->delayq_full_error,
173 fwstats->supprq_full_error,
174 fwstats->txs_indicate,
175 fwstats->txs_discard,
176 fwstats->txs_supp_core,
177 fwstats->txs_supp_ps,
178 fwstats->txs_tossed,
179 fwstats->send_pkts[0], fwstats->send_pkts[1],
180 fwstats->send_pkts[2], fwstats->send_pkts[3],
181 fwstats->send_pkts[4],
182 fwstats->fifo_credits_sent[0],
183 fwstats->fifo_credits_sent[1],
184 fwstats->fifo_credits_sent[2],
185 fwstats->fifo_credits_sent[3],
186 fwstats->fifo_credits_sent[4]);
187
188 return simple_read_from_buffer(data, count, ppos, buf, res);
189}
190
191static const struct file_operations brcmf_debugfs_fws_stats_ops = {
192 .owner = THIS_MODULE,
193 .open = simple_open,
194 .read = brcmf_debugfs_fws_stats_read
195};
196
197void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
198 struct brcmf_fws_stats *stats)
199{
200 struct dentry *dentry = drvr->dbgfs_dir;
201
202 if (!IS_ERR_OR_NULL(dentry))
203 debugfs_create_file("fws_stats", S_IRUGO, dentry,
204 stats, &brcmf_debugfs_fws_stats_ops);
205}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index bc013cbe06f6..009c87bfd9ae 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -34,6 +34,7 @@
34#define BRCMF_SCAN_VAL 0x00004000 34#define BRCMF_SCAN_VAL 0x00004000
35#define BRCMF_CONN_VAL 0x00008000 35#define BRCMF_CONN_VAL 0x00008000
36#define BRCMF_CDC_VAL 0x00010000 36#define BRCMF_CDC_VAL 0x00010000
37#define BRCMF_SDIO_VAL 0x00020000
37 38
38/* set default print format */ 39/* set default print format */
39#undef pr_fmt 40#undef pr_fmt
@@ -43,6 +44,7 @@
43 * debugging is not selected. When debugging the driver error 44 * debugging is not selected. When debugging the driver error
44 * messages are as important as other tracing or even more so. 45 * messages are as important as other tracing or even more so.
45 */ 46 */
47#ifndef CONFIG_BRCM_TRACING
46#ifdef CONFIG_BRCMDBG 48#ifdef CONFIG_BRCMDBG
47#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__) 49#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
48#else 50#else
@@ -52,15 +54,21 @@
52 pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ 54 pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
53 } while (0) 55 } while (0)
54#endif 56#endif
57#else
58__printf(2, 3)
59void __brcmf_err(const char *func, const char *fmt, ...);
60#define brcmf_err(fmt, ...) \
61 __brcmf_err(__func__, fmt, ##__VA_ARGS__)
62#endif
55 63
56#if defined(DEBUG) 64#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
57 65__printf(3, 4)
66void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
58#define brcmf_dbg(level, fmt, ...) \ 67#define brcmf_dbg(level, fmt, ...) \
59do { \ 68do { \
60 if (brcmf_msg_level & BRCMF_##level##_VAL) \ 69 __brcmf_dbg(BRCMF_##level##_VAL, __func__, \
61 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ 70 fmt, ##__VA_ARGS__); \
62} while (0) 71} while (0)
63
64#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL) 72#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
65#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL) 73#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
66#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL) 74#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
@@ -69,7 +77,7 @@ do { \
69#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL) 77#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
70#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL) 78#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
71 79
72#else /* (defined DEBUG) || (defined DEBUG) */ 80#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
73 81
74#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 82#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
75 83
@@ -81,10 +89,11 @@ do { \
81#define BRCMF_EVENT_ON() 0 89#define BRCMF_EVENT_ON() 0
82#define BRCMF_FIL_ON() 0 90#define BRCMF_FIL_ON() 0
83 91
84#endif /* defined(DEBUG) */ 92#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
85 93
86#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \ 94#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \
87do { \ 95do { \
96 trace_brcmf_hexdump((void *)data, len); \
88 if (test) \ 97 if (test) \
89 brcmu_dbg_hex_dump(data, len, fmt, ##__VA_ARGS__); \ 98 brcmu_dbg_hex_dump(data, len, fmt, ##__VA_ARGS__); \
90} while (0) 99} while (0)
@@ -125,6 +134,32 @@ struct brcmf_sdio_count {
125 ulong rx_readahead_cnt; /* packets where header read-ahead was used */ 134 ulong rx_readahead_cnt; /* packets where header read-ahead was used */
126}; 135};
127 136
137struct brcmf_fws_stats {
138 u32 tlv_parse_failed;
139 u32 tlv_invalid_type;
140 u32 header_only_pkt;
141 u32 header_pulls;
142 u32 pkt2bus;
143 u32 send_pkts[5];
144 u32 fifo_credits_sent[5];
145 u32 fifo_credits_back[6];
146 u32 generic_error;
147 u32 mac_update_failed;
148 u32 mac_ps_update_failed;
149 u32 if_update_failed;
150 u32 packet_request_failed;
151 u32 credit_request_failed;
152 u32 rollback_success;
153 u32 rollback_failed;
154 u32 delayq_full_error;
155 u32 supprq_full_error;
156 u32 txs_indicate;
157 u32 txs_discard;
158 u32 txs_supp_core;
159 u32 txs_supp_ps;
160 u32 txs_tossed;
161};
162
128struct brcmf_pub; 163struct brcmf_pub;
129#ifdef DEBUG 164#ifdef DEBUG
130void brcmf_debugfs_init(void); 165void brcmf_debugfs_init(void);
@@ -134,6 +169,8 @@ void brcmf_debugfs_detach(struct brcmf_pub *drvr);
134struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); 169struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
135void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, 170void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
136 struct brcmf_sdio_count *sdcnt); 171 struct brcmf_sdio_count *sdcnt);
172void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
173 struct brcmf_fws_stats *stats);
137#else 174#else
138static inline void brcmf_debugfs_init(void) 175static inline void brcmf_debugfs_init(void)
139{ 176{
@@ -148,6 +185,10 @@ static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
148static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr) 185static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
149{ 186{
150} 187}
188static inline void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
189 struct brcmf_fws_stats *stats)
190{
191}
151#endif 192#endif
152 193
153#endif /* _BRCMF_DBG_H_ */ 194#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index c06cea88df0d..59c25463e428 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -30,17 +30,18 @@
30#include "p2p.h" 30#include "p2p.h"
31#include "wl_cfg80211.h" 31#include "wl_cfg80211.h"
32#include "fwil.h" 32#include "fwil.h"
33#include "fwsignal.h"
33 34
34MODULE_AUTHOR("Broadcom Corporation"); 35MODULE_AUTHOR("Broadcom Corporation");
35MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); 36MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
36MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
37MODULE_LICENSE("Dual BSD/GPL"); 37MODULE_LICENSE("Dual BSD/GPL");
38 38
39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */ 39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40 40
41/* Error bits */ 41/* Error bits */
42int brcmf_msg_level; 42int brcmf_msg_level;
43module_param(brcmf_msg_level, int, 0); 43module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
44MODULE_PARM_DESC(debug, "level of debug output");
44 45
45/* P2P0 enable */ 46/* P2P0 enable */
46static int brcmf_p2p_enable; 47static int brcmf_p2p_enable;
@@ -222,18 +223,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
222 goto done; 223 goto done;
223 } 224 }
224 225
225 /* handle ethernet header */ 226 ret = brcmf_fws_process_skb(ifp, skb);
226 eh = (struct ethhdr *)(skb->data);
227 if (is_multicast_ether_addr(eh->h_dest))
228 drvr->tx_multicast++;
229 if (ntohs(eh->h_proto) == ETH_P_PAE)
230 atomic_inc(&ifp->pend_8021x_cnt);
231
232 /* If the protocol uses a data header, apply it */
233 brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
234
235 /* Use bus module to send data frame */
236 ret = brcmf_bus_txdata(drvr->bus_if, skb);
237 227
238done: 228done:
239 if (ret) { 229 if (ret) {
@@ -247,9 +237,27 @@ done:
247 return NETDEV_TX_OK; 237 return NETDEV_TX_OK;
248} 238}
249 239
240void brcmf_txflowblock_if(struct brcmf_if *ifp,
241 enum brcmf_netif_stop_reason reason, bool state)
242{
243 if (!ifp)
244 return;
245
246 brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
247 ifp->bssidx, ifp->netif_stop, reason, state);
248 if (state) {
249 if (!ifp->netif_stop)
250 netif_stop_queue(ifp->ndev);
251 ifp->netif_stop |= reason;
252 } else {
253 ifp->netif_stop &= ~reason;
254 if (!ifp->netif_stop)
255 netif_wake_queue(ifp->ndev);
256 }
257}
258
250void brcmf_txflowblock(struct device *dev, bool state) 259void brcmf_txflowblock(struct device *dev, bool state)
251{ 260{
252 struct net_device *ndev;
253 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 261 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
254 struct brcmf_pub *drvr = bus_if->drvr; 262 struct brcmf_pub *drvr = bus_if->drvr;
255 int i; 263 int i;
@@ -257,13 +265,8 @@ void brcmf_txflowblock(struct device *dev, bool state)
257 brcmf_dbg(TRACE, "Enter\n"); 265 brcmf_dbg(TRACE, "Enter\n");
258 266
259 for (i = 0; i < BRCMF_MAX_IFS; i++) 267 for (i = 0; i < BRCMF_MAX_IFS; i++)
260 if (drvr->iflist[i]) { 268 brcmf_txflowblock_if(drvr->iflist[i],
261 ndev = drvr->iflist[i]->ndev; 269 BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state);
262 if (state)
263 netif_stop_queue(ndev);
264 else
265 netif_wake_queue(ndev);
266 }
267} 270}
268 271
269void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) 272void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
@@ -283,7 +286,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
283 skb_unlink(skb, skb_list); 286 skb_unlink(skb, skb_list);
284 287
285 /* process and remove protocol-specific header */ 288 /* process and remove protocol-specific header */
286 ret = brcmf_proto_hdrpull(drvr, &ifidx, skb); 289 ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
287 ifp = drvr->iflist[ifidx]; 290 ifp = drvr->iflist[ifidx];
288 291
289 if (ret || !ifp || !ifp->ndev) { 292 if (ret || !ifp || !ifp->ndev) {
@@ -320,13 +323,8 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
320 /* Strip header, count, deliver upward */ 323 /* Strip header, count, deliver upward */
321 skb_pull(skb, ETH_HLEN); 324 skb_pull(skb, ETH_HLEN);
322 325
323 /* Process special event packets and then discard them */ 326 /* Process special event packets */
324 brcmf_fweh_process_skb(drvr, skb, &ifidx); 327 brcmf_fweh_process_skb(drvr, skb);
325
326 if (drvr->iflist[ifidx]) {
327 ifp = drvr->iflist[ifidx];
328 ifp->ndev->last_rx = jiffies;
329 }
330 328
331 if (!(ifp->ndev->flags & IFF_UP)) { 329 if (!(ifp->ndev->flags & IFF_UP)) {
332 brcmu_pkt_buf_free_skb(skb); 330 brcmu_pkt_buf_free_skb(skb);
@@ -349,31 +347,49 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
349 } 347 }
350} 348}
351 349
352void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) 350void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
351 bool success)
353{ 352{
354 u8 ifidx; 353 struct brcmf_if *ifp;
355 struct ethhdr *eh; 354 struct ethhdr *eh;
355 u8 ifidx;
356 u16 type; 356 u16 type;
357 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 357 int res;
358 struct brcmf_pub *drvr = bus_if->drvr;
359 struct brcmf_if *ifp;
360 358
361 brcmf_proto_hdrpull(drvr, &ifidx, txp); 359 res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
362 360
363 ifp = drvr->iflist[ifidx]; 361 ifp = drvr->iflist[ifidx];
364 if (!ifp) 362 if (!ifp)
365 return; 363 goto done;
366 364
367 eh = (struct ethhdr *)(txp->data); 365 if (res == 0) {
368 type = ntohs(eh->h_proto); 366 eh = (struct ethhdr *)(txp->data);
367 type = ntohs(eh->h_proto);
369 368
370 if (type == ETH_P_PAE) { 369 if (type == ETH_P_PAE) {
371 atomic_dec(&ifp->pend_8021x_cnt); 370 atomic_dec(&ifp->pend_8021x_cnt);
372 if (waitqueue_active(&ifp->pend_8021x_wait)) 371 if (waitqueue_active(&ifp->pend_8021x_wait))
373 wake_up(&ifp->pend_8021x_wait); 372 wake_up(&ifp->pend_8021x_wait);
373 }
374 } 374 }
375 if (!success) 375 if (!success)
376 ifp->stats.tx_errors++; 376 ifp->stats.tx_errors++;
377done:
378 brcmu_pkt_buf_free_skb(txp);
379}
380
381void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
382{
383 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
384 struct brcmf_pub *drvr = bus_if->drvr;
385
386 /* await txstatus signal for firmware if active */
387 if (brcmf_fws_fc_active(drvr->fws)) {
388 if (!success)
389 brcmf_fws_bustxfail(drvr->fws, txp);
390 } else {
391 brcmf_txfinalize(drvr, txp, success);
392 }
377} 393}
378 394
379static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) 395static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -734,28 +750,35 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
734 } 750 }
735 } 751 }
736 752
737 /* Allocate netdev, including space for private structure */ 753 if (!brcmf_p2p_enable && bssidx == 1) {
738 ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup); 754 /* this is P2P_DEVICE interface */
739 if (!ndev) { 755 brcmf_dbg(INFO, "allocate non-netdev interface\n");
740 brcmf_err("OOM - alloc_netdev\n"); 756 ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
741 return ERR_PTR(-ENOMEM); 757 if (!ifp)
758 return ERR_PTR(-ENOMEM);
759 } else {
760 brcmf_dbg(INFO, "allocate netdev interface\n");
761 /* Allocate netdev, including space for private structure */
762 ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
763 if (!ndev)
764 return ERR_PTR(-ENOMEM);
765
766 ifp = netdev_priv(ndev);
767 ifp->ndev = ndev;
742 } 768 }
743 769
744 ifp = netdev_priv(ndev);
745 ifp->ndev = ndev;
746 ifp->drvr = drvr; 770 ifp->drvr = drvr;
747 drvr->iflist[bssidx] = ifp; 771 drvr->iflist[bssidx] = ifp;
748 ifp->ifidx = ifidx; 772 ifp->ifidx = ifidx;
749 ifp->bssidx = bssidx; 773 ifp->bssidx = bssidx;
750 774
751
752 init_waitqueue_head(&ifp->pend_8021x_wait); 775 init_waitqueue_head(&ifp->pend_8021x_wait);
753 776
754 if (mac_addr != NULL) 777 if (mac_addr != NULL)
755 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); 778 memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
756 779
757 brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n", 780 brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
758 current->pid, ifp->ndev->name, ifp->mac_addr); 781 current->pid, name, ifp->mac_addr);
759 782
760 return ifp; 783 return ifp;
761} 784}
@@ -787,11 +810,13 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
787 } 810 }
788 811
789 unregister_netdev(ifp->ndev); 812 unregister_netdev(ifp->ndev);
790 drvr->iflist[bssidx] = NULL;
791 if (bssidx == 0) 813 if (bssidx == 0)
792 brcmf_cfg80211_detach(drvr->config); 814 brcmf_cfg80211_detach(drvr->config);
793 free_netdev(ifp->ndev); 815 free_netdev(ifp->ndev);
816 } else {
817 kfree(ifp);
794 } 818 }
819 drvr->iflist[bssidx] = NULL;
795} 820}
796 821
797int brcmf_attach(uint bus_hdrlen, struct device *dev) 822int brcmf_attach(uint bus_hdrlen, struct device *dev)
@@ -873,6 +898,13 @@ int brcmf_bus_start(struct device *dev)
873 if (ret < 0) 898 if (ret < 0)
874 goto fail; 899 goto fail;
875 900
901 drvr->fw_signals = true;
902 ret = brcmf_fws_init(drvr);
903 if (ret < 0)
904 goto fail;
905
906 brcmf_fws_add_interface(ifp);
907
876 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev); 908 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
877 if (drvr->config == NULL) { 909 if (drvr->config == NULL) {
878 ret = -ENOMEM; 910 ret = -ENOMEM;
@@ -889,6 +921,10 @@ fail:
889 brcmf_err("failed: %d\n", ret); 921 brcmf_err("failed: %d\n", ret);
890 if (drvr->config) 922 if (drvr->config)
891 brcmf_cfg80211_detach(drvr->config); 923 brcmf_cfg80211_detach(drvr->config);
924 if (drvr->fws) {
925 brcmf_fws_del_interface(ifp);
926 brcmf_fws_deinit(drvr);
927 }
892 free_netdev(ifp->ndev); 928 free_netdev(ifp->ndev);
893 drvr->iflist[0] = NULL; 929 drvr->iflist[0] = NULL;
894 if (p2p_ifp) { 930 if (p2p_ifp) {
@@ -944,14 +980,18 @@ void brcmf_detach(struct device *dev)
944 980
945 /* make sure primary interface removed last */ 981 /* make sure primary interface removed last */
946 for (i = BRCMF_MAX_IFS-1; i > -1; i--) 982 for (i = BRCMF_MAX_IFS-1; i > -1; i--)
947 if (drvr->iflist[i]) 983 if (drvr->iflist[i]) {
984 brcmf_fws_del_interface(drvr->iflist[i]);
948 brcmf_del_if(drvr, i); 985 brcmf_del_if(drvr, i);
986 }
949 987
950 brcmf_bus_detach(drvr); 988 brcmf_bus_detach(drvr);
951 989
952 if (drvr->prot) 990 if (drvr->prot)
953 brcmf_proto_detach(drvr); 991 brcmf_proto_detach(drvr);
954 992
993 brcmf_fws_deinit(drvr);
994
955 brcmf_debugfs_detach(drvr); 995 brcmf_debugfs_detach(drvr);
956 bus_if->drvr = NULL; 996 bus_if->drvr = NULL;
957 kfree(drvr); 997 kfree(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 48fa70302192..ef9179883748 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -33,7 +33,7 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
33/* Add any protocol-specific data header. 33/* Add any protocol-specific data header.
34 * Caller must reserve prot_hdrlen prepend space. 34 * Caller must reserve prot_hdrlen prepend space.
35 */ 35 */
36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, 36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
37 struct sk_buff *txp); 37 struct sk_buff *txp);
38 38
39/* Sets dongle media info (drv_version, mac address). */ 39/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 35fc68be158d..d2487518bd2a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -31,6 +31,7 @@
31#include <linux/bcma/bcma.h> 31#include <linux/bcma/bcma.h>
32#include <linux/debugfs.h> 32#include <linux/debugfs.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/platform_data/brcmfmac-sdio.h>
34#include <asm/unaligned.h> 35#include <asm/unaligned.h>
35#include <defs.h> 36#include <defs.h>
36#include <brcmu_wifi.h> 37#include <brcmu_wifi.h>
@@ -94,6 +95,7 @@ struct rte_console {
94 95
95#include "dhd_bus.h" 96#include "dhd_bus.h"
96#include "dhd_dbg.h" 97#include "dhd_dbg.h"
98#include "tracepoint.h"
97 99
98#define TXQLEN 2048 /* bulk tx queue length */ 100#define TXQLEN 2048 /* bulk tx queue length */
99#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */ 101#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -323,6 +325,9 @@ MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
323 */ 325 */
324#define BRCMF_IDLE_INTERVAL 1 326#define BRCMF_IDLE_INTERVAL 1
325 327
328#define KSO_WAIT_US 50
329#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
330
326/* 331/*
327 * Conversion of 802.1D priority to precedence level 332 * Conversion of 802.1D priority to precedence level
328 */ 333 */
@@ -332,95 +337,6 @@ static uint prio2prec(u32 prio)
332 (prio^2) : prio; 337 (prio^2) : prio;
333} 338}
334 339
335/* core registers */
336struct sdpcmd_regs {
337 u32 corecontrol; /* 0x00, rev8 */
338 u32 corestatus; /* rev8 */
339 u32 PAD[1];
340 u32 biststatus; /* rev8 */
341
342 /* PCMCIA access */
343 u16 pcmciamesportaladdr; /* 0x010, rev8 */
344 u16 PAD[1];
345 u16 pcmciamesportalmask; /* rev8 */
346 u16 PAD[1];
347 u16 pcmciawrframebc; /* rev8 */
348 u16 PAD[1];
349 u16 pcmciaunderflowtimer; /* rev8 */
350 u16 PAD[1];
351
352 /* interrupt */
353 u32 intstatus; /* 0x020, rev8 */
354 u32 hostintmask; /* rev8 */
355 u32 intmask; /* rev8 */
356 u32 sbintstatus; /* rev8 */
357 u32 sbintmask; /* rev8 */
358 u32 funcintmask; /* rev4 */
359 u32 PAD[2];
360 u32 tosbmailbox; /* 0x040, rev8 */
361 u32 tohostmailbox; /* rev8 */
362 u32 tosbmailboxdata; /* rev8 */
363 u32 tohostmailboxdata; /* rev8 */
364
365 /* synchronized access to registers in SDIO clock domain */
366 u32 sdioaccess; /* 0x050, rev8 */
367 u32 PAD[3];
368
369 /* PCMCIA frame control */
370 u8 pcmciaframectrl; /* 0x060, rev8 */
371 u8 PAD[3];
372 u8 pcmciawatermark; /* rev8 */
373 u8 PAD[155];
374
375 /* interrupt batching control */
376 u32 intrcvlazy; /* 0x100, rev8 */
377 u32 PAD[3];
378
379 /* counters */
380 u32 cmd52rd; /* 0x110, rev8 */
381 u32 cmd52wr; /* rev8 */
382 u32 cmd53rd; /* rev8 */
383 u32 cmd53wr; /* rev8 */
384 u32 abort; /* rev8 */
385 u32 datacrcerror; /* rev8 */
386 u32 rdoutofsync; /* rev8 */
387 u32 wroutofsync; /* rev8 */
388 u32 writebusy; /* rev8 */
389 u32 readwait; /* rev8 */
390 u32 readterm; /* rev8 */
391 u32 writeterm; /* rev8 */
392 u32 PAD[40];
393 u32 clockctlstatus; /* rev8 */
394 u32 PAD[7];
395
396 u32 PAD[128]; /* DMA engines */
397
398 /* SDIO/PCMCIA CIS region */
399 char cis[512]; /* 0x400-0x5ff, rev6 */
400
401 /* PCMCIA function control registers */
402 char pcmciafcr[256]; /* 0x600-6ff, rev6 */
403 u16 PAD[55];
404
405 /* PCMCIA backplane access */
406 u16 backplanecsr; /* 0x76E, rev6 */
407 u16 backplaneaddr0; /* rev6 */
408 u16 backplaneaddr1; /* rev6 */
409 u16 backplaneaddr2; /* rev6 */
410 u16 backplaneaddr3; /* rev6 */
411 u16 backplanedata0; /* rev6 */
412 u16 backplanedata1; /* rev6 */
413 u16 backplanedata2; /* rev6 */
414 u16 backplanedata3; /* rev6 */
415 u16 PAD[31];
416
417 /* sprom "size" & "blank" info */
418 u16 spromstatus; /* 0x7BE, rev2 */
419 u32 PAD[464];
420
421 u16 PAD[0x80];
422};
423
424#ifdef DEBUG 340#ifdef DEBUG
425/* Device console log buffer state */ 341/* Device console log buffer state */
426struct brcmf_console { 342struct brcmf_console {
@@ -587,12 +503,14 @@ struct brcmf_sdio {
587 503
588 bool txoff; /* Transmit flow-controlled */ 504 bool txoff; /* Transmit flow-controlled */
589 struct brcmf_sdio_count sdcnt; 505 struct brcmf_sdio_count sdcnt;
506 bool sr_enabled; /* SaveRestore enabled */
507 bool sleeping; /* SDIO bus sleeping */
590}; 508};
591 509
592/* clkstate */ 510/* clkstate */
593#define CLK_NONE 0 511#define CLK_NONE 0
594#define CLK_SDONLY 1 512#define CLK_SDONLY 1
595#define CLK_PENDING 2 /* Not used yet */ 513#define CLK_PENDING 2
596#define CLK_AVAIL 3 514#define CLK_AVAIL 3
597 515
598#ifdef DEBUG 516#ifdef DEBUG
@@ -600,7 +518,7 @@ static int qcount[NUMPRIO];
600static int tx_packets[NUMPRIO]; 518static int tx_packets[NUMPRIO];
601#endif /* DEBUG */ 519#endif /* DEBUG */
602 520
603#define SDIO_DRIVE_STRENGTH 6 /* in milliamps */ 521#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
604 522
605#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL) 523#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
606 524
@@ -664,6 +582,62 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
664 return ret; 582 return ret;
665} 583}
666 584
585static int
586brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
587{
588 u8 wr_val = 0, rd_val, cmp_val, bmask;
589 int err = 0;
590 int try_cnt = 0;
591
592 brcmf_dbg(TRACE, "Enter\n");
593
594 wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
595 /* 1st KSO write goes to AOS wake up core if device is asleep */
596 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
597 wr_val, &err);
598 if (err) {
599 brcmf_err("SDIO_AOS KSO write error: %d\n", err);
600 return err;
601 }
602
603 if (on) {
604 /* device WAKEUP through KSO:
605 * write bit 0 & read back until
606 * both bits 0 (kso bit) & 1 (dev on status) are set
607 */
608 cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
609 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
610 bmask = cmp_val;
611 usleep_range(2000, 3000);
612 } else {
613 /* Put device to sleep, turn off KSO */
614 cmp_val = 0;
615 /* only check for bit0, bit1(dev on status) may not
616 * get cleared right away
617 */
618 bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
619 }
620
621 do {
622 /* reliable KSO bit set/clr:
623 * the sdiod sleep write access is synced to PMU 32khz clk
624 * just one write attempt may fail,
625 * read it back until it matches written value
626 */
627 rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
628 &err);
629 if (((rd_val & bmask) == cmp_val) && !err)
630 break;
631 brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
632 try_cnt, MAX_KSO_ATTEMPTS, err);
633 udelay(KSO_WAIT_US);
634 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
635 wr_val, &err);
636 } while (try_cnt++ < MAX_KSO_ATTEMPTS);
637
638 return err;
639}
640
667#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND) 641#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
668 642
669#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) 643#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
@@ -675,10 +649,15 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
675 u8 clkctl, clkreq, devctl; 649 u8 clkctl, clkreq, devctl;
676 unsigned long timeout; 650 unsigned long timeout;
677 651
678 brcmf_dbg(TRACE, "Enter\n"); 652 brcmf_dbg(SDIO, "Enter\n");
679 653
680 clkctl = 0; 654 clkctl = 0;
681 655
656 if (bus->sr_enabled) {
657 bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
658 return 0;
659 }
660
682 if (on) { 661 if (on) {
683 /* Request HT Avail */ 662 /* Request HT Avail */
684 clkreq = 663 clkreq =
@@ -713,7 +692,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
713 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; 692 devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
714 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, 693 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
715 devctl, &err); 694 devctl, &err);
716 brcmf_dbg(INFO, "CLKCTL: set PENDING\n"); 695 brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
717 bus->clkstate = CLK_PENDING; 696 bus->clkstate = CLK_PENDING;
718 697
719 return 0; 698 return 0;
@@ -750,7 +729,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
750 729
751 /* Mark clock available */ 730 /* Mark clock available */
752 bus->clkstate = CLK_AVAIL; 731 bus->clkstate = CLK_AVAIL;
753 brcmf_dbg(INFO, "CLKCTL: turned ON\n"); 732 brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
754 733
755#if defined(DEBUG) 734#if defined(DEBUG)
756 if (!bus->alp_only) { 735 if (!bus->alp_only) {
@@ -775,7 +754,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
775 bus->clkstate = CLK_SDONLY; 754 bus->clkstate = CLK_SDONLY;
776 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 755 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
777 clkreq, &err); 756 clkreq, &err);
778 brcmf_dbg(INFO, "CLKCTL: turned OFF\n"); 757 brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
779 if (err) { 758 if (err) {
780 brcmf_err("Failed access turning clock off: %d\n", 759 brcmf_err("Failed access turning clock off: %d\n",
781 err); 760 err);
@@ -788,7 +767,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
788/* Change idle/active SD state */ 767/* Change idle/active SD state */
789static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on) 768static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
790{ 769{
791 brcmf_dbg(TRACE, "Enter\n"); 770 brcmf_dbg(SDIO, "Enter\n");
792 771
793 if (on) 772 if (on)
794 bus->clkstate = CLK_SDONLY; 773 bus->clkstate = CLK_SDONLY;
@@ -805,7 +784,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
805 uint oldstate = bus->clkstate; 784 uint oldstate = bus->clkstate;
806#endif /* DEBUG */ 785#endif /* DEBUG */
807 786
808 brcmf_dbg(TRACE, "Enter\n"); 787 brcmf_dbg(SDIO, "Enter\n");
809 788
810 /* Early exit if we're already there */ 789 /* Early exit if we're already there */
811 if (bus->clkstate == target) { 790 if (bus->clkstate == target) {
@@ -849,12 +828,69 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
849 break; 828 break;
850 } 829 }
851#ifdef DEBUG 830#ifdef DEBUG
852 brcmf_dbg(INFO, "%d -> %d\n", oldstate, bus->clkstate); 831 brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
853#endif /* DEBUG */ 832#endif /* DEBUG */
854 833
855 return 0; 834 return 0;
856} 835}
857 836
837static int
838brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
839{
840 int err = 0;
841 brcmf_dbg(TRACE, "Enter\n");
842 brcmf_dbg(SDIO, "request %s currently %s\n",
843 (sleep ? "SLEEP" : "WAKE"),
844 (bus->sleeping ? "SLEEP" : "WAKE"));
845
846 /* If SR is enabled control bus state with KSO */
847 if (bus->sr_enabled) {
848 /* Done if we're already in the requested state */
849 if (sleep == bus->sleeping)
850 goto end;
851
852 /* Going to sleep */
853 if (sleep) {
854 /* Don't sleep if something is pending */
855 if (atomic_read(&bus->intstatus) ||
856 atomic_read(&bus->ipend) > 0 ||
857 (!atomic_read(&bus->fcstate) &&
858 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
859 data_ok(bus)))
860 return -EBUSY;
861 err = brcmf_sdbrcm_kso_control(bus, false);
862 /* disable watchdog */
863 if (!err)
864 brcmf_sdbrcm_wd_timer(bus, 0);
865 } else {
866 bus->idlecount = 0;
867 err = brcmf_sdbrcm_kso_control(bus, true);
868 }
869 if (!err) {
870 /* Change state */
871 bus->sleeping = sleep;
872 brcmf_dbg(SDIO, "new state %s\n",
873 (sleep ? "SLEEP" : "WAKE"));
874 } else {
875 brcmf_err("error while changing bus sleep state %d\n",
876 err);
877 return err;
878 }
879 }
880
881end:
882 /* control clocks */
883 if (sleep) {
884 if (!bus->sr_enabled)
885 brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
886 } else {
887 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
888 }
889
890 return err;
891
892}
893
858static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) 894static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
859{ 895{
860 u32 intstatus = 0; 896 u32 intstatus = 0;
@@ -862,7 +898,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
862 u8 fcbits; 898 u8 fcbits;
863 int ret; 899 int ret;
864 900
865 brcmf_dbg(TRACE, "Enter\n"); 901 brcmf_dbg(SDIO, "Enter\n");
866 902
867 /* Read mailbox data and ack that we did so */ 903 /* Read mailbox data and ack that we did so */
868 ret = r_sdreg32(bus, &hmb_data, 904 ret = r_sdreg32(bus, &hmb_data,
@@ -875,7 +911,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
875 911
876 /* Dongle recomposed rx frames, accept them again */ 912 /* Dongle recomposed rx frames, accept them again */
877 if (hmb_data & HMB_DATA_NAKHANDLED) { 913 if (hmb_data & HMB_DATA_NAKHANDLED) {
878 brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n", 914 brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
879 bus->rx_seq); 915 bus->rx_seq);
880 if (!bus->rxskip) 916 if (!bus->rxskip)
881 brcmf_err("unexpected NAKHANDLED!\n"); 917 brcmf_err("unexpected NAKHANDLED!\n");
@@ -896,7 +932,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
896 "expecting %d\n", 932 "expecting %d\n",
897 bus->sdpcm_ver, SDPCM_PROT_VERSION); 933 bus->sdpcm_ver, SDPCM_PROT_VERSION);
898 else 934 else
899 brcmf_dbg(INFO, "Dongle ready, protocol version %d\n", 935 brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
900 bus->sdpcm_ver); 936 bus->sdpcm_ver);
901 } 937 }
902 938
@@ -970,7 +1006,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
970 if (!retries) 1006 if (!retries)
971 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc); 1007 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
972 else 1008 else
973 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); 1009 brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
974 1010
975 if (rtx) { 1011 if (rtx) {
976 bus->sdcnt.rxrtx++; 1012 bus->sdcnt.rxrtx++;
@@ -1173,7 +1209,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1173 /* If packets, issue read(s) and send up packet chain */ 1209 /* If packets, issue read(s) and send up packet chain */
1174 /* Return sequence numbers consumed? */ 1210 /* Return sequence numbers consumed? */
1175 1211
1176 brcmf_dbg(TRACE, "start: glomd %p glom %p\n", 1212 brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
1177 bus->glomd, skb_peek(&bus->glom)); 1213 bus->glomd, skb_peek(&bus->glom));
1178 1214
1179 /* If there's a descriptor, generate the packet chain */ 1215 /* If there's a descriptor, generate the packet chain */
@@ -1546,7 +1582,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1546 struct sk_buff_head pktlist; /* needed for bus interface */ 1582 struct sk_buff_head pktlist; /* needed for bus interface */
1547 u16 pad; /* Number of pad bytes to read */ 1583 u16 pad; /* Number of pad bytes to read */
1548 uint rxleft = 0; /* Remaining number of frames allowed */ 1584 uint rxleft = 0; /* Remaining number of frames allowed */
1549 int sdret; /* Return code from calls */ 1585 int ret; /* Return code from calls */
1550 uint rxcount = 0; /* Total frames read */ 1586 uint rxcount = 0; /* Total frames read */
1551 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new; 1587 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1552 u8 head_read = 0; 1588 u8 head_read = 0;
@@ -1577,15 +1613,15 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1577 /* read header first for unknow frame length */ 1613 /* read header first for unknow frame length */
1578 sdio_claim_host(bus->sdiodev->func[1]); 1614 sdio_claim_host(bus->sdiodev->func[1]);
1579 if (!rd->len) { 1615 if (!rd->len) {
1580 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, 1616 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1581 bus->sdiodev->sbwad, 1617 bus->sdiodev->sbwad,
1582 SDIO_FUNC_2, F2SYNC, 1618 SDIO_FUNC_2, F2SYNC,
1583 bus->rxhdr, 1619 bus->rxhdr,
1584 BRCMF_FIRSTREAD); 1620 BRCMF_FIRSTREAD);
1585 bus->sdcnt.f2rxhdrs++; 1621 bus->sdcnt.f2rxhdrs++;
1586 if (sdret < 0) { 1622 if (ret < 0) {
1587 brcmf_err("RXHEADER FAILED: %d\n", 1623 brcmf_err("RXHEADER FAILED: %d\n",
1588 sdret); 1624 ret);
1589 bus->sdcnt.rx_hdrfail++; 1625 bus->sdcnt.rx_hdrfail++;
1590 brcmf_sdbrcm_rxfail(bus, true, true); 1626 brcmf_sdbrcm_rxfail(bus, true, true);
1591 sdio_release_host(bus->sdiodev->func[1]); 1627 sdio_release_host(bus->sdiodev->func[1]);
@@ -1637,14 +1673,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1637 skb_pull(pkt, head_read); 1673 skb_pull(pkt, head_read);
1638 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN); 1674 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1639 1675
1640 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1676 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1641 SDIO_FUNC_2, F2SYNC, pkt); 1677 SDIO_FUNC_2, F2SYNC, pkt);
1642 bus->sdcnt.f2rxdata++; 1678 bus->sdcnt.f2rxdata++;
1643 sdio_release_host(bus->sdiodev->func[1]); 1679 sdio_release_host(bus->sdiodev->func[1]);
1644 1680
1645 if (sdret < 0) { 1681 if (ret < 0) {
1646 brcmf_err("read %d bytes from channel %d failed: %d\n", 1682 brcmf_err("read %d bytes from channel %d failed: %d\n",
1647 rd->len, rd->channel, sdret); 1683 rd->len, rd->channel, ret);
1648 brcmu_pkt_buf_free_skb(pkt); 1684 brcmu_pkt_buf_free_skb(pkt);
1649 sdio_claim_host(bus->sdiodev->func[1]); 1685 sdio_claim_host(bus->sdiodev->func[1]);
1650 brcmf_sdbrcm_rxfail(bus, true, 1686 brcmf_sdbrcm_rxfail(bus, true,
@@ -1775,13 +1811,12 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1775/* Writes a HW/SW header into the packet and sends it. */ 1811/* Writes a HW/SW header into the packet and sends it. */
1776/* Assumes: (a) header space already there, (b) caller holds lock */ 1812/* Assumes: (a) header space already there, (b) caller holds lock */
1777static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, 1813static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1778 uint chan, bool free_pkt) 1814 uint chan)
1779{ 1815{
1780 int ret; 1816 int ret;
1781 u8 *frame; 1817 u8 *frame;
1782 u16 len, pad = 0; 1818 u16 len, pad = 0;
1783 u32 swheader; 1819 u32 swheader;
1784 struct sk_buff *new;
1785 int i; 1820 int i;
1786 1821
1787 brcmf_dbg(TRACE, "Enter\n"); 1822 brcmf_dbg(TRACE, "Enter\n");
@@ -1795,30 +1830,14 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1795 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n", 1830 brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
1796 skb_headroom(pkt), pad); 1831 skb_headroom(pkt), pad);
1797 bus->sdiodev->bus_if->tx_realloc++; 1832 bus->sdiodev->bus_if->tx_realloc++;
1798 new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN); 1833 ret = skb_cow(pkt, BRCMF_SDALIGN);
1799 if (!new) { 1834 if (ret)
1800 brcmf_err("couldn't allocate new %d-byte packet\n",
1801 pkt->len + BRCMF_SDALIGN);
1802 ret = -ENOMEM;
1803 goto done; 1835 goto done;
1804 } 1836 pad = ((unsigned long)frame % BRCMF_SDALIGN);
1805
1806 pkt_align(new, pkt->len, BRCMF_SDALIGN);
1807 memcpy(new->data, pkt->data, pkt->len);
1808 if (free_pkt)
1809 brcmu_pkt_buf_free_skb(pkt);
1810 /* free the pkt if canned one is not used */
1811 free_pkt = true;
1812 pkt = new;
1813 frame = (u8 *) (pkt->data);
1814 /* precondition: (frame % BRCMF_SDALIGN) == 0) */
1815 pad = 0;
1816 } else {
1817 skb_push(pkt, pad);
1818 frame = (u8 *) (pkt->data);
1819 /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
1820 memset(frame, 0, pad + SDPCM_HDRLEN);
1821 } 1837 }
1838 skb_push(pkt, pad);
1839 frame = (u8 *) (pkt->data);
1840 memset(frame, 0, pad + SDPCM_HDRLEN);
1822 } 1841 }
1823 /* precondition: pad < BRCMF_SDALIGN */ 1842 /* precondition: pad < BRCMF_SDALIGN */
1824 1843
@@ -1833,8 +1852,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1833 (((pad + 1852 (((pad +
1834 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); 1853 SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
1835 1854
1836 put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN); 1855 *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
1837 put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); 1856 *(((__le32 *) frame) + 2) = 0;
1838 1857
1839#ifdef DEBUG 1858#ifdef DEBUG
1840 tx_packets[pkt->priority]++; 1859 tx_packets[pkt->priority]++;
@@ -1900,11 +1919,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1900done: 1919done:
1901 /* restore pkt buffer pointer before calling tx complete routine */ 1920 /* restore pkt buffer pointer before calling tx complete routine */
1902 skb_pull(pkt, SDPCM_HDRLEN + pad); 1921 skb_pull(pkt, SDPCM_HDRLEN + pad);
1903 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0); 1922 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
1904
1905 if (free_pkt)
1906 brcmu_pkt_buf_free_skb(pkt);
1907
1908 return ret; 1923 return ret;
1909} 1924}
1910 1925
@@ -1932,7 +1947,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1932 spin_unlock_bh(&bus->txqlock); 1947 spin_unlock_bh(&bus->txqlock);
1933 datalen = pkt->len - SDPCM_HDRLEN; 1948 datalen = pkt->len - SDPCM_HDRLEN;
1934 1949
1935 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true); 1950 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
1936 1951
1937 /* In poll mode, need to check for other events */ 1952 /* In poll mode, need to check for other events */
1938 if (!bus->intr && cnt) { 1953 if (!bus->intr && cnt) {
@@ -1980,7 +1995,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
1980 sdio_claim_host(bus->sdiodev->func[1]); 1995 sdio_claim_host(bus->sdiodev->func[1]);
1981 1996
1982 /* Enable clock for device interrupts */ 1997 /* Enable clock for device interrupts */
1983 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 1998 brcmf_sdbrcm_bus_sleep(bus, false, false);
1984 1999
1985 /* Disable and clear interrupts at the chip level also */ 2000 /* Disable and clear interrupts at the chip level also */
1986 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask)); 2001 w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
@@ -2032,23 +2047,19 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2032 bus->tx_seq = bus->rx_seq = 0; 2047 bus->tx_seq = bus->rx_seq = 0;
2033} 2048}
2034 2049
2035#ifdef CONFIG_BRCMFMAC_SDIO_OOB
2036static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) 2050static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2037{ 2051{
2038 unsigned long flags; 2052 unsigned long flags;
2039 2053
2040 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); 2054 if (bus->sdiodev->oob_irq_requested) {
2041 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) { 2055 spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
2042 enable_irq(bus->sdiodev->irq); 2056 if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
2043 bus->sdiodev->irq_en = true; 2057 enable_irq(bus->sdiodev->pdata->oob_irq_nr);
2058 bus->sdiodev->irq_en = true;
2059 }
2060 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2044 } 2061 }
2045 spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
2046} 2062}
2047#else
2048static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
2049{
2050}
2051#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
2052 2063
2053static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus) 2064static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
2054{ 2065{
@@ -2116,7 +2127,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2116 sdio_claim_host(bus->sdiodev->func[1]); 2127 sdio_claim_host(bus->sdiodev->func[1]);
2117 2128
2118 /* If waiting for HTAVAIL, check status */ 2129 /* If waiting for HTAVAIL, check status */
2119 if (bus->clkstate == CLK_PENDING) { 2130 if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
2120 u8 clkctl, devctl = 0; 2131 u8 clkctl, devctl = 0;
2121 2132
2122#ifdef DEBUG 2133#ifdef DEBUG
@@ -2138,7 +2149,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2138 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2149 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2139 } 2150 }
2140 2151
2141 brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", 2152 brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2142 devctl, clkctl); 2153 devctl, clkctl);
2143 2154
2144 if (SBSDIO_HTAV(clkctl)) { 2155 if (SBSDIO_HTAV(clkctl)) {
@@ -2162,7 +2173,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2162 } 2173 }
2163 2174
2164 /* Make sure backplane clock is on */ 2175 /* Make sure backplane clock is on */
2165 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true); 2176 brcmf_sdbrcm_bus_sleep(bus, false, true);
2166 2177
2167 /* Pending interrupt indicates new device status */ 2178 /* Pending interrupt indicates new device status */
2168 if (atomic_read(&bus->ipend) > 0) { 2179 if (atomic_read(&bus->ipend) > 0) {
@@ -2308,12 +2319,22 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2308 if ((bus->clkstate != CLK_PENDING) 2319 if ((bus->clkstate != CLK_PENDING)
2309 && bus->idletime == BRCMF_IDLE_IMMEDIATE) { 2320 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2310 bus->activity = false; 2321 bus->activity = false;
2322 brcmf_dbg(SDIO, "idle state\n");
2311 sdio_claim_host(bus->sdiodev->func[1]); 2323 sdio_claim_host(bus->sdiodev->func[1]);
2312 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 2324 brcmf_sdbrcm_bus_sleep(bus, true, false);
2313 sdio_release_host(bus->sdiodev->func[1]); 2325 sdio_release_host(bus->sdiodev->func[1]);
2314 } 2326 }
2315} 2327}
2316 2328
2329static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
2330{
2331 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2332 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2333 struct brcmf_sdio *bus = sdiodev->bus;
2334
2335 return &bus->txq;
2336}
2337
2317static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) 2338static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2318{ 2339{
2319 int ret = -EBADE; 2340 int ret = -EBADE;
@@ -2343,7 +2364,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2343 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { 2364 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2344 skb_pull(pkt, SDPCM_HDRLEN); 2365 skb_pull(pkt, SDPCM_HDRLEN);
2345 brcmf_txcomplete(bus->sdiodev->dev, pkt, false); 2366 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2346 brcmu_pkt_buf_free_skb(pkt);
2347 brcmf_err("out of bus->txq !!!\n"); 2367 brcmf_err("out of bus->txq !!!\n");
2348 ret = -ENOSR; 2368 ret = -ENOSR;
2349 } else { 2369 } else {
@@ -2374,69 +2394,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2374 return ret; 2394 return ret;
2375} 2395}
2376 2396
2377static int
2378brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2379 uint size)
2380{
2381 int bcmerror = 0;
2382 u32 sdaddr;
2383 uint dsize;
2384
2385 /* Determine initial transfer parameters */
2386 sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
2387 if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
2388 dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
2389 else
2390 dsize = size;
2391
2392 sdio_claim_host(bus->sdiodev->func[1]);
2393
2394 /* Set the backplane window to include the start address */
2395 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
2396 if (bcmerror) {
2397 brcmf_err("window change failed\n");
2398 goto xfer_done;
2399 }
2400
2401 /* Do the transfer(s) */
2402 while (size) {
2403 brcmf_dbg(INFO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
2404 write ? "write" : "read", dsize,
2405 sdaddr, address & SBSDIO_SBWINDOW_MASK);
2406 bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
2407 sdaddr, data, dsize);
2408 if (bcmerror) {
2409 brcmf_err("membytes transfer failed\n");
2410 break;
2411 }
2412
2413 /* Adjust for next transfer (if any) */
2414 size -= dsize;
2415 if (size) {
2416 data += dsize;
2417 address += dsize;
2418 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
2419 address);
2420 if (bcmerror) {
2421 brcmf_err("window change failed\n");
2422 break;
2423 }
2424 sdaddr = 0;
2425 dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
2426 }
2427 }
2428
2429xfer_done:
2430 /* Return the window to backplane enumeration space for core access */
2431 if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
2432 brcmf_err("FAILED to set window back to 0x%x\n",
2433 bus->sdiodev->sbwad);
2434
2435 sdio_release_host(bus->sdiodev->func[1]);
2436
2437 return bcmerror;
2438}
2439
2440#ifdef DEBUG 2397#ifdef DEBUG
2441#define CONSOLE_LINE_MAX 192 2398#define CONSOLE_LINE_MAX 192
2442 2399
@@ -2453,8 +2410,8 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2453 2410
2454 /* Read console log struct */ 2411 /* Read console log struct */
2455 addr = bus->console_addr + offsetof(struct rte_console, log_le); 2412 addr = bus->console_addr + offsetof(struct rte_console, log_le);
2456 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log_le, 2413 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
2457 sizeof(c->log_le)); 2414 sizeof(c->log_le));
2458 if (rv < 0) 2415 if (rv < 0)
2459 return rv; 2416 return rv;
2460 2417
@@ -2479,7 +2436,7 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2479 2436
2480 /* Read the console buffer */ 2437 /* Read the console buffer */
2481 addr = le32_to_cpu(c->log_le.buf); 2438 addr = le32_to_cpu(c->log_le.buf);
2482 rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize); 2439 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
2483 if (rv < 0) 2440 if (rv < 0)
2484 return rv; 2441 return rv;
2485 2442
@@ -2604,7 +2561,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2604 2561
2605 /* Make sure backplane clock is on */ 2562 /* Make sure backplane clock is on */
2606 sdio_claim_host(bus->sdiodev->func[1]); 2563 sdio_claim_host(bus->sdiodev->func[1]);
2607 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2564 brcmf_sdbrcm_bus_sleep(bus, false, false);
2608 sdio_release_host(bus->sdiodev->func[1]); 2565 sdio_release_host(bus->sdiodev->func[1]);
2609 2566
2610 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ 2567 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
@@ -2633,10 +2590,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2633 msecs_to_jiffies(2000)); 2590 msecs_to_jiffies(2000));
2634 2591
2635 if (!bus->ctrl_frame_stat) { 2592 if (!bus->ctrl_frame_stat) {
2636 brcmf_dbg(INFO, "ctrl_frame_stat == false\n"); 2593 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2637 ret = 0; 2594 ret = 0;
2638 } else { 2595 } else {
2639 brcmf_dbg(INFO, "ctrl_frame_stat == true\n"); 2596 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2640 ret = -1; 2597 ret = -1;
2641 } 2598 }
2642 } 2599 }
@@ -2662,6 +2619,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2662 2619
2663 bus->activity = false; 2620 bus->activity = false;
2664 sdio_claim_host(bus->sdiodev->func[1]); 2621 sdio_claim_host(bus->sdiodev->func[1]);
2622 brcmf_dbg(INFO, "idle\n");
2665 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); 2623 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2666 sdio_release_host(bus->sdiodev->func[1]); 2624 sdio_release_host(bus->sdiodev->func[1]);
2667 } else { 2625 } else {
@@ -2691,23 +2649,22 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2691 struct sdpcm_shared_le sh_le; 2649 struct sdpcm_shared_le sh_le;
2692 __le32 addr_le; 2650 __le32 addr_le;
2693 2651
2694 shaddr = bus->ramsize - 4; 2652 shaddr = bus->ci->rambase + bus->ramsize - 4;
2695 2653
2696 /* 2654 /*
2697 * Read last word in socram to determine 2655 * Read last word in socram to determine
2698 * address of sdpcm_shared structure 2656 * address of sdpcm_shared structure
2699 */ 2657 */
2700 sdio_claim_host(bus->sdiodev->func[1]); 2658 sdio_claim_host(bus->sdiodev->func[1]);
2701 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2659 brcmf_sdbrcm_bus_sleep(bus, false, false);
2702 rv = brcmf_sdbrcm_membytes(bus, false, shaddr, 2660 rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
2703 (u8 *)&addr_le, 4);
2704 sdio_release_host(bus->sdiodev->func[1]); 2661 sdio_release_host(bus->sdiodev->func[1]);
2705 if (rv < 0) 2662 if (rv < 0)
2706 return rv; 2663 return rv;
2707 2664
2708 addr = le32_to_cpu(addr_le); 2665 addr = le32_to_cpu(addr_le);
2709 2666
2710 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr); 2667 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
2711 2668
2712 /* 2669 /*
2713 * Check if addr is valid. 2670 * Check if addr is valid.
@@ -2720,8 +2677,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2720 } 2677 }
2721 2678
2722 /* Read hndrte_shared structure */ 2679 /* Read hndrte_shared structure */
2723 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le, 2680 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
2724 sizeof(struct sdpcm_shared_le)); 2681 sizeof(struct sdpcm_shared_le));
2725 if (rv < 0) 2682 if (rv < 0)
2726 return rv; 2683 return rv;
2727 2684
@@ -2734,8 +2691,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2734 sh->console_addr = le32_to_cpu(sh_le.console_addr); 2691 sh->console_addr = le32_to_cpu(sh_le.console_addr);
2735 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr); 2692 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2736 2693
2737 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { 2694 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
2738 brcmf_err("sdpcm_shared version mismatch: dhd %d dongle %d\n", 2695 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
2739 SDPCM_SHARED_VERSION, 2696 SDPCM_SHARED_VERSION,
2740 sh->flags & SDPCM_SHARED_VERSION_MASK); 2697 sh->flags & SDPCM_SHARED_VERSION_MASK);
2741 return -EPROTO; 2698 return -EPROTO;
@@ -2757,22 +2714,22 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2757 2714
2758 /* obtain console information from device memory */ 2715 /* obtain console information from device memory */
2759 addr = sh->console_addr + offsetof(struct rte_console, log_le); 2716 addr = sh->console_addr + offsetof(struct rte_console, log_le);
2760 rv = brcmf_sdbrcm_membytes(bus, false, addr, 2717 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2761 (u8 *)&sh_val, sizeof(u32)); 2718 (u8 *)&sh_val, sizeof(u32));
2762 if (rv < 0) 2719 if (rv < 0)
2763 return rv; 2720 return rv;
2764 console_ptr = le32_to_cpu(sh_val); 2721 console_ptr = le32_to_cpu(sh_val);
2765 2722
2766 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size); 2723 addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
2767 rv = brcmf_sdbrcm_membytes(bus, false, addr, 2724 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2768 (u8 *)&sh_val, sizeof(u32)); 2725 (u8 *)&sh_val, sizeof(u32));
2769 if (rv < 0) 2726 if (rv < 0)
2770 return rv; 2727 return rv;
2771 console_size = le32_to_cpu(sh_val); 2728 console_size = le32_to_cpu(sh_val);
2772 2729
2773 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx); 2730 addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
2774 rv = brcmf_sdbrcm_membytes(bus, false, addr, 2731 rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
2775 (u8 *)&sh_val, sizeof(u32)); 2732 (u8 *)&sh_val, sizeof(u32));
2776 if (rv < 0) 2733 if (rv < 0)
2777 return rv; 2734 return rv;
2778 console_index = le32_to_cpu(sh_val); 2735 console_index = le32_to_cpu(sh_val);
@@ -2786,8 +2743,8 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
2786 2743
2787 /* obtain the console data from device */ 2744 /* obtain the console data from device */
2788 conbuf[console_size] = '\0'; 2745 conbuf[console_size] = '\0';
2789 rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf, 2746 rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
2790 console_size); 2747 console_size);
2791 if (rv < 0) 2748 if (rv < 0)
2792 goto done; 2749 goto done;
2793 2750
@@ -2817,21 +2774,18 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2817 int error, res; 2774 int error, res;
2818 char buf[350]; 2775 char buf[350];
2819 struct brcmf_trap_info tr; 2776 struct brcmf_trap_info tr;
2820 int nbytes;
2821 loff_t pos = 0; 2777 loff_t pos = 0;
2822 2778
2823 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) 2779 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
2780 brcmf_dbg(INFO, "no trap in firmware\n");
2824 return 0; 2781 return 0;
2782 }
2825 2783
2826 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr, 2784 error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
2827 sizeof(struct brcmf_trap_info)); 2785 sizeof(struct brcmf_trap_info));
2828 if (error < 0) 2786 if (error < 0)
2829 return error; 2787 return error;
2830 2788
2831 nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
2832 if (nbytes < 0)
2833 return nbytes;
2834
2835 res = scnprintf(buf, sizeof(buf), 2789 res = scnprintf(buf, sizeof(buf),
2836 "dongle trap info: type 0x%x @ epc 0x%08x\n" 2790 "dongle trap info: type 0x%x @ epc 0x%08x\n"
2837 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n" 2791 " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
@@ -2847,12 +2801,7 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2847 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), 2801 le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
2848 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); 2802 le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
2849 2803
2850 error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res); 2804 return simple_read_from_buffer(data, count, &pos, buf, res);
2851 if (error < 0)
2852 return error;
2853
2854 nbytes += error;
2855 return nbytes;
2856} 2805}
2857 2806
2858static int brcmf_sdio_assert_info(struct brcmf_sdio *bus, 2807static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
@@ -2876,14 +2825,14 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2876 2825
2877 sdio_claim_host(bus->sdiodev->func[1]); 2826 sdio_claim_host(bus->sdiodev->func[1]);
2878 if (sh->assert_file_addr != 0) { 2827 if (sh->assert_file_addr != 0) {
2879 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr, 2828 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2880 (u8 *)file, 80); 2829 sh->assert_file_addr, (u8 *)file, 80);
2881 if (error < 0) 2830 if (error < 0)
2882 return error; 2831 return error;
2883 } 2832 }
2884 if (sh->assert_exp_addr != 0) { 2833 if (sh->assert_exp_addr != 0) {
2885 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr, 2834 error = brcmf_sdio_ramrw(bus->sdiodev, false,
2886 (u8 *)expr, 80); 2835 sh->assert_exp_addr, (u8 *)expr, 80);
2887 if (error < 0) 2836 if (error < 0)
2888 return error; 2837 return error;
2889 } 2838 }
@@ -2934,14 +2883,20 @@ static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2934 error = brcmf_sdio_assert_info(bus, &sh, data, count); 2883 error = brcmf_sdio_assert_info(bus, &sh, data, count);
2935 if (error < 0) 2884 if (error < 0)
2936 goto done; 2885 goto done;
2937
2938 nbytes = error; 2886 nbytes = error;
2939 error = brcmf_sdio_trap_info(bus, &sh, data, count); 2887
2888 error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
2889 if (error < 0)
2890 goto done;
2891 nbytes += error;
2892
2893 error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
2940 if (error < 0) 2894 if (error < 0)
2941 goto done; 2895 goto done;
2896 nbytes += error;
2942 2897
2943 error += nbytes; 2898 error = nbytes;
2944 *ppos += error; 2899 *ppos += nbytes;
2945done: 2900done:
2946 return error; 2901 return error;
2947} 2902}
@@ -3035,84 +2990,8 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3035 return rxlen ? (int)rxlen : -ETIMEDOUT; 2990 return rxlen ? (int)rxlen : -ETIMEDOUT;
3036} 2991}
3037 2992
3038static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus) 2993static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3039{ 2994{
3040 int bcmerror = 0;
3041 u32 varaddr;
3042 u32 varsizew;
3043 __le32 varsizew_le;
3044#ifdef DEBUG
3045 char *nvram_ularray;
3046#endif /* DEBUG */
3047
3048 /* Even if there are no vars are to be written, we still
3049 need to set the ramsize. */
3050 varaddr = (bus->ramsize - 4) - bus->varsz;
3051
3052 if (bus->vars) {
3053 /* Write the vars list */
3054 bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
3055 bus->vars, bus->varsz);
3056#ifdef DEBUG
3057 /* Verify NVRAM bytes */
3058 brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
3059 bus->varsz);
3060 nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
3061 if (!nvram_ularray)
3062 return -ENOMEM;
3063
3064 /* Upload image to verify downloaded contents. */
3065 memset(nvram_ularray, 0xaa, bus->varsz);
3066
3067 /* Read the vars list to temp buffer for comparison */
3068 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
3069 nvram_ularray, bus->varsz);
3070 if (bcmerror) {
3071 brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
3072 bcmerror, bus->varsz, varaddr);
3073 }
3074 /* Compare the org NVRAM with the one read from RAM */
3075 if (memcmp(bus->vars, nvram_ularray, bus->varsz))
3076 brcmf_err("Downloaded NVRAM image is corrupted\n");
3077 else
3078 brcmf_err("Download/Upload/Compare of NVRAM ok\n");
3079
3080 kfree(nvram_ularray);
3081#endif /* DEBUG */
3082 }
3083
3084 /* adjust to the user specified RAM */
3085 brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
3086 brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
3087 varaddr, bus->varsz);
3088
3089 /*
3090 * Determine the length token:
3091 * Varsize, converted to words, in lower 16-bits, checksum
3092 * in upper 16-bits.
3093 */
3094 if (bcmerror) {
3095 varsizew = 0;
3096 varsizew_le = cpu_to_le32(0);
3097 } else {
3098 varsizew = bus->varsz / 4;
3099 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
3100 varsizew_le = cpu_to_le32(varsizew);
3101 }
3102
3103 brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
3104 bus->varsz, varsizew);
3105
3106 /* Write the length token to the last word */
3107 bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
3108 (u8 *)&varsizew_le, 4);
3109
3110 return bcmerror;
3111}
3112
3113static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3114{
3115 int bcmerror = 0;
3116 struct chip_info *ci = bus->ci; 2995 struct chip_info *ci = bus->ci;
3117 2996
3118 /* To enter download state, disable ARM and reset SOCRAM. 2997 /* To enter download state, disable ARM and reset SOCRAM.
@@ -3121,41 +3000,19 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3121 if (enter) { 3000 if (enter) {
3122 bus->alp_only = true; 3001 bus->alp_only = true;
3123 3002
3124 ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3); 3003 brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
3125
3126 ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
3127
3128 /* Clear the top bit of memory */
3129 if (bus->ramsize) {
3130 u32 zeros = 0;
3131 brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
3132 (u8 *)&zeros, 4);
3133 }
3134 } else { 3004 } else {
3135 if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) { 3005 if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
3136 brcmf_err("SOCRAM core is down after reset?\n"); 3006 bus->varsz))
3137 bcmerror = -EBADE; 3007 return false;
3138 goto fail;
3139 }
3140
3141 bcmerror = brcmf_sdbrcm_write_vars(bus);
3142 if (bcmerror) {
3143 brcmf_err("no vars written to RAM\n");
3144 bcmerror = 0;
3145 }
3146
3147 w_sdreg32(bus, 0xFFFFFFFF,
3148 offsetof(struct sdpcmd_regs, intstatus));
3149
3150 ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
3151 3008
3152 /* Allow HT Clock now that the ARM is running. */ 3009 /* Allow HT Clock now that the ARM is running. */
3153 bus->alp_only = false; 3010 bus->alp_only = false;
3154 3011
3155 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD; 3012 bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
3156 } 3013 }
3157fail: 3014
3158 return bcmerror; 3015 return true;
3159} 3016}
3160 3017
3161static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus) 3018static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
@@ -3170,10 +3027,11 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
3170 3027
3171static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus) 3028static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3172{ 3029{
3173 int offset = 0; 3030 int offset;
3174 uint len; 3031 uint len;
3175 u8 *memblock = NULL, *memptr; 3032 u8 *memblock = NULL, *memptr;
3176 int ret; 3033 int ret;
3034 u8 idx;
3177 3035
3178 brcmf_dbg(INFO, "Enter\n"); 3036 brcmf_dbg(INFO, "Enter\n");
3179 3037
@@ -3194,10 +3052,15 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3194 memptr += (BRCMF_SDALIGN - 3052 memptr += (BRCMF_SDALIGN -
3195 ((u32)(unsigned long)memblock % BRCMF_SDALIGN)); 3053 ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
3196 3054
3055 offset = bus->ci->rambase;
3056
3197 /* Download image */ 3057 /* Download image */
3198 while ((len = 3058 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3199 brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) { 3059 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
3200 ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len); 3060 if (BRCMF_MAX_CORENUM != idx)
3061 memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
3062 while (len) {
3063 ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
3201 if (ret) { 3064 if (ret) {
3202 brcmf_err("error %d on writing %d membytes at 0x%08x\n", 3065 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3203 ret, MEMBLOCK, offset); 3066 ret, MEMBLOCK, offset);
@@ -3205,6 +3068,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3205 } 3068 }
3206 3069
3207 offset += MEMBLOCK; 3070 offset += MEMBLOCK;
3071 len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
3208 } 3072 }
3209 3073
3210err: 3074err:
@@ -3312,7 +3176,7 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3312 int bcmerror = -1; 3176 int bcmerror = -1;
3313 3177
3314 /* Keep arm in reset */ 3178 /* Keep arm in reset */
3315 if (brcmf_sdbrcm_download_state(bus, true)) { 3179 if (!brcmf_sdbrcm_download_state(bus, true)) {
3316 brcmf_err("error placing ARM core in reset\n"); 3180 brcmf_err("error placing ARM core in reset\n");
3317 goto err; 3181 goto err;
3318 } 3182 }
@@ -3328,7 +3192,7 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3328 } 3192 }
3329 3193
3330 /* Take arm out of reset */ 3194 /* Take arm out of reset */
3331 if (brcmf_sdbrcm_download_state(bus, false)) { 3195 if (!brcmf_sdbrcm_download_state(bus, false)) {
3332 brcmf_err("error getting out of ARM core reset\n"); 3196 brcmf_err("error getting out of ARM core reset\n");
3333 goto err; 3197 goto err;
3334 } 3198 }
@@ -3339,6 +3203,103 @@ err:
3339 return bcmerror; 3203 return bcmerror;
3340} 3204}
3341 3205
3206static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
3207{
3208 u32 addr, reg;
3209
3210 brcmf_dbg(TRACE, "Enter\n");
3211
3212 /* old chips with PMU version less than 17 don't support save restore */
3213 if (bus->ci->pmurev < 17)
3214 return false;
3215
3216 /* read PMU chipcontrol register 3*/
3217 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
3218 brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
3219 addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
3220 reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
3221
3222 return (bool)reg;
3223}
3224
3225static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
3226{
3227 int err = 0;
3228 u8 val;
3229
3230 brcmf_dbg(TRACE, "Enter\n");
3231
3232 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3233 &err);
3234 if (err) {
3235 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3236 return;
3237 }
3238
3239 val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
3240 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
3241 val, &err);
3242 if (err) {
3243 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3244 return;
3245 }
3246
3247 /* Add CMD14 Support */
3248 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
3249 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
3250 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
3251 &err);
3252 if (err) {
3253 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3254 return;
3255 }
3256
3257 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3258 SBSDIO_FORCE_HT, &err);
3259 if (err) {
3260 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3261 return;
3262 }
3263
3264 /* set flag */
3265 bus->sr_enabled = true;
3266 brcmf_dbg(INFO, "SR enabled\n");
3267}
3268
3269/* enable KSO bit */
3270static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
3271{
3272 u8 val;
3273 int err = 0;
3274
3275 brcmf_dbg(TRACE, "Enter\n");
3276
3277 /* KSO bit added in SDIO core rev 12 */
3278 if (bus->ci->c_inf[1].rev < 12)
3279 return 0;
3280
3281 val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3282 &err);
3283 if (err) {
3284 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3285 return err;
3286 }
3287
3288 if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
3289 val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
3290 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
3291 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
3292 val, &err);
3293 if (err) {
3294 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3295 return err;
3296 }
3297 }
3298
3299 return 0;
3300}
3301
3302
3342static bool 3303static bool
3343brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) 3304brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3344{ 3305{
@@ -3437,8 +3398,13 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3437 ret = -ENODEV; 3398 ret = -ENODEV;
3438 } 3399 }
3439 3400
3440 /* Restore previous clock setting */ 3401 if (brcmf_sdbrcm_sr_capable(bus)) {
3441 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); 3402 brcmf_sdbrcm_sr_init(bus);
3403 } else {
3404 /* Restore previous clock setting */
3405 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
3406 saveclk, &err);
3407 }
3442 3408
3443 if (ret == 0) { 3409 if (ret == 0) {
3444 ret = brcmf_sdio_intr_register(bus->sdiodev); 3410 ret = brcmf_sdio_intr_register(bus->sdiodev);
@@ -3499,7 +3465,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3499 brcmf_dbg(TIMER, "Enter\n"); 3465 brcmf_dbg(TIMER, "Enter\n");
3500 3466
3501 /* Poll period: check device if appropriate. */ 3467 /* Poll period: check device if appropriate. */
3502 if (bus->poll && (++bus->polltick >= bus->pollrate)) { 3468 if (!bus->sr_enabled &&
3469 bus->poll && (++bus->polltick >= bus->pollrate)) {
3503 u32 intstatus = 0; 3470 u32 intstatus = 0;
3504 3471
3505 /* Reset poll tick */ 3472 /* Reset poll tick */
@@ -3550,7 +3517,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3550 bus->console.count -= bus->console_interval; 3517 bus->console.count -= bus->console_interval;
3551 sdio_claim_host(bus->sdiodev->func[1]); 3518 sdio_claim_host(bus->sdiodev->func[1]);
3552 /* Make sure backplane clock is on */ 3519 /* Make sure backplane clock is on */
3553 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 3520 brcmf_sdbrcm_bus_sleep(bus, false, false);
3554 if (brcmf_sdbrcm_readconsole(bus) < 0) 3521 if (brcmf_sdbrcm_readconsole(bus) < 0)
3555 /* stop on error */ 3522 /* stop on error */
3556 bus->console_interval = 0; 3523 bus->console_interval = 0;
@@ -3567,8 +3534,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3567 bus->activity = false; 3534 bus->activity = false;
3568 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3535 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3569 } else { 3536 } else {
3537 brcmf_dbg(SDIO, "idle\n");
3570 sdio_claim_host(bus->sdiodev->func[1]); 3538 sdio_claim_host(bus->sdiodev->func[1]);
3571 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3539 brcmf_sdbrcm_bus_sleep(bus, true, false);
3572 sdio_release_host(bus->sdiodev->func[1]); 3540 sdio_release_host(bus->sdiodev->func[1]);
3573 } 3541 }
3574 } 3542 }
@@ -3579,6 +3547,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3579 3547
3580static bool brcmf_sdbrcm_chipmatch(u16 chipid) 3548static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3581{ 3549{
3550 if (chipid == BCM43143_CHIP_ID)
3551 return true;
3582 if (chipid == BCM43241_CHIP_ID) 3552 if (chipid == BCM43241_CHIP_ID)
3583 return true; 3553 return true;
3584 if (chipid == BCM4329_CHIP_ID) 3554 if (chipid == BCM4329_CHIP_ID)
@@ -3587,6 +3557,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3587 return true; 3557 return true;
3588 if (chipid == BCM4334_CHIP_ID) 3558 if (chipid == BCM4334_CHIP_ID)
3589 return true; 3559 return true;
3560 if (chipid == BCM4335_CHIP_ID)
3561 return true;
3590 return false; 3562 return false;
3591} 3563}
3592 3564
@@ -3664,7 +3636,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3664 int err = 0; 3636 int err = 0;
3665 int reg_addr; 3637 int reg_addr;
3666 u32 reg_val; 3638 u32 reg_val;
3667 u8 idx; 3639 u32 drivestrength;
3668 3640
3669 bus->alp_only = true; 3641 bus->alp_only = true;
3670 3642
@@ -3700,8 +3672,16 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3700 goto fail; 3672 goto fail;
3701 } 3673 }
3702 3674
3703 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, 3675 if (brcmf_sdbrcm_kso_init(bus)) {
3704 SDIO_DRIVE_STRENGTH); 3676 brcmf_err("error enabling KSO\n");
3677 goto fail;
3678 }
3679
3680 if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
3681 drivestrength = bus->sdiodev->pdata->drive_strength;
3682 else
3683 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3684 brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3705 3685
3706 /* Get info on the SOCRAM cores... */ 3686 /* Get info on the SOCRAM cores... */
3707 bus->ramsize = bus->ci->ramsize; 3687 bus->ramsize = bus->ci->ramsize;
@@ -3710,12 +3690,37 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3710 goto fail; 3690 goto fail;
3711 } 3691 }
3712 3692
3713 /* Set core control so an SDIO reset does a backplane reset */ 3693 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3714 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 3694 reg_val = brcmf_sdio_regrb(bus->sdiodev,
3715 reg_addr = bus->ci->c_inf[idx].base + 3695 SDIO_CCCR_BRCM_CARDCTRL, &err);
3716 offsetof(struct sdpcmd_regs, corecontrol); 3696 if (err)
3717 reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL); 3697 goto fail;
3718 brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL); 3698
3699 reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
3700
3701 brcmf_sdio_regwb(bus->sdiodev,
3702 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
3703 if (err)
3704 goto fail;
3705
3706 /* set PMUControl so a backplane reset does PMU state reload */
3707 reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
3708 pmucontrol);
3709 reg_val = brcmf_sdio_regrl(bus->sdiodev,
3710 reg_addr,
3711 &err);
3712 if (err)
3713 goto fail;
3714
3715 reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
3716
3717 brcmf_sdio_regwl(bus->sdiodev,
3718 reg_addr,
3719 reg_val,
3720 &err);
3721 if (err)
3722 goto fail;
3723
3719 3724
3720 sdio_release_host(bus->sdiodev->func[1]); 3725 sdio_release_host(bus->sdiodev->func[1]);
3721 3726
@@ -3769,6 +3774,10 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3769 bus->use_rxchain = false; 3774 bus->use_rxchain = false;
3770 bus->sd_rxchain = false; 3775 bus->sd_rxchain = false;
3771 3776
3777 /* SR state */
3778 bus->sleeping = false;
3779 bus->sr_enabled = false;
3780
3772 return true; 3781 return true;
3773} 3782}
3774 3783
@@ -3856,6 +3865,7 @@ static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3856 .txdata = brcmf_sdbrcm_bus_txdata, 3865 .txdata = brcmf_sdbrcm_bus_txdata,
3857 .txctl = brcmf_sdbrcm_bus_txctl, 3866 .txctl = brcmf_sdbrcm_bus_txctl,
3858 .rxctl = brcmf_sdbrcm_bus_rxctl, 3867 .rxctl = brcmf_sdbrcm_bus_rxctl,
3868 .gettxq = brcmf_sdbrcm_bus_gettxq,
3859}; 3869};
3860 3870
3861void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) 3871void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index e9d6f91a1f2b..5a64280e6485 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -20,6 +20,8 @@
20 20
21#include "dhd.h" 21#include "dhd.h"
22#include "dhd_dbg.h" 22#include "dhd_dbg.h"
23#include "tracepoint.h"
24#include "fwsignal.h"
23#include "fweh.h" 25#include "fweh.h"
24#include "fwil.h" 26#include "fwil.h"
25 27
@@ -154,7 +156,7 @@ static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
154 fweh = &ifp->drvr->fweh; 156 fweh = &ifp->drvr->fweh;
155 157
156 /* handle the event if valid interface and handler */ 158 /* handle the event if valid interface and handler */
157 if (ifp->ndev && fweh->evt_handler[code]) 159 if (fweh->evt_handler[code])
158 err = fweh->evt_handler[code](ifp, emsg, data); 160 err = fweh->evt_handler[code](ifp, emsg, data);
159 else 161 else
160 brcmf_err("unhandled event %d ignored\n", code); 162 brcmf_err("unhandled event %d ignored\n", code);
@@ -179,9 +181,9 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
179 struct brcmf_if *ifp; 181 struct brcmf_if *ifp;
180 int err = 0; 182 int err = 0;
181 183
182 brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u\n", 184 brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n",
183 ifevent->action, ifevent->ifidx, 185 ifevent->action, ifevent->ifidx, ifevent->bssidx,
184 ifevent->bssidx, ifevent->flags); 186 ifevent->flags, ifevent->role);
185 187
186 if (ifevent->ifidx >= BRCMF_MAX_IFS) { 188 if (ifevent->ifidx >= BRCMF_MAX_IFS) {
187 brcmf_err("invalid interface index: %u\n", 189 brcmf_err("invalid interface index: %u\n",
@@ -198,15 +200,20 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
198 emsg->ifname, emsg->addr); 200 emsg->ifname, emsg->addr);
199 if (IS_ERR(ifp)) 201 if (IS_ERR(ifp))
200 return; 202 return;
201 203 brcmf_fws_add_interface(ifp);
202 if (!drvr->fweh.evt_handler[BRCMF_E_IF]) 204 if (!drvr->fweh.evt_handler[BRCMF_E_IF])
203 err = brcmf_net_attach(ifp, false); 205 err = brcmf_net_attach(ifp, false);
204 } 206 }
205 207
208 if (ifevent->action == BRCMF_E_IF_CHANGE)
209 brcmf_fws_reset_interface(ifp);
210
206 err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); 211 err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
207 212
208 if (ifevent->action == BRCMF_E_IF_DEL) 213 if (ifevent->action == BRCMF_E_IF_DEL) {
214 brcmf_fws_del_interface(ifp);
209 brcmf_del_if(drvr, ifevent->bssidx); 215 brcmf_del_if(drvr, ifevent->bssidx);
216 }
210} 217}
211 218
212/** 219/**
@@ -400,13 +407,12 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
400 * 407 *
401 * @drvr: driver information object. 408 * @drvr: driver information object.
402 * @event_packet: event packet to process. 409 * @event_packet: event packet to process.
403 * @ifidx: index of the firmware interface (may change).
404 * 410 *
405 * If the packet buffer contains a firmware event message it will 411 * If the packet buffer contains a firmware event message it will
406 * dispatch the event to a registered handler (using worker). 412 * dispatch the event to a registered handler (using worker).
407 */ 413 */
408void brcmf_fweh_process_event(struct brcmf_pub *drvr, 414void brcmf_fweh_process_event(struct brcmf_pub *drvr,
409 struct brcmf_event *event_packet, u8 *ifidx) 415 struct brcmf_event *event_packet)
410{ 416{
411 enum brcmf_fweh_event_code code; 417 enum brcmf_fweh_event_code code;
412 struct brcmf_fweh_info *fweh = &drvr->fweh; 418 struct brcmf_fweh_info *fweh = &drvr->fweh;
@@ -418,7 +424,6 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
418 /* get event info */ 424 /* get event info */
419 code = get_unaligned_be32(&event_packet->msg.event_type); 425 code = get_unaligned_be32(&event_packet->msg.event_type);
420 datalen = get_unaligned_be32(&event_packet->msg.datalen); 426 datalen = get_unaligned_be32(&event_packet->msg.datalen);
421 *ifidx = event_packet->msg.ifidx;
422 data = &event_packet[1]; 427 data = &event_packet[1];
423 428
424 if (code >= BRCMF_E_LAST) 429 if (code >= BRCMF_E_LAST)
@@ -435,7 +440,7 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
435 return; 440 return;
436 441
437 event->code = code; 442 event->code = code;
438 event->ifidx = *ifidx; 443 event->ifidx = event_packet->msg.ifidx;
439 444
440 /* use memcpy to get aligned event message */ 445 /* use memcpy to get aligned event message */
441 memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); 446 memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 8c39b51dcccf..6ec5db9c60a5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -187,10 +187,10 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
187 enum brcmf_fweh_event_code code); 187 enum brcmf_fweh_event_code code);
188int brcmf_fweh_activate_events(struct brcmf_if *ifp); 188int brcmf_fweh_activate_events(struct brcmf_if *ifp);
189void brcmf_fweh_process_event(struct brcmf_pub *drvr, 189void brcmf_fweh_process_event(struct brcmf_pub *drvr,
190 struct brcmf_event *event_packet, u8 *ifidx); 190 struct brcmf_event *event_packet);
191 191
192static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, 192static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
193 struct sk_buff *skb, u8 *ifidx) 193 struct sk_buff *skb)
194{ 194{
195 struct brcmf_event *event_packet; 195 struct brcmf_event *event_packet;
196 u8 *data; 196 u8 *data;
@@ -213,7 +213,7 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
213 if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT) 213 if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
214 return; 214 return;
215 215
216 brcmf_fweh_process_event(drvr, event_packet, ifidx); 216 brcmf_fweh_process_event(drvr, event_packet);
217} 217}
218 218
219#endif /* FWEH_H_ */ 219#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 8d1def935b8d..04f395930d86 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -25,6 +25,7 @@
25#include "dhd.h" 25#include "dhd.h"
26#include "dhd_bus.h" 26#include "dhd_bus.h"
27#include "dhd_dbg.h" 27#include "dhd_dbg.h"
28#include "tracepoint.h"
28#include "fwil.h" 29#include "fwil.h"
29 30
30 31
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
new file mode 100644
index 000000000000..5352dc1fdf3c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -0,0 +1,2067 @@
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/types.h>
17#include <linux/module.h>
18#include <linux/if_ether.h>
19#include <linux/spinlock.h>
20#include <linux/skbuff.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/err.h>
24#include <linux/jiffies.h>
25#include <uapi/linux/nl80211.h>
26#include <net/cfg80211.h>
27
28#include <brcmu_utils.h>
29#include <brcmu_wifi.h>
30#include "dhd.h"
31#include "dhd_proto.h"
32#include "dhd_dbg.h"
33#include "dhd_bus.h"
34#include "fwil.h"
35#include "fwil_types.h"
36#include "fweh.h"
37#include "fwsignal.h"
38#include "p2p.h"
39#include "wl_cfg80211.h"
40
41/**
42 * DOC: Firmware Signalling
43 *
44 * Firmware can send signals to host and vice versa, which are passed in the
45 * data packets using TLV based header. This signalling layer is on top of the
46 * BDC bus protocol layer.
47 */
48
49/*
50 * single definition for firmware-driver flow control tlv's.
51 *
52 * each tlv is specified by BRCMF_FWS_TLV_DEF(name, ID, length).
53 * A length value 0 indicates variable length tlv.
54 */
55#define BRCMF_FWS_TLV_DEFLIST \
56 BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
57 BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
58 BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
59 BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
60 BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
61 BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \
62 BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
63 BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
64 BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
65 BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
66 BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 6) \
67 BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
68 BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
69 BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
70 BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
71 BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
72 BRCMF_FWS_TLV_DEF(FILLER, 255, 0)
73
74/*
75 * enum brcmf_fws_tlv_type - definition of tlv identifiers.
76 */
77#define BRCMF_FWS_TLV_DEF(name, id, len) \
78 BRCMF_FWS_TYPE_ ## name = id,
79enum brcmf_fws_tlv_type {
80 BRCMF_FWS_TLV_DEFLIST
81 BRCMF_FWS_TYPE_INVALID
82};
83#undef BRCMF_FWS_TLV_DEF
84
85/*
86 * enum brcmf_fws_tlv_len - definition of tlv lengths.
87 */
88#define BRCMF_FWS_TLV_DEF(name, id, len) \
89 BRCMF_FWS_TYPE_ ## name ## _LEN = (len),
90enum brcmf_fws_tlv_len {
91 BRCMF_FWS_TLV_DEFLIST
92};
93#undef BRCMF_FWS_TLV_DEF
94
95#ifdef DEBUG
96/*
97 * brcmf_fws_tlv_names - array of tlv names.
98 */
99#define BRCMF_FWS_TLV_DEF(name, id, len) \
100 { id, #name },
101static struct {
102 enum brcmf_fws_tlv_type id;
103 const char *name;
104} brcmf_fws_tlv_names[] = {
105 BRCMF_FWS_TLV_DEFLIST
106};
107#undef BRCMF_FWS_TLV_DEF
108
109static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
110{
111 int i;
112
113 for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
114 if (brcmf_fws_tlv_names[i].id == id)
115 return brcmf_fws_tlv_names[i].name;
116
117 return "INVALID";
118}
119#else
120static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
121{
122 return "NODEBUG";
123}
124#endif /* DEBUG */
125
126/*
127 * flags used to enable tlv signalling from firmware.
128 */
129#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
130#define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002
131#define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
132#define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
133#define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
134#define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
135#define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040
136
137#define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32
138#define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff
139
140#define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0
141#define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1
142#define BRCMF_FWS_FLOWCONTROL_HIWATER 128
143#define BRCMF_FWS_FLOWCONTROL_LOWATER 64
144
145#define BRCMF_FWS_PSQ_PREC_COUNT ((NL80211_NUM_ACS + 1) * 2)
146#define BRCMF_FWS_PSQ_LEN 256
147
148#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
149#define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02
150
151#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
152#define BRCMF_FWS_RET_OK_SCHEDULE 1
153
154/**
155 * enum brcmf_fws_skb_state - indicates processing state of skb.
156 *
157 * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
158 * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
159 * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
160 */
161enum brcmf_fws_skb_state {
162 BRCMF_FWS_SKBSTATE_NEW,
163 BRCMF_FWS_SKBSTATE_DELAYED,
164 BRCMF_FWS_SKBSTATE_SUPPRESSED
165};
166
167/**
168 * struct brcmf_skbuff_cb - control buffer associated with skbuff.
169 *
170 * @if_flags: holds interface index and packet related flags.
171 * @htod: host to device packet identifier (used in PKTTAG tlv).
172 * @state: transmit state of the packet.
173 * @mac: descriptor related to destination for this packet.
174 *
175 * This information is stored in control buffer struct sk_buff::cb, which
176 * provides 48 bytes of storage so this structure should not exceed that.
177 */
178struct brcmf_skbuff_cb {
179 u16 if_flags;
180 u32 htod;
181 enum brcmf_fws_skb_state state;
182 struct brcmf_fws_mac_descriptor *mac;
183};
184
185/*
186 * macro casting skbuff control buffer to struct brcmf_skbuff_cb.
187 */
188#define brcmf_skbcb(skb) ((struct brcmf_skbuff_cb *)((skb)->cb))
189
190/*
191 * sk_buff control if flags
192 *
193 * b[11] - packet sent upon firmware request.
194 * b[10] - packet only contains signalling data.
195 * b[9] - packet is a tx packet.
196 * b[8] - packet uses FIFO credit (non-pspoll).
197 * b[7] - interface in AP mode.
198 * b[6:4] - AC FIFO number.
199 * b[3:0] - interface index.
200 */
201#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800
202#define BRCMF_SKB_IF_FLAGS_REQUESTED_SHIFT 11
203#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK 0x0400
204#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10
205#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200
206#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9
207#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100
208#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8
209#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080
210#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7
211#define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070
212#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4
213#define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f
214#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0
215
216#define brcmf_skb_if_flags_set_field(skb, field, value) \
217 brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \
218 BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
219 BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value))
220#define brcmf_skb_if_flags_get_field(skb, field) \
221 brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \
222 BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
223 BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT)
224
225/*
226 * sk_buff control packet identifier
227 *
228 * 32-bit packet identifier used in PKTTAG tlv from host to dongle.
229 *
230 * - Generated at the host (e.g. dhd)
231 * - Seen as a generic sequence number by firmware except for the flags field.
232 *
233 * Generation : b[31] => generation number for this packet [host->fw]
234 * OR, current generation number [fw->host]
235 * Flags : b[30:27] => command, status flags
236 * FIFO-AC : b[26:24] => AC-FIFO id
237 * h-slot : b[23:8] => hanger-slot
238 * freerun : b[7:0] => A free running counter
239 */
240#define BRCMF_SKB_HTOD_TAG_GENERATION_MASK 0x80000000
241#define BRCMF_SKB_HTOD_TAG_GENERATION_SHIFT 31
242#define BRCMF_SKB_HTOD_TAG_FLAGS_MASK 0x78000000
243#define BRCMF_SKB_HTOD_TAG_FLAGS_SHIFT 27
244#define BRCMF_SKB_HTOD_TAG_FIFO_MASK 0x07000000
245#define BRCMF_SKB_HTOD_TAG_FIFO_SHIFT 24
246#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00
247#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8
248#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff
249#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0
250
251#define brcmf_skb_htod_tag_set_field(skb, field, value) \
252 brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
253 BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
254 BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value))
255#define brcmf_skb_htod_tag_get_field(skb, field) \
256 brcmu_maskget32(brcmf_skbcb(skb)->htod, \
257 BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
258 BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
259
260#define BRCMF_FWS_TXSTAT_GENERATION_MASK 0x80000000
261#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT 31
262#define BRCMF_FWS_TXSTAT_FLAGS_MASK 0x78000000
263#define BRCMF_FWS_TXSTAT_FLAGS_SHIFT 27
264#define BRCMF_FWS_TXSTAT_FIFO_MASK 0x07000000
265#define BRCMF_FWS_TXSTAT_FIFO_SHIFT 24
266#define BRCMF_FWS_TXSTAT_HSLOT_MASK 0x00FFFF00
267#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT 8
268#define BRCMF_FWS_TXSTAT_PKTID_MASK 0x00FFFFFF
269#define BRCMF_FWS_TXSTAT_PKTID_SHIFT 0
270
271#define brcmf_txstatus_get_field(txs, field) \
272 brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
273 BRCMF_FWS_TXSTAT_ ## field ## _SHIFT)
274
275/* How long to defer borrowing in jiffies */
276#define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10)
277
278/**
279 * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
280 *
281 * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
282 * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
283 * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
284 * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
285 * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
286 * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
287 * @BRCMF_FWS_FIFO_COUNT: number of fifos.
288 */
289enum brcmf_fws_fifo {
290 BRCMF_FWS_FIFO_AC_BK,
291 BRCMF_FWS_FIFO_AC_BE,
292 BRCMF_FWS_FIFO_AC_VI,
293 BRCMF_FWS_FIFO_AC_VO,
294 BRCMF_FWS_FIFO_BCMC,
295 BRCMF_FWS_FIFO_ATIM,
296 BRCMF_FWS_FIFO_COUNT
297};
298
299/**
300 * enum brcmf_fws_txstatus - txstatus flag values.
301 *
302 * @BRCMF_FWS_TXSTATUS_DISCARD:
303 * host is free to discard the packet.
304 * @BRCMF_FWS_TXSTATUS_CORE_SUPPRESS:
305 * 802.11 core suppressed the packet.
306 * @BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS:
307 * firmware suppress the packet as device is already in PS mode.
308 * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
309 * firmware tossed the packet.
310 */
311enum brcmf_fws_txstatus {
312 BRCMF_FWS_TXSTATUS_DISCARD,
313 BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
314 BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
315 BRCMF_FWS_TXSTATUS_FW_TOSSED
316};
317
318enum brcmf_fws_fcmode {
319 BRCMF_FWS_FCMODE_NONE,
320 BRCMF_FWS_FCMODE_IMPLIED_CREDIT,
321 BRCMF_FWS_FCMODE_EXPLICIT_CREDIT
322};
323
324enum brcmf_fws_mac_desc_state {
325 BRCMF_FWS_STATE_OPEN = 1,
326 BRCMF_FWS_STATE_CLOSE
327};
328
/**
 * struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
 *
 * @occupied: slot is in use.
 * @mac_handle: handle for mac entry determined by firmware.
 * @interface_id: interface index.
 * @state: current state (enum brcmf_fws_mac_desc_state).
 * @suppressed: mac entry is suppressed.
 * @generation: generation bit.
 * @ac_bitmap: ac queue bitmap.
 * @requested_credit: credits requested by firmware.
 * @requested_packet: packet count requested by firmware.
 * @ea: ethernet address.
 * @seq: per-node free-running sequence.
 * @psq: power-save queue, holding delayed (even prec) and suppressed
 *	(odd prec) packets per fifo.
 * @transit_count: packets in transit to firmware.
 * @suppress_count: suppressed packets currently on @psq.
 * @suppr_transit_count: packets that were in transit when suppression
 *	started.
 * @send_tim_signal: a TIM update should be piggybacked to firmware.
 * @traffic_pending_bmp: per-AC bitmap of pending traffic.
 * @traffic_lastreported_bmp: per-AC bitmap last reported to firmware.
 */
struct brcmf_fws_mac_descriptor {
	u8 occupied;
	u8 mac_handle;
	u8 interface_id;
	u8 state;
	bool suppressed;
	u8 generation;
	u8 ac_bitmap;
	u8 requested_credit;
	u8 requested_packet;
	u8 ea[ETH_ALEN];
	u8 seq[BRCMF_FWS_FIFO_COUNT];
	struct pktq psq;
	int transit_count;
	int suppress_count;
	int suppr_transit_count;
	bool send_tim_signal;
	u8 traffic_pending_bmp;
	u8 traffic_lastreported_bmp;
};
365
/* number of slots in the tx packet hanger */
#define BRCMF_FWS_HANGER_MAXITEMS 1024

/**
 * enum brcmf_fws_hanger_item_state - state of hanger item.
 *
 * @BRCMF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
 */
enum brcmf_fws_hanger_item_state {
	BRCMF_FWS_HANGER_ITEM_STATE_FREE = 1,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
};
380
381
/**
 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
 *
 * @state: entry is either free or occupied (enum brcmf_fws_hanger_item_state).
 * @gen: generation bit of the packet; 0xff when unset.
 * @pkt: packet itself.
 */
struct brcmf_fws_hanger_item {
	enum brcmf_fws_hanger_item_state state;
	u8 gen;
	struct sk_buff *pkt;
};
394
/**
 * struct brcmf_fws_hanger - holds packets awaiting firmware txstatus.
 *
 * @pushed: packets pushed to await txstatus.
 * @popped: packets popped upon handling txstatus.
 * @failed_to_push: packets that could not be pushed.
 * @failed_to_pop: packets that could not be popped.
 * @failed_slotfind: packets for which no free slot could be found.
 * @slot_pos: last returned item index for a free entry (round-robin hint).
 * @items: array of hanger items.
 */
struct brcmf_fws_hanger {
	u32 pushed;
	u32 popped;
	u32 failed_to_push;
	u32 failed_to_pop;
	u32 failed_slotfind;
	u32 slot_pos;
	struct brcmf_fws_hanger_item items[BRCMF_FWS_HANGER_MAXITEMS];
};
415
/**
 * struct brcmf_fws_macdesc_table - all mac descriptors used by the host.
 *
 * @nodes: per-station descriptors, indexed by (mac_handle & 0x1F).
 * @iface: one descriptor per interface.
 * @other: catch-all descriptor when no specific entry matches.
 */
struct brcmf_fws_macdesc_table {
	struct brcmf_fws_mac_descriptor nodes[BRCMF_FWS_MAC_DESC_TABLE_SIZE];
	struct brcmf_fws_mac_descriptor iface[BRCMF_MAX_IFS];
	struct brcmf_fws_mac_descriptor other;
};
421
/**
 * struct brcmf_fws_info - firmware signalling state.
 *
 * @drvr: driver public state.
 * @stats: counters for signalling events and failures.
 * @hanger: slots for packets awaiting firmware txstatus.
 * @fcmode: flow control mode in effect.
 * @desc: mac descriptor tables.
 * @fws_wq: workqueue running @fws_dequeue_work.
 * @fws_dequeue_work: work item dequeuing delayed traffic.
 * @fifo_enqpkt: per-fifo count of packets queued on descriptors.
 * @fifo_credit: per-fifo transmit credits.
 * @credits_borrowed: credits borrowed per lending AC (BE borrows; repaid
 *	in brcmf_fws_return_credits()).
 * @deq_node_pos: per-fifo round-robin dequeue position.
 * @fifo_credit_map: bitmap of fifos holding credit.
 * @fifo_delay_map: bitmap of fifos holding delayed packets.
 * @borrow_defer_timestamp: timestamp for deferring credit borrowing;
 *	presumably paired with BRCMF_FWS_BORROW_DEFER_PERIOD — users of
 *	this field are outside this chunk, confirm against full file.
 */
struct brcmf_fws_info {
	struct brcmf_pub *drvr;
	struct brcmf_fws_stats stats;
	struct brcmf_fws_hanger hanger;
	enum brcmf_fws_fcmode fcmode;
	struct brcmf_fws_macdesc_table desc;
	struct workqueue_struct *fws_wq;
	struct work_struct fws_dequeue_work;
	u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
	int fifo_credit[BRCMF_FWS_FIFO_COUNT];
	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
	int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
	u32 fifo_credit_map;
	u32 fifo_delay_map;
	unsigned long borrow_defer_timestamp;
};
438
/*
 * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
 * Indexed by priority 0..7.
 */
static const int brcmf_fws_prio2fifo[] = {
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VO,
	BRCMF_FWS_FIFO_AC_VO
};
452
/* enum brcmf_fws_fcmode selected at load time; read-only at runtime.
 * Defaults to 0 (BRCMF_FWS_FCMODE_NONE).
 */
static int fcmode;
module_param(fcmode, int, S_IRUSR);
MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control");
456
/* expand each entry of BRCMF_FWS_TLV_DEFLIST into a switch case
 * returning the TLV's fixed length (X-macro pattern).
 */
#define BRCMF_FWS_TLV_DEF(name, id, len) \
	case BRCMF_FWS_TYPE_ ## name: \
		return len;

/**
 * brcmf_fws_get_tlv_len() - returns defined length for given tlv id.
 *
 * @fws: firmware-signalling information.
 * @id: identifier of the TLV.
 *
 * Return: the specified length for the given TLV; Otherwise -EINVAL.
 */
static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
				 enum brcmf_fws_tlv_type id)
{
	switch (id) {
	BRCMF_FWS_TLV_DEFLIST
	default:
		fws->stats.tlv_invalid_type++;
		break;
	}
	return -EINVAL;
}
#undef BRCMF_FWS_TLV_DEF
481
482static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
483{
484 u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
485 return ifidx == *(int *)arg;
486}
487
488static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
489 int ifidx)
490{
491 bool (*matchfn)(struct sk_buff *, void *) = NULL;
492 struct sk_buff *skb;
493 int prec;
494
495 if (ifidx != -1)
496 matchfn = brcmf_fws_ifidx_match;
497 for (prec = 0; prec < q->num_prec; prec++) {
498 skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
499 while (skb) {
500 brcmu_pkt_buf_free_skb(skb);
501 skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
502 }
503 }
504}
505
506static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
507{
508 int i;
509
510 brcmf_dbg(TRACE, "enter\n");
511 memset(hanger, 0, sizeof(*hanger));
512 for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
513 hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
514}
515
516static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
517{
518 u32 i;
519
520 brcmf_dbg(TRACE, "enter\n");
521 i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
522
523 while (i != h->slot_pos) {
524 if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
525 h->slot_pos = i;
526 goto done;
527 }
528 i++;
529 if (i == BRCMF_FWS_HANGER_MAXITEMS)
530 i = 0;
531 }
532 brcmf_err("all slots occupied\n");
533 h->failed_slotfind++;
534 i = BRCMF_FWS_HANGER_MAXITEMS;
535done:
536 brcmf_dbg(TRACE, "exit: %d\n", i);
537 return i;
538}
539
540static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
541 struct sk_buff *pkt, u32 slot_id)
542{
543 brcmf_dbg(TRACE, "enter\n");
544 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
545 return -ENOENT;
546
547 if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
548 brcmf_err("slot is not free\n");
549 h->failed_to_push++;
550 return -EINVAL;
551 }
552
553 h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
554 h->items[slot_id].pkt = pkt;
555 h->pushed++;
556 return 0;
557}
558
559static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
560 u32 slot_id, struct sk_buff **pktout,
561 bool remove_item)
562{
563 brcmf_dbg(TRACE, "enter\n");
564 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
565 return -ENOENT;
566
567 if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
568 brcmf_err("entry not in use\n");
569 h->failed_to_pop++;
570 return -EINVAL;
571 }
572
573 *pktout = h->items[slot_id].pkt;
574 if (remove_item) {
575 h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
576 h->items[slot_id].pkt = NULL;
577 h->items[slot_id].gen = 0xff;
578 h->popped++;
579 }
580 return 0;
581}
582
583static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
584 u32 slot_id, u8 gen)
585{
586 brcmf_dbg(TRACE, "enter\n");
587
588 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
589 return -ENOENT;
590
591 h->items[slot_id].gen = gen;
592
593 if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) {
594 brcmf_err("entry not in use\n");
595 return -EINVAL;
596 }
597
598 h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
599 return 0;
600}
601
602static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger,
603 struct sk_buff *pkt, u32 slot_id,
604 int *gen)
605{
606 brcmf_dbg(TRACE, "enter\n");
607 *gen = 0xff;
608
609 if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
610 return -ENOENT;
611
612 if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
613 brcmf_err("slot not in use\n");
614 return -EINVAL;
615 }
616
617 *gen = hanger->items[slot_id].gen;
618 return 0;
619}
620
621static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
622 bool (*fn)(struct sk_buff *, void *),
623 int ifidx)
624{
625 struct brcmf_fws_hanger *h = &fws->hanger;
626 struct sk_buff *skb;
627 int i;
628 enum brcmf_fws_hanger_item_state s;
629
630 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
631 for (i = 0; i < ARRAY_SIZE(h->items); i++) {
632 s = h->items[i].state;
633 if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
634 s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
635 skb = h->items[i].pkt;
636 if (fn == NULL || fn(skb, &ifidx)) {
637 /* suppress packets freed from psq */
638 if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE)
639 brcmu_pkt_buf_free_skb(skb);
640 h->items[i].state =
641 BRCMF_FWS_HANGER_ITEM_STATE_FREE;
642 }
643 }
644 }
645}
646
647static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
648 u8 *addr, u8 ifidx)
649{
650 brcmf_dbg(TRACE,
651 "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
652 desc->occupied = 1;
653 desc->state = BRCMF_FWS_STATE_OPEN;
654 desc->requested_credit = 0;
655 /* depending on use may need ifp->bssidx instead */
656 desc->interface_id = ifidx;
657 desc->ac_bitmap = 0xff; /* update this when handling APSD */
658 if (addr)
659 memcpy(&desc->ea[0], addr, ETH_ALEN);
660}
661
662static
663void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc)
664{
665 brcmf_dbg(TRACE,
666 "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
667 desc->occupied = 0;
668 desc->state = BRCMF_FWS_STATE_CLOSE;
669 desc->requested_credit = 0;
670}
671
672static struct brcmf_fws_mac_descriptor *
673brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
674{
675 struct brcmf_fws_mac_descriptor *entry;
676 int i;
677
678 brcmf_dbg(TRACE, "enter: ea=%pM\n", ea);
679 if (ea == NULL)
680 return ERR_PTR(-EINVAL);
681
682 entry = &fws->desc.nodes[0];
683 for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
684 if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
685 return entry;
686 entry++;
687 }
688
689 return ERR_PTR(-ENOENT);
690}
691
692static struct brcmf_fws_mac_descriptor*
693brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp,
694 u8 *da)
695{
696 struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
697 bool multicast;
698 enum nl80211_iftype iftype;
699
700 brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
701
702 multicast = is_multicast_ether_addr(da);
703 iftype = brcmf_cfg80211_get_iftype(ifp);
704
705 /* Multicast destination and P2P clients get the interface entry.
706 * STA gets the interface entry if there is no exact match. For
707 * example, TDLS destinations have their own entry.
708 */
709 entry = NULL;
710 if ((multicast || iftype == NL80211_IFTYPE_STATION ||
711 iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc)
712 entry = ifp->fws_desc;
713
714 if (entry != NULL && iftype != NL80211_IFTYPE_STATION)
715 goto done;
716
717 entry = brcmf_fws_mac_descriptor_lookup(fws, da);
718 if (IS_ERR(entry))
719 entry = &fws->desc.other;
720
721done:
722 brcmf_dbg(TRACE, "exit: entry=%p\n", entry);
723 return entry;
724}
725
726static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
727 struct brcmf_fws_mac_descriptor *entry,
728 int fifo)
729{
730 struct brcmf_fws_mac_descriptor *if_entry;
731 bool closed;
732
733 /* for unique destination entries the related interface
734 * may be closed.
735 */
736 if (entry->mac_handle) {
737 if_entry = &fws->desc.iface[entry->interface_id];
738 if (if_entry->state == BRCMF_FWS_STATE_CLOSE)
739 return true;
740 }
741 /* an entry is closed when the state is closed and
742 * the firmware did not request anything.
743 */
744 closed = entry->state == BRCMF_FWS_STATE_CLOSE &&
745 !entry->requested_credit && !entry->requested_packet;
746
747 /* Or firmware does not allow traffic for given fifo */
748 return closed || !(entry->ac_bitmap & BIT(fifo));
749}
750
751static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws,
752 struct brcmf_fws_mac_descriptor *entry,
753 int ifidx)
754{
755 brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n",
756 entry->ea, entry->interface_id, ifidx);
757 if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
758 brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n",
759 ifidx, entry->psq.len);
760 brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
761 entry->occupied = !!(entry->psq.len);
762 }
763}
764
765static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
766 bool (*fn)(struct sk_buff *, void *),
767 int ifidx)
768{
769 struct brcmf_fws_hanger_item *hi;
770 struct pktq *txq;
771 struct sk_buff *skb;
772 int prec;
773 u32 hslot;
774
775 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
776 txq = brcmf_bus_gettxq(fws->drvr->bus_if);
777 if (IS_ERR(txq)) {
778 brcmf_dbg(TRACE, "no txq to clean up\n");
779 return;
780 }
781
782 for (prec = 0; prec < txq->num_prec; prec++) {
783 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
784 while (skb) {
785 hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
786 hi = &fws->hanger.items[hslot];
787 WARN_ON(skb != hi->pkt);
788 hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
789 brcmu_pkt_buf_free_skb(skb);
790 skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
791 }
792 }
793}
794
795static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
796{
797 int i;
798 struct brcmf_fws_mac_descriptor *table;
799 bool (*matchfn)(struct sk_buff *, void *) = NULL;
800
801 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
802 if (fws == NULL)
803 return;
804
805 if (ifidx != -1)
806 matchfn = brcmf_fws_ifidx_match;
807
808 /* cleanup individual nodes */
809 table = &fws->desc.nodes[0];
810 for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
811 brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx);
812
813 brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx);
814 brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
815 brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
816}
817
818static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx,
819 struct brcmf_fws_mac_descriptor *entry,
820 int prec)
821{
822 brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea);
823 if (entry->state == BRCMF_FWS_STATE_CLOSE) {
824 /* check delayedQ and suppressQ in one call using bitmap */
825 if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0)
826 entry->traffic_pending_bmp =
827 entry->traffic_pending_bmp & ~NBITVAL(prec);
828 else
829 entry->traffic_pending_bmp =
830 entry->traffic_pending_bmp | NBITVAL(prec);
831 }
832 /* request a TIM update to firmware at the next piggyback opportunity */
833 if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
834 entry->send_tim_signal = true;
835}
836
837static void
838brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
839 u8 if_id)
840{
841 struct brcmf_if *ifp = fws->drvr->iflist[if_id];
842
843 if (WARN_ON(!ifp))
844 return;
845
846 brcmf_dbg(TRACE,
847 "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx);
848
849 if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
850 pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
851 brcmf_txflowblock_if(ifp,
852 BRCMF_NETIF_STOP_REASON_FWS_FC, false);
853 if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
854 pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER)
855 brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
856 return;
857}
858
/* handle a BRCMF_FWS_TYPE_RSSI TLV; currently only logs the value */
static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
{
	brcmf_dbg(CTL, "rssi %d\n", rssi);
	return 0;
}
864
/* handle BRCMF_FWS_TYPE_MACDESC_ADD/_DEL TLVs. The payload is
 * { mac_handle, ifidx, ethernet address }. Adds create or relocate an
 * entry in the node table; deletes flush and vacate it.
 */
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry, *existing;
	u8 mac_handle;
	u8 ifidx;
	u8 *addr;

	mac_handle = *data++;
	ifidx = *data++;
	addr = data;

	/* low 5 bits of the handle index the node table */
	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
		brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx);
		if (entry->occupied) {
			/* flush queued packets before vacating the slot */
			brcmf_fws_mac_desc_cleanup(fws, entry, -1);
			brcmf_fws_clear_mac_descriptor(entry);
		} else
			fws->stats.mac_update_failed++;
		return 0;
	}

	brcmf_dbg(TRACE,
		  "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx);
	existing = brcmf_fws_mac_descriptor_lookup(fws, addr);
	if (IS_ERR(existing)) {
		/* unknown address: claim the slot if it is free */
		if (!entry->occupied) {
			entry->mac_handle = mac_handle;
			brcmf_fws_init_mac_descriptor(entry, addr, ifidx);
			brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
					BRCMF_FWS_PSQ_LEN);
		} else {
			fws->stats.mac_update_failed++;
		}
	} else {
		if (entry != existing) {
			brcmf_dbg(TRACE, "relocate mac\n");
			/* copy only the fields before psq; the queue and
			 * counters of the new slot are left untouched
			 */
			memcpy(entry, existing,
			       offsetof(struct brcmf_fws_mac_descriptor, psq));
			entry->mac_handle = mac_handle;
			brcmf_fws_clear_mac_descriptor(existing);
		} else {
			brcmf_dbg(TRACE, "use existing\n");
			WARN_ON(entry->mac_handle != mac_handle);
			/* TODO: what should we do here: continue, reinit, .. */
		}
	}
	return 0;
}
915
/* handle BRCMF_FWS_TYPE_MAC_OPEN/_CLOSE TLVs: switch the addressed
 * node between open and closed state. data[0] is the mac handle.
 * Returns BRCMF_FWS_RET_OK_* or -ESRCH for an unknown handle.
 */
static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
					    u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;
	u8 mac_handle;
	int i;

	mac_handle = data[0];
	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (!entry->occupied) {
		fws->stats.mac_ps_update_failed++;
		return -ESRCH;
	}

	/* a state update should wipe old credits? */
	entry->requested_credit = 0;
	if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
		entry->state = BRCMF_FWS_STATE_OPEN;
		return BRCMF_FWS_RET_OK_SCHEDULE;
	} else {
		entry->state = BRCMF_FWS_STATE_CLOSE;
		/* NOTE(review): this loop starts at AC_BE (1) and runs to
		 * NL80211_NUM_ACS, so fifo AC_BK (0) never gets a TIM
		 * update here — confirm whether that is intended.
		 */
		for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++)
			brcmf_fws_tim_update(fws, entry, i);
	}
	return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
942
943static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
944 u8 type, u8 *data)
945{
946 struct brcmf_fws_mac_descriptor *entry;
947 u8 ifidx;
948 int ret;
949
950 ifidx = data[0];
951
952 brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
953 if (ifidx >= BRCMF_MAX_IFS) {
954 ret = -ERANGE;
955 goto fail;
956 }
957
958 entry = &fws->desc.iface[ifidx];
959 if (!entry->occupied) {
960 ret = -ESRCH;
961 goto fail;
962 }
963
964 switch (type) {
965 case BRCMF_FWS_TYPE_INTERFACE_OPEN:
966 entry->state = BRCMF_FWS_STATE_OPEN;
967 return BRCMF_FWS_RET_OK_SCHEDULE;
968 case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
969 entry->state = BRCMF_FWS_STATE_CLOSE;
970 return BRCMF_FWS_RET_OK_NOSCHEDULE;
971 default:
972 ret = -EINVAL;
973 break;
974 }
975fail:
976 fws->stats.if_update_failed++;
977 return ret;
978}
979
980static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
981 u8 *data)
982{
983 struct brcmf_fws_mac_descriptor *entry;
984
985 entry = &fws->desc.nodes[data[1] & 0x1F];
986 if (!entry->occupied) {
987 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
988 fws->stats.credit_request_failed++;
989 else
990 fws->stats.packet_request_failed++;
991 return -ESRCH;
992 }
993
994 if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
995 entry->requested_credit = data[0];
996 else
997 entry->requested_packet = data[0];
998
999 entry->ac_bitmap = data[2];
1000 return BRCMF_FWS_RET_OK_SCHEDULE;
1001}
1002
/* hand @credits back to fifo @fifo. Credits returned to the BE fifo
 * first repay whatever BE borrowed from the other ACs (highest lender
 * first); only the remainder is credited to the fifo itself. The
 * fifo_credit_map bit is set for every fifo that gains credit.
 */
static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
				     u8 fifo, u8 credits)
{
	int lender_ac;
	int *borrowed;
	int *fifo_credit;

	if (!credits)
		return;

	if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
	    (fws->credits_borrowed[0])) {
		for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
		     lender_ac--) {
			borrowed = &fws->credits_borrowed[lender_ac];
			if (*borrowed) {
				fws->fifo_credit_map |= (1 << lender_ac);
				fifo_credit = &fws->fifo_credit[lender_ac];
				if (*borrowed >= credits) {
					/* fully repaid from this batch */
					*borrowed -= credits;
					*fifo_credit += credits;
					return;
				} else {
					/* clear this debt, keep repaying */
					credits -= *borrowed;
					*fifo_credit += *borrowed;
					*borrowed = 0;
				}
			}
		}
	}

	fws->fifo_credit_map |= 1 << fifo;
	fws->fifo_credit[fifo] += credits;
}
1037
1038static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
1039{
1040 /* only schedule dequeue when there are credits for delayed traffic */
1041 if (fws->fifo_credit_map & fws->fifo_delay_map)
1042 queue_work(fws->fws_wq, &fws->fws_dequeue_work);
1043}
1044
/* reclaim the credit consumed by packet @p after its txstatus arrived.
 * In implied-credit mode a fifo credit is returned; packets that were
 * sent on a requested_credit instead give that back to the destination
 * entry. Schedules a dequeue pass afterwards.
 */
static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo,
				     struct sk_buff *p)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac;

	if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
		/* fifo credits only flow back in implied-credit mode */
		if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT)
			return;
		brcmf_fws_return_credits(fws, fifo, 1);
	} else {
		/*
		 * if this packet did not count against FIFO credit, it
		 * must have taken a requested_credit from the destination
		 * entry (for pspoll etc.)
		 */
		if (!brcmf_skb_if_flags_get_field(p, REQUESTED))
			entry->requested_credit++;
	}
	brcmf_fws_schedule_deq(fws);
}
1065
1066static int brcmf_fws_enq(struct brcmf_fws_info *fws,
1067 enum brcmf_fws_skb_state state, int fifo,
1068 struct sk_buff *p)
1069{
1070 int prec = 2 * fifo;
1071 u32 *qfull_stat = &fws->stats.delayq_full_error;
1072
1073 struct brcmf_fws_mac_descriptor *entry;
1074
1075 entry = brcmf_skbcb(p)->mac;
1076 if (entry == NULL) {
1077 brcmf_err("no mac descriptor found for skb %p\n", p);
1078 return -ENOENT;
1079 }
1080
1081 brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len);
1082 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
1083 prec += 1;
1084 qfull_stat = &fws->stats.supprq_full_error;
1085 }
1086
1087 if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
1088 *qfull_stat += 1;
1089 return -ENFILE;
1090 }
1091
1092 /* increment total enqueued packet count */
1093 fws->fifo_delay_map |= 1 << fifo;
1094 fws->fifo_enqpkt[fifo]++;
1095
1096 /* update the sk_buff state */
1097 brcmf_skbcb(p)->state = state;
1098 if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
1099 entry->suppress_count++;
1100
1101 /*
1102 * A packet has been pushed so update traffic
1103 * availability bitmap, if applicable
1104 */
1105 brcmf_fws_tim_update(fws, entry, fifo);
1106 brcmf_fws_flow_control_check(fws, &entry->psq,
1107 brcmf_skb_if_flags_get_field(p, INDEX));
1108 return 0;
1109}
1110
/* dequeue the next packet for @fifo, visiting all mac descriptors in a
 * round-robin order starting at deq_node_pos[fifo]. Decides whether the
 * packet consumes a fifo credit (CREDITCHECK flag) or one of the
 * credits/packets the firmware requested. Returns NULL when no
 * eligible packet exists.
 */
static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
{
	struct brcmf_fws_mac_descriptor *table;
	struct brcmf_fws_mac_descriptor *entry;
	struct sk_buff *p;
	int use_credit = 1;
	int num_nodes;
	int node_pos;
	int prec_out;
	int pmsk;
	int i;

	/* treat the whole descriptor table (nodes, ifaces, other) as one
	 * flat array for the round-robin walk
	 */
	table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
	num_nodes = sizeof(fws->desc) / sizeof(struct brcmf_fws_mac_descriptor);
	node_pos = fws->deq_node_pos[fifo];

	for (i = 0; i < num_nodes; i++) {
		entry = &table[(node_pos + i) % num_nodes];
		if (!entry->occupied ||
		    brcmf_fws_mac_desc_closed(fws, entry, fifo))
			continue;

		/* pmsk 3 covers delayed+suppressed precedences of the
		 * fifo; while suppressed only the suppress sub-queue (2)
		 */
		if (entry->suppressed)
			pmsk = 2;
		else
			pmsk = 3;
		p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
		if (p == NULL) {
			if (entry->suppressed) {
				/* leave suppressed state only after all
				 * suppressed in-transit packets returned
				 */
				if (entry->suppr_transit_count >
				    entry->suppress_count)
					return NULL;
				entry->suppressed = false;
				p = brcmu_pktq_mdeq(&entry->psq,
						    1 << (fifo * 2), &prec_out);
			}
		}
		if (p == NULL)
			continue;

		/* did the packet come from suppress sub-queue? */
		if (entry->requested_credit > 0) {
			entry->requested_credit--;
			/*
			 * if the packet was pulled out while destination is in
			 * closed state but had a non-zero packets requested,
			 * then this should not count against the FIFO credit.
			 * That is due to the fact that the firmware will
			 * most likely hold onto this packet until a suitable
			 * time later to push it to the appropriate AC FIFO.
			 */
			if (entry->state == BRCMF_FWS_STATE_CLOSE)
				use_credit = 0;
		} else if (entry->requested_packet > 0) {
			entry->requested_packet--;
			brcmf_skb_if_flags_set_field(p, REQUESTED, 1);
			if (entry->state == BRCMF_FWS_STATE_CLOSE)
				use_credit = 0;
		}
		brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit);

		/* move dequeue position to ensure fair round-robin */
		fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
		brcmf_fws_flow_control_check(fws, &entry->psq,
					     brcmf_skb_if_flags_get_field(p,
									  INDEX)
					     );
		/*
		 * A packet has been picked up, update traffic
		 * availability bitmap, if applicable
		 */
		brcmf_fws_tim_update(fws, entry, fifo);

		/*
		 * decrement total enqueued fifo packets and
		 * clear delay bitmap if done.
		 */
		fws->fifo_enqpkt[fifo]--;
		if (fws->fifo_enqpkt[fifo] == 0)
			fws->fifo_delay_map &= ~(1 << fifo);
		goto done;
	}
	p = NULL;
done:
	brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p);
	return p;
}
1198
/* requeue a firmware-suppressed packet on the suppress sub-queue of its
 * destination. Enters suppressed state on the entry when this is the
 * first suppression of the current generation. On a full suppress queue
 * the packet is pulled from the hanger and the error is returned so the
 * caller finalizes it.
 */
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
					 struct sk_buff *skb, u32 genbit)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u32 hslot;
	int ret;

	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);

	/* this packet was suppressed */
	if (!entry->suppressed || entry->generation != genbit) {
		entry->suppressed = true;
		/* count packets already on the suppress sub-queue */
		entry->suppress_count = brcmu_pktq_mlen(&entry->psq,
							1 << (fifo * 2 + 1));
		entry->suppr_transit_count = entry->transit_count;
	}

	entry->generation = genbit;

	ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
	if (ret != 0) {
		/* suppress q is full, drop this packet */
		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
					true);
	} else {
		/*
		 * Mark suppressed to avoid a double free during
		 * wlfc cleanup
		 */
		brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot,
						 genbit);
		entry->suppress_count++;
	}

	return ret;
}
1235
/* process one txstatus for hanger slot @hslot. Suppress statuses keep
 * the packet in the hanger and requeue it; discard/tossed statuses pop
 * it and finalize transmission. The implied/requested credit carried by
 * the packet is reclaimed in either case.
 */
static int
brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
			   u32 genbit)
{
	u32 fifo;
	int ret;
	bool remove_from_hanger = true;
	struct sk_buff *skb;
	struct brcmf_fws_mac_descriptor *entry = NULL;

	brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n",
		  flags, hslot);

	/* suppress statuses leave the packet parked in the hanger */
	if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
		fws->stats.txs_discard++;
	else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
		fws->stats.txs_supp_core++;
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
		fws->stats.txs_supp_ps++;
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
		fws->stats.txs_tossed++;
	else
		brcmf_err("unexpected txstatus\n");

	ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
				      remove_from_hanger);
	if (ret != 0) {
		brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
		return ret;
	}

	entry = brcmf_skbcb(skb)->mac;
	if (WARN_ON(!entry)) {
		brcmu_pkt_buf_free_skb(skb);
		return -EINVAL;
	}

	/* pick up the implicit credit from this packet */
	fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
	brcmf_skb_pick_up_credit(fws, fifo, skb);

	if (!remove_from_hanger)
		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);

	/* finalize when the packet left the firmware for good, or when
	 * re-queueing the suppressed packet failed
	 */
	if (remove_from_hanger || ret) {
		entry->transit_count--;
		if (entry->suppressed)
			entry->suppr_transit_count--;

		brcmf_txfinalize(fws->drvr, skb, true);
	}
	return 0;
}
1291
1292static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
1293 u8 *data)
1294{
1295 int i;
1296
1297 if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
1298 brcmf_dbg(INFO, "ignored\n");
1299 return BRCMF_FWS_RET_OK_NOSCHEDULE;
1300 }
1301
1302 brcmf_dbg(TRACE, "enter: data %pM\n", data);
1303 for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
1304 brcmf_fws_return_credits(fws, i, data[i]);
1305
1306 brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map,
1307 fws->fifo_delay_map);
1308 return BRCMF_FWS_RET_OK_SCHEDULE;
1309}
1310
1311static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
1312{
1313 __le32 status_le;
1314 u32 status;
1315 u32 hslot;
1316 u32 genbit;
1317 u8 flags;
1318
1319 fws->stats.txs_indicate++;
1320 memcpy(&status_le, data, sizeof(status_le));
1321 status = le32_to_cpu(status_le);
1322 flags = brcmf_txstatus_get_field(status, FLAGS);
1323 hslot = brcmf_txstatus_get_field(status, HSLOT);
1324 genbit = brcmf_txstatus_get_field(status, GENERATION);
1325
1326 return brcmf_fws_txstatus_process(fws, flags, hslot, genbit);
1327}
1328
/* handle a BRCMF_FWS_TYPE_TRANS_ID TLV: log the sequence number
 * (data[1]) and the little-endian timestamp starting at data[2]
 * (copied with memcpy as it may be unaligned).
 */
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
{
	__le32 timestamp;

	memcpy(&timestamp, &data[2], sizeof(timestamp));
	brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
		  le32_to_cpu(timestamp));
	return 0;
}
1338
/* using macro so sparse checking does not complain
 * about locking imbalance. @flags must be a ulong lvalue; it receives
 * the saved irq state for the matching brcmf_fws_unlock().
 */
#define brcmf_fws_lock(drvr, flags)				\
do {								\
	flags = 0;						\
	spin_lock_irqsave(&((drvr)->fws_spinlock), (flags));	\
} while (0)

/* using macro so sparse checking does not complain
 * about locking imbalance.
 */
#define brcmf_fws_unlock(drvr, flags) \
	spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
1353
1354static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
1355 const struct brcmf_event_msg *e,
1356 void *data)
1357{
1358 struct brcmf_fws_info *fws = ifp->drvr->fws;
1359 int i;
1360 ulong flags;
1361 u8 *credits = data;
1362
1363 if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
1364 brcmf_err("event payload too small (%d)\n", e->datalen);
1365 return -EINVAL;
1366 }
1367
1368 brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
1369 brcmf_fws_lock(ifp->drvr, flags);
1370 for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
1371 if (*credits)
1372 fws->fifo_credit_map |= 1 << i;
1373 else
1374 fws->fifo_credit_map &= ~(1 << i);
1375 fws->fifo_credit[i] = *credits++;
1376 }
1377 brcmf_fws_schedule_deq(fws);
1378 brcmf_fws_unlock(ifp->drvr, flags);
1379 return 0;
1380}
1381
/* parse and strip the firmware-signalling header of a received packet.
 *
 * @drvr: driver state.
 * @ifidx: interface index the packet arrived on.
 * @signal_len: length of the signalling prefix in @skb.
 * @skb: received packet; the signalling bytes are pulled off before
 *	returning so only the ethernet payload remains (which may be
 *	empty for signal-only packets).
 *
 * Walks the TLV sequence at the front of the packet under the fws
 * spinlock, dispatching each TLV to its handler. Parsing aborts on a
 * length mismatch. A dequeue pass is scheduled when any handler
 * returned BRCMF_FWS_RET_OK_SCHEDULE. Always returns 0.
 */
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
		      struct sk_buff *skb)
{
	struct brcmf_fws_info *fws = drvr->fws;
	ulong flags;
	u8 *signal_data;
	s16 data_len;
	u8 type;
	u8 len;
	u8 *data;
	s32 status;
	s32 err;

	brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
		  ifidx, skb->len, signal_len);

	WARN_ON(signal_len > skb->len);

	/* if flow control disabled, skip to packet data and leave */
	if (!signal_len || !drvr->fw_signals) {
		skb_pull(skb, signal_len);
		return 0;
	}

	/* lock during tlv parsing */
	brcmf_fws_lock(drvr, flags);

	fws->stats.header_pulls++;
	data_len = signal_len;
	signal_data = skb->data;

	status = BRCMF_FWS_RET_OK_NOSCHEDULE;
	while (data_len > 0) {
		/* extract tlv info */
		type = signal_data[0];

		/* FILLER type is actually not a TLV, but
		 * a single byte that can be skipped.
		 */
		if (type == BRCMF_FWS_TYPE_FILLER) {
			signal_data += 1;
			data_len -= 1;
			continue;
		}
		len = signal_data[1];
		data = signal_data + 2;

		brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type,
			  brcmf_fws_get_tlv_name(type), len, *data);

		/* abort parsing when length invalid */
		if (data_len < len + 2)
			break;

		/* the advertised length must match the TLV definition */
		if (len != brcmf_fws_get_tlv_len(fws, type))
			break;

		err = BRCMF_FWS_RET_OK_NOSCHEDULE;
		switch (type) {
		case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
		case BRCMF_FWS_TYPE_COMP_TXSTATUS:
			break;
		case BRCMF_FWS_TYPE_MACDESC_ADD:
		case BRCMF_FWS_TYPE_MACDESC_DEL:
			brcmf_fws_macdesc_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_MAC_OPEN:
		case BRCMF_FWS_TYPE_MAC_CLOSE:
			err = brcmf_fws_macdesc_state_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_INTERFACE_OPEN:
		case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
			err = brcmf_fws_interface_state_indicate(fws, type,
								 data);
			break;
		case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
		case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
			err = brcmf_fws_request_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_TXSTATUS:
			brcmf_fws_txstatus_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
			err = brcmf_fws_fifocreditback_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_RSSI:
			brcmf_fws_rssi_indicate(fws, *data);
			break;
		case BRCMF_FWS_TYPE_TRANS_ID:
			brcmf_fws_dbg_seqnum_check(fws, data);
			break;
		case BRCMF_FWS_TYPE_PKTTAG:
		case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
		default:
			fws->stats.tlv_invalid_type++;
			break;
		}
		/* one SCHEDULE result is enough to trigger a dequeue pass */
		if (err == BRCMF_FWS_RET_OK_SCHEDULE)
			status = BRCMF_FWS_RET_OK_SCHEDULE;
		signal_data += len + 2;
		data_len -= len + 2;
	}

	if (data_len != 0)
		fws->stats.tlv_parse_failed++;

	if (status == BRCMF_FWS_RET_OK_SCHEDULE)
		brcmf_fws_schedule_deq(fws);

	/* signalling processing result does
	 * not affect the actual ethernet packet.
	 */
	skb_pull(skb, signal_len);

	/* this may be a signal-only packet
	 */
	if (skb->len == 0)
		fws->stats.header_only_pkt++;

	brcmf_fws_unlock(drvr, flags);
	return 0;
}
1504
/* brcmf_fws_hdrpush() - prepend firmware-signalling TLVs to a tx packet.
 *
 * @fws: firmware-signalling information.
 * @skb: packet to transmit; the packet-tag TLV (and optionally a
 *	pending-traffic bitmap TLV) plus filler padding are pushed in
 *	front of the data.
 *
 * The pushed area is padded with FILLER bytes up to a 4-byte multiple
 * and its size, expressed in 32-bit words, is handed to the protocol
 * layer. Returns 0.
 */
static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u8 *wlh;
	u16 data_offset = 0;
	u8 fillers;
	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);

	brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n",
		  entry->ea, entry->interface_id, le32_to_cpu(pkttag));
	if (entry->send_tim_signal)
		data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;

	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
	data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
	/* pad the signalling area to a 32-bit boundary */
	fillers = round_up(data_offset, 4) - data_offset;
	data_offset += fillers;

	skb_push(skb, data_offset);
	wlh = skb->data;

	/* packet-tag TLV carries the little-endian host-to-device tag */
	wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
	wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
	wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;

	if (entry->send_tim_signal) {
		/* one-shot: report the pending-traffic bitmap and clear
		 * the request, remembering what was last reported.
		 */
		entry->send_tim_signal = 0;
		wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
		wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
		wlh[2] = entry->mac_handle;
		wlh[3] = entry->traffic_pending_bmp;
		wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
	}
	if (fillers)
		memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);

	/* header size is passed to the protocol layer in words */
	brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
			    data_offset >> 2, skb);
	return 0;
}
1547
/* brcmf_fws_precommit_skb() - prepare a packet for handover to the bus.
 *
 * @fws: firmware-signalling information.
 * @fifo: fifo (access category) the packet will be sent on.
 * @p: packet to prepare.
 *
 * New packets get a fresh hanger slot and a signalling header pushed;
 * previously suppressed packets keep their hanger slot but have the
 * stale header replaced (preserving the generation bit so firmware can
 * match the retransmission). Returns 0 or a negative error from the
 * hanger/protocol calls.
 */
static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
				   struct sk_buff *p)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
	struct brcmf_fws_mac_descriptor *entry = skcb->mac;
	int rc = 0;
	bool header_needed;
	int hslot = BRCMF_FWS_HANGER_MAXITEMS;
	u8 free_ctr;
	u8 ifidx;
	u8 flags;

	/* suppressed packets already carry a signalling header */
	header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;

	if (header_needed) {
		/* obtaining free slot may fail, but that will be caught
		 * by the hanger push. This assures the packet has a BDC
		 * header upon return.
		 */
		hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
		free_ctr = entry->seq[fifo];
		brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
		brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
		brcmf_skb_htod_tag_set_field(p, GENERATION, 1);
		entry->transit_count++;
	}
	brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
	brcmf_skb_htod_tag_set_field(p, FIFO, fifo);

	flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
	if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) {
		/*
		 Indicate that this packet is being sent in response to an
		 explicit request from the firmware side.
		 */
		flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
	}
	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
	if (header_needed) {
		brcmf_fws_hdrpush(fws, p);
		rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
		if (rc)
			brcmf_err("hanger push failed: rc=%d\n", rc);
	} else {
		int gen;

		/* remove old header */
		rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p);
		if (rc == 0) {
			/* re-read the slot and copy the generation bit
			 * from the existing hanger entry.
			 */
			hslot = brcmf_skb_htod_tag_get_field(p, HSLOT);
			brcmf_fws_hanger_get_genbit(&fws->hanger, p,
						    hslot, &gen);
			brcmf_skb_htod_tag_set_field(p, GENERATION, gen);

			/* push new header */
			brcmf_fws_hdrpush(fws, p);
		}
	}

	return rc;
}
1609
/* brcmf_fws_rollback_toq() - requeue a packet that failed to commit.
 *
 * @fws: firmware-signalling information.
 * @skb: packet to roll back.
 *
 * On rollback failure the packet is finalized (freed) and the failure
 * counter bumped; on success the success counter is bumped.
 */
static void
brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	/*
	 put the packet back to the head of queue

	 - suppressed packet goes back to suppress sub-queue
	 - pull out the header, if new or delayed packet

	 Note: hslot is used only when header removal is done.
	 */
	struct brcmf_fws_mac_descriptor *entry;
	enum brcmf_fws_skb_state state;
	struct sk_buff *pktout;
	int rc = 0;
	int fifo;
	int hslot;
	u8 ifidx;

	fifo = brcmf_skb_if_flags_get_field(skb, FIFO);
	state = brcmf_skbcb(skb)->state;
	entry = brcmf_skbcb(skb)->mac;

	if (entry != NULL) {
		if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
			/* wl-header is saved for suppressed packets;
			 * odd precedence (2*fifo+1) is the suppress
			 * sub-queue.
			 */
			pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo + 1,
						      skb);
			if (pktout == NULL) {
				brcmf_err("suppress queue full\n");
				rc = -ENOSPC;
			}
		} else {
			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);

			/* remove header first */
			rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
			if (rc) {
				brcmf_err("header removal failed\n");
				/* free the hanger slot */
				brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
							&pktout, true);
				rc = -EINVAL;
				goto fail;
			}

			/* delay-q packets are going to delay-q; even
			 * precedence (2*fifo) is the delay sub-queue.
			 */
			pktout = brcmu_pktq_penq_head(&entry->psq,
						      2 * fifo, skb);
			if (pktout == NULL) {
				brcmf_err("delay queue full\n");
				rc = -ENOSPC;
			}

			/* free the hanger slot */
			brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &pktout,
						true);

			/* decrement sequence count */
			entry->seq[fifo]--;
		}
		/*
		 if this packet did not count against FIFO credit, it must have
		 taken a requested_credit from the firmware (for pspoll etc.)
		 */
		if (!(brcmf_skbcb(skb)->if_flags &
		      BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK))
			entry->requested_credit++;
	} else {
		brcmf_err("no mac entry linked\n");
		rc = -ENOENT;
	}


fail:
	if (rc) {
		/* rollback failed: drop the packet entirely */
		brcmf_txfinalize(fws->drvr, skb, false);
		fws->stats.rollback_failed++;
	} else
		fws->stats.rollback_success++;
}
1691
1692static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
1693{
1694 int lender_ac;
1695
1696 if (time_after(fws->borrow_defer_timestamp, jiffies))
1697 return -ENAVAIL;
1698
1699 for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
1700 if (fws->fifo_credit[lender_ac]) {
1701 fws->credits_borrowed[lender_ac]++;
1702 fws->fifo_credit[lender_ac]--;
1703 if (fws->fifo_credit[lender_ac] == 0)
1704 fws->fifo_credit_map &= ~(1 << lender_ac);
1705 brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac);
1706 return 0;
1707 }
1708 }
1709 return -ENAVAIL;
1710}
1711
/* brcmf_fws_consume_credit() - account a tx credit for an outgoing packet.
 *
 * @fws: firmware-signalling information.
 * @fifo: fifo (access category) the packet goes out on.
 * @skb: the packet being sent.
 *
 * Decides whether the packet consumes a FIFO credit (recorded in the
 * CREDITCHECK skb flag) and, if so, consumes one — possibly borrowing
 * from another AC when the BE fifo is depleted. Returns 0 when the
 * packet may be sent, -ENAVAIL when credits are depleted.
 */
static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
				    struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	int *credit = &fws->fifo_credit[fifo];
	int use_credit = 1;

	brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit);

	if (entry->requested_credit > 0) {
		/*
		 * if the packet was pulled out while destination is in
		 * closed state but had a non-zero packets requested,
		 * then this should not count against the FIFO credit.
		 * That is due to the fact that the firmware will
		 * most likely hold onto this packet until a suitable
		 * time later to push it to the appropriate AC FIFO.
		 */
		entry->requested_credit--;
		if (entry->state == BRCMF_FWS_STATE_CLOSE)
			use_credit = 0;
	} else if (entry->requested_packet > 0) {
		/* firmware asked for a packet (not a credit); mark it */
		entry->requested_packet--;
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
		if (entry->state == BRCMF_FWS_STATE_CLOSE)
			use_credit = 0;
	}
	brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit);
	if (!use_credit) {
		brcmf_dbg(TRACE, "exit: no creditcheck set\n");
		return 0;
	}

	/* non-BE traffic defers BE borrowing for a grace period */
	if (fifo != BRCMF_FWS_FIFO_AC_BE)
		fws->borrow_defer_timestamp = jiffies +
					      BRCMF_FWS_BORROW_DEFER_PERIOD;

	if (!(*credit)) {
		/* Try to borrow a credit from other queue */
		if (fifo == BRCMF_FWS_FIFO_AC_BE &&
		    brcmf_fws_borrow_credit(fws) == 0)
			return 0;

		brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo);
		return -ENAVAIL;
	}
	(*credit)--;
	/* clear this fifo's bit in the map when its credits run out */
	if (!(*credit))
		fws->fifo_credit_map &= ~(1 << fifo);
	brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit);
	return 0;
}
1764
/* brcmf_fws_commit_skb() - hand a prepared packet to the bus for tx.
 *
 * @fws: firmware-signalling information.
 * @fifo: fifo (access category) to transmit on.
 * @skb: packet to send.
 *
 * Precommits the packet (header/hanger bookkeeping), sends it via the
 * bus layer and updates statistics. On any failure the packet is rolled
 * back onto its queue. Returns 0 on success or a negative error.
 */
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
				struct sk_buff *skb)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct brcmf_fws_mac_descriptor *entry;
	struct brcmf_bus *bus = fws->drvr->bus_if;
	int rc;

	entry = skcb->mac;
	/* mac lookup may have stored an error pointer instead */
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	rc = brcmf_fws_precommit_skb(fws, fifo, skb);
	if (rc < 0) {
		fws->stats.generic_error++;
		goto rollback;
	}

	rc = brcmf_bus_txdata(bus, skb);
	if (rc < 0)
		goto rollback;

	entry->seq[fifo]++;
	fws->stats.pkt2bus++;
	/* only credit-consuming packets count toward fifo statistics */
	if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
		fws->stats.send_pkts[fifo]++;
		fws->stats.fifo_credits_sent[fifo]++;
	}

	return rc;

rollback:
	brcmf_fws_rollback_toq(fws, skb);
	return rc;
}
1800
/* brcmf_fws_process_skb() - transmit entry point for a data frame.
 *
 * @ifp: interface the frame is sent on.
 * @skb: ethernet frame to transmit.
 *
 * Classifies the frame (802.1d priority, multicast vs unicast fifo),
 * and either bypasses flow control entirely or runs the frame through
 * the firmware-signalling machinery: commit immediately when the
 * destination is open and credits are available, otherwise park it in
 * the per-mac delay queue. Returns 0, or the bus tx result in the
 * flow-control-disabled path.
 */
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_fws_info *fws = drvr->fws;
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	ulong flags;
	int fifo = BRCMF_FWS_FIFO_BCMC;
	bool multicast = is_multicast_ether_addr(eh->h_dest);

	/* determine the priority */
	if (!skb->priority)
		skb->priority = cfg80211_classify8021d(skb);

	drvr->tx_multicast += !!multicast;
	/* EAPOL frames are tracked so a pending 802.1x handshake can
	 * be waited for elsewhere.
	 */
	if (ntohs(eh->h_proto) == ETH_P_PAE)
		atomic_inc(&ifp->pend_8021x_cnt);

	if (!brcmf_fws_fc_active(fws)) {
		/* If the protocol uses a data header, apply it */
		brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);

		/* Use bus module to send data frame */
		return brcmf_bus_txdata(drvr->bus_if, skb);
	}

	/* set control buffer information */
	skcb->if_flags = 0;
	skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest);
	skcb->state = BRCMF_FWS_SKBSTATE_NEW;
	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
	/* multicast stays on the BCMC fifo; unicast maps by priority */
	if (!multicast)
		fifo = brcmf_fws_prio2fifo[skb->priority];
	brcmf_skb_if_flags_set_field(skb, FIFO, fifo);

	brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest,
		  multicast, fifo);

	brcmf_fws_lock(drvr, flags);
	/* delay when the destination is suppressed/closed, when packets
	 * are already queued for it, or when no credit is available.
	 */
	if (skcb->mac->suppressed ||
	    brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) ||
	    brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
	    (!multicast &&
	     brcmf_fws_consume_credit(fws, fifo, skb) < 0)) {
		/* enqueue the packet in delayQ */
		drvr->fws->fifo_delay_map |= 1 << fifo;
		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
	} else {
		/* commit failed: give the consumed credit back */
		if (brcmf_fws_commit_skb(fws, fifo, skb))
			if (!multicast)
				brcmf_skb_pick_up_credit(fws, fifo, skb);
	}
	brcmf_fws_unlock(drvr, flags);
	return 0;
}
1856
1857void brcmf_fws_reset_interface(struct brcmf_if *ifp)
1858{
1859 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
1860
1861 brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
1862 if (!entry)
1863 return;
1864
1865 brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
1866}
1867
1868void brcmf_fws_add_interface(struct brcmf_if *ifp)
1869{
1870 struct brcmf_fws_info *fws = ifp->drvr->fws;
1871 struct brcmf_fws_mac_descriptor *entry;
1872
1873 brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n",
1874 ifp->bssidx, ifp->mac_addr);
1875 if (!ifp->ndev || !ifp->drvr->fw_signals)
1876 return;
1877
1878 entry = &fws->desc.iface[ifp->ifidx];
1879 ifp->fws_desc = entry;
1880 brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
1881 brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
1882 BRCMF_FWS_PSQ_LEN);
1883}
1884
1885void brcmf_fws_del_interface(struct brcmf_if *ifp)
1886{
1887 struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
1888 ulong flags;
1889
1890 brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
1891 if (!entry)
1892 return;
1893
1894 brcmf_fws_lock(ifp->drvr, flags);
1895 ifp->fws_desc = NULL;
1896 brcmf_fws_clear_mac_descriptor(entry);
1897 brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
1898 brcmf_fws_unlock(ifp->drvr, flags);
1899}
1900
/* brcmf_fws_dequeue_worker() - workqueue handler draining delayed packets.
 *
 * @worker: embedded work_struct inside struct brcmf_fws_info.
 *
 * For each fifo (highest AC first) dequeues and commits packets while
 * credits last; for the BE fifo, once its own credits are exhausted it
 * additionally tries to send on borrowed credits from other ACs.
 */
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
	struct brcmf_fws_info *fws;
	struct sk_buff *skb;
	ulong flags;
	int fifo;
	int credit;

	fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);

	brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
	brcmf_fws_lock(fws->drvr, flags);
	for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) {
		brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo,
			  fws->fifo_credit[fifo]);
		/* credit only advances for packets that consumed one */
		for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) {
			skb = brcmf_fws_deq(fws, fifo);
			if (!skb || brcmf_fws_commit_skb(fws, fifo, skb))
				break;
			if (brcmf_skbcb(skb)->if_flags &
			    BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
				credit++;
		}
		/* BE drained all its own credits: try borrowed credits */
		if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
		    (credit == fws->fifo_credit[fifo])) {
			fws->fifo_credit[fifo] -= credit;
			while (brcmf_fws_borrow_credit(fws) == 0) {
				skb = brcmf_fws_deq(fws, fifo);
				if (!skb) {
					/* nothing queued: undo the borrow */
					brcmf_fws_return_credits(fws, fifo, 1);
					break;
				}
				if (brcmf_fws_commit_skb(fws, fifo, skb)) {
					brcmf_fws_return_credits(fws, fifo, 1);
					break;
				}
			}
		} else {
			fws->fifo_credit[fifo] -= credit;
		}
	}
	brcmf_fws_unlock(fws->drvr, flags);
}
1944
/* brcmf_fws_init() - set up firmware-signalling support.
 *
 * @drvr: driver instance.
 *
 * Allocates the fws state, creates the dequeue workqueue, registers
 * the credit-map event handler and enables signalling TLVs in firmware.
 * Returns 0 on success (and also when the tlv iovar is unsupported,
 * after tearing signalling back down — see note below), or a negative
 * error code.
 */
int brcmf_fws_init(struct brcmf_pub *drvr)
{
	u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
	int rc;

	/* signalling disabled: nothing to set up */
	if (!drvr->fw_signals)
		return 0;

	spin_lock_init(&drvr->fws_spinlock);

	drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
	if (!drvr->fws) {
		rc = -ENOMEM;
		goto fail;
	}

	/* set linkage back */
	drvr->fws->drvr = drvr;
	drvr->fws->fcmode = fcmode;

	drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
	if (drvr->fws->fws_wq == NULL) {
		brcmf_err("workqueue creation failed\n");
		rc = -EBADF;
		goto fail;
	}
	INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);

	/* enable firmware signalling if fcmode active */
	if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
		tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
		       BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
		       BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;

	rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
				 brcmf_fws_notify_credit_map);
	if (rc < 0) {
		brcmf_err("register credit map handler failed\n");
		goto fail;
	}

	/* setting the iovar may fail if feature is unsupported
	 * so leave the rc as is so driver initialization can
	 * continue.
	 */
	if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
		/* NOTE(review): rc is still 0 here, so this path deinits
		 * signalling yet reports success — appears intentional
		 * (graceful degradation per comment above); confirm.
		 */
		brcmf_err("failed to set bdcv2 tlv signaling\n");
		goto fail_event;
	}

	brcmf_fws_hanger_init(&drvr->fws->hanger);
	/* the "other" descriptor catches traffic with no mac entry */
	brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0);
	brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
			BRCMF_FWS_PSQ_LEN);

	/* create debugfs file for statistics */
	brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);

	/* TODO: remove upon feature delivery */
	brcmf_err("%s bdcv2 tlv signaling [%x]\n",
		  drvr->fw_signals ? "enabled" : "disabled", tlv);
	return 0;

fail_event:
	brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
	brcmf_fws_deinit(drvr);
	return rc;
}
2014
/* brcmf_fws_deinit() - tear down firmware-signalling support.
 *
 * @drvr: driver instance.
 *
 * Safe to call when signalling was never set up (drvr->fws == NULL).
 * Disables signalling, destroys the workqueue, cleans up state under
 * the fws lock and frees the top-level structure.
 */
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
	struct brcmf_fws_info *fws = drvr->fws;
	ulong flags;

	if (!fws)
		return;

	/* disable firmware signalling entirely
	 * to avoid using the workqueue.
	 */
	drvr->fw_signals = false;

	/* flushes and stops any pending dequeue work */
	if (drvr->fws->fws_wq)
		destroy_workqueue(drvr->fws->fws_wq);

	/* cleanup */
	brcmf_fws_lock(drvr, flags);
	brcmf_fws_cleanup(fws, -1);
	drvr->fws = NULL;
	brcmf_fws_unlock(drvr, flags);

	/* free top structure */
	kfree(fws);
}
2040
2041bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
2042{
2043 if (!fws)
2044 return false;
2045
2046 brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode);
2047 return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
2048}
2049
/* brcmf_fws_bustxfail() - handle a packet the bus layer failed to send.
 *
 * @fws: firmware-signalling information.
 * @skb: packet that could not be transmitted.
 *
 * Processes the packet as a firmware-tossed tx status and, in explicit
 * credit mode, returns the consumed credit and reschedules the dequeue
 * worker since the frame never reached firmware.
 */
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	ulong flags;

	brcmf_fws_lock(fws->drvr, flags);
	brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED,
				   brcmf_skb_htod_tag_get_field(skb, HSLOT), 0);
	/* the packet never reached firmware so reclaim credit */
	if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT &&
	    brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
		brcmf_fws_return_credits(fws,
					 brcmf_skb_htod_tag_get_field(skb,
								      FIFO),
					 1);
		brcmf_fws_schedule_deq(fws);
	}
	brcmf_fws_unlock(fws->drvr, flags);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
new file mode 100644
index 000000000000..fbe483d23752
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17
18#ifndef FWSIGNAL_H_
19#define FWSIGNAL_H_
20
21int brcmf_fws_init(struct brcmf_pub *drvr);
22void brcmf_fws_deinit(struct brcmf_pub *drvr);
23bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
24int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
25 struct sk_buff *skb);
26int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
27
28void brcmf_fws_reset_interface(struct brcmf_if *ifp);
29void brcmf_fws_add_interface(struct brcmf_if *ifp);
30void brcmf_fws_del_interface(struct brcmf_if *ifp);
31void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
32
33#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 4166e642068b..2b90da0d85f3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -15,6 +15,7 @@
15 */ 15 */
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
19 20
20#include <brcmu_wifi.h> 21#include <brcmu_wifi.h>
@@ -423,29 +424,6 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
423 424
424 425
425/** 426/**
426 * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec.
427 *
428 * @channel: channel number
429 */
430static u16 brcmf_p2p_chnr_to_chspec(u16 channel)
431{
432 u16 chanspec;
433
434 chanspec = channel & WL_CHANSPEC_CHAN_MASK;
435
436 if (channel <= CH_MAX_2G_CHANNEL)
437 chanspec |= WL_CHANSPEC_BAND_2G;
438 else
439 chanspec |= WL_CHANSPEC_BAND_5G;
440
441 chanspec |= WL_CHANSPEC_BW_20;
442 chanspec |= WL_CHANSPEC_CTL_SB_NONE;
443
444 return chanspec;
445}
446
447
448/**
449 * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation. 427 * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
450 * 428 *
451 * @ifp: ifp to use for iovars (primary). 429 * @ifp: ifp to use for iovars (primary).
@@ -455,7 +433,9 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
455{ 433{
456 s32 ret = 0; 434 s32 ret = 0;
457 435
436 brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
458 brcmf_fil_iovar_int_set(ifp, "apsta", 1); 437 brcmf_fil_iovar_int_set(ifp, "apsta", 1);
438 brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
459 439
460 /* In case of COB type, firmware has default mac address 440 /* In case of COB type, firmware has default mac address
461 * After Initializing firmware, we have to set current mac address to 441 * After Initializing firmware, we have to set current mac address to
@@ -473,28 +453,35 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
473 * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P. 453 * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
474 * 454 *
475 * @p2p: P2P specific data. 455 * @p2p: P2P specific data.
456 * @dev_addr: optional device address.
476 * 457 *
477 * P2P needs mac addresses for P2P device and interface. These are 458 * P2P needs mac addresses for P2P device and interface. If no device
478 * derived from the primary net device, ie. the permanent ethernet 459 * address it specified, these are derived from the primary net device, ie.
479 * address of the device. 460 * the permanent ethernet address of the device.
480 */ 461 */
481static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p) 462static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
482{ 463{
483 struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 464 struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
484 struct brcmf_if *p2p_ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp; 465 bool local_admin = false;
466
467 if (!dev_addr || is_zero_ether_addr(dev_addr)) {
468 dev_addr = pri_ifp->mac_addr;
469 local_admin = true;
470 }
485 471
486 /* Generate the P2P Device Address. This consists of the device's 472 /* Generate the P2P Device Address. This consists of the device's
487 * primary MAC address with the locally administered bit set. 473 * primary MAC address with the locally administered bit set.
488 */ 474 */
489 memcpy(p2p->dev_addr, pri_ifp->mac_addr, ETH_ALEN); 475 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
490 p2p->dev_addr[0] |= 0x02; 476 if (local_admin)
491 memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN); 477 p2p->dev_addr[0] |= 0x02;
492 478
493 /* Generate the P2P Interface Address. If the discovery and connection 479 /* Generate the P2P Interface Address. If the discovery and connection
494 * BSSCFGs need to simultaneously co-exist, then this address must be 480 * BSSCFGs need to simultaneously co-exist, then this address must be
495 * different from the P2P Device Address, but also locally administered. 481 * different from the P2P Device Address, but also locally administered.
496 */ 482 */
497 memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN); 483 memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
484 p2p->int_addr[0] |= 0x02;
498 p2p->int_addr[4] ^= 0x80; 485 p2p->int_addr[4] ^= 0x80;
499} 486}
500 487
@@ -773,7 +760,7 @@ exit:
773 * validates the channels in the request. 760 * validates the channels in the request.
774 */ 761 */
775static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, 762static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
776 struct net_device *ndev, 763 struct brcmf_if *ifp,
777 struct cfg80211_scan_request *request, 764 struct cfg80211_scan_request *request,
778 u16 action) 765 u16 action)
779{ 766{
@@ -827,7 +814,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
827 IEEE80211_CHAN_PASSIVE_SCAN)) 814 IEEE80211_CHAN_PASSIVE_SCAN))
828 continue; 815 continue;
829 816
830 chanspecs[i] = channel_to_chanspec(chan); 817 chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf,
818 chan);
831 brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n", 819 brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
832 num_nodfs, chan->hw_value, chanspecs[i]); 820 num_nodfs, chan->hw_value, chanspecs[i]);
833 num_nodfs++; 821 num_nodfs++;
@@ -935,8 +923,8 @@ static s32
935brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration) 923brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
936{ 924{
937 struct brcmf_cfg80211_vif *vif; 925 struct brcmf_cfg80211_vif *vif;
926 struct brcmu_chan ch;
938 s32 err = 0; 927 s32 err = 0;
939 u16 chanspec;
940 928
941 vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; 929 vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
942 if (!vif) { 930 if (!vif) {
@@ -951,9 +939,11 @@ brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
951 goto exit; 939 goto exit;
952 } 940 }
953 941
954 chanspec = brcmf_p2p_chnr_to_chspec(channel); 942 ch.chnum = channel;
943 ch.bw = BRCMU_CHAN_BW_20;
944 p2p->cfg->d11inf.encchspec(&ch);
955 err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN, 945 err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
956 chanspec, (u16)duration); 946 ch.chspec, (u16)duration);
957 if (!err) { 947 if (!err) {
958 set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status); 948 set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
959 p2p->remain_on_channel_cookie++; 949 p2p->remain_on_channel_cookie++;
@@ -1065,6 +1055,7 @@ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
1065 u32 channel_cnt; 1055 u32 channel_cnt;
1066 u16 *default_chan_list; 1056 u16 *default_chan_list;
1067 u32 i; 1057 u32 i;
1058 struct brcmu_chan ch;
1068 1059
1069 brcmf_dbg(TRACE, "Enter\n"); 1060 brcmf_dbg(TRACE, "Enter\n");
1070 1061
@@ -1079,15 +1070,23 @@ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
1079 err = -ENOMEM; 1070 err = -ENOMEM;
1080 goto exit; 1071 goto exit;
1081 } 1072 }
1073 ch.bw = BRCMU_CHAN_BW_20;
1082 if (channel) { 1074 if (channel) {
1075 ch.chnum = channel;
1076 p2p->cfg->d11inf.encchspec(&ch);
1083 /* insert same channel to the chan_list */ 1077 /* insert same channel to the chan_list */
1084 for (i = 0; i < channel_cnt; i++) 1078 for (i = 0; i < channel_cnt; i++)
1085 default_chan_list[i] = 1079 default_chan_list[i] = ch.chspec;
1086 brcmf_p2p_chnr_to_chspec(channel);
1087 } else { 1080 } else {
1088 default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1); 1081 ch.chnum = SOCIAL_CHAN_1;
1089 default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2); 1082 p2p->cfg->d11inf.encchspec(&ch);
1090 default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3); 1083 default_chan_list[0] = ch.chspec;
1084 ch.chnum = SOCIAL_CHAN_2;
1085 p2p->cfg->d11inf.encchspec(&ch);
1086 default_chan_list[1] = ch.chspec;
1087 ch.chnum = SOCIAL_CHAN_3;
1088 p2p->cfg->d11inf.encchspec(&ch);
1089 default_chan_list[2] = ch.chspec;
1091 } 1090 }
1092 err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list, 1091 err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
1093 WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START, 1092 WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
@@ -1217,6 +1216,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
1217{ 1216{
1218 struct brcmf_p2p_info *p2p = &cfg->p2p; 1217 struct brcmf_p2p_info *p2p = &cfg->p2p;
1219 struct afx_hdl *afx_hdl = &p2p->afx_hdl; 1218 struct afx_hdl *afx_hdl = &p2p->afx_hdl;
1219 struct brcmu_chan ch;
1220 u8 *ie; 1220 u8 *ie;
1221 s32 err; 1221 s32 err;
1222 u8 p2p_dev_addr[ETH_ALEN]; 1222 u8 p2p_dev_addr[ETH_ALEN];
@@ -1242,8 +1242,12 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
1242 p2p_dev_addr, sizeof(p2p_dev_addr)); 1242 p2p_dev_addr, sizeof(p2p_dev_addr));
1243 if ((err >= 0) && 1243 if ((err >= 0) &&
1244 (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) { 1244 (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
1245 afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch : 1245 if (!bi->ctl_ch) {
1246 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); 1246 ch.chspec = le16_to_cpu(bi->chanspec);
1247 cfg->d11inf.decchspec(&ch);
1248 bi->ctl_ch = ch.chnum;
1249 }
1250 afx_hdl->peer_chan = bi->ctl_ch;
1247 brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n", 1251 brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
1248 afx_hdl->tx_dst_addr, afx_hdl->peer_chan); 1252 afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
1249 complete(&afx_hdl->act_frm_scan); 1253 complete(&afx_hdl->act_frm_scan);
@@ -1261,7 +1265,7 @@ static void
1261brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg) 1265brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
1262{ 1266{
1263 struct brcmf_p2p_info *p2p = &cfg->p2p; 1267 struct brcmf_p2p_info *p2p = &cfg->p2p;
1264 struct net_device *ndev = cfg->escan_info.ndev; 1268 struct brcmf_if *ifp = cfg->escan_info.ifp;
1265 1269
1266 if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) && 1270 if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
1267 (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) || 1271 (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1271,12 +1275,12 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
1271 * So abort scan for off channel completion. 1275 * So abort scan for off channel completion.
1272 */ 1276 */
1273 if (p2p->af_sent_channel) 1277 if (p2p->af_sent_channel)
1274 brcmf_notify_escan_complete(cfg, ndev, true, true); 1278 brcmf_notify_escan_complete(cfg, ifp, true, true);
1275 } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN, 1279 } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
1276 &p2p->status)) { 1280 &p2p->status)) {
1277 brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n"); 1281 brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
1278 /* So abort scan to cancel listen */ 1282 /* So abort scan to cancel listen */
1279 brcmf_notify_escan_complete(cfg, ndev, true, true); 1283 brcmf_notify_escan_complete(cfg, ifp, true, true);
1280 } 1284 }
1281} 1285}
1282 1286
@@ -1350,12 +1354,14 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1350 u8 *frame = (u8 *)(rxframe + 1); 1354 u8 *frame = (u8 *)(rxframe + 1);
1351 struct brcmf_p2p_pub_act_frame *act_frm; 1355 struct brcmf_p2p_pub_act_frame *act_frm;
1352 struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm; 1356 struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
1353 u16 chanspec = be16_to_cpu(rxframe->chanspec); 1357 struct brcmu_chan ch;
1354 struct ieee80211_mgmt *mgmt_frame; 1358 struct ieee80211_mgmt *mgmt_frame;
1355 s32 freq; 1359 s32 freq;
1356 u16 mgmt_type; 1360 u16 mgmt_type;
1357 u8 action; 1361 u8 action;
1358 1362
1363 ch.chspec = be16_to_cpu(rxframe->chanspec);
1364 cfg->d11inf.decchspec(&ch);
1359 /* Check if wpa_supplicant has registered for this frame */ 1365 /* Check if wpa_supplicant has registered for this frame */
1360 brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg); 1366 brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
1361 mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4; 1367 mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
@@ -1374,7 +1380,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1374 &p2p->status) && 1380 &p2p->status) &&
1375 (memcmp(afx_hdl->tx_dst_addr, e->addr, 1381 (memcmp(afx_hdl->tx_dst_addr, e->addr,
1376 ETH_ALEN) == 0)) { 1382 ETH_ALEN) == 0)) {
1377 afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec); 1383 afx_hdl->peer_chan = ch.chnum;
1378 brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n", 1384 brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
1379 afx_hdl->peer_chan); 1385 afx_hdl->peer_chan);
1380 complete(&afx_hdl->act_frm_scan); 1386 complete(&afx_hdl->act_frm_scan);
@@ -1384,7 +1390,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1384 /* After complete GO Negotiation, roll back to mpc mode */ 1390 /* After complete GO Negotiation, roll back to mpc mode */
1385 if ((action == P2P_PAF_GON_CONF) || 1391 if ((action == P2P_PAF_GON_CONF) ||
1386 (action == P2P_PAF_PROVDIS_RSP)) 1392 (action == P2P_PAF_PROVDIS_RSP))
1387 brcmf_set_mpc(ifp->ndev, 1); 1393 brcmf_set_mpc(ifp, 1);
1388 if (action == P2P_PAF_GON_CONF) { 1394 if (action == P2P_PAF_GON_CONF) {
1389 brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n"); 1395 brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
1390 clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); 1396 clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
@@ -1417,11 +1423,12 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
1417 memcpy(&mgmt_frame->u, frame, mgmt_frame_len); 1423 memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
1418 mgmt_frame_len += offsetof(struct ieee80211_mgmt, u); 1424 mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
1419 1425
1420 freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec), 1426 freq = ieee80211_channel_to_frequency(ch.chnum,
1421 CHSPEC_IS2G(chanspec) ? 1427 ch.band == BRCMU_CHAN_BAND_2G ?
1422 IEEE80211_BAND_2GHZ : 1428 IEEE80211_BAND_2GHZ :
1423 IEEE80211_BAND_5GHZ); 1429 IEEE80211_BAND_5GHZ);
1424 wdev = ifp->ndev->ieee80211_ptr; 1430
1431 wdev = &ifp->vif->wdev;
1425 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 1432 cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
1426 GFP_ATOMIC); 1433 GFP_ATOMIC);
1427 1434
@@ -1637,6 +1644,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
1637 struct brcmf_fil_af_params_le *af_params) 1644 struct brcmf_fil_af_params_le *af_params)
1638{ 1645{
1639 struct brcmf_p2p_info *p2p = &cfg->p2p; 1646 struct brcmf_p2p_info *p2p = &cfg->p2p;
1647 struct brcmf_if *ifp = netdev_priv(ndev);
1640 struct brcmf_fil_action_frame_le *action_frame; 1648 struct brcmf_fil_action_frame_le *action_frame;
1641 struct brcmf_config_af_params config_af_params; 1649 struct brcmf_config_af_params config_af_params;
1642 struct afx_hdl *afx_hdl = &p2p->afx_hdl; 1650 struct afx_hdl *afx_hdl = &p2p->afx_hdl;
@@ -1725,7 +1733,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
1725 1733
1726 /* To make sure to send successfully action frame, turn off mpc */ 1734 /* To make sure to send successfully action frame, turn off mpc */
1727 if (config_af_params.mpc_onoff == 0) 1735 if (config_af_params.mpc_onoff == 0)
1728 brcmf_set_mpc(ndev, 0); 1736 brcmf_set_mpc(ifp, 0);
1729 1737
1730 /* set status and destination address before sending af */ 1738 /* set status and destination address before sending af */
1731 if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { 1739 if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
@@ -1753,7 +1761,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
1753 * care of current piggback algo, lets abort the scan here 1761 * care of current piggback algo, lets abort the scan here
1754 * itself. 1762 * itself.
1755 */ 1763 */
1756 brcmf_notify_escan_complete(cfg, ndev, true, true); 1764 brcmf_notify_escan_complete(cfg, ifp, true, true);
1757 1765
1758 /* update channel */ 1766 /* update channel */
1759 af_params->channel = cpu_to_le32(afx_hdl->peer_chan); 1767 af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
@@ -1820,7 +1828,7 @@ exit:
1820 clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status); 1828 clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
1821 /* if all done, turn mpc on again */ 1829 /* if all done, turn mpc on again */
1822 if (config_af_params.mpc_onoff == 1) 1830 if (config_af_params.mpc_onoff == 1)
1823 brcmf_set_mpc(ndev, 1); 1831 brcmf_set_mpc(ifp, 1);
1824 1832
1825 return ack; 1833 return ack;
1826} 1834}
@@ -1839,10 +1847,10 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1839 struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 1847 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
1840 struct brcmf_p2p_info *p2p = &cfg->p2p; 1848 struct brcmf_p2p_info *p2p = &cfg->p2p;
1841 struct afx_hdl *afx_hdl = &p2p->afx_hdl; 1849 struct afx_hdl *afx_hdl = &p2p->afx_hdl;
1842 struct wireless_dev *wdev;
1843 struct brcmf_cfg80211_vif *vif = ifp->vif; 1850 struct brcmf_cfg80211_vif *vif = ifp->vif;
1844 struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data; 1851 struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
1845 u16 chanspec = be16_to_cpu(rxframe->chanspec); 1852 u16 chanspec = be16_to_cpu(rxframe->chanspec);
1853 struct brcmu_chan ch;
1846 u8 *mgmt_frame; 1854 u8 *mgmt_frame;
1847 u32 mgmt_frame_len; 1855 u32 mgmt_frame_len;
1848 s32 freq; 1856 s32 freq;
@@ -1851,9 +1859,12 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1851 brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code, 1859 brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
1852 e->reason); 1860 e->reason);
1853 1861
1862 ch.chspec = be16_to_cpu(rxframe->chanspec);
1863 cfg->d11inf.decchspec(&ch);
1864
1854 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && 1865 if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
1855 (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) { 1866 (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
1856 afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec); 1867 afx_hdl->peer_chan = ch.chnum;
1857 brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n", 1868 brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
1858 afx_hdl->peer_chan); 1869 afx_hdl->peer_chan);
1859 complete(&afx_hdl->act_frm_scan); 1870 complete(&afx_hdl->act_frm_scan);
@@ -1878,12 +1889,13 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
1878 1889
1879 mgmt_frame = (u8 *)(rxframe + 1); 1890 mgmt_frame = (u8 *)(rxframe + 1);
1880 mgmt_frame_len = e->datalen - sizeof(*rxframe); 1891 mgmt_frame_len = e->datalen - sizeof(*rxframe);
1881 freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec), 1892 freq = ieee80211_channel_to_frequency(ch.chnum,
1882 CHSPEC_IS2G(chanspec) ? 1893 ch.band == BRCMU_CHAN_BAND_2G ?
1883 IEEE80211_BAND_2GHZ : 1894 IEEE80211_BAND_2GHZ :
1884 IEEE80211_BAND_5GHZ); 1895 IEEE80211_BAND_5GHZ);
1885 wdev = ifp->ndev->ieee80211_ptr; 1896
1886 cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); 1897 cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len,
1898 GFP_ATOMIC);
1887 1899
1888 brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", 1900 brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
1889 mgmt_frame_len, e->datalen, chanspec, freq); 1901 mgmt_frame_len, e->datalen, chanspec, freq);
@@ -1934,7 +1946,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
1934 1946
1935 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif; 1947 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
1936 1948
1937 brcmf_p2p_generate_bss_mac(p2p); 1949 brcmf_p2p_generate_bss_mac(p2p, NULL);
1950 memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
1938 brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); 1951 brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
1939 1952
1940 /* Initialize P2P Discovery in the firmware */ 1953 /* Initialize P2P Discovery in the firmware */
@@ -2001,21 +2014,19 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
2001{ 2014{
2002 struct brcmf_if *ifp; 2015 struct brcmf_if *ifp;
2003 struct brcmf_fil_chan_info_le ci; 2016 struct brcmf_fil_chan_info_le ci;
2017 struct brcmu_chan ch;
2004 s32 err; 2018 s32 err;
2005 2019
2006 ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 2020 ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
2007 2021
2008 *chanspec = 11 & WL_CHANSPEC_CHAN_MASK; 2022 ch.chnum = 11;
2009 2023
2010 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci)); 2024 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
2011 if (!err) { 2025 if (!err)
2012 *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK; 2026 ch.chnum = le32_to_cpu(ci.hw_channel);
2013 if (*chanspec < CH_MAX_2G_CHANNEL) 2027 ch.bw = BRCMU_CHAN_BW_20;
2014 *chanspec |= WL_CHANSPEC_BAND_2G; 2028 p2p->cfg->d11inf.encchspec(&ch);
2015 else 2029 *chanspec = ch.chspec;
2016 *chanspec |= WL_CHANSPEC_BAND_5G;
2017 }
2018 *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
2019} 2030}
2020 2031
2021/** 2032/**
@@ -2040,13 +2051,13 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
2040 brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n"); 2051 brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
2041 return -EPERM; 2052 return -EPERM;
2042 } 2053 }
2043 brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true); 2054 brcmf_notify_escan_complete(cfg, vif->ifp, true, true);
2044 vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; 2055 vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
2045 if (!vif) { 2056 if (!vif) {
2046 brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n"); 2057 brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
2047 return -EPERM; 2058 return -EPERM;
2048 } 2059 }
2049 brcmf_set_mpc(vif->ifp->ndev, 0); 2060 brcmf_set_mpc(vif->ifp, 0);
2050 2061
2051 /* In concurrency case, STA may be already associated in a particular */ 2062 /* In concurrency case, STA may be already associated in a particular */
2052 /* channel. so retrieve the current channel of primary interface and */ 2063 /* channel. so retrieve the current channel of primary interface and */
@@ -2124,13 +2135,105 @@ static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
2124} 2135}
2125 2136
2126/** 2137/**
2138 * brcmf_p2p_create_p2pdev() - create a P2P_DEVICE virtual interface.
2139 *
2140 * @p2p: P2P specific data.
2141 * @wiphy: wiphy device of new interface.
2142 * @addr: mac address for this new interface.
2143 */
2144static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
2145 struct wiphy *wiphy,
2146 u8 *addr)
2147{
2148 struct brcmf_cfg80211_vif *p2p_vif;
2149 struct brcmf_if *p2p_ifp;
2150 struct brcmf_if *pri_ifp;
2151 int err;
2152 u32 bssidx;
2153
2154 if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
2155 return ERR_PTR(-ENOSPC);
2156
2157 p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE,
2158 false);
2159 if (IS_ERR(p2p_vif)) {
2160 brcmf_err("could not create discovery vif\n");
2161 return (struct wireless_dev *)p2p_vif;
2162 }
2163
2164 pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
2165 brcmf_p2p_generate_bss_mac(p2p, addr);
2166 brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
2167
2168 brcmf_cfg80211_arm_vif_event(p2p->cfg, p2p_vif);
2169
2170 /* Initialize P2P Discovery in the firmware */
2171 err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
2172 if (err < 0) {
2173 brcmf_err("set p2p_disc error\n");
2174 brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
2175 goto fail;
2176 }
2177
2178 /* wait for firmware event */
2179 err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD,
2180 msecs_to_jiffies(1500));
2181 brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
2182 if (!err) {
2183 brcmf_err("timeout occurred\n");
2184 err = -EIO;
2185 goto fail;
2186 }
2187
2188 /* discovery interface created */
2189 p2p_ifp = p2p_vif->ifp;
2190 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
2191 memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
2192 memcpy(&p2p_vif->wdev.address, p2p->dev_addr, sizeof(p2p->dev_addr));
2193
2194 /* verify bsscfg index for P2P discovery */
2195 err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
2196 if (err < 0) {
2197 brcmf_err("retrieving discover bsscfg index failed\n");
2198 goto fail;
2199 }
2200
2201 WARN_ON(p2p_ifp->bssidx != bssidx);
2202
2203 init_completion(&p2p->send_af_done);
2204 INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
2205 init_completion(&p2p->afx_hdl.act_frm_scan);
2206 init_completion(&p2p->wait_next_af);
2207
2208 return &p2p_vif->wdev;
2209
2210fail:
2211 brcmf_free_vif(p2p_vif);
2212 return ERR_PTR(err);
2213}
2214
2215/**
2216 * brcmf_p2p_delete_p2pdev() - delete P2P_DEVICE virtual interface.
2217 *
2218 * @vif: virtual interface object to delete.
2219 */
2220static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_vif *vif)
2221{
2222 struct brcmf_p2p_info *p2p = &vif->ifp->drvr->config->p2p;
2223
2224 cfg80211_unregister_wdev(&vif->wdev);
2225 p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
2226 brcmf_free_vif(vif);
2227}
2228
2229/**
2127 * brcmf_p2p_add_vif() - create a new P2P virtual interface. 2230 * brcmf_p2p_add_vif() - create a new P2P virtual interface.
2128 * 2231 *
2129 * @wiphy: wiphy device of new interface. 2232 * @wiphy: wiphy device of new interface.
2130 * @name: name of the new interface. 2233 * @name: name of the new interface.
2131 * @type: nl80211 interface type. 2234 * @type: nl80211 interface type.
2132 * @flags: TBD 2235 * @flags: not used.
2133 * @params: TBD 2236 * @params: contains mac address for P2P device.
2134 */ 2237 */
2135struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, 2238struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2136 enum nl80211_iftype type, u32 *flags, 2239 enum nl80211_iftype type, u32 *flags,
@@ -2157,6 +2260,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
2157 iftype = BRCMF_FIL_P2P_IF_GO; 2260 iftype = BRCMF_FIL_P2P_IF_GO;
2158 mode = WL_MODE_AP; 2261 mode = WL_MODE_AP;
2159 break; 2262 break;
2263 case NL80211_IFTYPE_P2P_DEVICE:
2264 return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
2265 params->macaddr);
2160 default: 2266 default:
2161 return ERR_PTR(-EOPNOTSUPP); 2267 return ERR_PTR(-EOPNOTSUPP);
2162 } 2268 }
@@ -2244,6 +2350,8 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2244 break; 2350 break;
2245 2351
2246 case NL80211_IFTYPE_P2P_DEVICE: 2352 case NL80211_IFTYPE_P2P_DEVICE:
2353 brcmf_p2p_delete_p2pdev(vif);
2354 return 0;
2247 default: 2355 default:
2248 return -ENOTSUPP; 2356 return -ENOTSUPP;
2249 break; 2357 break;
@@ -2275,3 +2383,33 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2275 2383
2276 return err; 2384 return err;
2277} 2385}
2386
2387int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev)
2388{
2389 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2390 struct brcmf_p2p_info *p2p = &cfg->p2p;
2391 struct brcmf_cfg80211_vif *vif;
2392 int err;
2393
2394 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
2395 mutex_lock(&cfg->usr_sync);
2396 err = brcmf_p2p_enable_discovery(p2p);
2397 if (!err)
2398 set_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
2399 mutex_unlock(&cfg->usr_sync);
2400 return err;
2401}
2402
2403void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
2404{
2405 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2406 struct brcmf_p2p_info *p2p = &cfg->p2p;
2407 struct brcmf_cfg80211_vif *vif;
2408
2409 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
2410 mutex_lock(&cfg->usr_sync);
2411 (void)brcmf_p2p_deinit_discovery(p2p);
2412 brcmf_abort_scanning(cfg);
2413 clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
2414 mutex_unlock(&cfg->usr_sync);
2415}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 14be2d5530ce..ca72177388b9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -40,6 +40,15 @@
40#define BCM4329_CORE_ARM_BASE 0x18002000 40#define BCM4329_CORE_ARM_BASE 0x18002000
41#define BCM4329_RAMSIZE 0x48000 41#define BCM4329_RAMSIZE 0x48000
42 42
43/* bcm43143 */
44/* SDIO device core */
45#define BCM43143_CORE_BUS_BASE 0x18002000
46/* internal memory core */
47#define BCM43143_CORE_SOCRAM_BASE 0x18004000
48/* ARM Cortex M3 core, ID 0x82a */
49#define BCM43143_CORE_ARM_BASE 0x18003000
50#define BCM43143_RAMSIZE 0x70000
51
43#define SBCOREREV(sbidh) \ 52#define SBCOREREV(sbidh) \
44 ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \ 53 ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
45 ((sbidh) & SSB_IDHIGH_RCLO)) 54 ((sbidh) & SSB_IDHIGH_RCLO))
@@ -52,6 +61,9 @@
52#define CIB_REV_MASK 0xff000000 61#define CIB_REV_MASK 0xff000000
53#define CIB_REV_SHIFT 24 62#define CIB_REV_SHIFT 24
54 63
64/* ARM CR4 core specific control flag bits */
65#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
66
55#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) 67#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
56/* SDIO Pad drive strength to select value mappings */ 68/* SDIO Pad drive strength to select value mappings */
57struct sdiod_drive_str { 69struct sdiod_drive_str {
@@ -70,6 +82,14 @@ static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
70 {0, 0x1} 82 {0, 0x1}
71}; 83};
72 84
85/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
86static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
87 {16, 0x7},
88 {12, 0x5},
89 {8, 0x3},
90 {4, 0x1}
91};
92
73u8 93u8
74brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid) 94brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
75{ 95{
@@ -149,7 +169,7 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
149 169
150static void 170static void
151brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev, 171brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
152 struct chip_info *ci, u16 coreid) 172 struct chip_info *ci, u16 coreid, u32 core_bits)
153{ 173{
154 u32 regdata, base; 174 u32 regdata, base;
155 u8 idx; 175 u8 idx;
@@ -235,7 +255,7 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
235 255
236static void 256static void
237brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev, 257brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
238 struct chip_info *ci, u16 coreid) 258 struct chip_info *ci, u16 coreid, u32 core_bits)
239{ 259{
240 u8 idx; 260 u8 idx;
241 u32 regdata; 261 u32 regdata;
@@ -249,19 +269,36 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
249 if ((regdata & BCMA_RESET_CTL_RESET) != 0) 269 if ((regdata & BCMA_RESET_CTL_RESET) != 0)
250 return; 270 return;
251 271
252 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 0, NULL); 272 /* ensure no pending backplane operation
253 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 273 * 300uc should be sufficient for backplane ops to be finish
274 * extra 10ms is taken into account for firmware load stage
275 * after 10300us carry on disabling the core anyway
276 */
277 SPINWAIT(brcmf_sdio_regrl(sdiodev,
278 ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
279 NULL), 10300);
280 regdata = brcmf_sdio_regrl(sdiodev,
281 ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
254 NULL); 282 NULL);
255 udelay(10); 283 if (regdata)
284 brcmf_err("disabling core 0x%x with reset status %x\n",
285 coreid, regdata);
256 286
257 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, 287 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
258 BCMA_RESET_CTL_RESET, NULL); 288 BCMA_RESET_CTL_RESET, NULL);
259 udelay(1); 289 udelay(1);
290
291 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
292 core_bits, NULL);
293 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
294 NULL);
295 usleep_range(10, 20);
296
260} 297}
261 298
262static void 299static void
263brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev, 300brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
264 struct chip_info *ci, u16 coreid) 301 struct chip_info *ci, u16 coreid, u32 core_bits)
265{ 302{
266 u32 regdata; 303 u32 regdata;
267 u8 idx; 304 u8 idx;
@@ -272,7 +309,7 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
272 * Must do the disable sequence first to work for 309 * Must do the disable sequence first to work for
273 * arbitrary current core state. 310 * arbitrary current core state.
274 */ 311 */
275 brcmf_sdio_sb_coredisable(sdiodev, ci, coreid); 312 brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, 0);
276 313
277 /* 314 /*
278 * Now do the initialization sequence. 315 * Now do the initialization sequence.
@@ -325,7 +362,7 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
325 362
326static void 363static void
327brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev, 364brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
328 struct chip_info *ci, u16 coreid) 365 struct chip_info *ci, u16 coreid, u32 core_bits)
329{ 366{
330 u8 idx; 367 u8 idx;
331 u32 regdata; 368 u32 regdata;
@@ -333,31 +370,69 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
333 idx = brcmf_sdio_chip_getinfidx(ci, coreid); 370 idx = brcmf_sdio_chip_getinfidx(ci, coreid);
334 371
335 /* must disable first to work for arbitrary current core state */ 372 /* must disable first to work for arbitrary current core state */
336 brcmf_sdio_ai_coredisable(sdiodev, ci, coreid); 373 brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
337 374
338 /* now do initialization sequence */ 375 /* now do initialization sequence */
339 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 376 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
340 BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL); 377 core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
341 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 378 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
342 NULL); 379 NULL);
343 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL, 380 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
344 0, NULL); 381 0, NULL);
382 regdata = brcmf_sdio_regrl(sdiodev,
383 ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
384 NULL);
345 udelay(1); 385 udelay(1);
346 386
347 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 387 brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
348 BCMA_IOCTL_CLK, NULL); 388 core_bits | BCMA_IOCTL_CLK, NULL);
349 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 389 regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
350 NULL); 390 NULL);
351 udelay(1); 391 udelay(1);
352} 392}
353 393
394#ifdef DEBUG
395/* safety check for chipinfo */
396static int brcmf_sdio_chip_cichk(struct chip_info *ci)
397{
398 u8 core_idx;
399
400 /* check RAM core presence for ARM CM3 core */
401 core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
402 if (BRCMF_MAX_CORENUM != core_idx) {
403 core_idx = brcmf_sdio_chip_getinfidx(ci,
404 BCMA_CORE_INTERNAL_MEM);
405 if (BRCMF_MAX_CORENUM == core_idx) {
406 brcmf_err("RAM core not provided with ARM CM3 core\n");
407 return -ENODEV;
408 }
409 }
410
411 /* check RAM base for ARM CR4 core */
412 core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
413 if (BRCMF_MAX_CORENUM != core_idx) {
414 if (ci->rambase == 0) {
415 brcmf_err("RAM base not provided with ARM CR4 core\n");
416 return -ENOMEM;
417 }
418 }
419
420 return 0;
421}
422#else /* DEBUG */
423static inline int brcmf_sdio_chip_cichk(struct chip_info *ci)
424{
425 return 0;
426}
427#endif
428
354static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, 429static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
355 struct chip_info *ci, u32 regs) 430 struct chip_info *ci, u32 regs)
356{ 431{
357 u32 regdata; 432 u32 regdata;
433 int ret;
358 434
359 /* 435 /* Get CC core rev
360 * Get CC core rev
361 * Chipid is assume to be at offset 0 from regs arg 436 * Chipid is assume to be at offset 0 from regs arg
362 * For different chiptypes or old sdio hosts w/o chipcommon, 437 * For different chiptypes or old sdio hosts w/o chipcommon,
363 * other ways of recognition should be added here. 438 * other ways of recognition should be added here.
@@ -375,6 +450,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
375 450
376 /* Address of cores for new chips should be added here */ 451 /* Address of cores for new chips should be added here */
377 switch (ci->chip) { 452 switch (ci->chip) {
453 case BCM43143_CHIP_ID:
454 ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
455 ci->c_inf[0].cib = 0x2b000000;
456 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
457 ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
458 ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
459 ci->c_inf[1].cib = 0x18000000;
460 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
461 ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
462 ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
463 ci->c_inf[2].cib = 0x14000000;
464 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
465 ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
466 ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
467 ci->c_inf[3].cib = 0x07000000;
468 ci->ramsize = BCM43143_RAMSIZE;
469 break;
378 case BCM43241_CHIP_ID: 470 case BCM43241_CHIP_ID:
379 ci->c_inf[0].wrapbase = 0x18100000; 471 ci->c_inf[0].wrapbase = 0x18100000;
380 ci->c_inf[0].cib = 0x2a084411; 472 ci->c_inf[0].cib = 0x2a084411;
@@ -435,11 +527,29 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
435 ci->c_inf[3].cib = 0x07004211; 527 ci->c_inf[3].cib = 0x07004211;
436 ci->ramsize = 0x80000; 528 ci->ramsize = 0x80000;
437 break; 529 break;
530 case BCM4335_CHIP_ID:
531 ci->c_inf[0].wrapbase = 0x18100000;
532 ci->c_inf[0].cib = 0x2b084411;
533 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
534 ci->c_inf[1].base = 0x18005000;
535 ci->c_inf[1].wrapbase = 0x18105000;
536 ci->c_inf[1].cib = 0x0f004211;
537 ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
538 ci->c_inf[2].base = 0x18002000;
539 ci->c_inf[2].wrapbase = 0x18102000;
540 ci->c_inf[2].cib = 0x01084411;
541 ci->ramsize = 0xc0000;
542 ci->rambase = 0x180000;
543 break;
438 default: 544 default:
439 brcmf_err("chipid 0x%x is not supported\n", ci->chip); 545 brcmf_err("chipid 0x%x is not supported\n", ci->chip);
440 return -ENODEV; 546 return -ENODEV;
441 } 547 }
442 548
549 ret = brcmf_sdio_chip_cichk(ci);
550 if (ret)
551 return ret;
552
443 switch (ci->socitype) { 553 switch (ci->socitype) {
444 case SOCI_SB: 554 case SOCI_SB:
445 ci->iscoreup = brcmf_sdio_sb_iscoreup; 555 ci->iscoreup = brcmf_sdio_sb_iscoreup;
@@ -539,7 +649,7 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
539 * Make sure any on-chip ARM is off (in case strapping is wrong), 649 * Make sure any on-chip ARM is off (in case strapping is wrong),
540 * or downloaded code was already running. 650 * or downloaded code was already running.
541 */ 651 */
542 ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3); 652 ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
543} 653}
544 654
545int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, 655int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
@@ -600,21 +710,37 @@ void
600brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, 710brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
601 struct chip_info *ci, u32 drivestrength) 711 struct chip_info *ci, u32 drivestrength)
602{ 712{
603 struct sdiod_drive_str *str_tab = NULL; 713 const struct sdiod_drive_str *str_tab = NULL;
604 u32 str_mask = 0; 714 u32 str_mask;
605 u32 str_shift = 0; 715 u32 str_shift;
606 char chn[8]; 716 char chn[8];
607 u32 base = ci->c_inf[0].base; 717 u32 base = ci->c_inf[0].base;
718 u32 i;
719 u32 drivestrength_sel = 0;
720 u32 cc_data_temp;
721 u32 addr;
608 722
609 if (!(ci->c_inf[0].caps & CC_CAP_PMU)) 723 if (!(ci->c_inf[0].caps & CC_CAP_PMU))
610 return; 724 return;
611 725
612 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { 726 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
613 case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): 727 case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
614 str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8; 728 str_tab = sdiod_drvstr_tab1_1v8;
615 str_mask = 0x00003800; 729 str_mask = 0x00003800;
616 str_shift = 11; 730 str_shift = 11;
617 break; 731 break;
732 case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
733 /* note: 43143 does not support tristate */
734 i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
735 if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
736 str_tab = sdiod_drvstr_tab2_3v3;
737 str_mask = 0x00000007;
738 str_shift = 0;
739 } else
740 brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
741 brcmf_sdio_chip_name(ci->chip, chn, 8),
742 drivestrength);
743 break;
618 default: 744 default:
619 brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", 745 brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
620 brcmf_sdio_chip_name(ci->chip, chn, 8), 746 brcmf_sdio_chip_name(ci->chip, chn, 8),
@@ -623,30 +749,207 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
623 } 749 }
624 750
625 if (str_tab != NULL) { 751 if (str_tab != NULL) {
626 u32 drivestrength_sel = 0;
627 u32 cc_data_temp;
628 int i;
629
630 for (i = 0; str_tab[i].strength != 0; i++) { 752 for (i = 0; str_tab[i].strength != 0; i++) {
631 if (drivestrength >= str_tab[i].strength) { 753 if (drivestrength >= str_tab[i].strength) {
632 drivestrength_sel = str_tab[i].sel; 754 drivestrength_sel = str_tab[i].sel;
633 break; 755 break;
634 } 756 }
635 } 757 }
636 758 addr = CORE_CC_REG(base, chipcontrol_addr);
637 brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr), 759 brcmf_sdio_regwl(sdiodev, addr, 1, NULL);
638 1, NULL); 760 cc_data_temp = brcmf_sdio_regrl(sdiodev, addr, NULL);
639 cc_data_temp =
640 brcmf_sdio_regrl(sdiodev,
641 CORE_CC_REG(base, chipcontrol_addr),
642 NULL);
643 cc_data_temp &= ~str_mask; 761 cc_data_temp &= ~str_mask;
644 drivestrength_sel <<= str_shift; 762 drivestrength_sel <<= str_shift;
645 cc_data_temp |= drivestrength_sel; 763 cc_data_temp |= drivestrength_sel;
646 brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr), 764 brcmf_sdio_regwl(sdiodev, addr, cc_data_temp, NULL);
647 cc_data_temp, NULL);
648 765
649 brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n", 766 brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
650 drivestrength, cc_data_temp); 767 str_tab[i].strength, drivestrength, cc_data_temp);
651 } 768 }
652} 769}
770
771#ifdef DEBUG
772static bool
773brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
774 char *nvram_dat, uint nvram_sz)
775{
776 char *nvram_ularray;
777 int err;
778 bool ret = true;
779
780 /* read back and verify */
781 brcmf_dbg(INFO, "Compare NVRAM dl & ul; size=%d\n", nvram_sz);
782 nvram_ularray = kmalloc(nvram_sz, GFP_KERNEL);
783 /* do not proceed while no memory but */
784 if (!nvram_ularray)
785 return true;
786
787 /* Upload image to verify downloaded contents. */
788 memset(nvram_ularray, 0xaa, nvram_sz);
789
790 /* Read the vars list to temp buffer for comparison */
791 err = brcmf_sdio_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
792 nvram_sz);
793 if (err) {
794 brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
795 err, nvram_sz, nvram_addr);
796 } else if (memcmp(nvram_dat, nvram_ularray, nvram_sz)) {
797 brcmf_err("Downloaded NVRAM image is corrupted\n");
798 ret = false;
799 }
800 kfree(nvram_ularray);
801
802 return ret;
803}
804#else /* DEBUG */
805static inline bool
806brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
807 char *nvram_dat, uint nvram_sz)
808{
809 return true;
810}
811#endif /* DEBUG */
812
813static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
814 struct chip_info *ci,
815 char *nvram_dat, uint nvram_sz)
816{
817 int err;
818 u32 nvram_addr;
819 u32 token;
820 __le32 token_le;
821
822 nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase;
823
824 /* Write the vars list */
825 err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
826 if (err) {
827 brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
828 err, nvram_sz, nvram_addr);
829 return false;
830 }
831
832 if (!brcmf_sdio_chip_verifynvram(sdiodev, nvram_addr,
833 nvram_dat, nvram_sz))
834 return false;
835
836 /* generate token:
837 * nvram size, converted to words, in lower 16-bits, checksum
838 * in upper 16-bits.
839 */
840 token = nvram_sz / 4;
841 token = (~token << 16) | (token & 0x0000FFFF);
842 token_le = cpu_to_le32(token);
843
844 brcmf_dbg(INFO, "RAM size: %d\n", ci->ramsize);
845 brcmf_dbg(INFO, "nvram is placed at %d, size %d, token=0x%08x\n",
846 nvram_addr, nvram_sz, token);
847
848 /* Write the length token to the last word */
849 if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
850 (u8 *)&token_le, 4))
851 return false;
852
853 return true;
854}
855
856static void
857brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
858 struct chip_info *ci)
859{
860 u32 zeros = 0;
861
862 ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
863 ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0);
864
865 /* clear length token */
866 brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
867}
868
869static bool
870brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
871 char *nvram_dat, uint nvram_sz)
872{
873 u8 core_idx;
874 u32 reg_addr;
875
876 if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
877 brcmf_err("SOCRAM core is down after reset?\n");
878 return false;
879 }
880
881 if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
882 return false;
883
884 /* clear all interrupts */
885 core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
886 reg_addr = ci->c_inf[core_idx].base;
887 reg_addr += offsetof(struct sdpcmd_regs, intstatus);
888 brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
889
890 ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
891
892 return true;
893}
894
895static inline void
896brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
897 struct chip_info *ci)
898{
899 ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4,
900 ARMCR4_BCMA_IOCTL_CPUHALT);
901}
902
903static bool
904brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
905 char *nvram_dat, uint nvram_sz)
906{
907 u8 core_idx;
908 u32 reg_addr;
909
910 if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
911 return false;
912
913 /* clear all interrupts */
914 core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
915 reg_addr = ci->c_inf[core_idx].base;
916 reg_addr += offsetof(struct sdpcmd_regs, intstatus);
917 brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
918
919 /* Write reset vector to address 0 */
920 brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
921 sizeof(ci->rst_vec));
922
923 /* restore ARM */
924 ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0);
925
926 return true;
927}
928
929void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
930 struct chip_info *ci)
931{
932 u8 arm_core_idx;
933
934 arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
935 if (BRCMF_MAX_CORENUM != arm_core_idx) {
936 brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
937 return;
938 }
939
940 brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
941}
942
943bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
944 struct chip_info *ci, char *nvram_dat,
945 uint nvram_sz)
946{
947 u8 arm_core_idx;
948
949 arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
950 if (BRCMF_MAX_CORENUM != arm_core_idx)
951 return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat,
952 nvram_sz);
953
954 return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, nvram_dat, nvram_sz);
955}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index ce974d76bd92..83c041f1bf4a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -73,15 +73,17 @@ struct chip_info {
73 u32 pmurev; 73 u32 pmurev;
74 u32 pmucaps; 74 u32 pmucaps;
75 u32 ramsize; 75 u32 ramsize;
76 u32 rambase;
77 u32 rst_vec; /* reset vertor for ARM CR4 core */
76 78
77 bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, 79 bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
78 u16 coreid); 80 u16 coreid);
79 u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci, 81 u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
80 u16 coreid); 82 u16 coreid);
81 void (*coredisable)(struct brcmf_sdio_dev *sdiodev, 83 void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
82 struct chip_info *ci, u16 coreid); 84 struct chip_info *ci, u16 coreid, u32 core_bits);
83 void (*resetcore)(struct brcmf_sdio_dev *sdiodev, 85 void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
84 struct chip_info *ci, u16 coreid); 86 struct chip_info *ci, u16 coreid, u32 core_bits);
85}; 87};
86 88
87struct sbconfig { 89struct sbconfig {
@@ -124,6 +126,95 @@ struct sbconfig {
124 u32 sbidhigh; /* identification */ 126 u32 sbidhigh; /* identification */
125}; 127};
126 128
129/* sdio core registers */
130struct sdpcmd_regs {
131 u32 corecontrol; /* 0x00, rev8 */
132 u32 corestatus; /* rev8 */
133 u32 PAD[1];
134 u32 biststatus; /* rev8 */
135
136 /* PCMCIA access */
137 u16 pcmciamesportaladdr; /* 0x010, rev8 */
138 u16 PAD[1];
139 u16 pcmciamesportalmask; /* rev8 */
140 u16 PAD[1];
141 u16 pcmciawrframebc; /* rev8 */
142 u16 PAD[1];
143 u16 pcmciaunderflowtimer; /* rev8 */
144 u16 PAD[1];
145
146 /* interrupt */
147 u32 intstatus; /* 0x020, rev8 */
148 u32 hostintmask; /* rev8 */
149 u32 intmask; /* rev8 */
150 u32 sbintstatus; /* rev8 */
151 u32 sbintmask; /* rev8 */
152 u32 funcintmask; /* rev4 */
153 u32 PAD[2];
154 u32 tosbmailbox; /* 0x040, rev8 */
155 u32 tohostmailbox; /* rev8 */
156 u32 tosbmailboxdata; /* rev8 */
157 u32 tohostmailboxdata; /* rev8 */
158
159 /* synchronized access to registers in SDIO clock domain */
160 u32 sdioaccess; /* 0x050, rev8 */
161 u32 PAD[3];
162
163 /* PCMCIA frame control */
164 u8 pcmciaframectrl; /* 0x060, rev8 */
165 u8 PAD[3];
166 u8 pcmciawatermark; /* rev8 */
167 u8 PAD[155];
168
169 /* interrupt batching control */
170 u32 intrcvlazy; /* 0x100, rev8 */
171 u32 PAD[3];
172
173 /* counters */
174 u32 cmd52rd; /* 0x110, rev8 */
175 u32 cmd52wr; /* rev8 */
176 u32 cmd53rd; /* rev8 */
177 u32 cmd53wr; /* rev8 */
178 u32 abort; /* rev8 */
179 u32 datacrcerror; /* rev8 */
180 u32 rdoutofsync; /* rev8 */
181 u32 wroutofsync; /* rev8 */
182 u32 writebusy; /* rev8 */
183 u32 readwait; /* rev8 */
184 u32 readterm; /* rev8 */
185 u32 writeterm; /* rev8 */
186 u32 PAD[40];
187 u32 clockctlstatus; /* rev8 */
188 u32 PAD[7];
189
190 u32 PAD[128]; /* DMA engines */
191
192 /* SDIO/PCMCIA CIS region */
193 char cis[512]; /* 0x400-0x5ff, rev6 */
194
195 /* PCMCIA function control registers */
196 char pcmciafcr[256]; /* 0x600-6ff, rev6 */
197 u16 PAD[55];
198
199 /* PCMCIA backplane access */
200 u16 backplanecsr; /* 0x76E, rev6 */
201 u16 backplaneaddr0; /* rev6 */
202 u16 backplaneaddr1; /* rev6 */
203 u16 backplaneaddr2; /* rev6 */
204 u16 backplaneaddr3; /* rev6 */
205 u16 backplanedata0; /* rev6 */
206 u16 backplanedata1; /* rev6 */
207 u16 backplanedata2; /* rev6 */
208 u16 backplanedata3; /* rev6 */
209 u16 PAD[31];
210
211 /* sprom "size" & "blank" info */
212 u16 spromstatus; /* 0x7BE, rev2 */
213 u32 PAD[464];
214
215 u16 PAD[0x80];
216};
217
127extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev, 218extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
128 struct chip_info **ci_ptr, u32 regs); 219 struct chip_info **ci_ptr, u32 regs);
129extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr); 220extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
@@ -131,6 +222,10 @@ extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
131 struct chip_info *ci, 222 struct chip_info *ci,
132 u32 drivestrength); 223 u32 drivestrength);
133extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid); 224extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
134 225extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
226 struct chip_info *ci);
227extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
228 struct chip_info *ci, char *nvram_dat,
229 uint nvram_sz);
135 230
136#endif /* _BRCMFMAC_SDIO_CHIP_H_ */ 231#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 0d30afd8c672..7c1b6332747e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -48,7 +48,13 @@
48#define SBSDIO_NUM_FUNCTION 3 48#define SBSDIO_NUM_FUNCTION 3
49 49
50/* function 0 vendor specific CCCR registers */ 50/* function 0 vendor specific CCCR registers */
51#define SDIO_CCCR_BRCM_SEPINT 0xf2 51#define SDIO_CCCR_BRCM_CARDCAP 0xf0
52#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
53#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
54#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
55#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
56#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET 0x02
57#define SDIO_CCCR_BRCM_SEPINT 0xf2
52 58
53#define SDIO_SEPINT_MASK 0x01 59#define SDIO_SEPINT_MASK 0x01
54#define SDIO_SEPINT_OE 0x02 60#define SDIO_SEPINT_OE 0x02
@@ -97,9 +103,23 @@
97#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B 103#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B
98/* Read Frame Byte Count High */ 104/* Read Frame Byte Count High */
99#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C 105#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C
106/* MesBusyCtl (rev 11) */
107#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D
108/* Sdio Core Rev 12 */
109#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
110#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
111#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0
112#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2
113#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1
114#define SBSDIO_FUNC1_SLEEPCSR 0x1001F
115#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1
116#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0
117#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1
118#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2
119#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1
100 120
101#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ 121#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
102#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */ 122#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001F /* f1 misc register end */
103 123
104/* function 1 OCP space */ 124/* function 1 OCP space */
105 125
@@ -154,13 +174,11 @@ struct brcmf_sdio_dev {
154 wait_queue_head_t request_buffer_wait; 174 wait_queue_head_t request_buffer_wait;
155 struct device *dev; 175 struct device *dev;
156 struct brcmf_bus *bus_if; 176 struct brcmf_bus *bus_if;
157#ifdef CONFIG_BRCMFMAC_SDIO_OOB 177 struct brcmfmac_sdio_platform_data *pdata;
158 unsigned int irq; /* oob interrupt number */ 178 bool oob_irq_requested;
159 unsigned long irq_flags; /* board specific oob flags */
160 bool irq_en; /* irq enable flags */ 179 bool irq_en; /* irq enable flags */
161 spinlock_t irq_en_lock; 180 spinlock_t irq_en_lock;
162 bool irq_wake; /* irq wake enable flags */ 181 bool irq_wake; /* irq wake enable flags */
163#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
164}; 182};
165 183
166/* Register/deregister interrupt handler. */ 184/* Register/deregister interrupt handler. */
@@ -224,6 +242,8 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
224 */ 242 */
225extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, 243extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
226 u32 addr, u8 *buf, uint nbytes); 244 u32 addr, u8 *buf, uint nbytes);
245extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
246 u32 address, u8 *data, uint size);
227 247
228/* Issue an abort to the specified function */ 248/* Issue an abort to the specified function */
229extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn); 249extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
new file mode 100644
index 000000000000..b505db48c60d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -0,0 +1,22 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h> /* bug in tracepoint.h, it should include this */
18
19#ifndef __CHECKER__
20#define CREATE_TRACE_POINTS
21#include "tracepoint.h"
22#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
new file mode 100644
index 000000000000..9df1f7a681e0
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#if !defined(BRCMF_TRACEPOINT_H_) || defined(TRACE_HEADER_MULTI_READ)
17#define BRCMF_TRACEPOINT_H_
18
19#include <linux/types.h>
20#include <linux/tracepoint.h>
21
22#ifndef CONFIG_BRCM_TRACING
23
24#undef TRACE_EVENT
25#define TRACE_EVENT(name, proto, ...) \
26static inline void trace_ ## name(proto) {}
27
28#undef DECLARE_EVENT_CLASS
29#define DECLARE_EVENT_CLASS(...)
30
31#undef DEFINE_EVENT
32#define DEFINE_EVENT(evt_class, name, proto, ...) \
33static inline void trace_ ## name(proto) {}
34
35#endif /* CONFIG_BRCM_TRACING */
36
37#undef TRACE_SYSTEM
38#define TRACE_SYSTEM brcmfmac
39
40#define MAX_MSG_LEN 100
41
42TRACE_EVENT(brcmf_err,
43 TP_PROTO(const char *func, struct va_format *vaf),
44 TP_ARGS(func, vaf),
45 TP_STRUCT__entry(
46 __string(func, func)
47 __dynamic_array(char, msg, MAX_MSG_LEN)
48 ),
49 TP_fast_assign(
50 __assign_str(func, func);
51 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
52 MAX_MSG_LEN, vaf->fmt,
53 *vaf->va) >= MAX_MSG_LEN);
54 ),
55 TP_printk("%s: %s", __get_str(func), __get_str(msg))
56);
57
58TRACE_EVENT(brcmf_dbg,
59 TP_PROTO(u32 level, const char *func, struct va_format *vaf),
60 TP_ARGS(level, func, vaf),
61 TP_STRUCT__entry(
62 __field(u32, level)
63 __string(func, func)
64 __dynamic_array(char, msg, MAX_MSG_LEN)
65 ),
66 TP_fast_assign(
67 __entry->level = level;
68 __assign_str(func, func);
69 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
70 MAX_MSG_LEN, vaf->fmt,
71 *vaf->va) >= MAX_MSG_LEN);
72 ),
73 TP_printk("%s: %s", __get_str(func), __get_str(msg))
74);
75
76TRACE_EVENT(brcmf_hexdump,
77 TP_PROTO(void *data, size_t len),
78 TP_ARGS(data, len),
79 TP_STRUCT__entry(
80 __field(unsigned long, len)
81 __dynamic_array(u8, hdata, len)
82 ),
83 TP_fast_assign(
84 __entry->len = len;
85 memcpy(__get_dynamic_array(hdata), data, len);
86 ),
87 TP_printk("hexdump [length=%lu]", __entry->len)
88);
89
90#ifdef CONFIG_BRCM_TRACING
91
92#undef TRACE_INCLUDE_PATH
93#define TRACE_INCLUDE_PATH .
94#undef TRACE_INCLUDE_FILE
95#define TRACE_INCLUDE_FILE tracepoint
96
97#include <trace/define_trace.h>
98
99#endif /* CONFIG_BRCM_TRACING */
100
101#endif /* BRCMF_TRACEPOINT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 42289e9ea886..01aed7ad6bec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -112,11 +112,6 @@ struct brcmf_usbdev_info {
112static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 112static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
113 struct brcmf_usbreq *req); 113 struct brcmf_usbreq *req);
114 114
115MODULE_AUTHOR("Broadcom Corporation");
116MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac usb driver.");
117MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac usb cards");
118MODULE_LICENSE("Dual BSD/GPL");
119
120static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev) 115static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
121{ 116{
122 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 117 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -422,8 +417,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
422 brcmf_usb_del_fromq(devinfo, req); 417 brcmf_usb_del_fromq(devinfo, req);
423 418
424 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); 419 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
425
426 brcmu_pkt_buf_free_skb(req->skb);
427 req->skb = NULL; 420 req->skb = NULL;
428 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); 421 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
429 if (devinfo->tx_freecount > devinfo->tx_high_watermark && 422 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
@@ -577,15 +570,17 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
577 int ret; 570 int ret;
578 571
579 brcmf_dbg(USB, "Enter, skb=%p\n", skb); 572 brcmf_dbg(USB, "Enter, skb=%p\n", skb);
580 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) 573 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
581 return -EIO; 574 ret = -EIO;
575 goto fail;
576 }
582 577
583 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq, 578 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
584 &devinfo->tx_freecount); 579 &devinfo->tx_freecount);
585 if (!req) { 580 if (!req) {
586 brcmu_pkt_buf_free_skb(skb);
587 brcmf_err("no req to send\n"); 581 brcmf_err("no req to send\n");
588 return -ENOMEM; 582 ret = -ENOMEM;
583 goto fail;
589 } 584 }
590 585
591 req->skb = skb; 586 req->skb = skb;
@@ -598,18 +593,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
598 if (ret) { 593 if (ret) {
599 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n"); 594 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
600 brcmf_usb_del_fromq(devinfo, req); 595 brcmf_usb_del_fromq(devinfo, req);
601 brcmu_pkt_buf_free_skb(req->skb);
602 req->skb = NULL; 596 req->skb = NULL;
603 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, 597 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
604 &devinfo->tx_freecount); 598 &devinfo->tx_freecount);
605 } else { 599 goto fail;
606 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
607 !devinfo->tx_flowblock) {
608 brcmf_txflowblock(dev, true);
609 devinfo->tx_flowblock = true;
610 }
611 } 600 }
612 601
602 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
603 !devinfo->tx_flowblock) {
604 brcmf_txflowblock(dev, true);
605 devinfo->tx_flowblock = true;
606 }
607 return 0;
608
609fail:
610 brcmf_txcomplete(dev, skb, false);
613 return ret; 611 return ret;
614} 612}
615 613
@@ -1485,6 +1483,7 @@ static struct usb_device_id brcmf_usb_devid_table[] = {
1485 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) }, 1483 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
1486 { } 1484 { }
1487}; 1485};
1486
1488MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table); 1487MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
1489MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME); 1488MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1490MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1489MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 78da3eff75e8..6d758f285352 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -26,8 +26,10 @@
26#include <brcmu_wifi.h> 26#include <brcmu_wifi.h>
27#include "dhd.h" 27#include "dhd.h"
28#include "dhd_dbg.h" 28#include "dhd_dbg.h"
29#include "tracepoint.h"
29#include "fwil_types.h" 30#include "fwil_types.h"
30#include "p2p.h" 31#include "p2p.h"
32#include "btcoex.h"
31#include "wl_cfg80211.h" 33#include "wl_cfg80211.h"
32#include "fwil.h" 34#include "fwil.h"
33 35
@@ -182,64 +184,6 @@ static struct ieee80211_channel __wl_5ghz_a_channels[] = {
182 CHAN5G(216, 0), 184 CHAN5G(216, 0),
183}; 185};
184 186
185static struct ieee80211_channel __wl_5ghz_n_channels[] = {
186 CHAN5G(32, 0), CHAN5G(34, 0),
187 CHAN5G(36, 0), CHAN5G(38, 0),
188 CHAN5G(40, 0), CHAN5G(42, 0),
189 CHAN5G(44, 0), CHAN5G(46, 0),
190 CHAN5G(48, 0), CHAN5G(50, 0),
191 CHAN5G(52, 0), CHAN5G(54, 0),
192 CHAN5G(56, 0), CHAN5G(58, 0),
193 CHAN5G(60, 0), CHAN5G(62, 0),
194 CHAN5G(64, 0), CHAN5G(66, 0),
195 CHAN5G(68, 0), CHAN5G(70, 0),
196 CHAN5G(72, 0), CHAN5G(74, 0),
197 CHAN5G(76, 0), CHAN5G(78, 0),
198 CHAN5G(80, 0), CHAN5G(82, 0),
199 CHAN5G(84, 0), CHAN5G(86, 0),
200 CHAN5G(88, 0), CHAN5G(90, 0),
201 CHAN5G(92, 0), CHAN5G(94, 0),
202 CHAN5G(96, 0), CHAN5G(98, 0),
203 CHAN5G(100, 0), CHAN5G(102, 0),
204 CHAN5G(104, 0), CHAN5G(106, 0),
205 CHAN5G(108, 0), CHAN5G(110, 0),
206 CHAN5G(112, 0), CHAN5G(114, 0),
207 CHAN5G(116, 0), CHAN5G(118, 0),
208 CHAN5G(120, 0), CHAN5G(122, 0),
209 CHAN5G(124, 0), CHAN5G(126, 0),
210 CHAN5G(128, 0), CHAN5G(130, 0),
211 CHAN5G(132, 0), CHAN5G(134, 0),
212 CHAN5G(136, 0), CHAN5G(138, 0),
213 CHAN5G(140, 0), CHAN5G(142, 0),
214 CHAN5G(144, 0), CHAN5G(145, 0),
215 CHAN5G(146, 0), CHAN5G(147, 0),
216 CHAN5G(148, 0), CHAN5G(149, 0),
217 CHAN5G(150, 0), CHAN5G(151, 0),
218 CHAN5G(152, 0), CHAN5G(153, 0),
219 CHAN5G(154, 0), CHAN5G(155, 0),
220 CHAN5G(156, 0), CHAN5G(157, 0),
221 CHAN5G(158, 0), CHAN5G(159, 0),
222 CHAN5G(160, 0), CHAN5G(161, 0),
223 CHAN5G(162, 0), CHAN5G(163, 0),
224 CHAN5G(164, 0), CHAN5G(165, 0),
225 CHAN5G(166, 0), CHAN5G(168, 0),
226 CHAN5G(170, 0), CHAN5G(172, 0),
227 CHAN5G(174, 0), CHAN5G(176, 0),
228 CHAN5G(178, 0), CHAN5G(180, 0),
229 CHAN5G(182, 0), CHAN5G(184, 0),
230 CHAN5G(186, 0), CHAN5G(188, 0),
231 CHAN5G(190, 0), CHAN5G(192, 0),
232 CHAN5G(194, 0), CHAN5G(196, 0),
233 CHAN5G(198, 0), CHAN5G(200, 0),
234 CHAN5G(202, 0), CHAN5G(204, 0),
235 CHAN5G(206, 0), CHAN5G(208, 0),
236 CHAN5G(210, 0), CHAN5G(212, 0),
237 CHAN5G(214, 0), CHAN5G(216, 0),
238 CHAN5G(218, 0), CHAN5G(220, 0),
239 CHAN5G(222, 0), CHAN5G(224, 0),
240 CHAN5G(226, 0), CHAN5G(228, 0),
241};
242
243static struct ieee80211_supported_band __wl_band_2ghz = { 187static struct ieee80211_supported_band __wl_band_2ghz = {
244 .band = IEEE80211_BAND_2GHZ, 188 .band = IEEE80211_BAND_2GHZ,
245 .channels = __wl_2ghz_channels, 189 .channels = __wl_2ghz_channels,
@@ -256,12 +200,28 @@ static struct ieee80211_supported_band __wl_band_5ghz_a = {
256 .n_bitrates = wl_a_rates_size, 200 .n_bitrates = wl_a_rates_size,
257}; 201};
258 202
259static struct ieee80211_supported_band __wl_band_5ghz_n = { 203/* This is to override regulatory domains defined in cfg80211 module (reg.c)
260 .band = IEEE80211_BAND_5GHZ, 204 * By default world regulatory domain defined in reg.c puts the flags
261 .channels = __wl_5ghz_n_channels, 205 * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for
262 .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels), 206 * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't
263 .bitrates = wl_a_rates, 207 * start p2p operations on 5GHz channels. All the changes in world regulatory
264 .n_bitrates = wl_a_rates_size, 208 * domain are to be done here.
209 */
210static const struct ieee80211_regdomain brcmf_regdom = {
211 .n_reg_rules = 4,
212 .alpha2 = "99",
213 .reg_rules = {
214 /* IEEE 802.11b/g, channels 1..11 */
215 REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
216 /* If any */
217 /* IEEE 802.11 channel 14 - Only JP enables
218 * this and for 802.11b only
219 */
220 REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
221 /* IEEE 802.11a, channel 36..64 */
222 REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
223 /* IEEE 802.11a, channel 100..165 */
224 REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
265}; 225};
266 226
267static const u32 __wl_cipher_suites[] = { 227static const u32 __wl_cipher_suites[] = {
@@ -375,22 +335,16 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
375 return qdbm; 335 return qdbm;
376} 336}
377 337
378u16 channel_to_chanspec(struct ieee80211_channel *ch) 338u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
339 struct ieee80211_channel *ch)
379{ 340{
380 u16 chanspec; 341 struct brcmu_chan ch_inf;
381 342
382 chanspec = ieee80211_frequency_to_channel(ch->center_freq); 343 ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq);
383 chanspec &= WL_CHANSPEC_CHAN_MASK; 344 ch_inf.bw = BRCMU_CHAN_BW_20;
345 d11inf->encchspec(&ch_inf);
384 346
385 if (ch->band == IEEE80211_BAND_2GHZ) 347 return ch_inf.chspec;
386 chanspec |= WL_CHANSPEC_BAND_2G;
387 else
388 chanspec |= WL_CHANSPEC_BAND_5G;
389
390 chanspec |= WL_CHANSPEC_BW_20;
391 chanspec |= WL_CHANSPEC_CTL_SB_NONE;
392
393 return chanspec;
394} 348}
395 349
396/* Traverse a string of 1-byte tag/1-byte length/variable-length value 350/* Traverse a string of 1-byte tag/1-byte length/variable-length value
@@ -523,17 +477,16 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
523 return ERR_PTR(-EOPNOTSUPP); 477 return ERR_PTR(-EOPNOTSUPP);
524 case NL80211_IFTYPE_P2P_CLIENT: 478 case NL80211_IFTYPE_P2P_CLIENT:
525 case NL80211_IFTYPE_P2P_GO: 479 case NL80211_IFTYPE_P2P_GO:
480 case NL80211_IFTYPE_P2P_DEVICE:
526 return brcmf_p2p_add_vif(wiphy, name, type, flags, params); 481 return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
527 case NL80211_IFTYPE_UNSPECIFIED: 482 case NL80211_IFTYPE_UNSPECIFIED:
528 case NL80211_IFTYPE_P2P_DEVICE:
529 default: 483 default:
530 return ERR_PTR(-EINVAL); 484 return ERR_PTR(-EINVAL);
531 } 485 }
532} 486}
533 487
534void brcmf_set_mpc(struct net_device *ndev, int mpc) 488void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
535{ 489{
536 struct brcmf_if *ifp = netdev_priv(ndev);
537 s32 err = 0; 490 s32 err = 0;
538 491
539 if (check_vif_up(ifp->vif)) { 492 if (check_vif_up(ifp->vif)) {
@@ -546,10 +499,9 @@ void brcmf_set_mpc(struct net_device *ndev, int mpc)
546 } 499 }
547} 500}
548 501
549s32 502s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
550brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, 503 struct brcmf_if *ifp, bool aborted,
551 struct net_device *ndev, 504 bool fw_abort)
552 bool aborted, bool fw_abort)
553{ 505{
554 struct brcmf_scan_params_le params_le; 506 struct brcmf_scan_params_le params_le;
555 struct cfg80211_scan_request *scan_request; 507 struct cfg80211_scan_request *scan_request;
@@ -580,7 +532,7 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
580 /* Scan is aborted by setting channel_list[0] to -1 */ 532 /* Scan is aborted by setting channel_list[0] to -1 */
581 params_le.channel_list[0] = cpu_to_le16(-1); 533 params_le.channel_list[0] = cpu_to_le16(-1);
582 /* E-Scan (or anyother type) can be aborted by SCAN */ 534 /* E-Scan (or anyother type) can be aborted by SCAN */
583 err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN, 535 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
584 &params_le, sizeof(params_le)); 536 &params_le, sizeof(params_le));
585 if (err) 537 if (err)
586 brcmf_err("Scan abort failed\n"); 538 brcmf_err("Scan abort failed\n");
@@ -594,12 +546,12 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
594 cfg->sched_escan = false; 546 cfg->sched_escan = false;
595 if (!aborted) 547 if (!aborted)
596 cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); 548 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
597 brcmf_set_mpc(ndev, 1); 549 brcmf_set_mpc(ifp, 1);
598 } else if (scan_request) { 550 } else if (scan_request) {
599 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", 551 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
600 aborted ? "Aborted" : "Done"); 552 aborted ? "Aborted" : "Done");
601 cfg80211_scan_done(scan_request, aborted); 553 cfg80211_scan_done(scan_request, aborted);
602 brcmf_set_mpc(ndev, 1); 554 brcmf_set_mpc(ifp, 1);
603 } 555 }
604 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) 556 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
605 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); 557 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -619,9 +571,9 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
619 571
620 if (ndev) { 572 if (ndev) {
621 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) && 573 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
622 cfg->escan_info.ndev == ndev) 574 cfg->escan_info.ifp == netdev_priv(ndev))
623 brcmf_notify_escan_complete(cfg, ndev, true, 575 brcmf_notify_escan_complete(cfg, netdev_priv(ndev),
624 true); 576 true, true);
625 577
626 brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1); 578 brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
627 } 579 }
@@ -637,9 +589,9 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
637 return -EOPNOTSUPP; 589 return -EOPNOTSUPP;
638 case NL80211_IFTYPE_P2P_CLIENT: 590 case NL80211_IFTYPE_P2P_CLIENT:
639 case NL80211_IFTYPE_P2P_GO: 591 case NL80211_IFTYPE_P2P_GO:
592 case NL80211_IFTYPE_P2P_DEVICE:
640 return brcmf_p2p_del_vif(wiphy, wdev); 593 return brcmf_p2p_del_vif(wiphy, wdev);
641 case NL80211_IFTYPE_UNSPECIFIED: 594 case NL80211_IFTYPE_UNSPECIFIED:
642 case NL80211_IFTYPE_P2P_DEVICE:
643 default: 595 default:
644 return -EINVAL; 596 return -EINVAL;
645 } 597 }
@@ -723,7 +675,8 @@ done:
723 return err; 675 return err;
724} 676}
725 677
726static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, 678static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
679 struct brcmf_scan_params_le *params_le,
727 struct cfg80211_scan_request *request) 680 struct cfg80211_scan_request *request)
728{ 681{
729 u32 n_ssids; 682 u32 n_ssids;
@@ -755,7 +708,8 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
755 n_channels); 708 n_channels);
756 if (n_channels > 0) { 709 if (n_channels > 0) {
757 for (i = 0; i < n_channels; i++) { 710 for (i = 0; i < n_channels; i++) {
758 chanspec = channel_to_chanspec(request->channels[i]); 711 chanspec = channel_to_chanspec(&cfg->d11inf,
712 request->channels[i]);
759 brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n", 713 brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
760 request->channels[i]->hw_value, chanspec); 714 request->channels[i]->hw_value, chanspec);
761 params_le->channel_list[i] = cpu_to_le16(chanspec); 715 params_le->channel_list[i] = cpu_to_le16(chanspec);
@@ -803,7 +757,7 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
803} 757}
804 758
805static s32 759static s32
806brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, 760brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
807 struct cfg80211_scan_request *request, u16 action) 761 struct cfg80211_scan_request *request, u16 action)
808{ 762{
809 s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + 763 s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
@@ -827,13 +781,12 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
827 goto exit; 781 goto exit;
828 } 782 }
829 BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN); 783 BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
830 brcmf_escan_prep(&params->params_le, request); 784 brcmf_escan_prep(cfg, &params->params_le, request);
831 params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); 785 params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
832 params->action = cpu_to_le16(action); 786 params->action = cpu_to_le16(action);
833 params->sync_id = cpu_to_le16(0x1234); 787 params->sync_id = cpu_to_le16(0x1234);
834 788
835 err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan", 789 err = brcmf_fil_iovar_data_set(ifp, "escan", params, params_size);
836 params, params_size);
837 if (err) { 790 if (err) {
838 if (err == -EBUSY) 791 if (err == -EBUSY)
839 brcmf_dbg(INFO, "system busy : escan canceled\n"); 792 brcmf_dbg(INFO, "system busy : escan canceled\n");
@@ -848,7 +801,7 @@ exit:
848 801
849static s32 802static s32
850brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy, 803brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
851 struct net_device *ndev, struct cfg80211_scan_request *request) 804 struct brcmf_if *ifp, struct cfg80211_scan_request *request)
852{ 805{
853 s32 err; 806 s32 err;
854 u32 passive_scan; 807 u32 passive_scan;
@@ -856,35 +809,35 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
856 struct escan_info *escan = &cfg->escan_info; 809 struct escan_info *escan = &cfg->escan_info;
857 810
858 brcmf_dbg(SCAN, "Enter\n"); 811 brcmf_dbg(SCAN, "Enter\n");
859 escan->ndev = ndev; 812 escan->ifp = ifp;
860 escan->wiphy = wiphy; 813 escan->wiphy = wiphy;
861 escan->escan_state = WL_ESCAN_STATE_SCANNING; 814 escan->escan_state = WL_ESCAN_STATE_SCANNING;
862 passive_scan = cfg->active_scan ? 0 : 1; 815 passive_scan = cfg->active_scan ? 0 : 1;
863 err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN, 816 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
864 passive_scan); 817 passive_scan);
865 if (err) { 818 if (err) {
866 brcmf_err("error (%d)\n", err); 819 brcmf_err("error (%d)\n", err);
867 return err; 820 return err;
868 } 821 }
869 brcmf_set_mpc(ndev, 0); 822 brcmf_set_mpc(ifp, 0);
870 results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf; 823 results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
871 results->version = 0; 824 results->version = 0;
872 results->count = 0; 825 results->count = 0;
873 results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE; 826 results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
874 827
875 err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START); 828 err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START);
876 if (err) 829 if (err)
877 brcmf_set_mpc(ndev, 1); 830 brcmf_set_mpc(ifp, 1);
878 return err; 831 return err;
879} 832}
880 833
881static s32 834static s32
882brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev, 835brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
883 struct cfg80211_scan_request *request, 836 struct cfg80211_scan_request *request,
884 struct cfg80211_ssid *this_ssid) 837 struct cfg80211_ssid *this_ssid)
885{ 838{
886 struct brcmf_if *ifp = netdev_priv(ndev); 839 struct brcmf_if *ifp = vif->ifp;
887 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 840 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
888 struct cfg80211_ssid *ssids; 841 struct cfg80211_ssid *ssids;
889 struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int; 842 struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
890 u32 passive_scan; 843 u32 passive_scan;
@@ -904,16 +857,19 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
904 cfg->scan_status); 857 cfg->scan_status);
905 return -EAGAIN; 858 return -EAGAIN;
906 } 859 }
860 if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
861 brcmf_err("Scanning suppressed: status (%lu)\n",
862 cfg->scan_status);
863 return -EAGAIN;
864 }
907 if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) { 865 if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
908 brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state); 866 brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state);
909 return -EAGAIN; 867 return -EAGAIN;
910 } 868 }
911 869
912 /* If scan req comes for p2p0, send it over primary I/F */ 870 /* If scan req comes for p2p0, send it over primary I/F */
913 if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) { 871 if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
914 ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 872 vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
915 ndev = ifp->ndev;
916 }
917 873
918 /* Arm scan timeout timer */ 874 /* Arm scan timeout timer */
919 mod_timer(&cfg->escan_timeout, jiffies + 875 mod_timer(&cfg->escan_timeout, jiffies +
@@ -934,11 +890,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
934 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); 890 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
935 if (escan_req) { 891 if (escan_req) {
936 cfg->escan_info.run = brcmf_run_escan; 892 cfg->escan_info.run = brcmf_run_escan;
937 err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif); 893 err = brcmf_p2p_scan_prep(wiphy, request, vif);
938 if (err) 894 if (err)
939 goto scan_out; 895 goto scan_out;
940 896
941 err = brcmf_do_escan(cfg, wiphy, ndev, request); 897 err = brcmf_do_escan(cfg, wiphy, vif->ifp, request);
942 if (err) 898 if (err)
943 goto scan_out; 899 goto scan_out;
944 } else { 900 } else {
@@ -962,7 +918,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
962 brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err); 918 brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
963 goto scan_out; 919 goto scan_out;
964 } 920 }
965 brcmf_set_mpc(ndev, 0); 921 brcmf_set_mpc(ifp, 0);
966 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, 922 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
967 &sr->ssid_le, sizeof(sr->ssid_le)); 923 &sr->ssid_le, sizeof(sr->ssid_le));
968 if (err) { 924 if (err) {
@@ -972,7 +928,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
972 else 928 else
973 brcmf_err("WLC_SCAN error (%d)\n", err); 929 brcmf_err("WLC_SCAN error (%d)\n", err);
974 930
975 brcmf_set_mpc(ndev, 1); 931 brcmf_set_mpc(ifp, 1);
976 goto scan_out; 932 goto scan_out;
977 } 933 }
978 } 934 }
@@ -990,16 +946,15 @@ scan_out:
990static s32 946static s32
991brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) 947brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
992{ 948{
993 struct net_device *ndev = request->wdev->netdev; 949 struct brcmf_cfg80211_vif *vif;
994 s32 err = 0; 950 s32 err = 0;
995 951
996 brcmf_dbg(TRACE, "Enter\n"); 952 brcmf_dbg(TRACE, "Enter\n");
997 953 vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev);
998 if (!check_vif_up(container_of(request->wdev, 954 if (!check_vif_up(vif))
999 struct brcmf_cfg80211_vif, wdev)))
1000 return -EIO; 955 return -EIO;
1001 956
1002 err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL); 957 err = brcmf_cfg80211_escan(wiphy, vif, request, NULL);
1003 958
1004 if (err) 959 if (err)
1005 brcmf_err("scan error (%d)\n", err); 960 brcmf_err("scan error (%d)\n", err);
@@ -1097,6 +1052,7 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
1097 1052
1098static void brcmf_link_down(struct brcmf_cfg80211_vif *vif) 1053static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1099{ 1054{
1055 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
1100 s32 err = 0; 1056 s32 err = 0;
1101 1057
1102 brcmf_dbg(TRACE, "Enter\n"); 1058 brcmf_dbg(TRACE, "Enter\n");
@@ -1110,6 +1066,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1110 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); 1066 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
1111 } 1067 }
1112 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); 1068 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
1069 clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
1070 brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
1113 brcmf_dbg(TRACE, "Exit\n"); 1071 brcmf_dbg(TRACE, "Exit\n");
1114} 1072}
1115 1073
@@ -1229,7 +1187,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1229 params->chandef.chan->center_freq); 1187 params->chandef.chan->center_freq);
1230 if (params->channel_fixed) { 1188 if (params->channel_fixed) {
1231 /* adding chanspec */ 1189 /* adding chanspec */
1232 chanspec = channel_to_chanspec(params->chandef.chan); 1190 chanspec = channel_to_chanspec(&cfg->d11inf,
1191 params->chandef.chan);
1233 join_params.params_le.chanspec_list[0] = 1192 join_params.params_le.chanspec_list[0] =
1234 cpu_to_le16(chanspec); 1193 cpu_to_le16(chanspec);
1235 join_params.params_le.chanspec_num = cpu_to_le32(1); 1194 join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -1619,7 +1578,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1619 if (chan) { 1578 if (chan) {
1620 cfg->channel = 1579 cfg->channel =
1621 ieee80211_frequency_to_channel(chan->center_freq); 1580 ieee80211_frequency_to_channel(chan->center_freq);
1622 chanspec = channel_to_chanspec(chan); 1581 chanspec = channel_to_chanspec(&cfg->d11inf, chan);
1623 brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n", 1582 brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
1624 cfg->channel, chan->center_freq, chanspec); 1583 cfg->channel, chan->center_freq, chanspec);
1625 } else { 1584 } else {
@@ -2278,6 +2237,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2278 struct ieee80211_channel *notify_channel; 2237 struct ieee80211_channel *notify_channel;
2279 struct cfg80211_bss *bss; 2238 struct cfg80211_bss *bss;
2280 struct ieee80211_supported_band *band; 2239 struct ieee80211_supported_band *band;
2240 struct brcmu_chan ch;
2281 s32 err = 0; 2241 s32 err = 0;
2282 u16 channel; 2242 u16 channel;
2283 u32 freq; 2243 u32 freq;
@@ -2292,8 +2252,12 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2292 return 0; 2252 return 0;
2293 } 2253 }
2294 2254
2295 channel = bi->ctl_ch ? bi->ctl_ch : 2255 if (!bi->ctl_ch) {
2296 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); 2256 ch.chspec = le16_to_cpu(bi->chanspec);
2257 cfg->d11inf.decchspec(&ch);
2258 bi->ctl_ch = ch.chnum;
2259 }
2260 channel = bi->ctl_ch;
2297 2261
2298 if (channel <= CH_MAX_2G_CHANNEL) 2262 if (channel <= CH_MAX_2G_CHANNEL)
2299 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 2263 band = wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -2368,9 +2332,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2368 struct brcmf_bss_info_le *bi = NULL; 2332 struct brcmf_bss_info_le *bi = NULL;
2369 struct ieee80211_supported_band *band; 2333 struct ieee80211_supported_band *band;
2370 struct cfg80211_bss *bss; 2334 struct cfg80211_bss *bss;
2335 struct brcmu_chan ch;
2371 u8 *buf = NULL; 2336 u8 *buf = NULL;
2372 s32 err = 0; 2337 s32 err = 0;
2373 u16 channel;
2374 u32 freq; 2338 u32 freq;
2375 u16 notify_capability; 2339 u16 notify_capability;
2376 u16 notify_interval; 2340 u16 notify_interval;
@@ -2397,15 +2361,15 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2397 2361
2398 bi = (struct brcmf_bss_info_le *)(buf + 4); 2362 bi = (struct brcmf_bss_info_le *)(buf + 4);
2399 2363
2400 channel = bi->ctl_ch ? bi->ctl_ch : 2364 ch.chspec = le16_to_cpu(bi->chanspec);
2401 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); 2365 cfg->d11inf.decchspec(&ch);
2402 2366
2403 if (channel <= CH_MAX_2G_CHANNEL) 2367 if (ch.band == BRCMU_CHAN_BAND_2G)
2404 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 2368 band = wiphy->bands[IEEE80211_BAND_2GHZ];
2405 else 2369 else
2406 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 2370 band = wiphy->bands[IEEE80211_BAND_5GHZ];
2407 2371
2408 freq = ieee80211_channel_to_frequency(channel, band->band); 2372 freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
2409 notify_channel = ieee80211_get_channel(wiphy, freq); 2373 notify_channel = ieee80211_get_channel(wiphy, freq);
2410 2374
2411 notify_capability = le16_to_cpu(bi->capability); 2375 notify_capability = le16_to_cpu(bi->capability);
@@ -2414,7 +2378,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2414 notify_ielen = le32_to_cpu(bi->ie_length); 2378 notify_ielen = le32_to_cpu(bi->ie_length);
2415 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; 2379 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
2416 2380
2417 brcmf_dbg(CONN, "channel: %d(%d)\n", channel, freq); 2381 brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq);
2418 brcmf_dbg(CONN, "capability: %X\n", notify_capability); 2382 brcmf_dbg(CONN, "capability: %X\n", notify_capability);
2419 brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); 2383 brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
2420 brcmf_dbg(CONN, "signal: %d\n", notify_signal); 2384 brcmf_dbg(CONN, "signal: %d\n", notify_signal);
@@ -2510,7 +2474,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
2510 set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status); 2474 set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
2511 if (cfg->scan_request) { 2475 if (cfg->scan_request) {
2512 escan->escan_state = WL_ESCAN_STATE_IDLE; 2476 escan->escan_state = WL_ESCAN_STATE_IDLE;
2513 brcmf_notify_escan_complete(cfg, escan->ndev, true, true); 2477 brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
2514 } 2478 }
2515 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); 2479 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
2516 clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status); 2480 clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
@@ -2522,7 +2486,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
2522 container_of(work, struct brcmf_cfg80211_info, 2486 container_of(work, struct brcmf_cfg80211_info,
2523 escan_timeout_work); 2487 escan_timeout_work);
2524 2488
2525 brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); 2489 brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
2526} 2490}
2527 2491
2528static void brcmf_escan_timeout(unsigned long data) 2492static void brcmf_escan_timeout(unsigned long data)
@@ -2537,12 +2501,19 @@ static void brcmf_escan_timeout(unsigned long data)
2537} 2501}
2538 2502
2539static s32 2503static s32
2540brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss, 2504brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
2505 struct brcmf_bss_info_le *bss,
2541 struct brcmf_bss_info_le *bss_info_le) 2506 struct brcmf_bss_info_le *bss_info_le)
2542{ 2507{
2508 struct brcmu_chan ch_bss, ch_bss_info_le;
2509
2510 ch_bss.chspec = le16_to_cpu(bss->chanspec);
2511 cfg->d11inf.decchspec(&ch_bss);
2512 ch_bss_info_le.chspec = le16_to_cpu(bss_info_le->chanspec);
2513 cfg->d11inf.decchspec(&ch_bss_info_le);
2514
2543 if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) && 2515 if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
2544 (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) == 2516 ch_bss.band == ch_bss_info_le.band &&
2545 CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
2546 bss_info_le->SSID_len == bss->SSID_len && 2517 bss_info_le->SSID_len == bss->SSID_len &&
2547 !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) { 2518 !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
2548 if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) == 2519 if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
@@ -2573,7 +2544,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
2573 const struct brcmf_event_msg *e, void *data) 2544 const struct brcmf_event_msg *e, void *data)
2574{ 2545{
2575 struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 2546 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
2576 struct net_device *ndev = ifp->ndev;
2577 s32 status; 2547 s32 status;
2578 s32 err = 0; 2548 s32 err = 0;
2579 struct brcmf_escan_result_le *escan_result_le; 2549 struct brcmf_escan_result_le *escan_result_le;
@@ -2586,9 +2556,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
2586 2556
2587 status = e->status; 2557 status = e->status;
2588 2558
2589 if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { 2559 if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
2590 brcmf_err("scan not ready ndev %p drv_status %x\n", ndev, 2560 brcmf_err("scan not ready, bssidx=%d\n", ifp->bssidx);
2591 !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
2592 return -EPERM; 2561 return -EPERM;
2593 } 2562 }
2594 2563
@@ -2642,7 +2611,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
2642 bss = bss ? (struct brcmf_bss_info_le *) 2611 bss = bss ? (struct brcmf_bss_info_le *)
2643 ((unsigned char *)bss + 2612 ((unsigned char *)bss +
2644 le32_to_cpu(bss->length)) : list->bss_info_le; 2613 le32_to_cpu(bss->length)) : list->bss_info_le;
2645 if (brcmf_compare_update_same_bss(bss, bss_info_le)) 2614 if (brcmf_compare_update_same_bss(cfg, bss,
2615 bss_info_le))
2646 goto exit; 2616 goto exit;
2647 } 2617 }
2648 memcpy(&(cfg->escan_info.escan_buf[list->buflen]), 2618 memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
@@ -2659,7 +2629,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
2659 cfg->escan_info.escan_buf; 2629 cfg->escan_info.escan_buf;
2660 brcmf_inform_bss(cfg); 2630 brcmf_inform_bss(cfg);
2661 aborted = status != BRCMF_E_STATUS_SUCCESS; 2631 aborted = status != BRCMF_E_STATUS_SUCCESS;
2662 brcmf_notify_escan_complete(cfg, ndev, aborted, 2632 brcmf_notify_escan_complete(cfg, ifp, aborted,
2663 false); 2633 false);
2664 } else 2634 } else
2665 brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n", 2635 brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
@@ -2737,7 +2707,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2737 brcmf_abort_scanning(cfg); 2707 brcmf_abort_scanning(cfg);
2738 2708
2739 /* Turn off watchdog timer */ 2709 /* Turn off watchdog timer */
2740 brcmf_set_mpc(ndev, 1); 2710 brcmf_set_mpc(netdev_priv(ndev), 1);
2741 2711
2742exit: 2712exit:
2743 brcmf_dbg(TRACE, "Exit\n"); 2713 brcmf_dbg(TRACE, "Exit\n");
@@ -2895,7 +2865,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
2895 const struct brcmf_event_msg *e, void *data) 2865 const struct brcmf_event_msg *e, void *data)
2896{ 2866{
2897 struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 2867 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
2898 struct net_device *ndev = ifp->ndev;
2899 struct brcmf_pno_net_info_le *netinfo, *netinfo_start; 2868 struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
2900 struct cfg80211_scan_request *request = NULL; 2869 struct cfg80211_scan_request *request = NULL;
2901 struct cfg80211_ssid *ssid = NULL; 2870 struct cfg80211_ssid *ssid = NULL;
@@ -2979,7 +2948,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
2979 } 2948 }
2980 2949
2981 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); 2950 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
2982 err = brcmf_do_escan(cfg, wiphy, ndev, request); 2951 err = brcmf_do_escan(cfg, wiphy, ifp, request);
2983 if (err) { 2952 if (err) {
2984 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); 2953 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
2985 goto out_err; 2954 goto out_err;
@@ -3051,16 +3020,21 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3051 int i; 3020 int i;
3052 int ret = 0; 3021 int ret = 0;
3053 3022
3054 brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n", 3023 brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
3055 request->n_match_sets, request->n_ssids); 3024 request->n_match_sets, request->n_ssids);
3056 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { 3025 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
3057 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status); 3026 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
3058 return -EAGAIN; 3027 return -EAGAIN;
3059 } 3028 }
3029 if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
3030 brcmf_err("Scanning suppressed: status (%lu)\n",
3031 cfg->scan_status);
3032 return -EAGAIN;
3033 }
3060 3034
3061 if (!request || !request->n_ssids || !request->n_match_sets) { 3035 if (!request->n_ssids || !request->n_match_sets) {
3062 brcmf_err("Invalid sched scan req!! n_ssids:%d\n", 3036 brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
3063 request ? request->n_ssids : 0); 3037 request->n_ssids);
3064 return -EINVAL; 3038 return -EINVAL;
3065 } 3039 }
3066 3040
@@ -3136,7 +3110,7 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
3136 brcmf_dbg(SCAN, "enter\n"); 3110 brcmf_dbg(SCAN, "enter\n");
3137 brcmf_dev_pno_clean(ndev); 3111 brcmf_dev_pno_clean(ndev);
3138 if (cfg->sched_escan) 3112 if (cfg->sched_escan)
3139 brcmf_notify_escan_complete(cfg, ndev, true, true); 3113 brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true);
3140 return 0; 3114 return 0;
3141} 3115}
3142 3116
@@ -3708,7 +3682,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3708 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len); 3682 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
3709 } 3683 }
3710 3684
3711 brcmf_set_mpc(ndev, 0); 3685 brcmf_set_mpc(ifp, 0);
3712 3686
3713 /* find the RSN_IE */ 3687 /* find the RSN_IE */
3714 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, 3688 rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3816,7 +3790,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3816 3790
3817exit: 3791exit:
3818 if (err) 3792 if (err)
3819 brcmf_set_mpc(ndev, 1); 3793 brcmf_set_mpc(ifp, 1);
3820 return err; 3794 return err;
3821} 3795}
3822 3796
@@ -3856,7 +3830,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
3856 if (err < 0) 3830 if (err < 0)
3857 brcmf_err("bss_enable config failed %d\n", err); 3831 brcmf_err("bss_enable config failed %d\n", err);
3858 } 3832 }
3859 brcmf_set_mpc(ndev, 1); 3833 brcmf_set_mpc(ifp, 1);
3860 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); 3834 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
3861 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); 3835 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
3862 3836
@@ -3913,13 +3887,13 @@ brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
3913 struct wireless_dev *wdev, 3887 struct wireless_dev *wdev,
3914 u16 frame_type, bool reg) 3888 u16 frame_type, bool reg)
3915{ 3889{
3916 struct brcmf_if *ifp = netdev_priv(wdev->netdev); 3890 struct brcmf_cfg80211_vif *vif;
3917 struct brcmf_cfg80211_vif *vif = ifp->vif;
3918 u16 mgmt_type; 3891 u16 mgmt_type;
3919 3892
3920 brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg); 3893 brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
3921 3894
3922 mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; 3895 mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
3896 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
3923 if (reg) 3897 if (reg)
3924 vif->mgmt_rx_reg |= BIT(mgmt_type); 3898 vif->mgmt_rx_reg |= BIT(mgmt_type);
3925 else 3899 else
@@ -3935,7 +3909,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3935{ 3909{
3936 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3910 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3937 const struct ieee80211_mgmt *mgmt; 3911 const struct ieee80211_mgmt *mgmt;
3938 struct brcmf_if *ifp;
3939 struct brcmf_cfg80211_vif *vif; 3912 struct brcmf_cfg80211_vif *vif;
3940 s32 err = 0; 3913 s32 err = 0;
3941 s32 ie_offset; 3914 s32 ie_offset;
@@ -3971,8 +3944,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3971 ie_offset = DOT11_MGMT_HDR_LEN + 3944 ie_offset = DOT11_MGMT_HDR_LEN +
3972 DOT11_BCN_PRB_FIXED_LEN; 3945 DOT11_BCN_PRB_FIXED_LEN;
3973 ie_len = len - ie_offset; 3946 ie_len = len - ie_offset;
3974 ifp = netdev_priv(wdev->netdev); 3947 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
3975 vif = ifp->vif;
3976 if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) 3948 if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
3977 vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; 3949 vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
3978 err = brcmf_vif_set_mgmt_ie(vif, 3950 err = brcmf_vif_set_mgmt_ie(vif,
@@ -4007,7 +3979,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
4007 *cookie, le16_to_cpu(action_frame->len), 3979 *cookie, le16_to_cpu(action_frame->len),
4008 chan->center_freq); 3980 chan->center_freq);
4009 3981
4010 ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev, 3982 ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
4011 af_params); 3983 af_params);
4012 3984
4013 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, 3985 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
@@ -4045,6 +4017,39 @@ exit:
4045 return err; 4017 return err;
4046} 4018}
4047 4019
4020static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy,
4021 struct wireless_dev *wdev,
4022 enum nl80211_crit_proto_id proto,
4023 u16 duration)
4024{
4025 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4026 struct brcmf_cfg80211_vif *vif;
4027
4028 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
4029
4030 /* only DHCP support for now */
4031 if (proto != NL80211_CRIT_PROTO_DHCP)
4032 return -EINVAL;
4033
4034 /* suppress and abort scanning */
4035 set_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
4036 brcmf_abort_scanning(cfg);
4037
4038 return brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_DISABLED, duration);
4039}
4040
4041static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
4042 struct wireless_dev *wdev)
4043{
4044 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4045 struct brcmf_cfg80211_vif *vif;
4046
4047 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
4048
4049 brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
4050 clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
4051}
4052
4048static struct cfg80211_ops wl_cfg80211_ops = { 4053static struct cfg80211_ops wl_cfg80211_ops = {
4049 .add_virtual_intf = brcmf_cfg80211_add_iface, 4054 .add_virtual_intf = brcmf_cfg80211_add_iface,
4050 .del_virtual_intf = brcmf_cfg80211_del_iface, 4055 .del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -4079,6 +4084,10 @@ static struct cfg80211_ops wl_cfg80211_ops = {
4079 .mgmt_tx = brcmf_cfg80211_mgmt_tx, 4084 .mgmt_tx = brcmf_cfg80211_mgmt_tx,
4080 .remain_on_channel = brcmf_p2p_remain_on_channel, 4085 .remain_on_channel = brcmf_p2p_remain_on_channel,
4081 .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, 4086 .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
4087 .start_p2p_device = brcmf_p2p_start_device,
4088 .stop_p2p_device = brcmf_p2p_stop_device,
4089 .crit_proto_start = brcmf_cfg80211_crit_proto_start,
4090 .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
4082#ifdef CONFIG_NL80211_TESTMODE 4091#ifdef CONFIG_NL80211_TESTMODE
4083 .testmode_cmd = brcmf_cfg80211_testmode 4092 .testmode_cmd = brcmf_cfg80211_testmode
4084#endif 4093#endif
@@ -4162,6 +4171,11 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
4162 BIT(IEEE80211_STYPE_AUTH >> 4) | 4171 BIT(IEEE80211_STYPE_AUTH >> 4) |
4163 BIT(IEEE80211_STYPE_DEAUTH >> 4) | 4172 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
4164 BIT(IEEE80211_STYPE_ACTION >> 4) 4173 BIT(IEEE80211_STYPE_ACTION >> 4)
4174 },
4175 [NL80211_IFTYPE_P2P_DEVICE] = {
4176 .tx = 0xffff,
4177 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
4178 BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
4165 } 4179 }
4166}; 4180};
4167 4181
@@ -4187,13 +4201,6 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4187 wiphy->iface_combinations = brcmf_iface_combos; 4201 wiphy->iface_combinations = brcmf_iface_combos;
4188 wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); 4202 wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
4189 wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; 4203 wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
4190 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
4191 * it as 11a by default.
4192 * This will be updated with
4193 * 11n phy tables in
4194 * "ifconfig up"
4195 * if phy has 11n capability
4196 */
4197 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 4204 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
4198 wiphy->cipher_suites = __wl_cipher_suites; 4205 wiphy->cipher_suites = __wl_cipher_suites;
4199 wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); 4206 wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
@@ -4203,6 +4210,9 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4203 wiphy->mgmt_stypes = brcmf_txrx_stypes; 4210 wiphy->mgmt_stypes = brcmf_txrx_stypes;
4204 wiphy->max_remain_on_channel_duration = 5000; 4211 wiphy->max_remain_on_channel_duration = 5000;
4205 brcmf_wiphy_pno_params(wiphy); 4212 brcmf_wiphy_pno_params(wiphy);
4213 brcmf_dbg(INFO, "Registering custom regulatory\n");
4214 wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
4215 wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
4206 err = wiphy_register(wiphy); 4216 err = wiphy_register(wiphy);
4207 if (err < 0) { 4217 if (err < 0) {
4208 brcmf_err("Could not register wiphy device (%d)\n", err); 4218 brcmf_err("Could not register wiphy device (%d)\n", err);
@@ -4386,9 +4396,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4386 struct ieee80211_channel *notify_channel = NULL; 4396 struct ieee80211_channel *notify_channel = NULL;
4387 struct ieee80211_supported_band *band; 4397 struct ieee80211_supported_band *band;
4388 struct brcmf_bss_info_le *bi; 4398 struct brcmf_bss_info_le *bi;
4399 struct brcmu_chan ch;
4389 u32 freq; 4400 u32 freq;
4390 s32 err = 0; 4401 s32 err = 0;
4391 u32 target_channel;
4392 u8 *buf; 4402 u8 *buf;
4393 4403
4394 brcmf_dbg(TRACE, "Enter\n"); 4404 brcmf_dbg(TRACE, "Enter\n");
@@ -4412,15 +4422,15 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4412 goto done; 4422 goto done;
4413 4423
4414 bi = (struct brcmf_bss_info_le *)(buf + 4); 4424 bi = (struct brcmf_bss_info_le *)(buf + 4);
4415 target_channel = bi->ctl_ch ? bi->ctl_ch : 4425 ch.chspec = le16_to_cpu(bi->chanspec);
4416 CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); 4426 cfg->d11inf.decchspec(&ch);
4417 4427
4418 if (target_channel <= CH_MAX_2G_CHANNEL) 4428 if (ch.band == BRCMU_CHAN_BAND_2G)
4419 band = wiphy->bands[IEEE80211_BAND_2GHZ]; 4429 band = wiphy->bands[IEEE80211_BAND_2GHZ];
4420 else 4430 else
4421 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 4431 band = wiphy->bands[IEEE80211_BAND_5GHZ];
4422 4432
4423 freq = ieee80211_channel_to_frequency(target_channel, band->band); 4433 freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
4424 notify_channel = ieee80211_get_channel(wiphy, freq); 4434 notify_channel = ieee80211_get_channel(wiphy, freq);
4425 4435
4426done: 4436done:
@@ -4621,9 +4631,11 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
4621 4631
4622 ifp->vif = vif; 4632 ifp->vif = vif;
4623 vif->ifp = ifp; 4633 vif->ifp = ifp;
4624 vif->wdev.netdev = ifp->ndev; 4634 if (ifp->ndev) {
4625 ifp->ndev->ieee80211_ptr = &vif->wdev; 4635 vif->wdev.netdev = ifp->ndev;
4626 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); 4636 ifp->ndev->ieee80211_ptr = &vif->wdev;
4637 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
4638 }
4627 mutex_unlock(&event->vif_event_lock); 4639 mutex_unlock(&event->vif_event_lock);
4628 wake_up(&event->vif_wq); 4640 wake_up(&event->vif_wq);
4629 return 0; 4641 return 0;
@@ -4772,6 +4784,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
4772 struct brcmf_cfg80211_vif *vif; 4784 struct brcmf_cfg80211_vif *vif;
4773 struct brcmf_if *ifp; 4785 struct brcmf_if *ifp;
4774 s32 err = 0; 4786 s32 err = 0;
4787 s32 io_type;
4775 4788
4776 if (!ndev) { 4789 if (!ndev) {
4777 brcmf_err("ndev is invalid\n"); 4790 brcmf_err("ndev is invalid\n");
@@ -4812,6 +4825,21 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
4812 brcmf_err("P2P initilisation failed (%d)\n", err); 4825 brcmf_err("P2P initilisation failed (%d)\n", err);
4813 goto cfg80211_p2p_attach_out; 4826 goto cfg80211_p2p_attach_out;
4814 } 4827 }
4828 err = brcmf_btcoex_attach(cfg);
4829 if (err) {
4830 brcmf_err("BT-coex initialisation failed (%d)\n", err);
4831 brcmf_p2p_detach(&cfg->p2p);
4832 goto cfg80211_p2p_attach_out;
4833 }
4834
4835 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
4836 &io_type);
4837 if (err) {
4838 brcmf_err("Failed to get D11 version (%d)\n", err);
4839 goto cfg80211_p2p_attach_out;
4840 }
4841 cfg->d11inf.io_type = (u8)io_type;
4842 brcmu_d11_attach(&cfg->d11inf);
4815 4843
4816 return cfg; 4844 return cfg;
4817 4845
@@ -4830,6 +4858,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
4830 struct brcmf_cfg80211_vif *tmp; 4858 struct brcmf_cfg80211_vif *tmp;
4831 4859
4832 wl_deinit_priv(cfg); 4860 wl_deinit_priv(cfg);
4861 brcmf_btcoex_detach(cfg);
4833 list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) { 4862 list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
4834 brcmf_free_vif(vif); 4863 brcmf_free_vif(vif);
4835 } 4864 }
@@ -4926,34 +4955,234 @@ dongle_scantime_out:
4926 return err; 4955 return err;
4927} 4956}
4928 4957
4929static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg) 4958
4959static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
4960{
4961 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
4962 struct ieee80211_channel *band_chan_arr;
4963 struct brcmf_chanspec_list *list;
4964 struct brcmu_chan ch;
4965 s32 err;
4966 u8 *pbuf;
4967 u32 i, j;
4968 u32 total;
4969 enum ieee80211_band band;
4970 u32 channel;
4971 u32 *n_cnt;
4972 bool ht40_allowed;
4973 u32 index;
4974 u32 ht40_flag;
4975 bool update;
4976 u32 array_size;
4977
4978 pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
4979
4980 if (pbuf == NULL)
4981 return -ENOMEM;
4982
4983 list = (struct brcmf_chanspec_list *)pbuf;
4984
4985 err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
4986 BRCMF_DCMD_MEDLEN);
4987 if (err) {
4988 brcmf_err("get chanspecs error (%d)\n", err);
4989 goto exit;
4990 }
4991
4992 __wl_band_2ghz.n_channels = 0;
4993 __wl_band_5ghz_a.n_channels = 0;
4994
4995 total = le32_to_cpu(list->count);
4996 for (i = 0; i < total; i++) {
4997 ch.chspec = (u16)le32_to_cpu(list->element[i]);
4998 cfg->d11inf.decchspec(&ch);
4999
5000 if (ch.band == BRCMU_CHAN_BAND_2G) {
5001 band_chan_arr = __wl_2ghz_channels;
5002 array_size = ARRAY_SIZE(__wl_2ghz_channels);
5003 n_cnt = &__wl_band_2ghz.n_channels;
5004 band = IEEE80211_BAND_2GHZ;
5005 ht40_allowed = (bw_cap == WLC_N_BW_40ALL);
5006 } else if (ch.band == BRCMU_CHAN_BAND_5G) {
5007 band_chan_arr = __wl_5ghz_a_channels;
5008 array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
5009 n_cnt = &__wl_band_5ghz_a.n_channels;
5010 band = IEEE80211_BAND_5GHZ;
5011 ht40_allowed = !(bw_cap == WLC_N_BW_20ALL);
5012 } else {
5013 brcmf_err("Invalid channel Sepc. 0x%x.\n", ch.chspec);
5014 continue;
5015 }
5016 if (!ht40_allowed && ch.bw == BRCMU_CHAN_BW_40)
5017 continue;
5018 update = false;
5019 for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
5020 if (band_chan_arr[j].hw_value == ch.chnum) {
5021 update = true;
5022 break;
5023 }
5024 }
5025 if (update)
5026 index = j;
5027 else
5028 index = *n_cnt;
5029 if (index < array_size) {
5030 band_chan_arr[index].center_freq =
5031 ieee80211_channel_to_frequency(ch.chnum, band);
5032 band_chan_arr[index].hw_value = ch.chnum;
5033
5034 if (ch.bw == BRCMU_CHAN_BW_40 && ht40_allowed) {
5035 /* assuming the order is HT20, HT40 Upper,
5036 * HT40 lower from chanspecs
5037 */
5038 ht40_flag = band_chan_arr[index].flags &
5039 IEEE80211_CHAN_NO_HT40;
5040 if (ch.sb == BRCMU_CHAN_SB_U) {
5041 if (ht40_flag == IEEE80211_CHAN_NO_HT40)
5042 band_chan_arr[index].flags &=
5043 ~IEEE80211_CHAN_NO_HT40;
5044 band_chan_arr[index].flags |=
5045 IEEE80211_CHAN_NO_HT40PLUS;
5046 } else {
5047 /* It should be one of
5048 * IEEE80211_CHAN_NO_HT40 or
5049 * IEEE80211_CHAN_NO_HT40PLUS
5050 */
5051 band_chan_arr[index].flags &=
5052 ~IEEE80211_CHAN_NO_HT40;
5053 if (ht40_flag == IEEE80211_CHAN_NO_HT40)
5054 band_chan_arr[index].flags |=
5055 IEEE80211_CHAN_NO_HT40MINUS;
5056 }
5057 } else {
5058 band_chan_arr[index].flags =
5059 IEEE80211_CHAN_NO_HT40;
5060 ch.bw = BRCMU_CHAN_BW_20;
5061 cfg->d11inf.encchspec(&ch);
5062 channel = ch.chspec;
5063 err = brcmf_fil_bsscfg_int_get(ifp,
5064 "per_chan_info",
5065 &channel);
5066 if (!err) {
5067 if (channel & WL_CHAN_RADAR)
5068 band_chan_arr[index].flags |=
5069 (IEEE80211_CHAN_RADAR |
5070 IEEE80211_CHAN_NO_IBSS);
5071 if (channel & WL_CHAN_PASSIVE)
5072 band_chan_arr[index].flags |=
5073 IEEE80211_CHAN_PASSIVE_SCAN;
5074 }
5075 }
5076 if (!update)
5077 (*n_cnt)++;
5078 }
5079 }
5080exit:
5081 kfree(pbuf);
5082 return err;
5083}
5084
5085
5086static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
4930{ 5087{
4931 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 5088 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
4932 struct wiphy *wiphy; 5089 struct wiphy *wiphy;
4933 s32 phy_list; 5090 s32 phy_list;
5091 u32 band_list[3];
5092 u32 nmode;
5093 u32 bw_cap = 0;
4934 s8 phy; 5094 s8 phy;
4935 s32 err = 0; 5095 s32 err;
5096 u32 nband;
5097 s32 i;
5098 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
5099 s32 index;
4936 5100
4937 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST, 5101 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
4938 &phy_list, sizeof(phy_list)); 5102 &phy_list, sizeof(phy_list));
4939 if (err) { 5103 if (err) {
4940 brcmf_err("error (%d)\n", err); 5104 brcmf_err("BRCMF_C_GET_PHYLIST error (%d)\n", err);
4941 return err; 5105 return err;
4942 } 5106 }
4943 5107
4944 phy = ((char *)&phy_list)[0]; 5108 phy = ((char *)&phy_list)[0];
4945 brcmf_dbg(INFO, "%c phy\n", phy); 5109 brcmf_dbg(INFO, "BRCMF_C_GET_PHYLIST reported: %c phy\n", phy);
4946 if (phy == 'n' || phy == 'a') { 5110
4947 wiphy = cfg_to_wiphy(cfg); 5111
4948 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; 5112 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST,
5113 &band_list, sizeof(band_list));
5114 if (err) {
5115 brcmf_err("BRCMF_C_GET_BANDLIST error (%d)\n", err);
5116 return err;
5117 }
5118 brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
5119 band_list[0], band_list[1], band_list[2]);
5120
5121 err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
5122 if (err) {
5123 brcmf_err("nmode error (%d)\n", err);
5124 } else {
5125 err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap);
5126 if (err)
5127 brcmf_err("mimo_bw_cap error (%d)\n", err);
4949 } 5128 }
5129 brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap);
5130
5131 err = brcmf_construct_reginfo(cfg, bw_cap);
5132 if (err) {
5133 brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
5134 return err;
5135 }
5136
5137 nband = band_list[0];
5138 memset(bands, 0, sizeof(bands));
5139
5140 for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
5141 index = -1;
5142 if ((band_list[i] == WLC_BAND_5G) &&
5143 (__wl_band_5ghz_a.n_channels > 0)) {
5144 index = IEEE80211_BAND_5GHZ;
5145 bands[index] = &__wl_band_5ghz_a;
5146 if ((bw_cap == WLC_N_BW_40ALL) ||
5147 (bw_cap == WLC_N_BW_20IN2G_40IN5G))
5148 bands[index]->ht_cap.cap |=
5149 IEEE80211_HT_CAP_SGI_40;
5150 } else if ((band_list[i] == WLC_BAND_2G) &&
5151 (__wl_band_2ghz.n_channels > 0)) {
5152 index = IEEE80211_BAND_2GHZ;
5153 bands[index] = &__wl_band_2ghz;
5154 if (bw_cap == WLC_N_BW_40ALL)
5155 bands[index]->ht_cap.cap |=
5156 IEEE80211_HT_CAP_SGI_40;
5157 }
5158
5159 if ((index >= 0) && nmode) {
5160 bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
5161 bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
5162 bands[index]->ht_cap.ht_supported = true;
5163 bands[index]->ht_cap.ampdu_factor =
5164 IEEE80211_HT_MAX_AMPDU_64K;
5165 bands[index]->ht_cap.ampdu_density =
5166 IEEE80211_HT_MPDU_DENSITY_16;
5167 /* An HT shall support all EQM rates for one spatial
5168 * stream
5169 */
5170 bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
5171 }
5172 }
5173
5174 wiphy = cfg_to_wiphy(cfg);
5175 wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
5176 wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
5177 wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
4950 5178
4951 return err; 5179 return err;
4952} 5180}
4953 5181
5182
4954static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg) 5183static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
4955{ 5184{
4956 return wl_update_wiphybands(cfg); 5185 return brcmf_update_wiphybands(cfg);
4957} 5186}
4958 5187
4959static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) 5188static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -5059,6 +5288,13 @@ s32 brcmf_cfg80211_down(struct net_device *ndev)
5059 return err; 5288 return err;
5060} 5289}
5061 5290
5291enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp)
5292{
5293 struct wireless_dev *wdev = &ifp->vif->wdev;
5294
5295 return wdev->iftype;
5296}
5297
5062u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state) 5298u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
5063{ 5299{
5064 struct brcmf_cfg80211_vif *vif; 5300 struct brcmf_cfg80211_vif *vif;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 8b5d4989906c..a71cff84cdcf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,6 +17,9 @@
17#ifndef _wl_cfg80211_h_ 17#ifndef _wl_cfg80211_h_
18#define _wl_cfg80211_h_ 18#define _wl_cfg80211_h_
19 19
20/* for brcmu_d11inf */
21#include <brcmu_d11.h>
22
20#define WL_NUM_SCAN_MAX 10 23#define WL_NUM_SCAN_MAX 10
21#define WL_NUM_PMKIDS_MAX MAXPMKID 24#define WL_NUM_PMKIDS_MAX MAXPMKID
22#define WL_TLV_INFO_MAX 1024 25#define WL_TLV_INFO_MAX 1024
@@ -74,14 +77,16 @@
74 77
75 78
76/** 79/**
77 * enum brcmf_scan_status - dongle scan status 80 * enum brcmf_scan_status - scan engine status
78 * 81 *
79 * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle. 82 * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
80 * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle. 83 * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
84 * @BRCMF_SCAN_STATUS_SUPPRESS: scanning is suppressed in driver.
81 */ 85 */
82enum brcmf_scan_status { 86enum brcmf_scan_status {
83 BRCMF_SCAN_STATUS_BUSY, 87 BRCMF_SCAN_STATUS_BUSY,
84 BRCMF_SCAN_STATUS_ABORT, 88 BRCMF_SCAN_STATUS_ABORT,
89 BRCMF_SCAN_STATUS_SUPPRESS,
85}; 90};
86 91
87/** 92/**
@@ -238,9 +243,8 @@ struct escan_info {
238 u32 escan_state; 243 u32 escan_state;
239 u8 escan_buf[WL_ESCAN_BUF_SIZE]; 244 u8 escan_buf[WL_ESCAN_BUF_SIZE];
240 struct wiphy *wiphy; 245 struct wiphy *wiphy;
241 struct net_device *ndev; 246 struct brcmf_if *ifp;
242 s32 (*run)(struct brcmf_cfg80211_info *cfg, 247 s32 (*run)(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
243 struct net_device *ndev,
244 struct cfg80211_scan_request *request, u16 action); 248 struct cfg80211_scan_request *request, u16 action);
245}; 249};
246 250
@@ -347,6 +351,7 @@ struct brcmf_cfg80211_vif_event {
347 * @wiphy: wiphy object for cfg80211 interface. 351 * @wiphy: wiphy object for cfg80211 interface.
348 * @conf: dongle configuration. 352 * @conf: dongle configuration.
349 * @p2p: peer-to-peer specific information. 353 * @p2p: peer-to-peer specific information.
354 * @btcoex: Bluetooth coexistence information.
350 * @scan_request: cfg80211 scan request object. 355 * @scan_request: cfg80211 scan request object.
351 * @usr_sync: mainly for dongle up/down synchronization. 356 * @usr_sync: mainly for dongle up/down synchronization.
352 * @bss_list: bss_list holding scanned ap information. 357 * @bss_list: bss_list holding scanned ap information.
@@ -380,6 +385,7 @@ struct brcmf_cfg80211_info {
380 struct wiphy *wiphy; 385 struct wiphy *wiphy;
381 struct brcmf_cfg80211_conf *conf; 386 struct brcmf_cfg80211_conf *conf;
382 struct brcmf_p2p_info p2p; 387 struct brcmf_p2p_info p2p;
388 struct brcmf_btcoex_info *btcoex;
383 struct cfg80211_scan_request *scan_request; 389 struct cfg80211_scan_request *scan_request;
384 struct mutex usr_sync; 390 struct mutex usr_sync;
385 struct brcmf_scan_results *bss_list; 391 struct brcmf_scan_results *bss_list;
@@ -409,6 +415,7 @@ struct brcmf_cfg80211_info {
409 u8 vif_cnt; 415 u8 vif_cnt;
410 struct brcmf_cfg80211_vif_event vif_event; 416 struct brcmf_cfg80211_vif_event vif_event;
411 struct completion vif_disabled; 417 struct completion vif_disabled;
418 struct brcmu_d11inf d11inf;
412}; 419};
413 420
414/** 421/**
@@ -475,6 +482,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
475void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); 482void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
476s32 brcmf_cfg80211_up(struct net_device *ndev); 483s32 brcmf_cfg80211_up(struct net_device *ndev);
477s32 brcmf_cfg80211_down(struct net_device *ndev); 484s32 brcmf_cfg80211_down(struct net_device *ndev);
485enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
478 486
479struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, 487struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
480 enum nl80211_iftype type, 488 enum nl80211_iftype type,
@@ -485,7 +493,8 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
485 const u8 *vndr_ie_buf, u32 vndr_ie_len); 493 const u8 *vndr_ie_buf, u32 vndr_ie_len);
486s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif); 494s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
487struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key); 495struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
488u16 channel_to_chanspec(struct ieee80211_channel *ch); 496u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
497 struct ieee80211_channel *ch);
489u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state); 498u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
490void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, 499void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
491 struct brcmf_cfg80211_vif *vif); 500 struct brcmf_cfg80211_vif *vif);
@@ -493,9 +502,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
493int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg, 502int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
494 u8 action, ulong timeout); 503 u8 action, ulong timeout);
495s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, 504s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
496 struct net_device *ndev, 505 struct brcmf_if *ifp, bool aborted,
497 bool aborted, bool fw_abort); 506 bool fw_abort);
498void brcmf_set_mpc(struct net_device *ndev, int mpc); 507void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
499void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg); 508void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
500 509
501#endif /* _wl_cfg80211_h_ */ 510#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index d3d4151c3eda..32464acccd90 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -21,7 +21,7 @@ ccflags-y := \
21 -Idrivers/net/wireless/brcm80211/brcmsmac/phy \ 21 -Idrivers/net/wireless/brcm80211/brcmsmac/phy \
22 -Idrivers/net/wireless/brcm80211/include 22 -Idrivers/net/wireless/brcm80211/include
23 23
24BRCMSMAC_OFILES := \ 24brcmsmac-y := \
25 mac80211_if.o \ 25 mac80211_if.o \
26 ucode_loader.o \ 26 ucode_loader.o \
27 ampdu.o \ 27 ampdu.o \
@@ -43,7 +43,6 @@ BRCMSMAC_OFILES := \
43 brcms_trace_events.o \ 43 brcms_trace_events.o \
44 debug.o 44 debug.o
45 45
46MODULEPFX := brcmsmac 46brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o
47 47
48obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o 48obj-$(CONFIG_BRCMSMAC) += brcmsmac.o
49$(MODULEPFX)-objs = $(BRCMSMAC_OFILES)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index f0888a9ee32e..e4fd1ee3d690 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,12 +318,6 @@
318#define IS_SIM(chippkg) \ 318#define IS_SIM(chippkg) \
319 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) 319 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
320 320
321#ifdef DEBUG
322#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
323#else
324#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
325#endif /* DEBUG */
326
327#define GOODCOREADDR(x, b) \ 321#define GOODCOREADDR(x, b) \
328 (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ 322 (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
329 IS_ALIGNED((x), SI_CORE_SIZE)) 323 IS_ALIGNED((x), SI_CORE_SIZE))
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 10ee314c4229..cc87926f5055 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -379,7 +379,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
379 u8 local_constraint_qdbm) 379 u8 local_constraint_qdbm)
380{ 380{
381 struct brcms_c_info *wlc = wlc_cm->wlc; 381 struct brcms_c_info *wlc = wlc_cm->wlc;
382 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 382 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
383 struct txpwr_limits txpwr; 383 struct txpwr_limits txpwr;
384 384
385 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); 385 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -404,7 +404,7 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
404 struct txpwr_limits *txpwr) 404 struct txpwr_limits *txpwr)
405{ 405{
406 struct brcms_c_info *wlc = wlc_cm->wlc; 406 struct brcms_c_info *wlc = wlc_cm->wlc;
407 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 407 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
408 uint i; 408 uint i;
409 uint chan; 409 uint chan;
410 int maxpwr; 410 int maxpwr;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index 3f659e09f1cc..9035cc4d6ff3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -457,6 +457,7 @@ struct d11regs {
457/*== maccontrol register ==*/ 457/*== maccontrol register ==*/
458#define MCTL_GMODE (1U << 31) 458#define MCTL_GMODE (1U << 31)
459#define MCTL_DISCARD_PMQ (1 << 30) 459#define MCTL_DISCARD_PMQ (1 << 30)
460#define MCTL_TBTTHOLD (1 << 28)
460#define MCTL_WAKE (1 << 26) 461#define MCTL_WAKE (1 << 26)
461#define MCTL_HPS (1 << 25) 462#define MCTL_HPS (1 << 25)
462#define MCTL_PROMISC (1 << 24) 463#define MCTL_PROMISC (1 << 24)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.c b/drivers/net/wireless/brcm80211/brcmsmac/led.c
new file mode 100644
index 000000000000..74b17cecb189
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.c
@@ -0,0 +1,126 @@
1#include <net/mac80211.h>
2#include <linux/bcma/bcma_driver_chipcommon.h>
3#include <linux/gpio.h>
4
5#include "mac80211_if.h"
6#include "pub.h"
7#include "main.h"
8#include "led.h"
9
10 /* number of leds */
11#define BRCMS_LED_NO 4
12 /* behavior mask */
13#define BRCMS_LED_BEH_MASK 0x7f
14 /* activelow (polarity) bit */
15#define BRCMS_LED_AL_MASK 0x80
16 /* radio enabled */
17#define BRCMS_LED_RADIO 3
18
19static void brcms_radio_led_ctrl(struct brcms_info *wl, bool state)
20{
21 if (wl->radio_led.gpio == -1)
22 return;
23
24 if (wl->radio_led.active_low)
25 state = !state;
26
27 if (state)
28 gpio_set_value(wl->radio_led.gpio, 1);
29 else
30 gpio_set_value(wl->radio_led.gpio, 0);
31}
32
33
/* Callback from the LED subsystem. */
static void brcms_led_brightness_set(struct led_classdev *led_dev,
				     enum led_brightness brightness)
{
	/* led_dev is embedded in brcms_info; any non-zero brightness
	 * switches the radio LED on.
	 */
	struct brcms_info *wl = container_of(led_dev,
					     struct brcms_info, led_dev);
	brcms_radio_led_ctrl(wl, brightness);
}
42
/* Undo brcms_led_register(): drop the LED class device and release the
 * GPIO — each only if it was actually set up (dev non-NULL, gpio != -1).
 */
void brcms_led_unregister(struct brcms_info *wl)
{
	if (wl->led_dev.dev)
		led_classdev_unregister(&wl->led_dev);
	if (wl->radio_led.gpio != -1)
		gpio_free(wl->radio_led.gpio);
}
50
51int brcms_led_register(struct brcms_info *wl)
52{
53 int i, err;
54 struct brcms_led *radio_led = &wl->radio_led;
55 /* get CC core */
56 struct bcma_drv_cc *cc_drv = &wl->wlc->hw->d11core->bus->drv_cc;
57 struct gpio_chip *bcma_gpio = &cc_drv->gpio;
58 struct ssb_sprom *sprom = &wl->wlc->hw->d11core->bus->sprom;
59 u8 *leds[] = { &sprom->gpio0,
60 &sprom->gpio1,
61 &sprom->gpio2,
62 &sprom->gpio3 };
63 unsigned gpio = -1;
64 bool active_low = false;
65
66 /* none by default */
67 radio_led->gpio = -1;
68 radio_led->active_low = false;
69
70 if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base))
71 return -ENODEV;
72
73 /* find radio enabled LED */
74 for (i = 0; i < BRCMS_LED_NO; i++) {
75 u8 led = *leds[i];
76 if ((led & BRCMS_LED_BEH_MASK) == BRCMS_LED_RADIO) {
77 gpio = bcma_gpio->base + i;
78 if (led & BRCMS_LED_AL_MASK)
79 active_low = true;
80 break;
81 }
82 }
83
84 if (gpio == -1 || !gpio_is_valid(gpio))
85 return -ENODEV;
86
87 /* request and configure LED gpio */
88 err = gpio_request_one(gpio,
89 active_low ? GPIOF_OUT_INIT_HIGH
90 : GPIOF_OUT_INIT_LOW,
91 "radio on");
92 if (err) {
93 wiphy_err(wl->wiphy, "requesting led gpio %d failed (err: %d)\n",
94 gpio, err);
95 return err;
96 }
97 err = gpio_direction_output(gpio, 1);
98 if (err) {
99 wiphy_err(wl->wiphy, "cannot set led gpio %d to output (err: %d)\n",
100 gpio, err);
101 return err;
102 }
103
104 snprintf(wl->radio_led.name, sizeof(wl->radio_led.name),
105 "brcmsmac-%s:radio", wiphy_name(wl->wiphy));
106
107 wl->led_dev.name = wl->radio_led.name;
108 wl->led_dev.default_trigger =
109 ieee80211_get_radio_led_name(wl->pub->ieee_hw);
110 wl->led_dev.brightness_set = brcms_led_brightness_set;
111 err = led_classdev_register(wiphy_dev(wl->wiphy), &wl->led_dev);
112
113 if (err) {
114 wiphy_err(wl->wiphy, "cannot register led device: %s (err: %d)\n",
115 wl->radio_led.name, err);
116 return err;
117 }
118
119 wiphy_info(wl->wiphy, "registered radio enabled led device: %s gpio: %d\n",
120 wl->radio_led.name,
121 gpio);
122 radio_led->gpio = gpio;
123 radio_led->active_low = active_low;
124
125 return 0;
126}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.h b/drivers/net/wireless/brcm80211/brcmsmac/led.h
new file mode 100644
index 000000000000..17a0b1f5dbcf
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _BRCM_LED_H_
18#define _BRCM_LED_H_
/* State of the "radio enabled" LED exposed through the LED class. */
struct brcms_led {
	char name[32];		/* led_classdev name: "brcmsmac-<phy>:radio" */
	unsigned gpio;		/* bcma GPIO number; -1 (wrapped) when absent */
	bool active_low;	/* GPIO polarity: low level lights the LED */
};
24
25#ifdef CONFIG_BCMA_DRIVER_GPIO
26void brcms_led_unregister(struct brcms_info *wl);
27int brcms_led_register(struct brcms_info *wl);
28#else
29static inline void brcms_led_unregister(struct brcms_info *wl) {};
30static inline int brcms_led_register(struct brcms_info *wl)
31{
32 return -ENOTSUPP;
33};
34#endif
35
36#endif /* _BRCM_LED_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index e2340b231aa1..3a6544710c8a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Broadcom Corporation 2 * Copyright (c) 2010 Broadcom Corporation
3 * Copyright (c) 2013 Hauke Mehrtens <hauke@hauke-m.de>
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -34,6 +35,7 @@
34#include "mac80211_if.h" 35#include "mac80211_if.h"
35#include "main.h" 36#include "main.h"
36#include "debug.h" 37#include "debug.h"
38#include "led.h"
37 39
38#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 40#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
39#define BRCMS_FLUSH_TIMEOUT 500 /* msec */ 41#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
@@ -334,6 +336,7 @@ static void brcms_remove(struct bcma_device *pdev)
334 struct brcms_info *wl = hw->priv; 336 struct brcms_info *wl = hw->priv;
335 337
336 if (wl->wlc) { 338 if (wl->wlc) {
339 brcms_led_unregister(wl);
337 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); 340 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
338 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); 341 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
339 ieee80211_unregister_hw(hw); 342 ieee80211_unregister_hw(hw);
@@ -487,18 +490,26 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
487{ 490{
488 struct brcms_info *wl = hw->priv; 491 struct brcms_info *wl = hw->priv;
489 492
490 /* Just STA for now */ 493 /* Just STA, AP and ADHOC for now */
491 if (vif->type != NL80211_IFTYPE_STATION) { 494 if (vif->type != NL80211_IFTYPE_STATION &&
495 vif->type != NL80211_IFTYPE_AP &&
496 vif->type != NL80211_IFTYPE_ADHOC) {
492 brcms_err(wl->wlc->hw->d11core, 497 brcms_err(wl->wlc->hw->d11core,
493 "%s: Attempt to add type %d, only STA for now\n", 498 "%s: Attempt to add type %d, only STA, AP and AdHoc for now\n",
494 __func__, vif->type); 499 __func__, vif->type);
495 return -EOPNOTSUPP; 500 return -EOPNOTSUPP;
496 } 501 }
497 502
498 spin_lock_bh(&wl->lock); 503 spin_lock_bh(&wl->lock);
499 memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr));
500 wl->mute_tx = false; 504 wl->mute_tx = false;
501 brcms_c_mute(wl->wlc, false); 505 brcms_c_mute(wl->wlc, false);
506 if (vif->type == NL80211_IFTYPE_STATION)
507 brcms_c_start_station(wl->wlc, vif->addr);
508 else if (vif->type == NL80211_IFTYPE_AP)
509 brcms_c_start_ap(wl->wlc, vif->addr, vif->bss_conf.bssid,
510 vif->bss_conf.ssid, vif->bss_conf.ssid_len);
511 else if (vif->type == NL80211_IFTYPE_ADHOC)
512 brcms_c_start_adhoc(wl->wlc, vif->addr);
502 spin_unlock_bh(&wl->lock); 513 spin_unlock_bh(&wl->lock);
503 514
504 return 0; 515 return 0;
@@ -546,10 +557,10 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
546 new_int); 557 new_int);
547 } 558 }
548 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 559 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
549 if (conf->channel_type == NL80211_CHAN_HT20 || 560 if (conf->chandef.width == NL80211_CHAN_WIDTH_20 ||
550 conf->channel_type == NL80211_CHAN_NO_HT) 561 conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
551 err = brcms_c_set_channel(wl->wlc, 562 err = brcms_c_set_channel(wl->wlc,
552 conf->channel->hw_value); 563 conf->chandef.chan->hw_value);
553 else 564 else
554 err = -ENOTSUPP; 565 err = -ENOTSUPP;
555 } 566 }
@@ -650,14 +661,43 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
650 brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid); 661 brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid);
651 spin_unlock_bh(&wl->lock); 662 spin_unlock_bh(&wl->lock);
652 } 663 }
653 if (changed & BSS_CHANGED_BEACON) 664 if (changed & BSS_CHANGED_SSID) {
 665 /* SSID changed, for whatever reason (IBSS and managed mode) */
666 spin_lock_bh(&wl->lock);
667 brcms_c_set_ssid(wl->wlc, info->ssid, info->ssid_len);
668 spin_unlock_bh(&wl->lock);
669 }
670 if (changed & BSS_CHANGED_BEACON) {
654 /* Beacon data changed, retrieve new beacon (beaconing modes) */ 671 /* Beacon data changed, retrieve new beacon (beaconing modes) */
655 brcms_err(core, "%s: beacon changed\n", __func__); 672 struct sk_buff *beacon;
673 u16 tim_offset = 0;
674
675 spin_lock_bh(&wl->lock);
676 beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
677 brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
678 info->dtim_period);
679 spin_unlock_bh(&wl->lock);
680 }
681
682 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
683 struct sk_buff *probe_resp;
684
685 spin_lock_bh(&wl->lock);
686 probe_resp = ieee80211_proberesp_get(hw, vif);
687 brcms_c_set_new_probe_resp(wl->wlc, probe_resp);
688 spin_unlock_bh(&wl->lock);
689 }
656 690
657 if (changed & BSS_CHANGED_BEACON_ENABLED) { 691 if (changed & BSS_CHANGED_BEACON_ENABLED) {
658 /* Beaconing should be enabled/disabled (beaconing modes) */ 692 /* Beaconing should be enabled/disabled (beaconing modes) */
659 brcms_err(core, "%s: Beacon enabled: %s\n", __func__, 693 brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
660 info->enable_beacon ? "true" : "false"); 694 info->enable_beacon ? "true" : "false");
695 if (info->enable_beacon &&
696 hw->wiphy->flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) {
697 brcms_c_enable_probe_resp(wl->wlc, true);
698 } else {
699 brcms_c_enable_probe_resp(wl->wlc, false);
700 }
661 } 701 }
662 702
663 if (changed & BSS_CHANGED_CQM) { 703 if (changed & BSS_CHANGED_CQM) {
@@ -855,7 +895,7 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
855 return result; 895 return result;
856} 896}
857 897
858static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) 898static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
859{ 899{
860 struct brcms_info *wl = hw->priv; 900 struct brcms_info *wl = hw->priv;
861 int ret; 901 int ret;
@@ -870,6 +910,28 @@ static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
870 "ret=%d\n", jiffies_to_msecs(ret)); 910 "ret=%d\n", jiffies_to_msecs(ret));
871} 911}
872 912
913static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
914{
915 struct brcms_info *wl = hw->priv;
916 u64 tsf;
917
918 spin_lock_bh(&wl->lock);
919 tsf = brcms_c_tsf_get(wl->wlc);
920 spin_unlock_bh(&wl->lock);
921
922 return tsf;
923}
924
925static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
926 struct ieee80211_vif *vif, u64 tsf)
927{
928 struct brcms_info *wl = hw->priv;
929
930 spin_lock_bh(&wl->lock);
931 brcms_c_tsf_set(wl->wlc, tsf);
932 spin_unlock_bh(&wl->lock);
933}
934
873static const struct ieee80211_ops brcms_ops = { 935static const struct ieee80211_ops brcms_ops = {
874 .tx = brcms_ops_tx, 936 .tx = brcms_ops_tx,
875 .start = brcms_ops_start, 937 .start = brcms_ops_start,
@@ -886,6 +948,8 @@ static const struct ieee80211_ops brcms_ops = {
886 .ampdu_action = brcms_ops_ampdu_action, 948 .ampdu_action = brcms_ops_ampdu_action,
887 .rfkill_poll = brcms_ops_rfkill_poll, 949 .rfkill_poll = brcms_ops_rfkill_poll,
888 .flush = brcms_ops_flush, 950 .flush = brcms_ops_flush,
951 .get_tsf = brcms_ops_get_tsf,
952 .set_tsf = brcms_ops_set_tsf,
889}; 953};
890 954
891void brcms_dpc(unsigned long data) 955void brcms_dpc(unsigned long data)
@@ -1004,7 +1068,16 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
1004 1068
1005 /* channel change time is dependent on chip and band */ 1069 /* channel change time is dependent on chip and band */
1006 hw->channel_change_time = 7 * 1000; 1070 hw->channel_change_time = 7 * 1000;
1007 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1071 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1072 BIT(NL80211_IFTYPE_AP) |
1073 BIT(NL80211_IFTYPE_ADHOC);
1074
1075 /*
 1076 * deactivate sending probe responses by ucode, because this will
1077 * cause problems when WPS is used.
1078 *
1079 * hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
1080 */
1008 1081
1009 hw->rate_control_algorithm = "minstrel_ht"; 1082 hw->rate_control_algorithm = "minstrel_ht";
1010 1083
@@ -1151,6 +1224,8 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
1151 pr_err("%s: brcms_attach failed!\n", __func__); 1224 pr_err("%s: brcms_attach failed!\n", __func__);
1152 return -ENODEV; 1225 return -ENODEV;
1153 } 1226 }
1227 brcms_led_register(wl);
1228
1154 return 0; 1229 return 0;
1155} 1230}
1156 1231
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 947ccacf43e6..4090032e81a2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -20,8 +20,10 @@
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23#include <linux/leds.h>
23 24
24#include "ucode_loader.h" 25#include "ucode_loader.h"
26#include "led.h"
25/* 27/*
26 * Starting index for 5G rates in the 28 * Starting index for 5G rates in the
27 * legacy rate table. 29 * legacy rate table.
@@ -81,6 +83,8 @@ struct brcms_info {
81 struct wiphy *wiphy; 83 struct wiphy *wiphy;
82 struct brcms_ucode ucode; 84 struct brcms_ucode ucode;
83 bool mute_tx; 85 bool mute_tx;
86 struct brcms_led radio_led;
87 struct led_classdev led_dev;
84}; 88};
85 89
86/* misc callbacks */ 90/* misc callbacks */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8ef02dca8f8c..28e7aeedd184 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2010 Broadcom Corporation 2 * Copyright (c) 2010 Broadcom Corporation
3 * Copyright (c) 2013 Hauke Mehrtens <hauke@hauke-m.de>
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -448,6 +449,10 @@ static void brcms_c_detach_mfree(struct brcms_c_info *wlc)
448 kfree(wlc->corestate); 449 kfree(wlc->corestate);
449 kfree(wlc->hw->bandstate[0]); 450 kfree(wlc->hw->bandstate[0]);
450 kfree(wlc->hw); 451 kfree(wlc->hw);
452 if (wlc->beacon)
453 dev_kfree_skb_any(wlc->beacon);
454 if (wlc->probe_resp)
455 dev_kfree_skb_any(wlc->probe_resp);
451 456
452 /* free the wlc */ 457 /* free the wlc */
453 kfree(wlc); 458 kfree(wlc);
@@ -1069,7 +1074,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1069 1074
1070static void brcms_c_tbtt(struct brcms_c_info *wlc) 1075static void brcms_c_tbtt(struct brcms_c_info *wlc)
1071{ 1076{
1072 if (!wlc->bsscfg->BSS) 1077 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
1073 /* 1078 /*
1074 * DirFrmQ is now valid...defer setting until end 1079 * DirFrmQ is now valid...defer setting until end
1075 * of ATIM window 1080 * of ATIM window
@@ -2163,6 +2168,32 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
2163 } 2168 }
2164} 2169}
2165 2170
2171void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr)
2172{
2173 memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
2174 wlc->bsscfg->type = BRCMS_TYPE_STATION;
2175}
2176
2177void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
2178 u8 *ssid, size_t ssid_len)
2179{
2180 brcms_c_set_ssid(wlc, ssid, ssid_len);
2181
2182 memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
2183 memcpy(wlc->bsscfg->BSSID, bssid, sizeof(wlc->bsscfg->BSSID));
2184 wlc->bsscfg->type = BRCMS_TYPE_AP;
2185
2186 brcms_b_mctrl(wlc->hw, MCTL_AP | MCTL_INFRA, MCTL_AP | MCTL_INFRA);
2187}
2188
2189void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr)
2190{
2191 memcpy(wlc->pub->cur_etheraddr, addr, sizeof(wlc->pub->cur_etheraddr));
2192 wlc->bsscfg->type = BRCMS_TYPE_ADHOC;
2193
2194 brcms_b_mctrl(wlc->hw, MCTL_AP | MCTL_INFRA, 0);
2195}
2196
2166/* Initialize GPIOs that are controlled by D11 core */ 2197/* Initialize GPIOs that are controlled by D11 core */
2167static void brcms_c_gpio_init(struct brcms_c_info *wlc) 2198static void brcms_c_gpio_init(struct brcms_c_info *wlc)
2168{ 2199{
@@ -3043,8 +3074,6 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
3043 */ 3074 */
3044static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3045{ 3076{
3046 struct brcms_bss_cfg *cfg = wlc->bsscfg;
3047
3048 /* disallow PS when one of the following global conditions meets */ 3077 /* disallow PS when one of the following global conditions meets */
3049 if (!wlc->pub->associated) 3078 if (!wlc->pub->associated)
3050 return false; 3079 return false;
@@ -3053,16 +3082,11 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3053 if (wlc->filter_flags & FIF_PROMISC_IN_BSS) 3082 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3054 return false; 3083 return false;
3055 3084
3056 if (cfg->associated) { 3085 if (wlc->bsscfg->type == BRCMS_TYPE_AP)
3057 /* 3086 return false;
3058 * disallow PS when one of the following
3059 * bsscfg specific conditions meets
3060 */
3061 if (!cfg->BSS)
3062 return false;
3063 3087
3088 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
3064 return false; 3089 return false;
3065 }
3066 3090
3067 return true; 3091 return true;
3068} 3092}
@@ -3771,7 +3795,7 @@ static int brcms_c_set_mac(struct brcms_bss_cfg *bsscfg)
3771 struct brcms_c_info *wlc = bsscfg->wlc; 3795 struct brcms_c_info *wlc = bsscfg->wlc;
3772 3796
3773 /* enter the MAC addr into the RXE match registers */ 3797 /* enter the MAC addr into the RXE match registers */
3774 brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, bsscfg->cur_etheraddr); 3798 brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, wlc->pub->cur_etheraddr);
3775 3799
3776 brcms_c_ampdu_macaddr_upd(wlc); 3800 brcms_c_ampdu_macaddr_upd(wlc);
3777 3801
@@ -3787,6 +3811,15 @@ static void brcms_c_set_bssid(struct brcms_bss_cfg *bsscfg)
3787 brcms_c_set_addrmatch(bsscfg->wlc, RCM_BSSID_OFFSET, bsscfg->BSSID); 3811 brcms_c_set_addrmatch(bsscfg->wlc, RCM_BSSID_OFFSET, bsscfg->BSSID);
3788} 3812}
3789 3813
3814void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len)
3815{
3816 u8 len = min_t(u8, sizeof(wlc->bsscfg->SSID), ssid_len);
3817 memset(wlc->bsscfg->SSID, 0, sizeof(wlc->bsscfg->SSID));
3818
3819 memcpy(wlc->bsscfg->SSID, ssid, len);
3820 wlc->bsscfg->SSID_len = len;
3821}
3822
3790static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot) 3823static void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
3791{ 3824{
3792 wlc_hw->shortslot = shortslot; 3825 wlc_hw->shortslot = shortslot;
@@ -3821,7 +3854,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
3821 if (wlc->home_chanspec != chanspec) { 3854 if (wlc->home_chanspec != chanspec) {
3822 wlc->home_chanspec = chanspec; 3855 wlc->home_chanspec = chanspec;
3823 3856
3824 if (wlc->bsscfg->associated) 3857 if (wlc->pub->associated)
3825 wlc->bsscfg->current_bss->chanspec = chanspec; 3858 wlc->bsscfg->current_bss->chanspec = chanspec;
3826 } 3859 }
3827} 3860}
@@ -4091,10 +4124,14 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
4091 *shm_entry++); 4124 *shm_entry++);
4092 } 4125 }
4093 4126
4094 if (suspend) { 4127 if (suspend)
4095 brcms_c_suspend_mac_and_wait(wlc); 4128 brcms_c_suspend_mac_and_wait(wlc);
4129
4130 brcms_c_update_beacon(wlc);
4131 brcms_c_update_probe_resp(wlc, false);
4132
4133 if (suspend)
4096 brcms_c_enable_mac(wlc); 4134 brcms_c_enable_mac(wlc);
4097 }
4098} 4135}
4099 4136
4100static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend) 4137static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
@@ -4332,7 +4369,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
4332 4369
4333 /* WME QoS mode is Auto by default */ 4370 /* WME QoS mode is Auto by default */
4334 wlc->pub->_ampdu = AMPDU_AGG_HOST; 4371 wlc->pub->_ampdu = AMPDU_AGG_HOST;
4335 wlc->pub->bcmerror = 0;
4336} 4372}
4337 4373
4338static uint brcms_c_attach_module(struct brcms_c_info *wlc) 4374static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -5072,8 +5108,8 @@ int brcms_c_up(struct brcms_c_info *wlc)
5072 struct brcms_bss_cfg *bsscfg = wlc->bsscfg; 5108 struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
5073 mboolset(wlc->pub->radio_disabled, 5109 mboolset(wlc->pub->radio_disabled,
5074 WL_RADIO_HW_DISABLE); 5110 WL_RADIO_HW_DISABLE);
5075 5111 if (bsscfg->type == BRCMS_TYPE_STATION ||
5076 if (bsscfg->enable && bsscfg->BSS) 5112 bsscfg->type == BRCMS_TYPE_ADHOC)
5077 brcms_err(wlc->hw->d11core, 5113 brcms_err(wlc->hw->d11core,
5078 "wl%d: up: rfdisable -> " 5114 "wl%d: up: rfdisable -> "
5079 "bsscfg_disable()\n", 5115 "bsscfg_disable()\n",
@@ -5099,7 +5135,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
5099 wlc->pub->up = true; 5135 wlc->pub->up = true;
5100 5136
5101 if (wlc->bandinit_pending) { 5137 if (wlc->bandinit_pending) {
5102 ch = wlc->pub->ieee_hw->conf.channel; 5138 ch = wlc->pub->ieee_hw->conf.chandef.chan;
5103 brcms_c_suspend_mac_and_wait(wlc); 5139 brcms_c_suspend_mac_and_wait(wlc);
5104 brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value)); 5140 brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
5105 wlc->bandinit_pending = false; 5141 wlc->bandinit_pending = false;
@@ -5434,7 +5470,7 @@ static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
5434 u8 r; 5470 u8 r;
5435 bool war = false; 5471 bool war = false;
5436 5472
5437 if (wlc->bsscfg->associated) 5473 if (wlc->pub->associated)
5438 r = wlc->bsscfg->current_bss->rateset.rates[0]; 5474 r = wlc->bsscfg->current_bss->rateset.rates[0];
5439 else 5475 else
5440 r = wlc->default_bss->rateset.rates[0]; 5476 r = wlc->default_bss->rateset.rates[0];
@@ -5528,7 +5564,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
5528 /* merge rateset coming in with the current mcsset */ 5564 /* merge rateset coming in with the current mcsset */
5529 if (wlc->pub->_n_enab & SUPPORT_11N) { 5565 if (wlc->pub->_n_enab & SUPPORT_11N) {
5530 struct brcms_bss_info *mcsset_bss; 5566 struct brcms_bss_info *mcsset_bss;
5531 if (wlc->bsscfg->associated) 5567 if (wlc->pub->associated)
5532 mcsset_bss = wlc->bsscfg->current_bss; 5568 mcsset_bss = wlc->bsscfg->current_bss;
5533 else 5569 else
5534 mcsset_bss = wlc->default_bss; 5570 mcsset_bss = wlc->default_bss;
@@ -5543,12 +5579,36 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
5543 return bcmerror; 5579 return bcmerror;
5544} 5580}
5545 5581
5582static void brcms_c_time_lock(struct brcms_c_info *wlc)
5583{
5584 bcma_set32(wlc->hw->d11core, D11REGOFFS(maccontrol), MCTL_TBTTHOLD);
5585 /* Commit the write */
5586 bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
5587}
5588
5589static void brcms_c_time_unlock(struct brcms_c_info *wlc)
5590{
5591 bcma_mask32(wlc->hw->d11core, D11REGOFFS(maccontrol), ~MCTL_TBTTHOLD);
5592 /* Commit the write */
5593 bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
5594}
5595
5546int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period) 5596int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
5547{ 5597{
5598 u32 bcnint_us;
5599
5548 if (period == 0) 5600 if (period == 0)
5549 return -EINVAL; 5601 return -EINVAL;
5550 5602
5551 wlc->default_bss->beacon_period = period; 5603 wlc->default_bss->beacon_period = period;
5604
5605 bcnint_us = period << 10;
5606 brcms_c_time_lock(wlc);
5607 bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_cfprep),
5608 (bcnint_us << CFPREP_CBI_SHIFT));
5609 bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_cfpstart), bcnint_us);
5610 brcms_c_time_unlock(wlc);
5611
5552 return 0; 5612 return 0;
5553} 5613}
5554 5614
@@ -7291,72 +7351,110 @@ brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
7291 } 7351 }
7292} 7352}
7293 7353
7294/* Max buffering needed for beacon template/prb resp template is 142 bytes. 7354int brcms_c_get_header_len(void)
7295 *
7296 * PLCP header is 6 bytes.
7297 * 802.11 A3 header is 24 bytes.
7298 * Max beacon frame body template length is 112 bytes.
7299 * Max probe resp frame body template length is 110 bytes.
7300 *
7301 * *len on input contains the max length of the packet available.
7302 *
7303 * The *len value is set to the number of bytes in buf used, and starts
7304 * with the PLCP and included up to, but not including, the 4 byte FCS.
7305 */
7306static void
7307brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
7308 u32 bcn_rspec,
7309 struct brcms_bss_cfg *cfg, u16 *buf, int *len)
7310{ 7355{
7311 static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255}; 7356 return TXOFF;
7312 struct cck_phy_hdr *plcp; 7357}
7313 struct ieee80211_mgmt *h;
7314 int hdr_len, body_len;
7315
7316 hdr_len = D11_PHY_HDR_LEN + DOT11_MAC_HDR_LEN;
7317 7358
7318 /* calc buffer size provided for frame body */ 7359static void brcms_c_beacon_write(struct brcms_c_info *wlc,
7319 body_len = *len - hdr_len; 7360 struct sk_buff *beacon, u16 tim_offset,
7320 /* return actual size */ 7361 u16 dtim_period, bool bcn0, bool bcn1)
7321 *len = hdr_len + body_len; 7362{
7363 size_t len;
7364 struct ieee80211_tx_info *tx_info;
7365 struct brcms_hardware *wlc_hw = wlc->hw;
7366 struct ieee80211_hw *ieee_hw = brcms_c_pub(wlc)->ieee_hw;
7322 7367
7323 /* format PHY and MAC headers */ 7368 /* Get tx_info */
7324 memset(buf, 0, hdr_len); 7369 tx_info = IEEE80211_SKB_CB(beacon);
7325 7370
7326 plcp = (struct cck_phy_hdr *) buf; 7371 len = min_t(size_t, beacon->len, BCN_TMPL_LEN);
7372 wlc->bcn_rspec = ieee80211_get_tx_rate(ieee_hw, tx_info)->hw_value;
7327 7373
7328 /* 7374 brcms_c_compute_plcp(wlc, wlc->bcn_rspec,
7329 * PLCP for Probe Response frames are filled in from 7375 len + FCS_LEN - D11_PHY_HDR_LEN, beacon->data);
7330 * core's rate table
7331 */
7332 if (type == IEEE80211_STYPE_BEACON)
7333 /* fill in PLCP */
7334 brcms_c_compute_plcp(wlc, bcn_rspec,
7335 (DOT11_MAC_HDR_LEN + body_len + FCS_LEN),
7336 (u8 *) plcp);
7337 7376
7338 /* "Regular" and 16 MBSS but not for 4 MBSS */ 7377 /* "Regular" and 16 MBSS but not for 4 MBSS */
7339 /* Update the phytxctl for the beacon based on the rspec */ 7378 /* Update the phytxctl for the beacon based on the rspec */
7340 brcms_c_beacon_phytxctl_txant_upd(wlc, bcn_rspec); 7379 brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
7341 7380
7342 h = (struct ieee80211_mgmt *)&plcp[1]; 7381 if (bcn0) {
7382 /* write the probe response into the template region */
7383 brcms_b_write_template_ram(wlc_hw, T_BCN0_TPL_BASE,
7384 (len + 3) & ~3, beacon->data);
7343 7385
7344 /* fill in 802.11 header */ 7386 /* write beacon length to SCR */
7345 h->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | type); 7387 brcms_b_write_shm(wlc_hw, M_BCN0_FRM_BYTESZ, (u16) len);
7388 }
7389 if (bcn1) {
 7390 /* write the beacon into the template region */
7391 brcms_b_write_template_ram(wlc_hw, T_BCN1_TPL_BASE,
7392 (len + 3) & ~3, beacon->data);
7346 7393
7347 /* DUR is 0 for multicast bcn, or filled in by MAC for prb resp */ 7394 /* write beacon length to SCR */
7348 /* A1 filled in by MAC for prb resp, broadcast for bcn */ 7395 brcms_b_write_shm(wlc_hw, M_BCN1_FRM_BYTESZ, (u16) len);
7349 if (type == IEEE80211_STYPE_BEACON) 7396 }
7350 memcpy(&h->da, &ether_bcast, ETH_ALEN);
7351 memcpy(&h->sa, &cfg->cur_etheraddr, ETH_ALEN);
7352 memcpy(&h->bssid, &cfg->BSSID, ETH_ALEN);
7353 7397
7354 /* SEQ filled in by MAC */ 7398 if (tim_offset != 0) {
7399 brcms_b_write_shm(wlc_hw, M_TIMBPOS_INBEACON,
7400 tim_offset + D11B_PHY_HDR_LEN);
7401 brcms_b_write_shm(wlc_hw, M_DOT11_DTIMPERIOD, dtim_period);
7402 } else {
7403 brcms_b_write_shm(wlc_hw, M_TIMBPOS_INBEACON,
7404 len + D11B_PHY_HDR_LEN);
7405 brcms_b_write_shm(wlc_hw, M_DOT11_DTIMPERIOD, 0);
7406 }
7355} 7407}
7356 7408
7357int brcms_c_get_header_len(void) 7409static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
7410 struct sk_buff *beacon, u16 tim_offset,
7411 u16 dtim_period)
7358{ 7412{
7359 return TXOFF; 7413 struct brcms_hardware *wlc_hw = wlc->hw;
7414 struct bcma_device *core = wlc_hw->d11core;
7415
7416 /* Hardware beaconing for this config */
7417 u32 both_valid = MCMD_BCN0VLD | MCMD_BCN1VLD;
7418
7419 /* Check if both templates are in use, if so sched. an interrupt
7420 * that will call back into this routine
7421 */
7422 if ((bcma_read32(core, D11REGOFFS(maccommand)) & both_valid) == both_valid)
7423 /* clear any previous status */
7424 bcma_write32(core, D11REGOFFS(macintstatus), MI_BCNTPL);
7425
7426 if (wlc->beacon_template_virgin) {
7427 wlc->beacon_template_virgin = false;
7428 brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period, true,
7429 true);
7430 /* mark beacon0 valid */
7431 bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN0VLD);
7432 return;
7433 }
7434
7435 /* Check that after scheduling the interrupt both of the
7436 * templates are still busy. if not clear the int. & remask
7437 */
7438 if ((bcma_read32(core, D11REGOFFS(maccommand)) & both_valid) == both_valid) {
7439 wlc->defmacintmask |= MI_BCNTPL;
7440 return;
7441 }
7442
7443 if (!(bcma_read32(core, D11REGOFFS(maccommand)) & MCMD_BCN0VLD)) {
7444 brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period, true,
7445 false);
7446 /* mark beacon0 valid */
7447 bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN0VLD);
7448 return;
7449 }
7450 if (!(bcma_read32(core, D11REGOFFS(maccommand)) & MCMD_BCN1VLD)) {
7451 brcms_c_beacon_write(wlc, beacon, tim_offset, dtim_period,
7452 false, true);
 7453 /* mark beacon1 valid */
7454 bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
7455 return;
7456 }
7457 return;
7360} 7458}
7361 7459
7362/* 7460/*
@@ -7366,9 +7464,57 @@ void brcms_c_update_beacon(struct brcms_c_info *wlc)
7366{ 7464{
7367 struct brcms_bss_cfg *bsscfg = wlc->bsscfg; 7465 struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
7368 7466
7369 if (bsscfg->up && !bsscfg->BSS) 7467 if (wlc->pub->up && (bsscfg->type == BRCMS_TYPE_AP ||
7468 bsscfg->type == BRCMS_TYPE_ADHOC)) {
7370 /* Clear the soft intmask */ 7469 /* Clear the soft intmask */
7371 wlc->defmacintmask &= ~MI_BCNTPL; 7470 wlc->defmacintmask &= ~MI_BCNTPL;
7471 if (!wlc->beacon)
7472 return;
7473 brcms_c_update_beacon_hw(wlc, wlc->beacon,
7474 wlc->beacon_tim_offset,
7475 wlc->beacon_dtim_period);
7476 }
7477}
7478
7479void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
7480 u16 tim_offset, u16 dtim_period)
7481{
7482 if (!beacon)
7483 return;
7484 if (wlc->beacon)
7485 dev_kfree_skb_any(wlc->beacon);
7486 wlc->beacon = beacon;
7487
7488 /* add PLCP */
7489 skb_push(wlc->beacon, D11_PHY_HDR_LEN);
7490 wlc->beacon_tim_offset = tim_offset;
7491 wlc->beacon_dtim_period = dtim_period;
7492 brcms_c_update_beacon(wlc);
7493}
7494
7495void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
7496 struct sk_buff *probe_resp)
7497{
7498 if (!probe_resp)
7499 return;
7500 if (wlc->probe_resp)
7501 dev_kfree_skb_any(wlc->probe_resp);
7502 wlc->probe_resp = probe_resp;
7503
7504 /* add PLCP */
7505 skb_push(wlc->probe_resp, D11_PHY_HDR_LEN);
7506 brcms_c_update_probe_resp(wlc, false);
7507}
7508
7509void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable)
7510{
7511 /*
7512 * prevent ucode from sending probe responses by setting the timeout
7513 * to 1, it can not send it in that time frame.
7514 */
7515 wlc->prb_resp_timeout = enable ? BRCMS_PRB_RESP_TIMEOUT : 1;
7516 brcms_b_write_shm(wlc->hw, M_PRS_MAXTIME, wlc->prb_resp_timeout);
7517 /* TODO: if (enable) => also deactivate receiving of probe request */
7372} 7518}
7373 7519
7374/* Write ssid into shared memory */ 7520/* Write ssid into shared memory */
@@ -7390,30 +7536,19 @@ brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
7390static void 7536static void
7391brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc, 7537brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
7392 struct brcms_bss_cfg *cfg, 7538 struct brcms_bss_cfg *cfg,
7539 struct sk_buff *probe_resp,
7393 bool suspend) 7540 bool suspend)
7394{ 7541{
7395 u16 *prb_resp; 7542 int len;
7396 int len = BCN_TMPL_LEN;
7397
7398 prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC);
7399 if (!prb_resp)
7400 return;
7401
7402 /*
7403 * write the probe response to hardware, or save in
7404 * the config structure
7405 */
7406 7543
7407 /* create the probe response template */ 7544 len = min_t(size_t, probe_resp->len, BCN_TMPL_LEN);
7408 brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0,
7409 cfg, prb_resp, &len);
7410 7545
7411 if (suspend) 7546 if (suspend)
7412 brcms_c_suspend_mac_and_wait(wlc); 7547 brcms_c_suspend_mac_and_wait(wlc);
7413 7548
7414 /* write the probe response into the template region */ 7549 /* write the probe response into the template region */
7415 brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE, 7550 brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
7416 (len + 3) & ~3, prb_resp); 7551 (len + 3) & ~3, probe_resp->data);
7417 7552
7418 /* write the length of the probe response frame (+PLCP/-FCS) */ 7553 /* write the length of the probe response frame (+PLCP/-FCS) */
7419 brcms_b_write_shm(wlc->hw, M_PRB_RESP_FRM_LEN, (u16) len); 7554 brcms_b_write_shm(wlc->hw, M_PRB_RESP_FRM_LEN, (u16) len);
@@ -7427,13 +7562,11 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
7427 * PLCP header for the call to brcms_c_mod_prb_rsp_rate_table() 7562 * PLCP header for the call to brcms_c_mod_prb_rsp_rate_table()
7428 * by subtracting the PLCP len and adding the FCS. 7563 * by subtracting the PLCP len and adding the FCS.
7429 */ 7564 */
7430 len += (-D11_PHY_HDR_LEN + FCS_LEN); 7565 brcms_c_mod_prb_rsp_rate_table(wlc,
7431 brcms_c_mod_prb_rsp_rate_table(wlc, (u16) len); 7566 (u16)len + FCS_LEN - D11_PHY_HDR_LEN);
7432 7567
7433 if (suspend) 7568 if (suspend)
7434 brcms_c_enable_mac(wlc); 7569 brcms_c_enable_mac(wlc);
7435
7436 kfree(prb_resp);
7437} 7570}
7438 7571
7439void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend) 7572void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
@@ -7441,8 +7574,13 @@ void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
7441 struct brcms_bss_cfg *bsscfg = wlc->bsscfg; 7574 struct brcms_bss_cfg *bsscfg = wlc->bsscfg;
7442 7575
7443 /* update AP or IBSS probe responses */ 7576 /* update AP or IBSS probe responses */
7444 if (bsscfg->up && !bsscfg->BSS) 7577 if (wlc->pub->up && (bsscfg->type == BRCMS_TYPE_AP ||
7445 brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend); 7578 bsscfg->type == BRCMS_TYPE_ADHOC)) {
7579 if (!wlc->probe_resp)
7580 return;
7581 brcms_c_bss_update_probe_resp(wlc, bsscfg, wlc->probe_resp,
7582 suspend);
7583 }
7446} 7584}
7447 7585
7448int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo, 7586int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
@@ -7481,7 +7619,6 @@ void brcms_c_scan_stop(struct brcms_c_info *wlc)
7481void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state) 7619void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
7482{ 7620{
7483 wlc->pub->associated = state; 7621 wlc->pub->associated = state;
7484 wlc->bsscfg->associated = state;
7485} 7622}
7486 7623
7487/* 7624/*
@@ -7526,6 +7663,36 @@ void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
7526 brcms_c_bcn_li_upd(wlc); 7663 brcms_c_bcn_li_upd(wlc);
7527} 7664}
7528 7665
7666u64 brcms_c_tsf_get(struct brcms_c_info *wlc)
7667{
7668 u32 tsf_h, tsf_l;
7669 u64 tsf;
7670
7671 brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
7672
7673 tsf = tsf_h;
7674 tsf <<= 32;
7675 tsf |= tsf_l;
7676
7677 return tsf;
7678}
7679
7680void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf)
7681{
7682 u32 tsf_h, tsf_l;
7683
7684 brcms_c_time_lock(wlc);
7685
7686 tsf_l = tsf;
7687 tsf_h = (tsf >> 32);
7688
 7689 /* write the tsf timer low, then high */
7690 bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_timerlow), tsf_l);
7691 bcma_write32(wlc->hw->d11core, D11REGOFFS(tsf_timerhigh), tsf_h);
7692
7693 brcms_c_time_unlock(wlc);
7694}
7695
7529int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr) 7696int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr)
7530{ 7697{
7531 uint qdbm; 7698 uint qdbm;
@@ -7737,6 +7904,10 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
7737 brcms_rfkill_set_hw_state(wlc->wl); 7904 brcms_rfkill_set_hw_state(wlc->wl);
7738 } 7905 }
7739 7906
7907 /* BCN template is available */
7908 if (macintstatus & MI_BCNTPL)
7909 brcms_c_update_beacon(wlc);
7910
7740 /* it isn't done and needs to be resched if macintstatus is non-zero */ 7911 /* it isn't done and needs to be resched if macintstatus is non-zero */
7741 return wlc->macintstatus != 0; 7912 return wlc->macintstatus != 0;
7742 7913
@@ -7748,7 +7919,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
7748void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) 7919void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
7749{ 7920{
7750 struct bcma_device *core = wlc->hw->d11core; 7921 struct bcma_device *core = wlc->hw->d11core;
7751 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 7922 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
7752 u16 chanspec; 7923 u16 chanspec;
7753 7924
7754 brcms_dbg_info(core, "wl%d\n", wlc->pub->unit); 7925 brcms_dbg_info(core, "wl%d\n", wlc->pub->unit);
@@ -7765,7 +7936,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
7765 brcms_c_set_bssid(wlc->bsscfg); 7936 brcms_c_set_bssid(wlc->bsscfg);
7766 7937
7767 /* Update tsf_cfprep if associated and up */ 7938 /* Update tsf_cfprep if associated and up */
7768 if (wlc->pub->associated && wlc->bsscfg->up) { 7939 if (wlc->pub->associated && wlc->pub->up) {
7769 u32 bi; 7940 u32 bi;
7770 7941
7771 /* get beacon period and convert to uS */ 7942 /* get beacon period and convert to uS */
@@ -7810,9 +7981,14 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
7810 7981
7811 /* read the ucode version if we have not yet done so */ 7982 /* read the ucode version if we have not yet done so */
7812 if (wlc->ucode_rev == 0) { 7983 if (wlc->ucode_rev == 0) {
7813 wlc->ucode_rev = 7984 u16 rev;
7814 brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR) << NBITS(u16); 7985 u16 patch;
7815 wlc->ucode_rev |= brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR); 7986
7987 rev = brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR);
7988 patch = brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
7989 wlc->ucode_rev = (rev << NBITS(u16)) | patch;
7990 snprintf(wlc->wiphy->fw_version,
7991 sizeof(wlc->wiphy->fw_version), "%u.%u", rev, patch);
7816 } 7992 }
7817 7993
7818 /* ..now really unleash hell (allow the MAC out of suspend) */ 7994 /* ..now really unleash hell (allow the MAC out of suspend) */
@@ -7868,6 +8044,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
7868 pub->unit = unit; 8044 pub->unit = unit;
7869 pub->_piomode = piomode; 8045 pub->_piomode = piomode;
7870 wlc->bandinit_pending = false; 8046 wlc->bandinit_pending = false;
8047 wlc->beacon_template_virgin = true;
7871 8048
7872 /* populate struct brcms_c_info with default values */ 8049 /* populate struct brcms_c_info with default values */
7873 brcms_c_info_init(wlc, unit); 8050 brcms_c_info_init(wlc, unit);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index fb447747c2c6..b5d7a38b53fe 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -492,6 +492,8 @@ struct brcms_c_info {
492 bool radio_monitor; 492 bool radio_monitor;
493 bool going_down; 493 bool going_down;
494 494
495 bool beacon_template_virgin;
496
495 struct brcms_timer *wdtimer; 497 struct brcms_timer *wdtimer;
496 struct brcms_timer *radio_timer; 498 struct brcms_timer *radio_timer;
497 499
@@ -561,6 +563,11 @@ struct brcms_c_info {
561 563
562 struct wiphy *wiphy; 564 struct wiphy *wiphy;
563 struct scb pri_scb; 565 struct scb pri_scb;
566
567 struct sk_buff *beacon;
568 u16 beacon_tim_offset;
569 u16 beacon_dtim_period;
570 struct sk_buff *probe_resp;
564}; 571};
565 572
566/* antsel module specific state */ 573/* antsel module specific state */
@@ -576,14 +583,17 @@ struct antsel_info {
576 struct brcms_antselcfg antcfg_cur; /* current antenna config (auto) */ 583 struct brcms_antselcfg antcfg_cur; /* current antenna config (auto) */
577}; 584};
578 585
586enum brcms_bss_type {
587 BRCMS_TYPE_STATION,
588 BRCMS_TYPE_AP,
589 BRCMS_TYPE_ADHOC,
590};
591
579/* 592/*
580 * BSS configuration state 593 * BSS configuration state
581 * 594 *
582 * wlc: wlc to which this bsscfg belongs to. 595 * wlc: wlc to which this bsscfg belongs to.
583 * up: is this configuration up operational 596 * type: interface type
584 * enable: is this configuration enabled
585 * associated: is BSS in ASSOCIATED state
586 * BSS: infraustructure or adhoc
587 * SSID_len: the length of SSID 597 * SSID_len: the length of SSID
588 * SSID: SSID string 598 * SSID: SSID string
589 * 599 *
@@ -599,14 +609,10 @@ struct antsel_info {
599 */ 609 */
600struct brcms_bss_cfg { 610struct brcms_bss_cfg {
601 struct brcms_c_info *wlc; 611 struct brcms_c_info *wlc;
602 bool up; 612 enum brcms_bss_type type;
603 bool enable;
604 bool associated;
605 bool BSS;
606 u8 SSID_len; 613 u8 SSID_len;
607 u8 SSID[IEEE80211_MAX_SSID_LEN]; 614 u8 SSID[IEEE80211_MAX_SSID_LEN];
608 u8 BSSID[ETH_ALEN]; 615 u8 BSSID[ETH_ALEN];
609 u8 cur_etheraddr[ETH_ALEN];
610 struct brcms_bss_info *current_bss; 616 struct brcms_bss_info *current_bss;
611}; 617};
612 618
@@ -631,7 +637,6 @@ extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
631extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw, 637extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
632 struct ieee80211_sta *sta, 638 struct ieee80211_sta *sta,
633 void (*dma_callback_fn)); 639 void (*dma_callback_fn));
634extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
635extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend); 640extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
636extern int brcms_c_set_nmode(struct brcms_c_info *wlc); 641extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
637extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, 642extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 91937c5025ce..b0fd807f2b2b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -198,8 +198,6 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
198 198
199void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val) 199void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
200{ 200{
201 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
202
203 if ((D11REV_GE(pi->sh->corerev, 24)) || 201 if ((D11REV_GE(pi->sh->corerev, 24)) ||
204 (D11REV_IS(pi->sh->corerev, 22) 202 (D11REV_IS(pi->sh->corerev, 22)
205 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { 203 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@@ -211,7 +209,7 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
211 bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val); 209 bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
212 } 210 }
213 211
214 if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) && 212 if ((pi->d11core->bus->hosttype == BCMA_HOSTTYPE_PCI) &&
215 (++pi->phy_wreg >= pi->phy_wreg_limit)) { 213 (++pi->phy_wreg >= pi->phy_wreg_limit)) {
216 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); 214 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
217 pi->phy_wreg = 0; 215 pi->phy_wreg = 0;
@@ -297,10 +295,8 @@ void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
297 if (addr == 0x72) 295 if (addr == 0x72)
298 (void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata)); 296 (void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
299#else 297#else
300 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
301
302 bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16)); 298 bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
303 if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) && 299 if ((pi->d11core->bus->hosttype == BCMA_HOSTTYPE_PCI) &&
304 (++pi->phy_wreg >= pi->phy_wreg_limit)) { 300 (++pi->phy_wreg >= pi->phy_wreg_limit)) {
305 pi->phy_wreg = 0; 301 pi->phy_wreg = 0;
306 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); 302 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
@@ -374,7 +370,6 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
374 if (sh == NULL) 370 if (sh == NULL)
375 return NULL; 371 return NULL;
376 372
377 sh->sih = shp->sih;
378 sh->physhim = shp->physhim; 373 sh->physhim = shp->physhim;
379 sh->unit = shp->unit; 374 sh->unit = shp->unit;
380 sh->corerev = shp->corerev; 375 sh->corerev = shp->corerev;
@@ -2911,29 +2906,24 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
2911 mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2); 2906 mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
2912 2907
2913 } 2908 }
2914 ai_cc_reg(pi->sh->sih, 2909
2915 offsetof(struct chipcregs, gpiocontrol), 2910 bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc,
2916 ~0x0, 0x0); 2911 0x0, 0x0);
2917 ai_cc_reg(pi->sh->sih, 2912 bcma_chipco_gpio_out(&pi->d11core->bus->drv_cc,
2918 offsetof(struct chipcregs, gpioout), 2913 ~0x40, 0x40);
2919 0x40, 0x40); 2914 bcma_chipco_gpio_outen(&pi->d11core->bus->drv_cc,
2920 ai_cc_reg(pi->sh->sih, 2915 ~0x40, 0x40);
2921 offsetof(struct chipcregs, gpioouten),
2922 0x40, 0x40);
2923 } else { 2916 } else {
2924 mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2); 2917 mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
2925 2918
2926 mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2); 2919 mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
2927 2920
2928 ai_cc_reg(pi->sh->sih, 2921 bcma_chipco_gpio_out(&pi->d11core->bus->drv_cc,
2929 offsetof(struct chipcregs, gpioout), 2922 ~0x40, 0x00);
2930 0x40, 0x00); 2923 bcma_chipco_gpio_outen(&pi->d11core->bus->drv_cc,
2931 ai_cc_reg(pi->sh->sih, 2924 ~0x40, 0x00);
2932 offsetof(struct chipcregs, gpioouten), 2925 bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc,
2933 0x40, 0x0); 2926 0x0, 0x40);
2934 ai_cc_reg(pi->sh->sih,
2935 offsetof(struct chipcregs, gpiocontrol),
2936 ~0x0, 0x40);
2937 } 2927 }
2938 } 2928 }
2939} 2929}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index af00e2c2b266..1dc767c31653 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -488,7 +488,6 @@ struct lcnphy_cal_results {
488struct shared_phy { 488struct shared_phy {
489 struct brcms_phy *phy_head; 489 struct brcms_phy *phy_head;
490 uint unit; 490 uint unit;
491 struct si_pub *sih;
492 struct phy_shim_info *physhim; 491 struct phy_shim_info *physhim;
493 uint corerev; 492 uint corerev;
494 u32 machwcap; 493 u32 machwcap;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 18d37645e2cd..3d6b16ce4687 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1595,11 +1595,15 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
1595 if (channel == 1 || channel == 2 || channel == 3 || 1595 if (channel == 1 || channel == 2 || channel == 3 ||
1596 channel == 4 || channel == 9 || 1596 channel == 4 || channel == 9 ||
1597 channel == 10 || channel == 11 || channel == 12) { 1597 channel == 10 || channel == 11 || channel == 12) {
1598 si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03000c04); 1598 bcma_chipco_pll_write(&pi->d11core->bus->drv_cc, 0x2,
1599 si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x0); 1599 0x03000c04);
1600 si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x200005c0); 1600 bcma_chipco_pll_maskset(&pi->d11core->bus->drv_cc, 0x3,
1601 1601 ~0x00ffffff, 0x0);
1602 si_pmu_pllupd(pi->sh->sih); 1602 bcma_chipco_pll_write(&pi->d11core->bus->drv_cc, 0x4,
1603 0x200005c0);
1604
1605 bcma_cc_set32(&pi->d11core->bus->drv_cc, BCMA_CC_PMU_CTL,
1606 BCMA_CC_PMU_CTL_PLL_UPD);
1603 write_phy_reg(pi, 0x942, 0); 1607 write_phy_reg(pi, 0x942, 0);
1604 wlc_lcnphy_txrx_spur_avoidance_mode(pi, false); 1608 wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
1605 pi_lcn->lcnphy_spurmod = false; 1609 pi_lcn->lcnphy_spurmod = false;
@@ -1607,11 +1611,15 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
1607 1611
1608 write_phy_reg(pi, 0x425, 0x5907); 1612 write_phy_reg(pi, 0x425, 0x5907);
1609 } else { 1613 } else {
1610 si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03140c04); 1614 bcma_chipco_pll_write(&pi->d11core->bus->drv_cc, 0x2,
1611 si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x333333); 1615 0x03140c04);
1612 si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x202c2820); 1616 bcma_chipco_pll_maskset(&pi->d11core->bus->drv_cc, 0x3,
1613 1617 ~0x00ffffff, 0x333333);
1614 si_pmu_pllupd(pi->sh->sih); 1618 bcma_chipco_pll_write(&pi->d11core->bus->drv_cc, 0x4,
1619 0x202c2820);
1620
1621 bcma_cc_set32(&pi->d11core->bus->drv_cc, BCMA_CC_PMU_CTL,
1622 BCMA_CC_PMU_CTL_PLL_UPD);
1615 write_phy_reg(pi, 0x942, 0); 1623 write_phy_reg(pi, 0x942, 0);
1616 wlc_lcnphy_txrx_spur_avoidance_mode(pi, true); 1624 wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
1617 1625
@@ -4755,9 +4763,10 @@ void wlc_phy_init_lcnphy(struct brcms_phy *pi)
4755 4763
4756 wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec); 4764 wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
4757 4765
4758 si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9); 4766 bcma_chipco_regctl_maskset(&pi->d11core->bus->drv_cc, 0, ~0xf, 0x9);
4759 4767
4760 si_pmu_chipcontrol(pi->sh->sih, 0, 0xffffffff, 0x03CDDDDD); 4768 bcma_chipco_chipctl_maskset(&pi->d11core->bus->drv_cc, 0, 0x0,
4769 0x03CDDDDD);
4761 4770
4762 if ((pi->sh->boardflags & BFL_FEM) 4771 if ((pi->sh->boardflags & BFL_FEM)
4763 && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) 4772 && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
@@ -4968,7 +4977,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
4968 pi->hwpwrctrl_capable = true; 4977 pi->hwpwrctrl_capable = true;
4969 } 4978 }
4970 4979
4971 pi->xtalfreq = si_pmu_alp_clock(pi->sh->sih); 4980 pi->xtalfreq = bcma_chipco_get_alp_clock(&pi->d11core->bus->drv_cc);
4972 pi_lcn->lcnphy_papd_rxGnCtrl_init = 0; 4981 pi_lcn->lcnphy_papd_rxGnCtrl_init = 0;
4973 4982
4974 pi->pi_fptr.init = wlc_phy_init_lcnphy; 4983 pi->pi_fptr.init = wlc_phy_init_lcnphy;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 65db9b7458dc..3e9f5b25be63 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -19321,14 +19321,13 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
19321 (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) { 19321 (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) {
19322 if ((pi->sh->boardflags & BFL_EXTLNA) && 19322 if ((pi->sh->boardflags & BFL_EXTLNA) &&
19323 (CHSPEC_IS2G(pi->radio_chanspec))) 19323 (CHSPEC_IS2G(pi->radio_chanspec)))
19324 ai_cc_reg(pi->sh->sih, 19324 bcma_cc_set32(&pi->d11core->bus->drv_cc,
19325 offsetof(struct chipcregs, chipcontrol), 19325 BCMA_CC_CHIPCTL, 0x40);
19326 0x40, 0x40);
19327 } 19326 }
19328 19327
19329 if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357)) 19328 if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357))
19330 si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA, 19329 bcma_chipco_chipctl_maskset(&pi->d11core->bus->drv_cc, 1,
19331 CCTRL5357_EXTPA); 19330 ~CCTRL5357_EXTPA, CCTRL5357_EXTPA);
19332 19331
19333 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) && 19332 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
19334 CHSPEC_IS40(pi->radio_chanspec)) { 19333 CHSPEC_IS40(pi->radio_chanspec)) {
@@ -21133,7 +21132,6 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21133 const struct nphy_sfo_cfg *ci) 21132 const struct nphy_sfo_cfg *ci)
21134{ 21133{
21135 u16 val; 21134 u16 val;
21136 struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
21137 21135
21138 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand; 21136 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
21139 if (CHSPEC_IS5G(chanspec) && !val) { 21137 if (CHSPEC_IS5G(chanspec) && !val) {
@@ -21221,11 +21219,11 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21221 21219
21222 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) || 21220 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
21223 (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) { 21221 (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
21224 bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc, 21222 bcma_pmu_spuravoid_pllupdate(&pi->d11core->bus->drv_cc,
21225 spuravoid); 21223 spuravoid);
21226 } else { 21224 } else {
21227 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); 21225 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
21228 bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc, 21226 bcma_pmu_spuravoid_pllupdate(&pi->d11core->bus->drv_cc,
21229 spuravoid); 21227 spuravoid);
21230 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); 21228 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
21231 } 21229 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 7e9df566c733..71b80381f3ad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -115,60 +115,6 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
115 return (u16) delay; 115 return (u16) delay;
116} 116}
117 117
118/* Read/write a chipcontrol reg */
119u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
120{
121 ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
122 return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
123 mask, val);
124}
125
126/* Read/write a regcontrol reg */
127u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
128{
129 ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
130 return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
131 mask, val);
132}
133
134/* Read/write a pllcontrol reg */
135u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
136{
137 ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
138 return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
139 mask, val);
140}
141
142/* PMU PLL update */
143void si_pmu_pllupd(struct si_pub *sih)
144{
145 ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
146 PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
147}
148
149/* query alp/xtal clock frequency */
150u32 si_pmu_alp_clock(struct si_pub *sih)
151{
152 u32 clock = ALP_CLOCK;
153
154 /* bail out with default */
155 if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
156 return clock;
157
158 switch (ai_get_chip_id(sih)) {
159 case BCMA_CHIP_ID_BCM43224:
160 case BCMA_CHIP_ID_BCM43225:
161 case BCMA_CHIP_ID_BCM4313:
162 /* always 20Mhz */
163 clock = 20000 * 1000;
164 break;
165 default:
166 break;
167 }
168
169 return clock;
170}
171
172u32 si_pmu_measure_alpclk(struct si_pub *sih) 118u32 si_pmu_measure_alpclk(struct si_pub *sih)
173{ 119{
174 struct si_info *sii = container_of(sih, struct si_info, pub); 120 struct si_info *sii = container_of(sih, struct si_info, pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index f7cff873578b..20e2012d5a3a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -21,12 +21,6 @@
21#include "types.h" 21#include "types.h"
22 22
23extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih); 23extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
24extern void si_pmu_sprom_enable(struct si_pub *sih, bool enable);
25extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
26extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
27extern u32 si_pmu_alp_clock(struct si_pub *sih);
28extern void si_pmu_pllupd(struct si_pub *sih);
29extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
30extern u32 si_pmu_measure_alpclk(struct si_pub *sih); 24extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
31 25
32#endif /* _BRCM_PMU_H_ */ 26#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index b0f14b7b8616..d36ea5e1cc49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -164,8 +164,6 @@ struct brcms_pub {
164 164
165 u8 cur_etheraddr[ETH_ALEN]; /* our local ethernet address */ 165 u8 cur_etheraddr[ETH_ALEN]; /* our local ethernet address */
166 166
167 int bcmerror; /* last bcm error */
168
169 u32 radio_disabled; /* bit vector for radio disabled reasons */ 167 u32 radio_disabled; /* bit vector for radio disabled reasons */
170 168
171 u16 boardrev; /* version # of particular board */ 169 u16 boardrev; /* version # of particular board */
@@ -326,10 +324,25 @@ extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
326 s8 sslot_override); 324 s8 sslot_override);
327extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, 325extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
328 u8 interval); 326 u8 interval);
327extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
328extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
329extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); 329extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
330extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); 330extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
331extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); 331extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
332extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); 332extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
333extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); 333extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
334extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
335extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
336 const u8 *bssid, u8 *ssid, size_t ssid_len);
337extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
338extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
339extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
340 struct sk_buff *beacon, u16 tim_offset,
341 u16 dtim_period);
342extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
343 struct sk_buff *probe_resp);
344extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
345extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
346 size_t ssid_len);
334 347
335#endif /* _BRCM_PUB_H_ */ 348#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmutil/Makefile b/drivers/net/wireless/brcm80211/brcmutil/Makefile
index 6281c416289e..8a928184016a 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmutil/Makefile
@@ -19,10 +19,5 @@ ccflags-y := \
19 -Idrivers/net/wireless/brcm80211/brcmutil \ 19 -Idrivers/net/wireless/brcm80211/brcmutil \
20 -Idrivers/net/wireless/brcm80211/include 20 -Idrivers/net/wireless/brcm80211/include
21 21
22BRCMUTIL_OFILES := \ 22obj-$(CONFIG_BRCMUTIL) += brcmutil.o
23 utils.o 23brcmutil-objs = utils.o d11.o
24
25MODULEPFX := brcmutil
26
27obj-$(CONFIG_BRCMUTIL) += $(MODULEPFX).o
28$(MODULEPFX)-objs = $(BRCMUTIL_OFILES)
diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/brcm80211/brcmutil/d11.c
new file mode 100644
index 000000000000..30e54e2c6c9b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmutil/d11.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16/*********************channel spec common functions*********************/
17
18#include <linux/module.h>
19
20#include <brcmu_utils.h>
21#include <brcmu_wifi.h>
22#include <brcmu_d11.h>
23
24static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
25{
26 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
27
28 switch (ch->bw) {
29 case BRCMU_CHAN_BW_20:
30 ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
31 break;
32 case BRCMU_CHAN_BW_40:
33 default:
34 WARN_ON_ONCE(1);
35 break;
36 }
37
38 if (ch->chnum <= CH_MAX_2G_CHANNEL)
39 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
40 else
41 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
42}
43
44static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
45{
46 ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
47
48 switch (ch->bw) {
49 case BRCMU_CHAN_BW_20:
50 ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
51 break;
52 case BRCMU_CHAN_BW_40:
53 case BRCMU_CHAN_BW_80:
54 case BRCMU_CHAN_BW_80P80:
55 case BRCMU_CHAN_BW_160:
56 default:
57 WARN_ON_ONCE(1);
58 break;
59 }
60
61 if (ch->chnum <= CH_MAX_2G_CHANNEL)
62 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
63 else
64 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_5G;
65}
66
67static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
68{
69 u16 val;
70
71 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
72
73 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
74 case BRCMU_CHSPEC_D11N_BW_20:
75 ch->bw = BRCMU_CHAN_BW_20;
76 break;
77 case BRCMU_CHSPEC_D11N_BW_40:
78 ch->bw = BRCMU_CHAN_BW_40;
79 val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK;
80 if (val == BRCMU_CHSPEC_D11N_SB_L) {
81 ch->sb = BRCMU_CHAN_SB_L;
82 ch->chnum -= CH_10MHZ_APART;
83 } else {
84 ch->sb = BRCMU_CHAN_SB_U;
85 ch->chnum += CH_10MHZ_APART;
86 }
87 break;
88 default:
89 WARN_ON_ONCE(1);
90 break;
91 }
92
93 switch (ch->chspec & BRCMU_CHSPEC_D11N_BND_MASK) {
94 case BRCMU_CHSPEC_D11N_BND_5G:
95 ch->band = BRCMU_CHAN_BAND_5G;
96 break;
97 case BRCMU_CHSPEC_D11N_BND_2G:
98 ch->band = BRCMU_CHAN_BAND_2G;
99 break;
100 default:
101 WARN_ON_ONCE(1);
102 break;
103 }
104}
105
106static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
107{
108 u16 val;
109
110 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
111
112 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
113 case BRCMU_CHSPEC_D11AC_BW_20:
114 ch->bw = BRCMU_CHAN_BW_20;
115 break;
116 case BRCMU_CHSPEC_D11AC_BW_40:
117 ch->bw = BRCMU_CHAN_BW_40;
118 val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK;
119 if (val == BRCMU_CHSPEC_D11AC_SB_L) {
120 ch->sb = BRCMU_CHAN_SB_L;
121 ch->chnum -= CH_10MHZ_APART;
122 } else if (val == BRCMU_CHSPEC_D11AC_SB_U) {
123 ch->sb = BRCMU_CHAN_SB_U;
124 ch->chnum += CH_10MHZ_APART;
125 } else {
126 WARN_ON_ONCE(1);
127 }
128 break;
129 case BRCMU_CHSPEC_D11AC_BW_80:
130 ch->bw = BRCMU_CHAN_BW_80;
131 break;
132 case BRCMU_CHSPEC_D11AC_BW_8080:
133 case BRCMU_CHSPEC_D11AC_BW_160:
134 default:
135 WARN_ON_ONCE(1);
136 break;
137 }
138
139 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BND_MASK) {
140 case BRCMU_CHSPEC_D11AC_BND_5G:
141 ch->band = BRCMU_CHAN_BAND_5G;
142 break;
143 case BRCMU_CHSPEC_D11AC_BND_2G:
144 ch->band = BRCMU_CHAN_BAND_2G;
145 break;
146 default:
147 WARN_ON_ONCE(1);
148 break;
149 }
150}
151
152void brcmu_d11_attach(struct brcmu_d11inf *d11inf)
153{
154 if (d11inf->io_type == BRCMU_D11N_IOTYPE) {
155 d11inf->encchspec = brcmu_d11n_encchspec;
156 d11inf->decchspec = brcmu_d11n_decchspec;
157 } else {
158 d11inf->encchspec = brcmu_d11ac_encchspec;
159 d11inf->decchspec = brcmu_d11ac_decchspec;
160 }
161}
162EXPORT_SYMBOL(brcmu_d11_attach);
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 3e6405e06ac0..0f7e1c7b6f58 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -45,17 +45,9 @@ void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
45{ 45{
46 if (!skb) 46 if (!skb)
47 return; 47 return;
48
48 WARN_ON(skb->next); 49 WARN_ON(skb->next);
49 if (skb->destructor) 50 dev_kfree_skb_any(skb);
50 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
51 * destructor exists
52 */
53 dev_kfree_skb_any(skb);
54 else
55 /* can free immediately (even in_irq()) if destructor
56 * does not exist
57 */
58 dev_kfree_skb(skb);
59} 51}
60EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); 52EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
61 53
@@ -116,6 +108,31 @@ struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
116} 108}
117EXPORT_SYMBOL(brcmu_pktq_pdeq); 109EXPORT_SYMBOL(brcmu_pktq_pdeq);
118 110
111/*
112 * precedence based dequeue with match function. Passing a NULL pointer
113 * for the match function parameter is considered to be a wildcard so
114 * any packet on the queue is returned. In that case it is no different
115 * from brcmu_pktq_pdeq() above.
116 */
117struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
118 bool (*match_fn)(struct sk_buff *skb,
119 void *arg), void *arg)
120{
121 struct sk_buff_head *q;
122 struct sk_buff *p, *next;
123
124 q = &pq->q[prec].skblist;
125 skb_queue_walk_safe(q, p, next) {
126 if (match_fn == NULL || match_fn(p, arg)) {
127 skb_unlink(p, q);
128 pq->len--;
129 return p;
130 }
131 }
132 return NULL;
133}
134EXPORT_SYMBOL(brcmu_pktq_pdeq_match);
135
119struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) 136struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
120{ 137{
121 struct sk_buff_head *q; 138 struct sk_buff_head *q;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index e8682855b73a..c1fe245bb07e 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -29,6 +29,7 @@
29 29
30/* Chipcommon Core Chip IDs */ 30/* Chipcommon Core Chip IDs */
31#define BCM4313_CHIP_ID 0x4313 31#define BCM4313_CHIP_ID 0x4313
32#define BCM43143_CHIP_ID 43143
32#define BCM43224_CHIP_ID 43224 33#define BCM43224_CHIP_ID 43224
33#define BCM43225_CHIP_ID 43225 34#define BCM43225_CHIP_ID 43225
34#define BCM43235_CHIP_ID 43235 35#define BCM43235_CHIP_ID 43235
@@ -39,5 +40,6 @@
39#define BCM4330_CHIP_ID 0x4330 40#define BCM4330_CHIP_ID 0x4330
40#define BCM4331_CHIP_ID 0x4331 41#define BCM4331_CHIP_ID 0x4331
41#define BCM4334_CHIP_ID 0x4334 42#define BCM4334_CHIP_ID 0x4334
43#define BCM4335_CHIP_ID 0x4335
42 44
43#endif /* _BRCM_HW_IDS_H_ */ 45#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
new file mode 100644
index 000000000000..92623f02b1c0
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -0,0 +1,145 @@
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _BRCMU_D11_H_
18#define _BRCMU_D11_H_
19
20/* d11 io type */
21#define BRCMU_D11N_IOTYPE 1
22#define BRCMU_D11AC_IOTYPE 2
23
24/* A chanspec (channel specification) holds the channel number, band,
25 * bandwidth and control sideband
26 */
27
28/* chanspec binary format */
29
30#define BRCMU_CHSPEC_INVALID 255
31/* bit 0~7 channel number
32 * for 80+80 channels: bit 0~3 low channel id, bit 4~7 high channel id
33 */
34#define BRCMU_CHSPEC_CH_MASK 0x00ff
35#define BRCMU_CHSPEC_CH_SHIFT 0
36#define BRCMU_CHSPEC_CHL_MASK 0x000f
37#define BRCMU_CHSPEC_CHL_SHIFT 0
38#define BRCMU_CHSPEC_CHH_MASK 0x00f0
39#define BRCMU_CHSPEC_CHH_SHIFT 4
40
41/* bit 8~16 for dot 11n IO types
42 * bit 8~9 sideband
43 * bit 10~11 bandwidth
44 * bit 12~13 spectral band
45 * bit 14~15 not used
46 */
47#define BRCMU_CHSPEC_D11N_SB_MASK 0x0300
48#define BRCMU_CHSPEC_D11N_SB_SHIFT 8
49#define BRCMU_CHSPEC_D11N_SB_L 0x0100 /* control lower */
50#define BRCMU_CHSPEC_D11N_SB_U 0x0200 /* control upper */
51#define BRCMU_CHSPEC_D11N_SB_N 0x0300 /* none */
52#define BRCMU_CHSPEC_D11N_BW_MASK 0x0c00
53#define BRCMU_CHSPEC_D11N_BW_SHIFT 10
54#define BRCMU_CHSPEC_D11N_BW_10 0x0400
55#define BRCMU_CHSPEC_D11N_BW_20 0x0800
56#define BRCMU_CHSPEC_D11N_BW_40 0x0c00
57#define BRCMU_CHSPEC_D11N_BND_MASK 0x3000
58#define BRCMU_CHSPEC_D11N_BND_SHIFT 12
59#define BRCMU_CHSPEC_D11N_BND_5G 0x1000
60#define BRCMU_CHSPEC_D11N_BND_2G 0x2000
61
62/* bit 8~16 for dot 11ac IO types
63 * bit 8~10 sideband
64 * bit 11~13 bandwidth
65 * bit 14~15 spectral band
66 */
67#define BRCMU_CHSPEC_D11AC_SB_MASK 0x0700
68#define BRCMU_CHSPEC_D11AC_SB_SHIFT 8
69#define BRCMU_CHSPEC_D11AC_SB_LLL 0x0000
70#define BRCMU_CHSPEC_D11AC_SB_LLU 0x0100
71#define BRCMU_CHSPEC_D11AC_SB_LUL 0x0200
72#define BRCMU_CHSPEC_D11AC_SB_LUU 0x0300
73#define BRCMU_CHSPEC_D11AC_SB_ULL 0x0400
74#define BRCMU_CHSPEC_D11AC_SB_ULU 0x0500
75#define BRCMU_CHSPEC_D11AC_SB_UUL 0x0600
76#define BRCMU_CHSPEC_D11AC_SB_UUU 0x0700
77#define BRCMU_CHSPEC_D11AC_SB_LL BRCMU_CHSPEC_D11AC_SB_LLL
78#define BRCMU_CHSPEC_D11AC_SB_LU BRCMU_CHSPEC_D11AC_SB_LLU
79#define BRCMU_CHSPEC_D11AC_SB_UL BRCMU_CHSPEC_D11AC_SB_LUL
80#define BRCMU_CHSPEC_D11AC_SB_UU BRCMU_CHSPEC_D11AC_SB_LUU
81#define BRCMU_CHSPEC_D11AC_SB_L BRCMU_CHSPEC_D11AC_SB_LLL
82#define BRCMU_CHSPEC_D11AC_SB_U BRCMU_CHSPEC_D11AC_SB_LLU
83#define BRCMU_CHSPEC_D11AC_BW_MASK 0x3800
84#define BRCMU_CHSPEC_D11AC_BW_SHIFT 11
85#define BRCMU_CHSPEC_D11AC_BW_5 0x0000
86#define BRCMU_CHSPEC_D11AC_BW_10 0x0800
87#define BRCMU_CHSPEC_D11AC_BW_20 0x1000
88#define BRCMU_CHSPEC_D11AC_BW_40 0x1800
89#define BRCMU_CHSPEC_D11AC_BW_80 0x2000
90#define BRCMU_CHSPEC_D11AC_BW_160 0x2800
91#define BRCMU_CHSPEC_D11AC_BW_8080 0x3000
92#define BRCMU_CHSPEC_D11AC_BND_MASK 0xc000
93#define BRCMU_CHSPEC_D11AC_BND_SHIFT 14
94#define BRCMU_CHSPEC_D11AC_BND_2G 0x0000
95#define BRCMU_CHSPEC_D11AC_BND_3G 0x4000
96#define BRCMU_CHSPEC_D11AC_BND_4G 0x8000
97#define BRCMU_CHSPEC_D11AC_BND_5G 0xc000
98
99#define BRCMU_CHAN_BAND_2G 0
100#define BRCMU_CHAN_BAND_5G 1
101
102enum brcmu_chan_bw {
103 BRCMU_CHAN_BW_20,
104 BRCMU_CHAN_BW_40,
105 BRCMU_CHAN_BW_80,
106 BRCMU_CHAN_BW_80P80,
107 BRCMU_CHAN_BW_160,
108};
109
110enum brcmu_chan_sb {
111 BRCMU_CHAN_SB_NONE = 0,
112 BRCMU_CHAN_SB_L,
113 BRCMU_CHAN_SB_U,
114 BRCMU_CHAN_SB_LL,
115 BRCMU_CHAN_SB_LU,
116 BRCMU_CHAN_SB_UL,
117 BRCMU_CHAN_SB_UU,
118 BRCMU_CHAN_SB_LLL,
119 BRCMU_CHAN_SB_LLU,
120 BRCMU_CHAN_SB_LUL,
121 BRCMU_CHAN_SB_LUU,
122 BRCMU_CHAN_SB_ULL,
123 BRCMU_CHAN_SB_ULU,
124 BRCMU_CHAN_SB_UUL,
125 BRCMU_CHAN_SB_UUU,
126};
127
128struct brcmu_chan {
129 u16 chspec;
130 u8 chnum;
131 u8 band;
132 enum brcmu_chan_bw bw;
133 enum brcmu_chan_sb sb;
134};
135
136struct brcmu_d11inf {
137 u8 io_type;
138
139 void (*encchspec)(struct brcmu_chan *ch);
140 void (*decchspec)(struct brcmu_chan *ch);
141};
142
143extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
144
145#endif /* _BRCMU_CHANNELS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 477b92ad3d62..898cacb8d01d 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -120,6 +120,10 @@ extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
120 struct sk_buff *p); 120 struct sk_buff *p);
121extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec); 121extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
122extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec); 122extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
123extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
124 bool (*match_fn)(struct sk_buff *p,
125 void *arg),
126 void *arg);
123 127
124/* packet primitives */ 128/* packet primitives */
125extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len); 129extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
@@ -173,6 +177,29 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
173/* ip address */ 177/* ip address */
174struct ipv4_addr; 178struct ipv4_addr;
175 179
180/*
181 * bitfield macros using masking and shift
182 *
183 * remark: the mask parameter should be a shifted mask.
184 */
185static inline void brcmu_maskset32(u32 *var, u32 mask, u8 shift, u32 value)
186{
187 value = (value << shift) & mask;
188 *var = (*var & ~mask) | value;
189}
190static inline u32 brcmu_maskget32(u32 var, u32 mask, u8 shift)
191{
192 return (var & mask) >> shift;
193}
194static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
195{
196 value = (value << shift) & mask;
197 *var = (*var & ~mask) | value;
198}
199static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
200{
201 return (var & mask) >> shift;
202}
176 203
177/* externs */ 204/* externs */
178/* format/print */ 205/* format/print */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index c11a290a1edf..0505cc065e0d 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -32,8 +32,9 @@
32#define CH_20MHZ_APART 4 32#define CH_20MHZ_APART 4
33#define CH_10MHZ_APART 2 33#define CH_10MHZ_APART 2
34#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ 34#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
35#define CH_MIN_2G_CHANNEL 1
35#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ 36#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
36#define BRCM_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL /* legacy define */ 37#define CH_MIN_5G_CHANNEL 34
37 38
38/* bandstate array indices */ 39/* bandstate array indices */
39#define BAND_2G_INDEX 0 /* wlc->bandstate[x] index */ 40#define BAND_2G_INDEX 0 /* wlc->bandstate[x] index */
@@ -60,6 +61,7 @@
60#define WL_CHANSPEC_BW_10 0x0400 61#define WL_CHANSPEC_BW_10 0x0400
61#define WL_CHANSPEC_BW_20 0x0800 62#define WL_CHANSPEC_BW_20 0x0800
62#define WL_CHANSPEC_BW_40 0x0C00 63#define WL_CHANSPEC_BW_40 0x0C00
64#define WL_CHANSPEC_BW_80 0x2000
63 65
64#define WL_CHANSPEC_BAND_MASK 0xf000 66#define WL_CHANSPEC_BAND_MASK 0xf000
65#define WL_CHANSPEC_BAND_SHIFT 12 67#define WL_CHANSPEC_BAND_SHIFT 12
@@ -67,6 +69,25 @@
67#define WL_CHANSPEC_BAND_2G 0x2000 69#define WL_CHANSPEC_BAND_2G 0x2000
68#define INVCHANSPEC 255 70#define INVCHANSPEC 255
69 71
72#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
73#define WL_CHAN_VALID_SW (1 << 1) /* valid with country sett. */
74#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
75#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
76#define WL_CHAN_INACTIVE (1 << 4) /* inactive due to radar */
77#define WL_CHAN_PASSIVE (1 << 5) /* channel in passive mode */
78#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
79
80/* values for band specific 40MHz capabilities */
81#define WLC_N_BW_20ALL 0
82#define WLC_N_BW_40ALL 1
83#define WLC_N_BW_20IN2G_40IN5G 2
84
85/* band types */
86#define WLC_BAND_AUTO 0 /* auto-select */
87#define WLC_BAND_5G 1 /* 5 Ghz */
88#define WLC_BAND_2G 2 /* 2.4 Ghz */
89#define WLC_BAND_ALL 3 /* all bands */
90
70#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK)) 91#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
71#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) 92#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
72 93
@@ -79,10 +100,11 @@
79#define CHSPEC_IS20(chspec) \ 100#define CHSPEC_IS20(chspec) \
80 (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) 101 (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
81 102
82#ifndef CHSPEC_IS40
83#define CHSPEC_IS40(chspec) \ 103#define CHSPEC_IS40(chspec) \
84 (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) 104 (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
85#endif 105
106#define CHSPEC_IS80(chspec) \
107 (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
86 108
87#define CHSPEC_IS5G(chspec) \ 109#define CHSPEC_IS5G(chspec) \
88 (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) 110 (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index f96834a7c055..d242333b7559 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -205,7 +205,7 @@ struct chipcregs {
205 u32 res_req_timer_sel; 205 u32 res_req_timer_sel;
206 u32 res_req_timer; 206 u32 res_req_timer;
207 u32 res_req_mask; 207 u32 res_req_mask;
208 u32 PAD; 208 u32 pmucapabilities_ext; /* 0x64c, pmurev >=15 */
209 u32 chipcontrol_addr; /* 0x650 */ 209 u32 chipcontrol_addr; /* 0x650 */
210 u32 chipcontrol_data; /* 0x654 */ 210 u32 chipcontrol_data; /* 0x654 */
211 u32 regcontrol_addr; 211 u32 regcontrol_addr;
@@ -214,7 +214,11 @@ struct chipcregs {
214 u32 pllcontrol_data; 214 u32 pllcontrol_data;
215 u32 pmustrapopt; /* 0x668, corerev >= 28 */ 215 u32 pmustrapopt; /* 0x668, corerev >= 28 */
216 u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ 216 u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
217 u32 PAD[100]; 217 u32 retention_ctl; /* 0x670, pmurev >= 15 */
218 u32 PAD[3];
219 u32 retention_grpidx; /* 0x680 */
220 u32 retention_grpctl; /* 0x684 */
221 u32 PAD[94];
218 u16 sromotp[768]; 222 u16 sromotp[768];
219}; 223};
220 224
@@ -276,6 +280,12 @@ struct chipcregs {
276#define PCAP5_VC_SHIFT 22 280#define PCAP5_VC_SHIFT 22
277#define PCAP5_CC_MASK 0xf8000000 281#define PCAP5_CC_MASK 0xf8000000
278#define PCAP5_CC_SHIFT 27 282#define PCAP5_CC_SHIFT 27
283/* pmucapabilites_ext PMU rev >= 15 */
284#define PCAPEXT_SR_SUPPORTED_MASK (1 << 1)
285/* retention_ctl PMU rev >= 15 */
286#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26)
287#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 27)
288
279 289
280/* 290/*
281* Maximum delay for the PMU state transition in us. 291* Maximum delay for the PMU state transition in us.
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index cb066f62879d..15920aaa5dd6 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -4167,17 +4167,11 @@ static ssize_t show_debug_level(struct device_driver *d, char *buf)
4167static ssize_t store_debug_level(struct device_driver *d, 4167static ssize_t store_debug_level(struct device_driver *d,
4168 const char *buf, size_t count) 4168 const char *buf, size_t count)
4169{ 4169{
4170 char *p = (char *)buf;
4171 u32 val; 4170 u32 val;
4171 int ret;
4172 4172
4173 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 4173 ret = kstrtou32(buf, 0, &val);
4174 p++; 4174 if (ret)
4175 if (p[0] == 'x' || p[0] == 'X')
4176 p++;
4177 val = simple_strtoul(p, &p, 16);
4178 } else
4179 val = simple_strtoul(p, &p, 10);
4180 if (p == buf)
4181 IPW_DEBUG_INFO(": %s is not in hex or decimal form.\n", buf); 4175 IPW_DEBUG_INFO(": %s is not in hex or decimal form.\n", buf);
4182 else 4176 else
4183 ipw2100_debug_level = val; 4177 ipw2100_debug_level = val;
@@ -4238,27 +4232,15 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
4238{ 4232{
4239 struct ipw2100_priv *priv = dev_get_drvdata(d); 4233 struct ipw2100_priv *priv = dev_get_drvdata(d);
4240 struct net_device *dev = priv->net_dev; 4234 struct net_device *dev = priv->net_dev;
4241 char buffer[] = "00000000";
4242 unsigned long len =
4243 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
4244 unsigned long val; 4235 unsigned long val;
4245 char *p = buffer; 4236 int ret;
4246 4237
4247 (void)dev; /* kill unused-var warning for debug-only code */ 4238 (void)dev; /* kill unused-var warning for debug-only code */
4248 4239
4249 IPW_DEBUG_INFO("enter\n"); 4240 IPW_DEBUG_INFO("enter\n");
4250 4241
4251 strncpy(buffer, buf, len); 4242 ret = kstrtoul(buf, 0, &val);
4252 buffer[len] = 0; 4243 if (ret) {
4253
4254 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
4255 p++;
4256 if (p[0] == 'x' || p[0] == 'X')
4257 p++;
4258 val = simple_strtoul(p, &p, 16);
4259 } else
4260 val = simple_strtoul(p, &p, 10);
4261 if (p == buffer) {
4262 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); 4244 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
4263 } else { 4245 } else {
4264 priv->ieee->scan_age = val; 4246 priv->ieee->scan_age = val;
@@ -4266,7 +4248,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
4266 } 4248 }
4267 4249
4268 IPW_DEBUG_INFO("exit\n"); 4250 IPW_DEBUG_INFO("exit\n");
4269 return len; 4251 return strnlen(buf, count);
4270} 4252}
4271 4253
4272static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); 4254static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index c353b5f19c8c..b37a582ccbe7 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3477,7 +3477,7 @@ static struct attribute_group il3945_attribute_group = {
3477 .attrs = il3945_sysfs_entries, 3477 .attrs = il3945_sysfs_entries,
3478}; 3478};
3479 3479
3480struct ieee80211_ops il3945_mac_ops = { 3480static struct ieee80211_ops il3945_mac_ops __read_mostly = {
3481 .tx = il3945_mac_tx, 3481 .tx = il3945_mac_tx,
3482 .start = il3945_mac_start, 3482 .start = il3945_mac_start,
3483 .stop = il3945_mac_stop, 3483 .stop = il3945_mac_stop,
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index d4fd29ad90dc..c9f197d9ca1e 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -347,7 +347,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
347 347
348 psta = (struct il3945_sta_priv *)sta->drv_priv; 348 psta = (struct il3945_sta_priv *)sta->drv_priv;
349 rs_sta = &psta->rs_sta; 349 rs_sta = &psta->rs_sta;
350 sband = hw->wiphy->bands[conf->channel->band]; 350 sband = hw->wiphy->bands[conf->chandef.chan->band];
351 351
352 rs_sta->il = il; 352 rs_sta->il = il;
353 353
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index e0b9d7fa5de0..dc1e6da9976a 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -2379,10 +2379,8 @@ il3945_hw_set_hw_params(struct il_priv *il)
2379 il->_3945.shared_virt = 2379 il->_3945.shared_virt =
2380 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), 2380 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2381 &il->_3945.shared_phys, GFP_KERNEL); 2381 &il->_3945.shared_phys, GFP_KERNEL);
2382 if (!il->_3945.shared_virt) { 2382 if (!il->_3945.shared_virt)
2383 IL_ERR("failed to allocate pci memory\n");
2384 return -ENOMEM; 2383 return -ENOMEM;
2385 }
2386 2384
2387 il->hw_params.bcast_id = IL3945_BROADCAST_ID; 2385 il->hw_params.bcast_id = IL3945_BROADCAST_ID;
2388 2386
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 1d45075e0d5b..9a8703def0ba 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -150,10 +150,6 @@ struct il3945_frame {
150 struct list_head list; 150 struct list_head list;
151}; 151};
152 152
153#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
154#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
155#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
156
157#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 153#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
158#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 154#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
159#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 155#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7941eb3a0166..b8f82e688c72 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -612,7 +612,7 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
612 612
613/* Called for N_RX (legacy ABG frames), or 613/* Called for N_RX (legacy ABG frames), or
614 * N_RX_MPDU (HT high-throughput N frames). */ 614 * N_RX_MPDU (HT high-throughput N frames). */
615void 615static void
616il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) 616il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
617{ 617{
618 struct ieee80211_hdr *header; 618 struct ieee80211_hdr *header;
@@ -744,7 +744,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
744 744
745/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY). 745/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
746 * This will be used later in il_hdl_rx() for N_RX_MPDU. */ 746 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
747void 747static void
748il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) 748il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
749{ 749{
750 struct il_rx_pkt *pkt = rxb_addr(rxb); 750 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1250,7 +1250,7 @@ il4965_dump_fh(struct il_priv *il, char **buf, bool display)
1250 return 0; 1250 return 0;
1251} 1251}
1252 1252
1253void 1253static void
1254il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb) 1254il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1255{ 1255{
1256 struct il_rx_pkt *pkt = rxb_addr(rxb); 1256 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1357,7 +1357,7 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
1357} 1357}
1358#endif 1358#endif
1359 1359
1360void 1360static void
1361il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) 1361il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1362{ 1362{
1363 const int recalib_seconds = 60; 1363 const int recalib_seconds = 60;
@@ -1399,7 +1399,7 @@ il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1399 il4965_temperature_calib(il); 1399 il4965_temperature_calib(il);
1400} 1400}
1401 1401
1402void 1402static void
1403il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) 1403il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1404{ 1404{
1405 struct il_rx_pkt *pkt = rxb_addr(rxb); 1405 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1921,8 +1921,8 @@ drop_unlock:
1921static inline int 1921static inline int
1922il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) 1922il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1923{ 1923{
1924 ptr->addr = 1924 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1925 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); 1925 GFP_KERNEL);
1926 if (!ptr->addr) 1926 if (!ptr->addr)
1927 return -ENOMEM; 1927 return -ENOMEM;
1928 ptr->size = size; 1928 ptr->size = size;
@@ -2050,7 +2050,7 @@ il4965_txq_ctx_reset(struct il_priv *il)
2050 il_tx_queue_reset(il, txq_id); 2050 il_tx_queue_reset(il, txq_id);
2051} 2051}
2052 2052
2053void 2053static void
2054il4965_txq_ctx_unmap(struct il_priv *il) 2054il4965_txq_ctx_unmap(struct il_priv *il)
2055{ 2055{
2056 int txq_id; 2056 int txq_id;
@@ -2258,7 +2258,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
2258 2258
2259 spin_lock_irqsave(&il->sta_lock, flags); 2259 spin_lock_irqsave(&il->sta_lock, flags);
2260 tid_data = &il->stations[sta_id].tid[tid]; 2260 tid_data = &il->stations[sta_id].tid[tid];
2261 *ssn = SEQ_TO_SN(tid_data->seq_number); 2261 *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2262 tid_data->agg.txq_id = txq_id; 2262 tid_data->agg.txq_id = txq_id;
2263 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); 2263 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
2264 spin_unlock_irqrestore(&il->sta_lock, flags); 2264 spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -2408,7 +2408,7 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
2408 /* aggregated HW queue */ 2408 /* aggregated HW queue */
2409 if (txq_id == tid_data->agg.txq_id && 2409 if (txq_id == tid_data->agg.txq_id &&
2410 q->read_ptr == q->write_ptr) { 2410 q->read_ptr == q->write_ptr) {
2411 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 2411 u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2412 int tx_fifo = il4965_get_fifo_from_tid(tid); 2412 int tx_fifo = il4965_get_fifo_from_tid(tid);
2413 D_HT("HW queue empty: continue DELBA flow\n"); 2413 D_HT("HW queue empty: continue DELBA flow\n");
2414 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); 2414 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
@@ -2627,7 +2627,8 @@ il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2627static inline u32 2627static inline u32
2628il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp) 2628il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
2629{ 2629{
2630 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; 2630 return le32_to_cpup(&tx_resp->u.status +
2631 tx_resp->frame_count) & IEEE80211_MAX_SN;
2631} 2632}
2632 2633
2633static inline u32 2634static inline u32
@@ -2717,15 +2718,15 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
2717 hdr = (struct ieee80211_hdr *) skb->data; 2718 hdr = (struct ieee80211_hdr *) skb->data;
2718 2719
2719 sc = le16_to_cpu(hdr->seq_ctrl); 2720 sc = le16_to_cpu(hdr->seq_ctrl);
2720 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 2721 if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
2721 IL_ERR("BUG_ON idx doesn't match seq control" 2722 IL_ERR("BUG_ON idx doesn't match seq control"
2722 " idx=%d, seq_idx=%d, seq=%d\n", idx, 2723 " idx=%d, seq_idx=%d, seq=%d\n", idx,
2723 SEQ_TO_SN(sc), hdr->seq_ctrl); 2724 IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
2724 return -1; 2725 return -1;
2725 } 2726 }
2726 2727
2727 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx, 2728 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
2728 SEQ_TO_SN(sc)); 2729 IEEE80211_SEQ_TO_SN(sc));
2729 2730
2730 sh = idx - start; 2731 sh = idx - start;
2731 if (sh > 64) { 2732 if (sh > 64) {
@@ -2895,7 +2896,7 @@ il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2895 * Handles block-acknowledge notification from device, which reports success 2896 * Handles block-acknowledge notification from device, which reports success
2896 * of frames sent via aggregation. 2897 * of frames sent via aggregation.
2897 */ 2898 */
2898void 2899static void
2899il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb) 2900il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2900{ 2901{
2901 struct il_rx_pkt *pkt = rxb_addr(rxb); 2902 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -6056,7 +6057,7 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
6056 struct il_priv *il = hw->priv; 6057 struct il_priv *il = hw->priv;
6057 const struct il_channel_info *ch_info; 6058 const struct il_channel_info *ch_info;
6058 struct ieee80211_conf *conf = &hw->conf; 6059 struct ieee80211_conf *conf = &hw->conf;
6059 struct ieee80211_channel *channel = ch_switch->channel; 6060 struct ieee80211_channel *channel = ch_switch->chandef.chan;
6060 struct il_ht_config *ht_conf = &il->current_ht_config; 6061 struct il_ht_config *ht_conf = &il->current_ht_config;
6061 u16 ch; 6062 u16 ch;
6062 6063
@@ -6093,23 +6094,21 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw,
6093 il->current_ht_config.smps = conf->smps_mode; 6094 il->current_ht_config.smps = conf->smps_mode;
6094 6095
6095 /* Configure HT40 channels */ 6096 /* Configure HT40 channels */
6096 il->ht.enabled = conf_is_ht(conf); 6097 switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
6097 if (il->ht.enabled) { 6098 case NL80211_CHAN_NO_HT:
6098 if (conf_is_ht40_minus(conf)) { 6099 case NL80211_CHAN_HT20:
6099 il->ht.extension_chan_offset =
6100 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
6101 il->ht.is_40mhz = true;
6102 } else if (conf_is_ht40_plus(conf)) {
6103 il->ht.extension_chan_offset =
6104 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
6105 il->ht.is_40mhz = true;
6106 } else {
6107 il->ht.extension_chan_offset =
6108 IEEE80211_HT_PARAM_CHA_SEC_NONE;
6109 il->ht.is_40mhz = false;
6110 }
6111 } else
6112 il->ht.is_40mhz = false; 6100 il->ht.is_40mhz = false;
6101 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
6102 break;
6103 case NL80211_CHAN_HT40MINUS:
6104 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
6105 il->ht.is_40mhz = true;
6106 break;
6107 case NL80211_CHAN_HT40PLUS:
6108 il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
6109 il->ht.is_40mhz = true;
6110 break;
6111 }
6113 6112
6114 if ((le16_to_cpu(il->staging.channel) != ch)) 6113 if ((le16_to_cpu(il->staging.channel) != ch))
6115 il->staging.flags = 0; 6114 il->staging.flags = 0;
@@ -6316,7 +6315,7 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6316 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 6315 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6317} 6316}
6318 6317
6319const struct ieee80211_ops il4965_mac_ops = { 6318static const struct ieee80211_ops il4965_mac_ops = {
6320 .tx = il4965_mac_tx, 6319 .tx = il4965_mac_tx,
6321 .start = il4965_mac_start, 6320 .start = il4965_mac_start,
6322 .stop = il4965_mac_stop, 6321 .stop = il4965_mac_stop,
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 6c7493c2d698..1fc0b227e120 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2300,7 +2300,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2300 2300
2301 sta_priv = (struct il_station_priv *)sta->drv_priv; 2301 sta_priv = (struct il_station_priv *)sta->drv_priv;
2302 lq_sta = &sta_priv->lq_sta; 2302 lq_sta = &sta_priv->lq_sta;
2303 sband = hw->wiphy->bands[conf->channel->band]; 2303 sband = hw->wiphy->bands[conf->chandef.chan->band];
2304 2304
2305 lq_sta->lq.sta_id = sta_id; 2305 lq_sta->lq.sta_id = sta_id;
2306 2306
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 91eb2d07fdb8..777a578294bd 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -1493,7 +1493,7 @@ il4965_hw_channel_switch(struct il_priv *il,
1493 1493
1494 cmd.band = band; 1494 cmd.band = band;
1495 cmd.expect_beacon = 0; 1495 cmd.expect_beacon = 0;
1496 ch = ch_switch->channel->hw_value; 1496 ch = ch_switch->chandef.chan->hw_value;
1497 cmd.channel = cpu_to_le16(ch); 1497 cmd.channel = cpu_to_le16(ch);
1498 cmd.rxon_flags = il->staging.flags; 1498 cmd.rxon_flags = il->staging.flags;
1499 cmd.rxon_filter_flags = il->staging.filter_flags; 1499 cmd.rxon_filter_flags = il->staging.filter_flags;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e006ea831320..592d0aa634a8 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1122,7 +1122,7 @@ il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1122 sizeof(struct il_powertable_cmd), cmd); 1122 sizeof(struct il_powertable_cmd), cmd);
1123} 1123}
1124 1124
1125int 1125static int
1126il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) 1126il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
1127{ 1127{
1128 int ret; 1128 int ret;
@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il)
2566 INIT_LIST_HEAD(&rxq->rx_used); 2566 INIT_LIST_HEAD(&rxq->rx_used);
2567 2567
2568 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2568 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2569 rxq->bd = 2569 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2570 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2570 GFP_KERNEL);
2571 GFP_KERNEL);
2572 if (!rxq->bd) 2571 if (!rxq->bd)
2573 goto err_bd; 2572 goto err_bd;
2574 2573
2575 rxq->rb_stts = 2574 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2576 dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2575 &rxq->rb_stts_dma, GFP_KERNEL);
2577 &rxq->rb_stts_dma, GFP_KERNEL);
2578 if (!rxq->rb_stts) 2576 if (!rxq->rb_stts)
2579 goto err_rb; 2577 goto err_rb;
2580 2578
@@ -2941,10 +2939,9 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2941 * shared with device */ 2939 * shared with device */
2942 txq->tfds = 2940 txq->tfds =
2943 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 2941 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2944 if (!txq->tfds) { 2942 if (!txq->tfds)
2945 IL_ERR("Fail to alloc TFDs\n");
2946 goto error; 2943 goto error;
2947 } 2944
2948 txq->q.id = id; 2945 txq->q.id = id;
2949 2946
2950 return 0; 2947 return 0;
@@ -4704,8 +4701,7 @@ out:
4704} 4701}
4705EXPORT_SYMBOL(il_mac_change_interface); 4702EXPORT_SYMBOL(il_mac_change_interface);
4706 4703
4707void 4704void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
4708il_mac_flush(struct ieee80211_hw *hw, bool drop)
4709{ 4705{
4710 struct il_priv *il = hw->priv; 4706 struct il_priv *il = hw->priv;
4711 unsigned long timeout = jiffies + msecs_to_jiffies(500); 4707 unsigned long timeout = jiffies + msecs_to_jiffies(500);
@@ -4891,7 +4887,7 @@ il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4891} 4887}
4892EXPORT_SYMBOL(il_add_beacon_time); 4888EXPORT_SYMBOL(il_add_beacon_time);
4893 4889
4894#ifdef CONFIG_PM 4890#ifdef CONFIG_PM_SLEEP
4895 4891
4896static int 4892static int
4897il_pci_suspend(struct device *device) 4893il_pci_suspend(struct device *device)
@@ -4942,7 +4938,7 @@ il_pci_resume(struct device *device)
4942SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume); 4938SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4943EXPORT_SYMBOL(il_pm_ops); 4939EXPORT_SYMBOL(il_pm_ops);
4944 4940
4945#endif /* CONFIG_PM */ 4941#endif /* CONFIG_PM_SLEEP */
4946 4942
4947static void 4943static void
4948il_update_qos(struct il_priv *il) 4944il_update_qos(struct il_priv *il)
@@ -4975,7 +4971,7 @@ il_mac_config(struct ieee80211_hw *hw, u32 changed)
4975 struct il_priv *il = hw->priv; 4971 struct il_priv *il = hw->priv;
4976 const struct il_channel_info *ch_info; 4972 const struct il_channel_info *ch_info;
4977 struct ieee80211_conf *conf = &hw->conf; 4973 struct ieee80211_conf *conf = &hw->conf;
4978 struct ieee80211_channel *channel = conf->channel; 4974 struct ieee80211_channel *channel = conf->chandef.chan;
4979 struct il_ht_config *ht_conf = &il->current_ht_config; 4975 struct il_ht_config *ht_conf = &il->current_ht_config;
4980 unsigned long flags = 0; 4976 unsigned long flags = 0;
4981 int ret = 0; 4977 int ret = 0;
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 96f2025d936e..f8246f2d88f9 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -541,10 +541,6 @@ struct il_frame {
541 struct list_head list; 541 struct list_head list;
542}; 542};
543 543
544#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
545#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
546#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
547
548enum { 544enum {
549 CMD_SYNC = 0, 545 CMD_SYNC = 0,
550 CMD_SIZE_NORMAL = 0, 546 CMD_SIZE_NORMAL = 0,
@@ -1724,7 +1720,7 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
1724 struct ieee80211_vif *vif); 1720 struct ieee80211_vif *vif);
1725int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1721int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1726 enum nl80211_iftype newtype, bool newp2p); 1722 enum nl80211_iftype newtype, bool newp2p);
1727void il_mac_flush(struct ieee80211_hw *hw, bool drop); 1723void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
1728int il_alloc_txq_mem(struct il_priv *il); 1724int il_alloc_txq_mem(struct il_priv *il);
1729void il_free_txq_mem(struct il_priv *il); 1725void il_free_txq_mem(struct il_priv *il);
1730 1726
@@ -2235,9 +2231,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2235 return -EINVAL; 2231 return -EINVAL;
2236 } 2232 }
2237 2233
2238 desc->v_addr = 2234 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
2239 dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, 2235 &desc->p_addr, GFP_KERNEL);
2240 GFP_KERNEL);
2241 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 2236 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2242} 2237}
2243 2238
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ba319cba3f1e..56c2040a955b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,7 +6,6 @@ config IWLWIFI
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 select IWLDVM
10 ---help--- 9 ---help---
11 Select to build the driver supporting the: 10 Select to build the driver supporting the:
12 11
@@ -45,6 +44,7 @@ config IWLWIFI
45config IWLDVM 44config IWLDVM
46 tristate "Intel Wireless WiFi DVM Firmware support" 45 tristate "Intel Wireless WiFi DVM Firmware support"
47 depends on IWLWIFI 46 depends on IWLWIFI
47 default IWLWIFI
48 help 48 help
49 This is the driver supporting the DVM firmware which is 49 This is the driver supporting the DVM firmware which is
50 currently the only firmware available for existing devices. 50 currently the only firmware available for existing devices.
@@ -58,6 +58,15 @@ config IWLMVM
58 58
59 Say yes if you have such a device. 59 Say yes if you have such a device.
60 60
61# don't call it _MODULE -- will confuse Kconfig/fixdep/...
62config IWLWIFI_OPMODE_MODULAR
63 bool
64 default y if IWLDVM=m
65 default y if IWLMVM=m
66
67comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
68 depends on IWLWIFI && IWLDVM=n && IWLMVM=n
69
61menu "Debugging Options" 70menu "Debugging Options"
62 depends on IWLWIFI 71 depends on IWLWIFI
63 72
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 6c7800044a04..3b5613ea458b 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -7,8 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o 8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o 9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o 10iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
11iwlwifi-objs += pcie/7000.o
12 11
13iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
14iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o 13iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 41ec27cb6efe..48545ab00311 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -73,6 +73,8 @@
73/* AUX (TX during scan dwell) queue */ 73/* AUX (TX during scan dwell) queue */
74#define IWL_AUX_QUEUE 10 74#define IWL_AUX_QUEUE 10
75 75
76#define IWL_INVALID_STATION 255
77
76/* device operations */ 78/* device operations */
77extern struct iwl_lib_ops iwl1000_lib; 79extern struct iwl_lib_ops iwl1000_lib;
78extern struct iwl_lib_ops iwl2000_lib; 80extern struct iwl_lib_ops iwl2000_lib;
@@ -170,13 +172,13 @@ int iwl_calib_set(struct iwl_priv *priv,
170 const struct iwl_calib_hdr *cmd, int len); 172 const struct iwl_calib_hdr *cmd, int len);
171void iwl_calib_free_results(struct iwl_priv *priv); 173void iwl_calib_free_results(struct iwl_priv *priv);
172int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 174int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
173 char **buf, bool display); 175 char **buf);
174int iwlagn_hw_valid_rtc_data_addr(u32 addr); 176int iwlagn_hw_valid_rtc_data_addr(u32 addr);
175 177
176/* lib */ 178/* lib */
177int iwlagn_send_tx_power(struct iwl_priv *priv); 179int iwlagn_send_tx_power(struct iwl_priv *priv);
178void iwlagn_temperature(struct iwl_priv *priv); 180void iwlagn_temperature(struct iwl_priv *priv);
179int iwlagn_txfifo_flush(struct iwl_priv *priv); 181int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk);
180void iwlagn_dev_txfifo_flush(struct iwl_priv *priv); 182void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
181int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 183int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
182int iwl_send_statistics_request(struct iwl_priv *priv, 184int iwl_send_statistics_request(struct iwl_priv *priv,
@@ -210,6 +212,8 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
210 struct ieee80211_sta *sta, u16 tid, u8 buf_size); 212 struct ieee80211_sta *sta, u16 tid, u8 buf_size);
211int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 213int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
212 struct ieee80211_sta *sta, u16 tid); 214 struct ieee80211_sta *sta, u16 tid);
215int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
216 struct ieee80211_sta *sta, u16 tid);
213int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, 217int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
214 struct iwl_rx_cmd_buffer *rxb, 218 struct iwl_rx_cmd_buffer *rxb,
215 struct iwl_device_cmd *cmd); 219 struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 6468de8634b0..d6c4cf2ad7c5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 65e920cab2b7..cfddde194940 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 84e2c0fcfef6..95ca026ecc9d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -1526,6 +1526,7 @@ struct iwl_compressed_ba_resp {
1526 __le16 scd_ssn; 1526 __le16 scd_ssn;
1527 u8 txed; /* number of frames sent */ 1527 u8 txed; /* number of frames sent */
1528 u8 txed_2_done; /* number of frames acked */ 1528 u8 txed_2_done; /* number of frames acked */
1529 __le16 reserved1;
1529} __packed; 1530} __packed;
1530 1531
1531/* 1532/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 20806cae11b7..d5329489245a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -2237,15 +2237,13 @@ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2237 size_t count, loff_t *ppos) 2237 size_t count, loff_t *ppos)
2238{ 2238{
2239 struct iwl_priv *priv = file->private_data; 2239 struct iwl_priv *priv = file->private_data;
2240 char *buf; 2240 char *buf = NULL;
2241 int pos = 0; 2241 ssize_t ret;
2242 ssize_t ret = -ENOMEM;
2243 2242
2244 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true); 2243 ret = iwl_dump_nic_event_log(priv, true, &buf);
2245 if (buf) { 2244 if (ret > 0)
2246 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2245 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2247 kfree(buf); 2246 kfree(buf);
2248 }
2249 return ret; 2247 return ret;
2250} 2248}
2251 2249
@@ -2269,7 +2267,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2269 if (sscanf(buf, "%d", &event_log_flag) != 1) 2267 if (sscanf(buf, "%d", &event_log_flag) != 1)
2270 return -EFAULT; 2268 return -EFAULT;
2271 if (event_log_flag == 1) 2269 if (event_log_flag == 1)
2272 iwl_dump_nic_event_log(priv, true, NULL, false); 2270 iwl_dump_nic_event_log(priv, true, NULL);
2273 2271
2274 return count; 2272 return count;
2275} 2273}
@@ -2324,6 +2322,28 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2324 return count; 2322 return count;
2325} 2323}
2326 2324
2325static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2326 const char __user *user_buf,
2327 size_t count, loff_t *ppos)
2328{
2329 struct iwl_priv *priv = file->private_data;
2330 bool restart_fw = iwlwifi_mod_params.restart_fw;
2331 int ret;
2332
2333 iwlwifi_mod_params.restart_fw = true;
2334
2335 mutex_lock(&priv->mutex);
2336
2337 /* take the return value to make compiler happy - it will fail anyway */
2338 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
2339
2340 mutex_unlock(&priv->mutex);
2341
2342 iwlwifi_mod_params.restart_fw = restart_fw;
2343
2344 return count;
2345}
2346
2327DEBUGFS_READ_FILE_OPS(ucode_rx_stats); 2347DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2328DEBUGFS_READ_FILE_OPS(ucode_tx_stats); 2348DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2329DEBUGFS_READ_FILE_OPS(ucode_general_stats); 2349DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2343,6 +2363,7 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
2343DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 2363DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2344DEBUGFS_READ_FILE_OPS(reply_tx_error); 2364DEBUGFS_READ_FILE_OPS(reply_tx_error);
2345DEBUGFS_WRITE_FILE_OPS(echo_test); 2365DEBUGFS_WRITE_FILE_OPS(echo_test);
2366DEBUGFS_WRITE_FILE_OPS(fw_restart);
2346#ifdef CONFIG_IWLWIFI_DEBUG 2367#ifdef CONFIG_IWLWIFI_DEBUG
2347DEBUGFS_READ_WRITE_FILE_OPS(log_event); 2368DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2348#endif 2369#endif
@@ -2400,6 +2421,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
2400 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2421 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2401 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2422 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2402 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); 2423 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2424 DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR);
2403#ifdef CONFIG_IWLWIFI_DEBUG 2425#ifdef CONFIG_IWLWIFI_DEBUG
2404 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); 2426 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2405#endif 2427#endif
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 15cca2ef9294..c48907c8ab43 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -379,7 +379,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
379 }; 379 };
380 380
381 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 381 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
382 ch = ch_switch->channel->hw_value; 382 ch = ch_switch->chandef.chan->hw_value;
383 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", 383 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
384 ctx->active.channel, ch); 384 ctx->active.channel, ch);
385 cmd.channel = cpu_to_le16(ch); 385 cmd.channel = cpu_to_le16(ch);
@@ -414,7 +414,8 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
414 } 414 }
415 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 415 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
416 cmd.switch_time); 416 cmd.switch_time);
417 cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; 417 cmd.expect_beacon =
418 ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;
418 419
419 return iwl_dvm_send_cmd(priv, &hcmd); 420 return iwl_dvm_send_cmd(priv, &hcmd);
420} 421}
@@ -540,7 +541,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
540 hcmd.data[0] = cmd; 541 hcmd.data[0] = cmd;
541 542
542 cmd->band = priv->band == IEEE80211_BAND_2GHZ; 543 cmd->band = priv->band == IEEE80211_BAND_2GHZ;
543 ch = ch_switch->channel->hw_value; 544 ch = ch_switch->chandef.chan->hw_value;
544 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", 545 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
545 ctx->active.channel, ch); 546 ctx->active.channel, ch);
546 cmd->channel = cpu_to_le16(ch); 547 cmd->channel = cpu_to_le16(ch);
@@ -575,7 +576,8 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
575 } 576 }
576 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", 577 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
577 cmd->switch_time); 578 cmd->switch_time);
578 cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR; 579 cmd->expect_beacon =
580 ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;
579 581
580 err = iwl_dvm_send_cmd(priv, &hcmd); 582 err = iwl_dvm_send_cmd(priv, &hcmd);
581 kfree(cmd); 583 kfree(cmd);
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 44ca0e57f9f7..54f553380aa8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -136,7 +136,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
136 * 1. acquire mutex before calling 136 * 1. acquire mutex before calling
137 * 2. make sure rf is on and not in exit state 137 * 2. make sure rf is on and not in exit state
138 */ 138 */
139int iwlagn_txfifo_flush(struct iwl_priv *priv) 139int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
140{ 140{
141 struct iwl_txfifo_flush_cmd flush_cmd; 141 struct iwl_txfifo_flush_cmd flush_cmd;
142 struct iwl_host_cmd cmd = { 142 struct iwl_host_cmd cmd = {
@@ -162,6 +162,9 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv)
162 if (priv->nvm_data->sku_cap_11n_enable) 162 if (priv->nvm_data->sku_cap_11n_enable)
163 flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK; 163 flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
164 164
165 if (scd_q_msk)
166 flush_cmd.queue_control = cpu_to_le32(scd_q_msk);
167
165 IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", 168 IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
166 flush_cmd.queue_control); 169 flush_cmd.queue_control);
167 flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL); 170 flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
@@ -173,7 +176,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
173{ 176{
174 mutex_lock(&priv->mutex); 177 mutex_lock(&priv->mutex);
175 ieee80211_stop_queues(priv->hw); 178 ieee80211_stop_queues(priv->hw);
176 if (iwlagn_txfifo_flush(priv)) { 179 if (iwlagn_txfifo_flush(priv, 0)) {
177 IWL_ERR(priv, "flush request fail\n"); 180 IWL_ERR(priv, "flush request fail\n");
178 goto done; 181 goto done;
179 } 182 }
@@ -1084,7 +1087,14 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1084 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1087 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1085 struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; 1088 struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
1086 struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; 1089 struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
1087 struct iwlagn_d3_config_cmd d3_cfg_cmd = {}; 1090 struct iwlagn_d3_config_cmd d3_cfg_cmd = {
1091 /*
1092 * Program the minimum sleep time to 10 seconds, as many
1093 * platforms have issues processing a wakeup signal while
1094 * still being in the process of suspending.
1095 */
1096 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
1097 };
1088 struct wowlan_key_data key_data = { 1098 struct wowlan_key_data key_data = {
1089 .ctx = ctx, 1099 .ctx = ctx,
1090 .bssid = ctx->active.bssid_addr, 1100 .bssid = ctx->active.bssid_addr,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 323e4a33fcac..cab23af0be9e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -777,9 +777,12 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
777 IWL_DEBUG_HT(priv, "start Tx\n"); 777 IWL_DEBUG_HT(priv, "start Tx\n");
778 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 778 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
779 break; 779 break;
780 case IEEE80211_AMPDU_TX_STOP_CONT:
781 case IEEE80211_AMPDU_TX_STOP_FLUSH: 780 case IEEE80211_AMPDU_TX_STOP_FLUSH:
782 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 781 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
782 IWL_DEBUG_HT(priv, "Flush Tx\n");
783 ret = iwlagn_tx_agg_flush(priv, vif, sta, tid);
784 break;
785 case IEEE80211_AMPDU_TX_STOP_CONT:
783 IWL_DEBUG_HT(priv, "stop Tx\n"); 786 IWL_DEBUG_HT(priv, "stop Tx\n");
784 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); 787 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
785 if ((ret == 0) && (priv->agg_tids_count > 0)) { 788 if ((ret == 0) && (priv->agg_tids_count > 0)) {
@@ -967,7 +970,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
967{ 970{
968 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 971 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
969 struct ieee80211_conf *conf = &hw->conf; 972 struct ieee80211_conf *conf = &hw->conf;
970 struct ieee80211_channel *channel = ch_switch->channel; 973 struct ieee80211_channel *channel = ch_switch->chandef.chan;
971 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 974 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
972 /* 975 /*
973 * MULTI-FIXME 976 * MULTI-FIXME
@@ -1005,11 +1008,21 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
1005 priv->current_ht_config.smps = conf->smps_mode; 1008 priv->current_ht_config.smps = conf->smps_mode;
1006 1009
1007 /* Configure HT40 channels */ 1010 /* Configure HT40 channels */
1008 ctx->ht.enabled = conf_is_ht(conf); 1011 switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
1009 if (ctx->ht.enabled) 1012 case NL80211_CHAN_NO_HT:
1010 iwlagn_config_ht40(conf, ctx); 1013 case NL80211_CHAN_HT20:
1011 else
1012 ctx->ht.is_40mhz = false; 1014 ctx->ht.is_40mhz = false;
1015 ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1016 break;
1017 case NL80211_CHAN_HT40MINUS:
1018 ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1019 ctx->ht.is_40mhz = true;
1020 break;
1021 case NL80211_CHAN_HT40PLUS:
1022 ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1023 ctx->ht.is_40mhz = true;
1024 break;
1025 }
1013 1026
1014 if ((le16_to_cpu(ctx->staging.channel) != ch)) 1027 if ((le16_to_cpu(ctx->staging.channel) != ch))
1015 ctx->staging.flags = 0; 1028 ctx->staging.flags = 0;
@@ -1100,7 +1113,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
1100 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 1113 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1101} 1114}
1102 1115
1103static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 1116static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1104{ 1117{
1105 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1118 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1106 1119
@@ -1122,7 +1135,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
1122 */ 1135 */
1123 if (drop) { 1136 if (drop) {
1124 IWL_DEBUG_MAC80211(priv, "send flush command\n"); 1137 IWL_DEBUG_MAC80211(priv, "send flush command\n");
1125 if (iwlagn_txfifo_flush(priv)) { 1138 if (iwlagn_txfifo_flush(priv, 0)) {
1126 IWL_ERR(priv, "flush request fail\n"); 1139 IWL_ERR(priv, "flush request fail\n");
1127 goto done; 1140 goto done;
1128 } 1141 }
@@ -1137,7 +1150,8 @@ done:
1137static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, 1150static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1138 struct ieee80211_vif *vif, 1151 struct ieee80211_vif *vif,
1139 struct ieee80211_channel *channel, 1152 struct ieee80211_channel *channel,
1140 int duration) 1153 int duration,
1154 enum ieee80211_roc_type type)
1141{ 1155{
1142 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1156 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1143 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; 1157 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index b9e3517652d6..74d7572e7091 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1795,7 +1795,7 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1795#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 1795#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1796 1796
1797int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 1797int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1798 char **buf, bool display) 1798 char **buf)
1799{ 1799{
1800 u32 base; /* SRAM byte address of event log header */ 1800 u32 base; /* SRAM byte address of event log header */
1801 u32 capacity; /* event log capacity in # entries */ 1801 u32 capacity; /* event log capacity in # entries */
@@ -1866,7 +1866,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1866 size); 1866 size);
1867 1867
1868#ifdef CONFIG_IWLWIFI_DEBUG 1868#ifdef CONFIG_IWLWIFI_DEBUG
1869 if (display) { 1869 if (buf) {
1870 if (full_log) 1870 if (full_log)
1871 bufsz = capacity * 48; 1871 bufsz = capacity * 48;
1872 else 1872 else
@@ -1962,7 +1962,7 @@ static void iwl_nic_error(struct iwl_op_mode *op_mode)
1962 priv->fw->fw_version); 1962 priv->fw->fw_version);
1963 1963
1964 iwl_dump_nic_error_log(priv); 1964 iwl_dump_nic_error_log(priv);
1965 iwl_dump_nic_event_log(priv, false, NULL, false); 1965 iwl_dump_nic_event_log(priv, false, NULL);
1966 1966
1967 iwlagn_fw_error(priv, false); 1967 iwlagn_fw_error(priv, false);
1968} 1968}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index abe304267261..907bd6e50aad 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2831,7 +2831,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2831 2831
2832 sta_priv = (struct iwl_station_priv *) sta->drv_priv; 2832 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2833 lq_sta = &sta_priv->lq_sta; 2833 lq_sta = &sta_priv->lq_sta;
2834 sband = hw->wiphy->bands[conf->channel->band]; 2834 sband = hw->wiphy->bands[conf->chandef.chan->band];
2835 2835
2836 2836
2837 lq_sta->lq.sta_id = sta_id; 2837 lq_sta->lq.sta_id = sta_id;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index a82b6b39d4ff..707446fa00bd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -78,8 +78,9 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
78 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 78 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
79#endif 79#endif
80 80
81 ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value); 81 ctx->staging.channel =
82 priv->band = priv->hw->conf.channel->band; 82 cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
83 priv->band = priv->hw->conf.chandef.chan->band;
83 84
84 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); 85 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
85 86
@@ -951,7 +952,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
951 unsigned long basic = ctx->vif->bss_conf.basic_rates; 952 unsigned long basic = ctx->vif->bss_conf.basic_rates;
952 int i; 953 int i;
953 954
954 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band]; 955 sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
955 956
956 for_each_set_bit(i, &basic, BITS_PER_LONG) { 957 for_each_set_bit(i, &basic, BITS_PER_LONG) {
957 int hw = sband->bitrates[i].hw_value; 958 int hw = sband->bitrates[i].hw_value;
@@ -1159,7 +1160,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1159} 1160}
1160 1161
1161void iwlagn_config_ht40(struct ieee80211_conf *conf, 1162void iwlagn_config_ht40(struct ieee80211_conf *conf,
1162 struct iwl_rxon_context *ctx) 1163 struct iwl_rxon_context *ctx)
1163{ 1164{
1164 if (conf_is_ht40_minus(conf)) { 1165 if (conf_is_ht40_minus(conf)) {
1165 ctx->ht.extension_chan_offset = 1166 ctx->ht.extension_chan_offset =
@@ -1181,7 +1182,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1181 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1182 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1182 struct iwl_rxon_context *ctx; 1183 struct iwl_rxon_context *ctx;
1183 struct ieee80211_conf *conf = &hw->conf; 1184 struct ieee80211_conf *conf = &hw->conf;
1184 struct ieee80211_channel *channel = conf->channel; 1185 struct ieee80211_channel *channel = conf->chandef.chan;
1185 int ret = 0; 1186 int ret = 0;
1186 1187
1187 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); 1188 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 3a4aa5239c45..d69b55866714 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b775769f8322..db183b44e038 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -695,6 +695,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
695void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 695void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
696{ 696{
697 struct iwl_addsta_cmd sta_cmd; 697 struct iwl_addsta_cmd sta_cmd;
698 static const struct iwl_link_quality_cmd zero_lq = {};
698 struct iwl_link_quality_cmd lq; 699 struct iwl_link_quality_cmd lq;
699 int i; 700 int i;
700 bool found = false; 701 bool found = false;
@@ -733,7 +734,9 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
733 else 734 else
734 memcpy(&lq, priv->stations[i].lq, 735 memcpy(&lq, priv->stations[i].lq,
735 sizeof(struct iwl_link_quality_cmd)); 736 sizeof(struct iwl_link_quality_cmd));
736 send_lq = true; 737
738 if (!memcmp(&lq, &zero_lq, sizeof(lq)))
739 send_lq = true;
737 } 740 }
738 spin_unlock_bh(&priv->sta_lock); 741 spin_unlock_bh(&priv->sta_lock);
739 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 742 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index dc6f965a123a..b89b9d9b9969 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index d1a670d7b10c..a900aaf47790 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -418,7 +418,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
418 " Tx flags = 0x%08x, agg.state = %d", 418 " Tx flags = 0x%08x, agg.state = %d",
419 info->flags, tid_data->agg.state); 419 info->flags, tid_data->agg.state);
420 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", 420 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
421 sta_id, tid, SEQ_TO_SN(tid_data->seq_number)); 421 sta_id, tid,
422 IEEE80211_SEQ_TO_SN(tid_data->seq_number));
422 goto drop_unlock_sta; 423 goto drop_unlock_sta;
423 } 424 }
424 425
@@ -569,7 +570,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
569 return 0; 570 return 0;
570 } 571 }
571 572
572 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 573 tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
573 574
574 /* There are still packets for this RA / TID in the HW */ 575 /* There are still packets for this RA / TID in the HW */
575 if (!test_bit(txq_id, priv->agg_q_alloc)) { 576 if (!test_bit(txq_id, priv->agg_q_alloc)) {
@@ -651,7 +652,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
651 652
652 spin_lock_bh(&priv->sta_lock); 653 spin_lock_bh(&priv->sta_lock);
653 tid_data = &priv->tid_data[sta_id][tid]; 654 tid_data = &priv->tid_data[sta_id][tid];
654 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 655 tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
655 tid_data->agg.txq_id = txq_id; 656 tid_data->agg.txq_id = txq_id;
656 657
657 *ssn = tid_data->agg.ssn; 658 *ssn = tid_data->agg.ssn;
@@ -673,6 +674,51 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
673 return ret; 674 return ret;
674} 675}
675 676
677int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
678 struct ieee80211_sta *sta, u16 tid)
679{
680 struct iwl_tid_data *tid_data;
681 enum iwl_agg_state agg_state;
682 int sta_id, txq_id;
683 sta_id = iwl_sta_id(sta);
684
685 /*
686 * First set the agg state to OFF to avoid calling
687 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
688 */
689 spin_lock_bh(&priv->sta_lock);
690
691 tid_data = &priv->tid_data[sta_id][tid];
692 txq_id = tid_data->agg.txq_id;
693 agg_state = tid_data->agg.state;
694 IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
695 sta_id, tid, txq_id, tid_data->agg.state);
696
697 tid_data->agg.state = IWL_AGG_OFF;
698
699 spin_unlock_bh(&priv->sta_lock);
700
701 if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
702 IWL_ERR(priv, "Couldn't flush the AGG queue\n");
703
704 if (test_bit(txq_id, priv->agg_q_alloc)) {
705 /*
706 * If the transport didn't know that we wanted to start
707 * agreggation, don't tell it that we want to stop them.
708 * This can happen when we don't get the addBA response on
709 * time, or we hadn't time to drain the AC queues.
710 */
711 if (agg_state == IWL_AGG_ON)
712 iwl_trans_txq_disable(priv->trans, txq_id);
713 else
714 IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
715 agg_state);
716 iwlagn_dealloc_agg_txq(priv, txq_id);
717 }
718
719 return 0;
720}
721
676int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, 722int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
677 struct ieee80211_sta *sta, u16 tid, u8 buf_size) 723 struct ieee80211_sta *sta, u16 tid, u8 buf_size)
678{ 724{
@@ -911,7 +957,7 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
911static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) 957static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
912{ 958{
913 return le32_to_cpup((__le32 *)&tx_resp->status + 959 return le32_to_cpup((__le32 *)&tx_resp->status +
914 tx_resp->frame_count) & MAX_SN; 960 tx_resp->frame_count) & IEEE80211_MAX_SN;
915} 961}
916 962
917static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, 963static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
@@ -1148,7 +1194,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1148 1194
1149 if (tx_resp->frame_count == 1) { 1195 if (tx_resp->frame_count == 1) {
1150 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); 1196 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1151 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); 1197 next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
1152 1198
1153 if (is_agg) { 1199 if (is_agg) {
1154 /* If this is an aggregation queue, we can rely on the 1200 /* If this is an aggregation queue, we can rely on the
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 1a4ac9236a44..0a1cdc5e856b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ff3389757281..c080ae3070b2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -29,7 +29,6 @@
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-csr.h" 30#include "iwl-csr.h"
31#include "iwl-agn-hw.h" 31#include "iwl-agn-hw.h"
32#include "cfg.h"
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5 34#define IWL1000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index e7de33128b16..a6ddd2f9fba0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -28,7 +28,6 @@
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */ 31#include "dvm/commands.h" /* needed for BT for now */
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 5096f7c96ab6..403f3f224bf6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,7 +29,6 @@
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "iwl-csr.h" 31#include "iwl-csr.h"
32#include "cfg.h"
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5 34#define IWL5000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 801ff49796dd..b5ab8d1bcac0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -28,7 +28,6 @@
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */ 31#include "dvm/commands.h" /* needed for BT for now */
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 6e35b2b72332..50263e87fe15 100644
--- a/drivers/net/wireless/iwlwifi/pcie/7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -1,34 +1,70 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify it 6 * GPL LICENSE SUMMARY
6 * under the terms of version 2 of the GNU General Public License as 7 *
8 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
8 * 13 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * This program is distributed in the hope that it will be useful, but
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * more details. 17 * General Public License for more details.
13 * 18 *
14 * You should have received a copy of the GNU General Public License along with 19 * You should have received a copy of the GNU General Public License
15 * this program; if not, write to the Free Software Foundation, Inc., 20 * along with this program; if not, write to the Free Software
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
17 * 23 *
18 * The full GNU General Public License is included in this distribution in the 24 * The full GNU General Public License is included in this distribution
19 * file called LICENSE. 25 * in the file called COPYING.
20 * 26 *
21 * Contact Information: 27 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
25 *****************************************************************************/ 62 *****************************************************************************/
26 63
27#include <linux/module.h> 64#include <linux/module.h>
28#include <linux/stringify.h> 65#include <linux/stringify.h>
29#include "iwl-config.h" 66#include "iwl-config.h"
30#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
31#include "cfg.h"
32 68
33/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
34#define IWL7260_UCODE_API_MAX 6 70#define IWL7260_UCODE_API_MAX 6
@@ -70,7 +106,6 @@ static const struct iwl_base_params iwl7000_base_params = {
70}; 106};
71 107
72static const struct iwl_ht_params iwl7000_ht_params = { 108static const struct iwl_ht_params iwl7000_ht_params = {
73 .ht_greenfield_support = true,
74 .use_rts_for_aggregation = true, /* use rts/cts protection */ 109 .use_rts_for_aggregation = true, /* use rts/cts protection */
75 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), 110 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
76}; 111};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index e9975c54c276..6d73f943cefa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 743b48343358..c38aa8f77554 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -275,4 +275,51 @@ struct iwl_cfg {
275 const bool temp_offset_v2; 275 const bool temp_offset_v2;
276}; 276};
277 277
278/*
279 * This list declares the config structures for all devices.
280 */
281extern const struct iwl_cfg iwl5300_agn_cfg;
282extern const struct iwl_cfg iwl5100_agn_cfg;
283extern const struct iwl_cfg iwl5350_agn_cfg;
284extern const struct iwl_cfg iwl5100_bgn_cfg;
285extern const struct iwl_cfg iwl5100_abg_cfg;
286extern const struct iwl_cfg iwl5150_agn_cfg;
287extern const struct iwl_cfg iwl5150_abg_cfg;
288extern const struct iwl_cfg iwl6005_2agn_cfg;
289extern const struct iwl_cfg iwl6005_2abg_cfg;
290extern const struct iwl_cfg iwl6005_2bg_cfg;
291extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
292extern const struct iwl_cfg iwl6005_2agn_d_cfg;
293extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
294extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
295extern const struct iwl_cfg iwl1030_bgn_cfg;
296extern const struct iwl_cfg iwl1030_bg_cfg;
297extern const struct iwl_cfg iwl6030_2agn_cfg;
298extern const struct iwl_cfg iwl6030_2abg_cfg;
299extern const struct iwl_cfg iwl6030_2bgn_cfg;
300extern const struct iwl_cfg iwl6030_2bg_cfg;
301extern const struct iwl_cfg iwl6000i_2agn_cfg;
302extern const struct iwl_cfg iwl6000i_2abg_cfg;
303extern const struct iwl_cfg iwl6000i_2bg_cfg;
304extern const struct iwl_cfg iwl6000_3agn_cfg;
305extern const struct iwl_cfg iwl6050_2agn_cfg;
306extern const struct iwl_cfg iwl6050_2abg_cfg;
307extern const struct iwl_cfg iwl6150_bgn_cfg;
308extern const struct iwl_cfg iwl6150_bg_cfg;
309extern const struct iwl_cfg iwl1000_bgn_cfg;
310extern const struct iwl_cfg iwl1000_bg_cfg;
311extern const struct iwl_cfg iwl100_bgn_cfg;
312extern const struct iwl_cfg iwl100_bg_cfg;
313extern const struct iwl_cfg iwl130_bgn_cfg;
314extern const struct iwl_cfg iwl130_bg_cfg;
315extern const struct iwl_cfg iwl2000_2bgn_cfg;
316extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
317extern const struct iwl_cfg iwl2030_2bgn_cfg;
318extern const struct iwl_cfg iwl6035_2agn_cfg;
319extern const struct iwl_cfg iwl105_bgn_cfg;
320extern const struct iwl_cfg iwl105_bgn_d_cfg;
321extern const struct iwl_cfg iwl135_bgn_cfg;
322extern const struct iwl_cfg iwl7260_2ac_cfg;
323extern const struct iwl_cfg iwl3160_ac_cfg;
324
278#endif /* __IWL_CONFIG_H__ */ 325#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index df3463a38704..20e845d4da04 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 87535a67de76..8a44f594528d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -66,6 +66,7 @@
66#include <linux/device.h> 66#include <linux/device.h>
67#include <linux/interrupt.h> 67#include <linux/interrupt.h>
68#include <linux/export.h> 68#include <linux/export.h>
69#include "iwl-drv.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
70#include "iwl-devtrace.h" 71#include "iwl-devtrace.h"
71 72
@@ -85,11 +86,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
85} 86}
86 87
87__iwl_fn(warn) 88__iwl_fn(warn)
88EXPORT_SYMBOL_GPL(__iwl_warn); 89IWL_EXPORT_SYMBOL(__iwl_warn);
89__iwl_fn(info) 90__iwl_fn(info)
90EXPORT_SYMBOL_GPL(__iwl_info); 91IWL_EXPORT_SYMBOL(__iwl_info);
91__iwl_fn(crit) 92__iwl_fn(crit)
92EXPORT_SYMBOL_GPL(__iwl_crit); 93IWL_EXPORT_SYMBOL(__iwl_crit);
93 94
94void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, 95void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
95 const char *fmt, ...) 96 const char *fmt, ...)
@@ -110,7 +111,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
110 trace_iwlwifi_err(&vaf); 111 trace_iwlwifi_err(&vaf);
111 va_end(args); 112 va_end(args);
112} 113}
113EXPORT_SYMBOL_GPL(__iwl_err); 114IWL_EXPORT_SYMBOL(__iwl_err);
114 115
115#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 116#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
116void __iwl_dbg(struct device *dev, 117void __iwl_dbg(struct device *dev,
@@ -133,5 +134,5 @@ void __iwl_dbg(struct device *dev,
133 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); 134 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
134 va_end(args); 135 va_end(args);
135} 136}
136EXPORT_SYMBOL_GPL(__iwl_dbg); 137IWL_EXPORT_SYMBOL(__iwl_dbg);
137#endif 138#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 81aa91fab5aa..4491c1c72cc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -298,7 +298,7 @@ TRACE_EVENT(iwlwifi_dbg,
298 MAX_MSG_LEN, vaf->fmt, 298 MAX_MSG_LEN, vaf->fmt,
299 *vaf->va) >= MAX_MSG_LEN); 299 *vaf->va) >= MAX_MSG_LEN);
300 ), 300 ),
301 TP_printk("%s", (char *)__get_dynamic_array(msg)) 301 TP_printk("%s", __get_str(msg))
302); 302);
303 303
304#undef TRACE_SYSTEM 304#undef TRACE_SYSTEM
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fbfd2d137117..39aad9893e0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -912,8 +912,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
912 } 912 }
913 } 913 }
914 914
915 IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
916
917 /* 915 /*
918 * In mvm uCode there is no difference between data and instructions 916 * In mvm uCode there is no difference between data and instructions
919 * sections. 917 * sections.
@@ -970,6 +968,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
970 else 968 else
971 op = &iwlwifi_opmode_table[DVM_OP_MODE]; 969 op = &iwlwifi_opmode_table[DVM_OP_MODE];
972 970
971 IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
972 drv->fw.fw_version, op->name);
973
973 /* add this device to the list of devices using this op_mode */ 974 /* add this device to the list of devices using this op_mode */
974 list_add_tail(&drv->list, &op->drv); 975 list_add_tail(&drv->list, &op->drv);
975 976
@@ -997,8 +998,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
997 * else from proceeding if the module fails to load 998 * else from proceeding if the module fails to load
998 * or hangs loading. 999 * or hangs loading.
999 */ 1000 */
1000 if (load_module) 1001 if (load_module) {
1001 request_module("%s", op->name); 1002 err = request_module("%s", op->name);
1003 if (err)
1004 IWL_ERR(drv,
1005 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1006 op->name, err);
1007 }
1002 return; 1008 return;
1003 1009
1004 try_again: 1010 try_again:
@@ -1102,7 +1108,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
1102 1108
1103/* shared module parameters */ 1109/* shared module parameters */
1104struct iwl_mod_params iwlwifi_mod_params = { 1110struct iwl_mod_params iwlwifi_mod_params = {
1105 .restart_fw = 1, 1111 .restart_fw = true,
1106 .plcp_check = true, 1112 .plcp_check = true,
1107 .bt_coex_active = true, 1113 .bt_coex_active = true,
1108 .power_level = IWL_POWER_INDEX_1, 1114 .power_level = IWL_POWER_INDEX_1,
@@ -1111,7 +1117,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
1111 .wd_disable = true, 1117 .wd_disable = true,
1112 /* the rest are 0 by default */ 1118 /* the rest are 0 by default */
1113}; 1119};
1114EXPORT_SYMBOL_GPL(iwlwifi_mod_params); 1120IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
1115 1121
1116int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) 1122int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1117{ 1123{
@@ -1135,7 +1141,7 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1135 mutex_unlock(&iwlwifi_opmode_table_mtx); 1141 mutex_unlock(&iwlwifi_opmode_table_mtx);
1136 return -EIO; 1142 return -EIO;
1137} 1143}
1138EXPORT_SYMBOL_GPL(iwl_opmode_register); 1144IWL_EXPORT_SYMBOL(iwl_opmode_register);
1139 1145
1140void iwl_opmode_deregister(const char *name) 1146void iwl_opmode_deregister(const char *name)
1141{ 1147{
@@ -1157,7 +1163,7 @@ void iwl_opmode_deregister(const char *name)
1157 } 1163 }
1158 mutex_unlock(&iwlwifi_opmode_table_mtx); 1164 mutex_unlock(&iwlwifi_opmode_table_mtx);
1159} 1165}
1160EXPORT_SYMBOL_GPL(iwl_opmode_deregister); 1166IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
1161 1167
1162static int __init iwl_drv_init(void) 1168static int __init iwl_drv_init(void)
1163{ 1169{
@@ -1207,8 +1213,8 @@ MODULE_PARM_DESC(11n_disable,
1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1213module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1208 int, S_IRUGO); 1214 int, S_IRUGO);
1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); 1215MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); 1216module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO);
1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 1217MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
1212 1218
1213module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, 1219module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
1214 int, S_IRUGO); 1220 int, S_IRUGO);
@@ -1266,7 +1272,3 @@ module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
1266 bool, S_IRUGO); 1272 bool, S_IRUGO);
1267MODULE_PARM_DESC(auto_agg, 1273MODULE_PARM_DESC(auto_agg,
1268 "enable agg w/o check traffic load (default: enable)"); 1274 "enable agg w/o check traffic load (default: enable)");
1269
1270module_param_named(5ghz_disable, iwlwifi_mod_params.disable_5ghz,
1271 bool, S_IRUGO);
1272MODULE_PARM_DESC(5ghz_disable, "disable 5GHz band (default: 0 [enabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 594a5c71b272..7d1450916308 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,8 @@
63#ifndef __iwl_drv_h__ 63#ifndef __iwl_drv_h__
64#define __iwl_drv_h__ 64#define __iwl_drv_h__
65 65
66#include <linux/module.h>
67
66/* for all modules */ 68/* for all modules */
67#define DRV_NAME "iwlwifi" 69#define DRV_NAME "iwlwifi"
68#define IWLWIFI_VERSION "in-tree:" 70#define IWLWIFI_VERSION "in-tree:"
@@ -123,4 +125,17 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
123 */ 125 */
124void iwl_drv_stop(struct iwl_drv *drv); 126void iwl_drv_stop(struct iwl_drv *drv);
125 127
128/*
129 * exported symbol management
130 *
131 * The driver can be split into multiple modules, in which case some symbols
132 * must be exported for the sub-modules. However, if it's not split and
133 * everything is built-in, then we can avoid that.
134 */
135#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
136#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
137#else
138#define IWL_EXPORT_SYMBOL(sym)
139#endif
140
126#endif /* __iwl_drv_h__ */ 141#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 034f2ff4f43d..600c9fdd7f71 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
62#include <linux/types.h> 62#include <linux/types.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65#include "iwl-drv.h"
65#include "iwl-modparams.h" 66#include "iwl-modparams.h"
66#include "iwl-eeprom-parse.h" 67#include "iwl-eeprom-parse.h"
67 68
@@ -749,7 +750,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
749 } 750 }
750 751
751 ht_info->ht_supported = true; 752 ht_info->ht_supported = true;
752 ht_info->cap = 0; 753 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
753 754
754 if (iwlwifi_mod_params.amsdu_size_8K) 755 if (iwlwifi_mod_params.amsdu_size_8K)
755 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 756 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -909,7 +910,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
909 kfree(data); 910 kfree(data);
910 return NULL; 911 return NULL;
911} 912}
912EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data); 913IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
913 914
914/* helper functions */ 915/* helper functions */
915int iwl_nvm_check_version(struct iwl_nvm_data *data, 916int iwl_nvm_check_version(struct iwl_nvm_data *data,
@@ -928,4 +929,4 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
928 data->calib_version, trans->cfg->nvm_calib_ver); 929 data->calib_version, trans->cfg->nvm_calib_ver);
929 return -EINVAL; 930 return -EINVAL;
930} 931}
931EXPORT_SYMBOL_GPL(iwl_nvm_check_version); 932IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 683fe6a8c58f..37f115390b19 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index ef4806f27cf8..e5f2e362ab0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65 65
66#include "iwl-drv.h"
66#include "iwl-debug.h" 67#include "iwl-debug.h"
67#include "iwl-eeprom-read.h" 68#include "iwl-eeprom-read.h"
68#include "iwl-io.h" 69#include "iwl-io.h"
@@ -460,4 +461,4 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
460 461
461 return ret; 462 return ret;
462} 463}
463EXPORT_SYMBOL_GPL(iwl_read_eeprom); 464IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index b2588c5cbf93..8e941f8bd7d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index f5592fb3b1ed..484d318245fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 90873eca35f7..8b6c6fd95ed0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index b545178e46e3..c4c446d41eb0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -73,12 +73,14 @@
73 * treats good CRC threshold as a boolean 73 * treats good CRC threshold as a boolean
74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). 74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
76 */ 77 */
77enum iwl_ucode_tlv_flag { 78enum iwl_ucode_tlv_flag {
78 IWL_UCODE_TLV_FLAGS_PAN = BIT(0), 79 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
79 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), 80 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
80 IWL_UCODE_TLV_FLAGS_MFP = BIT(2), 81 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
81 IWL_UCODE_TLV_FLAGS_P2P = BIT(3), 82 IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
83 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
82}; 84};
83 85
84/* The default calibrate table size if not specified by firmware file */ 86/* The default calibrate table size if not specified by firmware file */
@@ -152,6 +154,19 @@ struct iwl_tlv_calib_ctrl {
152 __le32 event_trigger; 154 __le32 event_trigger;
153} __packed; 155} __packed;
154 156
157enum iwl_fw_phy_cfg {
158 FW_PHY_CFG_RADIO_TYPE_POS = 0,
159 FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
160 FW_PHY_CFG_RADIO_STEP_POS = 2,
161 FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
162 FW_PHY_CFG_RADIO_DASH_POS = 4,
163 FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
164 FW_PHY_CFG_TX_CHAIN_POS = 16,
165 FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
166 FW_PHY_CFG_RX_CHAIN_POS = 20,
167 FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
168};
169
155/** 170/**
156 * struct iwl_fw - variables associated with the firmware 171 * struct iwl_fw - variables associated with the firmware
157 * 172 *
@@ -188,4 +203,16 @@ struct iwl_fw {
188 bool mvm_fw; 203 bool mvm_fw;
189}; 204};
190 205
206static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
207{
208 return (fw->phy_config & FW_PHY_CFG_TX_CHAIN) >>
209 FW_PHY_CFG_TX_CHAIN_POS;
210}
211
212static inline u8 iwl_fw_valid_rx_ant(const struct iwl_fw *fw)
213{
214 return (fw->phy_config & FW_PHY_CFG_RX_CHAIN) >>
215 FW_PHY_CFG_RX_CHAIN_POS;
216}
217
191#endif /* __iwl_fw_h__ */ 218#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 276410d82de4..305c81f2c2b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -29,6 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/export.h> 30#include <linux/export.h>
31 31
32#include "iwl-drv.h"
32#include "iwl-io.h" 33#include "iwl-io.h"
33#include "iwl-csr.h" 34#include "iwl-csr.h"
34#include "iwl-debug.h" 35#include "iwl-debug.h"
@@ -49,7 +50,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
49 50
50 return -ETIMEDOUT; 51 return -ETIMEDOUT;
51} 52}
52EXPORT_SYMBOL_GPL(iwl_poll_bit); 53IWL_EXPORT_SYMBOL(iwl_poll_bit);
53 54
54u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) 55u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
55{ 56{
@@ -62,7 +63,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
62 63
63 return value; 64 return value;
64} 65}
65EXPORT_SYMBOL_GPL(iwl_read_direct32); 66IWL_EXPORT_SYMBOL(iwl_read_direct32);
66 67
67void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) 68void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
68{ 69{
@@ -73,7 +74,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
73 iwl_trans_release_nic_access(trans, &flags); 74 iwl_trans_release_nic_access(trans, &flags);
74 } 75 }
75} 76}
76EXPORT_SYMBOL_GPL(iwl_write_direct32); 77IWL_EXPORT_SYMBOL(iwl_write_direct32);
77 78
78int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 79int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
79 int timeout) 80 int timeout)
@@ -89,7 +90,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
89 90
90 return -ETIMEDOUT; 91 return -ETIMEDOUT;
91} 92}
92EXPORT_SYMBOL_GPL(iwl_poll_direct_bit); 93IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
93 94
94static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) 95static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
95{ 96{
@@ -115,7 +116,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
115 } 116 }
116 return val; 117 return val;
117} 118}
118EXPORT_SYMBOL_GPL(iwl_read_prph); 119IWL_EXPORT_SYMBOL(iwl_read_prph);
119 120
120void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) 121void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
121{ 122{
@@ -126,7 +127,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
126 iwl_trans_release_nic_access(trans, &flags); 127 iwl_trans_release_nic_access(trans, &flags);
127 } 128 }
128} 129}
129EXPORT_SYMBOL_GPL(iwl_write_prph); 130IWL_EXPORT_SYMBOL(iwl_write_prph);
130 131
131void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 132void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
132{ 133{
@@ -138,7 +139,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
138 iwl_trans_release_nic_access(trans, &flags); 139 iwl_trans_release_nic_access(trans, &flags);
139 } 140 }
140} 141}
141EXPORT_SYMBOL_GPL(iwl_set_bits_prph); 142IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
142 143
143void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, 144void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
144 u32 bits, u32 mask) 145 u32 bits, u32 mask)
@@ -151,7 +152,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
151 iwl_trans_release_nic_access(trans, &flags); 152 iwl_trans_release_nic_access(trans, &flags);
152 } 153 }
153} 154}
154EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph); 155IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
155 156
156void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 157void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
157{ 158{
@@ -164,4 +165,4 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
164 iwl_trans_release_nic_access(trans, &flags); 165 iwl_trans_release_nic_access(trans, &flags);
165 } 166 }
166} 167}
167EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); 168IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 2c2a729092f5..d6f6c37c09fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -103,13 +103,12 @@ enum iwl_power_level {
103 * @ant_coupling: antenna coupling in dB, default = 0 103 * @ant_coupling: antenna coupling in dB, default = 0
104 * @bt_ch_announce: BT channel inhibition, default = enable 104 * @bt_ch_announce: BT channel inhibition, default = enable
105 * @auto_agg: enable agg. without check, default = true 105 * @auto_agg: enable agg. without check, default = true
106 * @disable_5ghz: disable 5GHz capability, default = false
107 */ 106 */
108struct iwl_mod_params { 107struct iwl_mod_params {
109 int sw_crypto; 108 int sw_crypto;
110 unsigned int disable_11n; 109 unsigned int disable_11n;
111 int amsdu_size_8K; 110 int amsdu_size_8K;
112 int restart_fw; 111 bool restart_fw;
113 bool plcp_check; 112 bool plcp_check;
114 int wd_disable; 113 int wd_disable;
115 bool bt_coex_active; 114 bool bt_coex_active;
@@ -120,7 +119,6 @@ struct iwl_mod_params {
120 int ant_coupling; 119 int ant_coupling;
121 bool bt_ch_announce; 120 bool bt_ch_announce;
122 bool auto_agg; 121 bool auto_agg;
123 bool disable_5ghz;
124}; 122};
125 123
126#endif /* #__iwl_modparams_h__ */ 124#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c3affbc62cdf..940b8a9d5285 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
63#include <linux/sched.h> 63#include <linux/sched.h>
64#include <linux/export.h> 64#include <linux/export.h>
65 65
66#include "iwl-drv.h"
66#include "iwl-notif-wait.h" 67#include "iwl-notif-wait.h"
67 68
68 69
@@ -72,7 +73,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
72 INIT_LIST_HEAD(&notif_wait->notif_waits); 73 INIT_LIST_HEAD(&notif_wait->notif_waits);
73 init_waitqueue_head(&notif_wait->notif_waitq); 74 init_waitqueue_head(&notif_wait->notif_waitq);
74} 75}
75EXPORT_SYMBOL_GPL(iwl_notification_wait_init); 76IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
76 77
77void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 78void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
78 struct iwl_rx_packet *pkt) 79 struct iwl_rx_packet *pkt)
@@ -117,7 +118,7 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
117 if (triggered) 118 if (triggered)
118 wake_up_all(&notif_wait->notif_waitq); 119 wake_up_all(&notif_wait->notif_waitq);
119} 120}
120EXPORT_SYMBOL_GPL(iwl_notification_wait_notify); 121IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
121 122
122void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 123void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
123{ 124{
@@ -130,7 +131,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
130 131
131 wake_up_all(&notif_wait->notif_waitq); 132 wake_up_all(&notif_wait->notif_waitq);
132} 133}
133EXPORT_SYMBOL_GPL(iwl_abort_notification_waits); 134IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
134 135
135void 136void
136iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 137iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -154,7 +155,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
154 list_add(&wait_entry->list, &notif_wait->notif_waits); 155 list_add(&wait_entry->list, &notif_wait->notif_waits);
155 spin_unlock_bh(&notif_wait->notif_wait_lock); 156 spin_unlock_bh(&notif_wait->notif_wait_lock);
156} 157}
157EXPORT_SYMBOL_GPL(iwl_init_notification_wait); 158IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
158 159
159int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, 160int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
160 struct iwl_notification_wait *wait_entry, 161 struct iwl_notification_wait *wait_entry,
@@ -178,7 +179,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
178 return -ETIMEDOUT; 179 return -ETIMEDOUT;
179 return 0; 180 return 0;
180} 181}
181EXPORT_SYMBOL_GPL(iwl_wait_notification); 182IWL_EXPORT_SYMBOL(iwl_wait_notification);
182 183
183void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, 184void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
184 struct iwl_notification_wait *wait_entry) 185 struct iwl_notification_wait *wait_entry)
@@ -187,4 +188,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
187 list_del(&wait_entry->list); 188 list_del(&wait_entry->list);
188 spin_unlock_bh(&notif_wait->notif_wait_lock); 189 spin_unlock_bh(&notif_wait->notif_wait_lock);
189} 190}
190EXPORT_SYMBOL_GPL(iwl_remove_notification); 191IWL_EXPORT_SYMBOL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index c2ce764463a3..2e2f1c8c99f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index a70213bdb83c..6199a0a597a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
62#include <linux/types.h> 62#include <linux/types.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65#include "iwl-drv.h"
65#include "iwl-modparams.h" 66#include "iwl-modparams.h"
66#include "iwl-nvm-parse.h" 67#include "iwl-nvm-parse.h"
67 68
@@ -149,6 +150,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
149 * @NVM_CHANNEL_DFS: dynamic freq selection candidate 150 * @NVM_CHANNEL_DFS: dynamic freq selection candidate
150 * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) 151 * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
151 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) 152 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
153 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
154 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
152 */ 155 */
153enum iwl_nvm_channel_flags { 156enum iwl_nvm_channel_flags {
154 NVM_CHANNEL_VALID = BIT(0), 157 NVM_CHANNEL_VALID = BIT(0),
@@ -158,6 +161,8 @@ enum iwl_nvm_channel_flags {
158 NVM_CHANNEL_DFS = BIT(7), 161 NVM_CHANNEL_DFS = BIT(7),
159 NVM_CHANNEL_WIDE = BIT(8), 162 NVM_CHANNEL_WIDE = BIT(8),
160 NVM_CHANNEL_40MHZ = BIT(9), 163 NVM_CHANNEL_40MHZ = BIT(9),
164 NVM_CHANNEL_80MHZ = BIT(10),
165 NVM_CHANNEL_160MHZ = BIT(11),
161}; 166};
162 167
163#define CHECK_AND_PRINT_I(x) \ 168#define CHECK_AND_PRINT_I(x) \
@@ -210,6 +215,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
210 else 215 else
211 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 216 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
212 } 217 }
218 if (!(ch_flags & NVM_CHANNEL_80MHZ))
219 channel->flags |= IEEE80211_CHAN_NO_80MHZ;
220 if (!(ch_flags & NVM_CHANNEL_160MHZ))
221 channel->flags |= IEEE80211_CHAN_NO_160MHZ;
213 222
214 if (!(ch_flags & NVM_CHANNEL_IBSS)) 223 if (!(ch_flags & NVM_CHANNEL_IBSS))
215 channel->flags |= IEEE80211_CHAN_NO_IBSS; 224 channel->flags |= IEEE80211_CHAN_NO_IBSS;
@@ -245,6 +254,43 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
245 return n_channels; 254 return n_channels;
246} 255}
247 256
257static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
258 struct iwl_nvm_data *data,
259 struct ieee80211_sta_vht_cap *vht_cap)
260{
261 /* For now, assume new devices with NVM are VHT capable */
262
263 vht_cap->vht_supported = true;
264
265 vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
266 IEEE80211_VHT_CAP_RXSTBC_1 |
267 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
268 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
269
270 if (iwlwifi_mod_params.amsdu_size_8K)
271 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
272
273 vht_cap->vht_mcs.rx_mcs_map =
274 cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
275 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
276 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
277 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
278 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
279 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
280 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
281 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
282
283 if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
284 vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
285 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
286 /* this works because NOT_SUPPORTED == 3 */
287 vht_cap->vht_mcs.rx_mcs_map |=
288 cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
289 }
290
291 vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
292}
293
248static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 294static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
249 struct iwl_nvm_data *data, const __le16 *nvm_sw) 295 struct iwl_nvm_data *data, const __le16 *nvm_sw)
250{ 296{
@@ -268,6 +314,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
268 n_used += iwl_init_sband_channels(data, sband, n_channels, 314 n_used += iwl_init_sband_channels(data, sband, n_channels,
269 IEEE80211_BAND_5GHZ); 315 IEEE80211_BAND_5GHZ);
270 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); 316 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
317 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
271 318
272 if (n_channels != n_used) 319 if (n_channels != n_used)
273 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", 320 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -343,4 +390,4 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
343 390
344 return data; 391 return data;
345} 392}
346EXPORT_SYMBOL_GPL(iwl_parse_nvm_data); 393IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index b2692bd287fa..e57fb989661e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4a680019e117..98c7aa7346da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 3392011a8768..25745daa0d5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,6 +65,7 @@
65#include <linux/string.h> 65#include <linux/string.h>
66#include <linux/export.h> 66#include <linux/export.h>
67 67
68#include "iwl-drv.h"
68#include "iwl-phy-db.h" 69#include "iwl-phy-db.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
70#include "iwl-op-mode.h" 71#include "iwl-op-mode.h"
@@ -149,7 +150,7 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
149 /* TODO: add default values of the phy db. */ 150 /* TODO: add default values of the phy db. */
150 return phy_db; 151 return phy_db;
151} 152}
152EXPORT_SYMBOL(iwl_phy_db_init); 153IWL_EXPORT_SYMBOL(iwl_phy_db_init);
153 154
154/* 155/*
155 * get phy db section: returns a pointer to a phy db section specified by 156 * get phy db section: returns a pointer to a phy db section specified by
@@ -215,7 +216,7 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
215 216
216 kfree(phy_db); 217 kfree(phy_db);
217} 218}
218EXPORT_SYMBOL(iwl_phy_db_free); 219IWL_EXPORT_SYMBOL(iwl_phy_db_free);
219 220
220int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, 221int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
221 gfp_t alloc_ctx) 222 gfp_t alloc_ctx)
@@ -260,7 +261,7 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
260 261
261 return 0; 262 return 0;
262} 263}
263EXPORT_SYMBOL(iwl_phy_db_set_section); 264IWL_EXPORT_SYMBOL(iwl_phy_db_set_section);
264 265
265static int is_valid_channel(u16 ch_id) 266static int is_valid_channel(u16 ch_id)
266{ 267{
@@ -495,4 +496,4 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
495 "Finished sending phy db non channel data\n"); 496 "Finished sending phy db non channel data\n");
496 return 0; 497 return 0;
497} 498}
498EXPORT_SYMBOL(iwl_send_phy_db_data); 499IWL_EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index d0e43d96ab38..ce983af79644 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index f76e9cad7757..386f2a7c87cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index ce0c67b425ee..5cfd55b86ed3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -64,6 +64,7 @@
64#include <linux/export.h> 64#include <linux/export.h>
65#include <net/netlink.h> 65#include <net/netlink.h>
66 66
67#include "iwl-drv.h"
67#include "iwl-io.h" 68#include "iwl-io.h"
68#include "iwl-fh.h" 69#include "iwl-fh.h"
69#include "iwl-prph.h" 70#include "iwl-prph.h"
@@ -271,7 +272,7 @@ static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
271 272
272 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 273 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
273 skb = iwl_test_alloc_reply(tst, reply_len + 20); 274 skb = iwl_test_alloc_reply(tst, reply_len + 20);
274 reply_buf = kmalloc(reply_len, GFP_KERNEL); 275 reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL);
275 if (!skb || !reply_buf) { 276 if (!skb || !reply_buf) {
276 kfree_skb(skb); 277 kfree_skb(skb);
277 kfree(reply_buf); 278 kfree(reply_buf);
@@ -279,7 +280,6 @@ static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
279 } 280 }
280 281
281 /* The reply is in a page, that we cannot send to user space. */ 282 /* The reply is in a page, that we cannot send to user space. */
282 memcpy(reply_buf, &(pkt->hdr), reply_len);
283 iwl_free_resp(&cmd); 283 iwl_free_resp(&cmd);
284 284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, 285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
@@ -653,7 +653,7 @@ int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
653 } 653 }
654 return 0; 654 return 0;
655} 655}
656EXPORT_SYMBOL_GPL(iwl_test_parse); 656IWL_EXPORT_SYMBOL(iwl_test_parse);
657 657
658/* 658/*
659 * Handle test commands. 659 * Handle test commands.
@@ -715,7 +715,7 @@ int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
715 } 715 }
716 return result; 716 return result;
717} 717}
718EXPORT_SYMBOL_GPL(iwl_test_handle_cmd); 718IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
719 719
720static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb, 720static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
721 struct netlink_callback *cb) 721 struct netlink_callback *cb)
@@ -803,7 +803,7 @@ int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
803 } 803 }
804 return result; 804 return result;
805} 805}
806EXPORT_SYMBOL_GPL(iwl_test_dump); 806IWL_EXPORT_SYMBOL(iwl_test_dump);
807 807
808/* 808/*
809 * Multicast a spontaneous messages from the device to the user space. 809 * Multicast a spontaneous messages from the device to the user space.
@@ -849,4 +849,4 @@ void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
849 if (tst->notify) 849 if (tst->notify)
850 iwl_test_send_rx(tst, rxb); 850 iwl_test_send_rx(tst, rxb);
851} 851}
852EXPORT_SYMBOL_GPL(iwl_test_rx); 852IWL_EXPORT_SYMBOL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index 7fbf4d717caa..8fbd21704840 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index a963f45c6849..98f48a9afc98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0cac2b7af78b..7a13790b5bfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -114,9 +114,6 @@
114 * completely agnostic to these differences. 114 * completely agnostic to these differences.
115 * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode), 115 * The transport does provide helper functionnality (i.e. SYNC / ASYNC mode),
116 */ 116 */
117#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
118#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
119#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
120#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 117#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
121#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 118#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
122#define SEQ_TO_INDEX(s) ((s) & 0xff) 119#define SEQ_TO_INDEX(s) ((s) & 0xff)
@@ -308,7 +305,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
308 * currently supports 305 * currently supports
309 */ 306 */
310#define IWL_MAX_HW_QUEUES 32 307#define IWL_MAX_HW_QUEUES 32
311#define IWL_INVALID_STATION 255
312#define IWL_MAX_TID_COUNT 8 308#define IWL_MAX_TID_COUNT 8
313#define IWL_FRAME_LIMIT 64 309#define IWL_FRAME_LIMIT 64
314 310
@@ -685,7 +681,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
685static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, 681static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
686 int fifo) 682 int fifo)
687{ 683{
688 iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION, 684 iwl_trans_txq_enable(trans, queue, fifo, -1,
689 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0); 685 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
690} 686}
691 687
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 807b250ec396..2acc44b40986 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o 5iwlmvm-y += power.o bt-coex.o
6iwlmvm-y += led.o 6iwlmvm-y += led.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 73d24aacb90a..93fd1457954b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
new file mode 100644
index 000000000000..810bfa5f6de0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -0,0 +1,589 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <net/mac80211.h>
65
66#include "fw-api-bt-coex.h"
67#include "iwl-modparams.h"
68#include "mvm.h"
69#include "iwl-debug.h"
70
71#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
72 [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
73 ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
74
75static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
76 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
77 BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
78 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
79 BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
80 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
81 BT_COEX_PRIO_TBL_PRIO_LOW, 0),
82 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
83 BT_COEX_PRIO_TBL_PRIO_LOW, 1),
84 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
85 BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
86 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
87 BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
88 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
89 BT_COEX_PRIO_TBL_DISABLED, 0),
90 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
91 BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
92 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
93 BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
94 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
95 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
96 0, 0, 0, 0, 0, 0,
97};
98
99#undef EVENT_PRIO_ANT
100
101/* BT Antenna Coupling Threshold (dB) */
102#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
103#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD (3)
104
105#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD (-62)
106#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
107#define BT_REDUCED_TX_POWER_BIT BIT(7)
108
109static inline bool is_loose_coex(void)
110{
111 return iwlwifi_mod_params.ant_coupling >
112 IWL_BT_ANTENNA_COUPLING_THRESHOLD;
113}
114
115int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
116{
117 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
118 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
119 &iwl_bt_prio_tbl);
120}
121
122static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
123{
124 struct iwl_bt_coex_prot_env_cmd env_cmd;
125 int ret;
126
127 env_cmd.action = action;
128 env_cmd.type = type;
129 ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
130 sizeof(env_cmd), &env_cmd);
131 if (ret)
132 IWL_ERR(mvm, "failed to send BT env command\n");
133 return ret;
134}
135
136enum iwl_bt_kill_msk {
137 BT_KILL_MSK_DEFAULT,
138 BT_KILL_MSK_SCO_HID_A2DP,
139 BT_KILL_MSK_REDUCED_TXPOW,
140 BT_KILL_MSK_MAX,
141};
142
143static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
144 [BT_KILL_MSK_DEFAULT] = 0xffff0000,
145 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
146 [BT_KILL_MSK_REDUCED_TXPOW] = 0,
147};
148
149static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
150 [BT_KILL_MSK_DEFAULT] = 0xffff0000,
151 [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
152 [BT_KILL_MSK_REDUCED_TXPOW] = 0,
153};
154
155#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
156
157/* Tight Coex */
158static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
159 cpu_to_le32(0xaaaaaaaa),
160 cpu_to_le32(0xaaaaaaaa),
161 cpu_to_le32(0xaeaaaaaa),
162 cpu_to_le32(0xaaaaaaaa),
163 cpu_to_le32(0xcc00ff28),
164 cpu_to_le32(0x0000aaaa),
165 cpu_to_le32(0xcc00aaaa),
166 cpu_to_le32(0x0000aaaa),
167 cpu_to_le32(0xc0004000),
168 cpu_to_le32(0x00000000),
169 cpu_to_le32(0xf0005000),
170 cpu_to_le32(0xf0005000),
171};
172
173/* Loose Coex */
174static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
175 cpu_to_le32(0xaaaaaaaa),
176 cpu_to_le32(0xaaaaaaaa),
177 cpu_to_le32(0xaeaaaaaa),
178 cpu_to_le32(0xaaaaaaaa),
179 cpu_to_le32(0xcc00ff28),
180 cpu_to_le32(0x0000aaaa),
181 cpu_to_le32(0xcc00aaaa),
182 cpu_to_le32(0x0000aaaa),
183 cpu_to_le32(0x00000000),
184 cpu_to_le32(0x00000000),
185 cpu_to_le32(0xf0005000),
186 cpu_to_le32(0xf0005000),
187};
188
189/* Full concurrency */
190static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
191 cpu_to_le32(0xaaaaaaaa),
192 cpu_to_le32(0xaaaaaaaa),
193 cpu_to_le32(0xaaaaaaaa),
194 cpu_to_le32(0xaaaaaaaa),
195 cpu_to_le32(0xaaaaaaaa),
196 cpu_to_le32(0xaaaaaaaa),
197 cpu_to_le32(0xaaaaaaaa),
198 cpu_to_le32(0xaaaaaaaa),
199 cpu_to_le32(0x00000000),
200 cpu_to_le32(0x00000000),
201 cpu_to_le32(0x00000000),
202 cpu_to_le32(0x00000000),
203};
204
205int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
206{
207 struct iwl_bt_coex_cmd cmd = {
208 .max_kill = 5,
209 .bt3_time_t7_value = 1,
210 .bt3_prio_sample_time = 2,
211 .bt3_timer_t2_value = 0xc,
212 };
213 int ret;
214
215 cmd.flags = iwlwifi_mod_params.bt_coex_active ?
216 BT_COEX_NW : BT_COEX_DISABLE;
217 cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
218
219 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
220 BT_VALID_BT_PRIO_BOOST |
221 BT_VALID_MAX_KILL |
222 BT_VALID_3W_TMRS |
223 BT_VALID_KILL_ACK |
224 BT_VALID_KILL_CTS |
225 BT_VALID_REDUCED_TX_POWER |
226 BT_VALID_LUT);
227
228 if (is_loose_coex())
229 memcpy(&cmd.decision_lut, iwl_loose_lookup,
230 sizeof(iwl_tight_lookup));
231 else
232 memcpy(&cmd.decision_lut, iwl_tight_lookup,
233 sizeof(iwl_tight_lookup));
234
235 cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
236 cmd.kill_ack_msk =
237 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
238 cmd.kill_cts_msk =
239 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
240
241 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
242
243 /* go to CALIB state in internal BT-Coex state machine */
244 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
245 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
246 if (ret)
247 return ret;
248
249 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
250 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
251 if (ret)
252 return ret;
253
254 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
255 sizeof(cmd), &cmd);
256}
257
258static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
259 bool reduced_tx_power)
260{
261 enum iwl_bt_kill_msk bt_kill_msk;
262 struct iwl_bt_coex_cmd cmd = {};
263 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
264
265 lockdep_assert_held(&mvm->mutex);
266
267 if (reduced_tx_power) {
268 /* Reduced Tx power has precedence on the type of the profile */
269 bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
270 } else {
271 /* Low latency BT profile is active: give higher prio to BT */
272 if (BT_MBOX_MSG(notif, 3, SCO_STATE) ||
273 BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
274 BT_MBOX_MSG(notif, 3, SNIFF_STATE))
275 bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
276 else
277 bt_kill_msk = BT_KILL_MSK_DEFAULT;
278 }
279
280 IWL_DEBUG_COEX(mvm,
281 "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
282 bt_kill_msk,
283 BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
284 BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
285 BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
286
287 /* Don't send HCMD if there is no update */
288 if (bt_kill_msk == mvm->bt_kill_msk)
289 return 0;
290
291 mvm->bt_kill_msk = bt_kill_msk;
292 cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
293 cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
294 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
295
296 IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
297 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
298 sizeof(cmd), &cmd);
299}
300
301static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
302 bool enable)
303{
304 struct iwl_bt_coex_cmd cmd = {
305 .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
306 .bt_reduced_tx_power = sta_id,
307 };
308 struct ieee80211_sta *sta;
309 struct iwl_mvm_sta *mvmsta;
310
311 /* This can happen if the station has been removed right now */
312 if (sta_id == IWL_MVM_STATION_COUNT)
313 return 0;
314
315 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
316 lockdep_is_held(&mvm->mutex));
317 mvmsta = (void *)sta->drv_priv;
318
319 /* nothing to do */
320 if (mvmsta->bt_reduced_txpower == enable)
321 return 0;
322
323 if (enable)
324 cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
325
326 IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
327 enable ? "en" : "dis", sta_id);
328
329 mvmsta->bt_reduced_txpower = enable;
330
331 /* Send ASYNC since this can be sent from an atomic context */
332 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC,
333 sizeof(cmd), &cmd);
334}
335
336struct iwl_bt_iterator_data {
337 struct iwl_bt_coex_profile_notif *notif;
338 struct iwl_mvm *mvm;
339 u32 num_bss_ifaces;
340 bool reduced_tx_power;
341};
342
343static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
344 struct ieee80211_vif *vif)
345{
346 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
347 struct iwl_bt_iterator_data *data = _data;
348 struct iwl_mvm *mvm = data->mvm;
349 struct ieee80211_chanctx_conf *chanctx_conf;
350 enum ieee80211_smps_mode smps_mode;
351 enum ieee80211_band band;
352 int ave_rssi;
353
354 if (vif->type != NL80211_IFTYPE_STATION)
355 return;
356
357 rcu_read_lock();
358 chanctx_conf = rcu_dereference(vif->chanctx_conf);
359 if (chanctx_conf && chanctx_conf->def.chan)
360 band = chanctx_conf->def.chan->band;
361 else
362 band = -1;
363 rcu_read_unlock();
364
365 smps_mode = IEEE80211_SMPS_AUTOMATIC;
366
367 if (band != IEEE80211_BAND_2GHZ) {
368 ieee80211_request_smps(vif, smps_mode);
369 return;
370 }
371
372 if (data->notif->bt_status)
373 smps_mode = IEEE80211_SMPS_DYNAMIC;
374
375 if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
376 smps_mode = IEEE80211_SMPS_STATIC;
377
378 IWL_DEBUG_COEX(data->mvm,
379 "mac %d: bt_status %d traffic_load %d smps_req %d\n",
380 mvmvif->id, data->notif->bt_status,
381 data->notif->bt_traffic_load, smps_mode);
382
383 ieee80211_request_smps(vif, smps_mode);
384
385 /* don't reduce the Tx power if in loose scheme */
386 if (is_loose_coex())
387 return;
388
389 data->num_bss_ifaces++;
390
391 /* reduced Txpower only if there are open BT connections, so ...*/
392 if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
393 /* ... cancel reduced Tx power ... */
394 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
395 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
396 data->reduced_tx_power = false;
397
398 /* ... and there is no need to get reports on RSSI any more. */
399 ieee80211_disable_rssi_reports(vif);
400 return;
401 }
402
403 ave_rssi = ieee80211_ave_rssi(vif);
404
405 /* if the RSSI isn't valid, fake it is very low */
406 if (!ave_rssi)
407 ave_rssi = -100;
408 if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) {
409 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
410 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
411
412 /*
413 * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
414 * BSS / P2P clients have rssi above threshold.
415 * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
416 * the iteration, if one interface's rssi isn't good enough,
417 * bt_kill_msk will be set to default values.
418 */
419 } else if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) {
420 if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
421 IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
422
423 /*
424 * One interface hasn't rssi above threshold, bt_kill_msk must
425 * be set to default values.
426 */
427 data->reduced_tx_power = false;
428 }
429
430 /* Begin to monitor the RSSI: it may influence the reduced Tx power */
431 ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
432 BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
433}
434
435static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
436{
437 struct iwl_bt_iterator_data data = {
438 .mvm = mvm,
439 .notif = &mvm->last_bt_notif,
440 .reduced_tx_power = true,
441 };
442
443 ieee80211_iterate_active_interfaces_atomic(
444 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
445 iwl_mvm_bt_notif_iterator, &data);
446
447 /*
448 * If there are no BSS / P2P client interfaces, reduced Tx Power is
449 * irrelevant since it is based on the RSSI coming from the beacon.
450 * Use BT_KILL_MSK_DEFAULT in that case.
451 */
452 data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
453
454 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
455 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
456}
457
458/* upon association, the fw will send in BT Coex notification */
459int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
460 struct iwl_rx_cmd_buffer *rxb,
461 struct iwl_device_cmd *dev_cmd)
462{
463 struct iwl_rx_packet *pkt = rxb_addr(rxb);
464 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
465
466
467 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
468 IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
469 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
470 IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
471 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
472 notif->bt_agg_traffic_load);
473 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
474
475 /* remember this notification for future use: rssi fluctuations */
476 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
477
478 iwl_mvm_bt_coex_notif_handle(mvm);
479
480 /*
481 * This is an async handler for a notification, returning anything other
482 * than 0 doesn't make sense even if HCMD failed.
483 */
484 return 0;
485}
486
487static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
488 struct ieee80211_vif *vif)
489{
490 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
491 struct iwl_bt_iterator_data *data = _data;
492 struct iwl_mvm *mvm = data->mvm;
493
494 struct ieee80211_sta *sta;
495 struct iwl_mvm_sta *mvmsta;
496
497 if (vif->type != NL80211_IFTYPE_STATION ||
498 mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
499 return;
500
501 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
502 lockdep_is_held(&mvm->mutex));
503 mvmsta = (void *)sta->drv_priv;
504
505 /*
506 * This interface doesn't support reduced Tx power (because of low
507 * RSSI probably), then set bt_kill_msk to default values.
508 */
509 if (!mvmsta->bt_reduced_txpower)
510 data->reduced_tx_power = false;
511 /* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
512}
513
514void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
515 enum ieee80211_rssi_event rssi_event)
516{
517 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
518 struct iwl_bt_iterator_data data = {
519 .mvm = mvm,
520 .reduced_tx_power = true,
521 };
522 int ret;
523
524 mutex_lock(&mvm->mutex);
525
526 /* Rssi update while not associated ?! */
527 if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
528 goto out_unlock;
529
530 /* No open connection - reports should be disabled */
531 if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
532 goto out_unlock;
533
534 IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
535 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
536
537 /*
538 * Check if rssi is good enough for reduced Tx power, but not in loose
539 * scheme.
540 */
541 if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
542 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
543 false);
544 else
545 ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
546
547 if (ret)
548 IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
549
550 ieee80211_iterate_active_interfaces_atomic(
551 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
552 iwl_mvm_bt_rssi_iterator, &data);
553
554 /*
555 * If there are no BSS / P2P client interfaces, reduced Tx Power is
556 * irrelevant since it is based on the RSSI coming from the beacon.
557 * Use BT_KILL_MSK_DEFAULT in that case.
558 */
559 data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
560
561 if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
562 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
563
564 out_unlock:
565 mutex_unlock(&mvm->mutex);
566}
567
568void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
569{
570 struct ieee80211_chanctx_conf *chanctx_conf;
571 enum ieee80211_band band;
572
573 rcu_read_lock();
574 chanctx_conf = rcu_dereference(vif->chanctx_conf);
575 if (chanctx_conf && chanctx_conf->def.chan)
576 band = chanctx_conf->def.chan->band;
577 else
578 band = -1;
579 rcu_read_unlock();
580
581 /* if we are in 2GHz we will get a notification from the fw */
582 if (band == IEEE80211_BAND_2GHZ)
583 return;
584
585 /* else, we can remove all the constraints */
586 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
587
588 iwl_mvm_bt_coex_notif_handle(mvm);
589}
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 994c8c263dc0..16bbdcc8627a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,8 +62,10 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
65#include <linux/ip.h>
65#include <net/cfg80211.h> 66#include <net/cfg80211.h>
66#include <net/ipv6.h> 67#include <net/ipv6.h>
68#include <net/tcp.h>
67#include "iwl-modparams.h" 69#include "iwl-modparams.h"
68#include "fw-api.h" 70#include "fw-api.h"
69#include "mvm.h" 71#include "mvm.h"
@@ -402,6 +404,233 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
402 sizeof(cmd), &cmd); 404 sizeof(cmd), &cmd);
403} 405}
404 406
407enum iwl_mvm_tcp_packet_type {
408 MVM_TCP_TX_SYN,
409 MVM_TCP_RX_SYNACK,
410 MVM_TCP_TX_DATA,
411 MVM_TCP_RX_ACK,
412 MVM_TCP_RX_WAKE,
413 MVM_TCP_TX_FIN,
414};
415
416static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
417{
418 __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
419 return cpu_to_le16(be16_to_cpu((__force __be16)check));
420}
421
422static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm,
423 struct ieee80211_vif *vif,
424 struct cfg80211_wowlan_tcp *tcp,
425 void *_pkt, u8 *mask,
426 __le16 *pseudo_hdr_csum,
427 enum iwl_mvm_tcp_packet_type ptype)
428{
429 struct {
430 struct ethhdr eth;
431 struct iphdr ip;
432 struct tcphdr tcp;
433 u8 data[];
434 } __packed *pkt = _pkt;
435 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
436 int i;
437
438 pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
439 pkt->ip.version = 4;
440 pkt->ip.ihl = 5;
441 pkt->ip.protocol = IPPROTO_TCP;
442
443 switch (ptype) {
444 case MVM_TCP_TX_SYN:
445 case MVM_TCP_TX_DATA:
446 case MVM_TCP_TX_FIN:
447 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
448 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
449 pkt->ip.ttl = 128;
450 pkt->ip.saddr = tcp->src;
451 pkt->ip.daddr = tcp->dst;
452 pkt->tcp.source = cpu_to_be16(tcp->src_port);
453 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
454 /* overwritten for TX SYN later */
455 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
456 pkt->tcp.window = cpu_to_be16(65000);
457 break;
458 case MVM_TCP_RX_SYNACK:
459 case MVM_TCP_RX_ACK:
460 case MVM_TCP_RX_WAKE:
461 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
462 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
463 pkt->ip.saddr = tcp->dst;
464 pkt->ip.daddr = tcp->src;
465 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
466 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
467 break;
468 default:
469 WARN_ON(1);
470 return;
471 }
472
473 switch (ptype) {
474 case MVM_TCP_TX_SYN:
475 /* firmware assumes 8 option bytes - 8 NOPs for now */
476 memset(pkt->data, 0x01, 8);
477 ip_tot_len += 8;
478 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
479 pkt->tcp.syn = 1;
480 break;
481 case MVM_TCP_TX_DATA:
482 ip_tot_len += tcp->payload_len;
483 memcpy(pkt->data, tcp->payload, tcp->payload_len);
484 pkt->tcp.psh = 1;
485 pkt->tcp.ack = 1;
486 break;
487 case MVM_TCP_TX_FIN:
488 pkt->tcp.fin = 1;
489 pkt->tcp.ack = 1;
490 break;
491 case MVM_TCP_RX_SYNACK:
492 pkt->tcp.syn = 1;
493 pkt->tcp.ack = 1;
494 break;
495 case MVM_TCP_RX_ACK:
496 pkt->tcp.ack = 1;
497 break;
498 case MVM_TCP_RX_WAKE:
499 ip_tot_len += tcp->wake_len;
500 pkt->tcp.psh = 1;
501 pkt->tcp.ack = 1;
502 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
503 break;
504 }
505
506 switch (ptype) {
507 case MVM_TCP_TX_SYN:
508 case MVM_TCP_TX_DATA:
509 case MVM_TCP_TX_FIN:
510 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
511 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
512 break;
513 case MVM_TCP_RX_WAKE:
514 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
515 u8 tmp = tcp->wake_mask[i];
516 mask[i + 6] |= tmp << 6;
517 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
518 mask[i + 7] = tmp >> 2;
519 }
520 /* fall through for ethernet/IP/TCP headers mask */
521 case MVM_TCP_RX_SYNACK:
522 case MVM_TCP_RX_ACK:
523 mask[0] = 0xff; /* match ethernet */
524 /*
525 * match ethernet, ip.version, ip.ihl
526 * the ip.ihl half byte is really masked out by firmware
527 */
528 mask[1] = 0x7f;
529 mask[2] = 0x80; /* match ip.protocol */
530 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
531 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
532 mask[5] = 0x80; /* match tcp flags */
533 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
534 break;
535 };
536
537 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
538 pkt->ip.saddr, pkt->ip.daddr);
539}
540
541static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
542 struct ieee80211_vif *vif,
543 struct cfg80211_wowlan_tcp *tcp)
544{
545 struct iwl_wowlan_remote_wake_config *cfg;
546 struct iwl_host_cmd cmd = {
547 .id = REMOTE_WAKE_CONFIG_CMD,
548 .len = { sizeof(*cfg), },
549 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
550 .flags = CMD_SYNC,
551 };
552 int ret;
553
554 if (!tcp)
555 return 0;
556
557 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
558 if (!cfg)
559 return -ENOMEM;
560 cmd.data[0] = cfg;
561
562 cfg->max_syn_retries = 10;
563 cfg->max_data_retries = 10;
564 cfg->tcp_syn_ack_timeout = 1; /* seconds */
565 cfg->tcp_ack_timeout = 1; /* seconds */
566
567 /* SYN (TX) */
568 iwl_mvm_build_tcp_packet(
569 mvm, vif, tcp, cfg->syn_tx.data, NULL,
570 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
571 MVM_TCP_TX_SYN);
572 cfg->syn_tx.info.tcp_payload_length = 0;
573
574 /* SYN/ACK (RX) */
575 iwl_mvm_build_tcp_packet(
576 mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
577 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
578 MVM_TCP_RX_SYNACK);
579 cfg->synack_rx.info.tcp_payload_length = 0;
580
581 /* KEEPALIVE/ACK (TX) */
582 iwl_mvm_build_tcp_packet(
583 mvm, vif, tcp, cfg->keepalive_tx.data, NULL,
584 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
585 MVM_TCP_TX_DATA);
586 cfg->keepalive_tx.info.tcp_payload_length =
587 cpu_to_le16(tcp->payload_len);
588 cfg->sequence_number_offset = tcp->payload_seq.offset;
589 /* length must be 0..4, the field is little endian */
590 cfg->sequence_number_length = tcp->payload_seq.len;
591 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
592 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
593 if (tcp->payload_tok.len) {
594 cfg->token_offset = tcp->payload_tok.offset;
595 cfg->token_length = tcp->payload_tok.len;
596 cfg->num_tokens =
597 cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
598 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
599 tcp->tokens_size);
600 } else {
601 /* set tokens to max value to almost never run out */
602 cfg->num_tokens = cpu_to_le16(65535);
603 }
604
605 /* ACK (RX) */
606 iwl_mvm_build_tcp_packet(
607 mvm, vif, tcp, cfg->keepalive_ack_rx.data,
608 cfg->keepalive_ack_rx.rx_mask,
609 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
610 MVM_TCP_RX_ACK);
611 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
612
613 /* WAKEUP (RX) */
614 iwl_mvm_build_tcp_packet(
615 mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
616 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
617 MVM_TCP_RX_WAKE);
618 cfg->wake_rx.info.tcp_payload_length =
619 cpu_to_le16(tcp->wake_len);
620
621 /* FIN */
622 iwl_mvm_build_tcp_packet(
623 mvm, vif, tcp, cfg->fin_tx.data, NULL,
624 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
625 MVM_TCP_TX_FIN);
626 cfg->fin_tx.info.tcp_payload_length = 0;
627
628 ret = iwl_mvm_send_cmd(mvm, &cmd);
629 kfree(cfg);
630
631 return ret;
632}
633
405struct iwl_d3_iter_data { 634struct iwl_d3_iter_data {
406 struct iwl_mvm *mvm; 635 struct iwl_mvm *mvm;
407 struct ieee80211_vif *vif; 636 struct ieee80211_vif *vif;
@@ -540,7 +769,14 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
540 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 769 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
541 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 770 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
542 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 771 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
543 struct iwl_d3_manager_config d3_cfg_cmd = {}; 772 struct iwl_d3_manager_config d3_cfg_cmd = {
773 /*
774 * Program the minimum sleep time to 10 seconds, as many
775 * platforms have issues processing a wakeup signal while
776 * still being in the process of suspending.
777 */
778 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
779 };
544 struct wowlan_key_data key_data = { 780 struct wowlan_key_data key_data = {
545 .use_rsc_tsc = false, 781 .use_rsc_tsc = false,
546 .tkip = &tkip_cmd, 782 .tkip = &tkip_cmd,
@@ -637,9 +873,21 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
637 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); 873 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
638 874
639 if (wowlan->rfkill_release) 875 if (wowlan->rfkill_release)
640 d3_cfg_cmd.wakeup_flags |= 876 wowlan_config_cmd.wakeup_filter |=
641 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 877 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
642 878
879 if (wowlan->tcp) {
880 /*
881 * Set the "link change" (really "link lost") flag as well
882 * since that implies losing the TCP connection.
883 */
884 wowlan_config_cmd.wakeup_filter |=
885 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
886 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
887 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
888 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
889 }
890
643 iwl_mvm_cancel_scan(mvm); 891 iwl_mvm_cancel_scan(mvm);
644 892
645 iwl_trans_stop_device(mvm->trans); 893 iwl_trans_stop_device(mvm->trans);
@@ -755,6 +1003,10 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
755 if (ret) 1003 if (ret)
756 goto out; 1004 goto out;
757 1005
1006 ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
1007 if (ret)
1008 goto out;
1009
758 /* must be last -- this switches firmware state */ 1010 /* must be last -- this switches firmware state */
759 ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, 1011 ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
760 sizeof(d3_cfg_cmd), &d3_cfg_cmd); 1012 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
@@ -874,6 +1126,15 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
874 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) 1126 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
875 wakeup.four_way_handshake = true; 1127 wakeup.four_way_handshake = true;
876 1128
1129 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1130 wakeup.tcp_connlost = true;
1131
1132 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1133 wakeup.tcp_nomoretokens = true;
1134
1135 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1136 wakeup.tcp_match = true;
1137
877 if (status->wake_packet_bufsize) { 1138 if (status->wake_packet_bufsize) {
878 int pktsize = le32_to_cpu(status->wake_packet_bufsize); 1139 int pktsize = le32_to_cpu(status->wake_packet_bufsize);
879 int pktlen = le32_to_cpu(status->wake_packet_length); 1140 int pktlen = le32_to_cpu(status->wake_packet_length);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index c1bdb5582126..2053dccefcd6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,12 +69,6 @@ struct iwl_dbgfs_mvm_ctx {
69 struct ieee80211_vif *vif; 69 struct ieee80211_vif *vif;
70}; 70};
71 71
72static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
73{
74 file->private_data = inode->i_private;
75 return 0;
76}
77
78static ssize_t iwl_dbgfs_tx_flush_write(struct file *file, 72static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
79 const char __user *user_buf, 73 const char __user *user_buf,
80 size_t count, loff_t *ppos) 74 size_t count, loff_t *ppos)
@@ -306,10 +300,191 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
306 return count; 300 return count;
307} 301}
308 302
303static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
304 char __user *user_buf,
305 size_t count, loff_t *ppos)
306{
307 struct ieee80211_vif *vif = file->private_data;
308 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
309 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
310 u8 ap_sta_id;
311 struct ieee80211_chanctx_conf *chanctx_conf;
312 char buf[512];
313 int bufsz = sizeof(buf);
314 int pos = 0;
315 int i;
316
317 mutex_lock(&mvm->mutex);
318
319 ap_sta_id = mvmvif->ap_sta_id;
320
321 pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
322 mvmvif->id, mvmvif->color);
323 pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
324 vif->bss_conf.bssid);
325 pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
326 for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) {
327 pos += scnprintf(buf+pos, bufsz-pos,
328 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
329 i, mvmvif->queue_params[i].txop,
330 mvmvif->queue_params[i].cw_min,
331 mvmvif->queue_params[i].cw_max,
332 mvmvif->queue_params[i].aifs,
333 mvmvif->queue_params[i].uapsd);
334 }
335
336 if (vif->type == NL80211_IFTYPE_STATION &&
337 ap_sta_id != IWL_MVM_STATION_COUNT) {
338 struct ieee80211_sta *sta;
339 struct iwl_mvm_sta *mvm_sta;
340
341 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
342 lockdep_is_held(&mvm->mutex));
343 mvm_sta = (void *)sta->drv_priv;
344 pos += scnprintf(buf+pos, bufsz-pos,
345 "ap_sta_id %d - reduced Tx power %d\n",
346 ap_sta_id, mvm_sta->bt_reduced_txpower);
347 }
348
349 rcu_read_lock();
350 chanctx_conf = rcu_dereference(vif->chanctx_conf);
351 if (chanctx_conf) {
352 pos += scnprintf(buf+pos, bufsz-pos,
353 "idle rx chains %d, active rx chains: %d\n",
354 chanctx_conf->rx_chains_static,
355 chanctx_conf->rx_chains_dynamic);
356 }
357 rcu_read_unlock();
358
359 mutex_unlock(&mvm->mutex);
360
361 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
362}
363
364#define BT_MBOX_MSG(_notif, _num, _field) \
365 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
366 >> BT_MBOX##_num##_##_field##_POS)
367
368
369#define BT_MBOX_PRINT(_num, _field, _end) \
370 pos += scnprintf(buf + pos, bufsz - pos, \
371 "\t%s: %d%s", \
372 #_field, \
373 BT_MBOX_MSG(notif, _num, _field), \
374 true ? "\n" : ", ");
375
376static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
377 size_t count, loff_t *ppos)
378{
379 struct iwl_mvm *mvm = file->private_data;
380 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
381 char *buf;
382 int ret, pos = 0, bufsz = sizeof(char) * 1024;
383
384 buf = kmalloc(bufsz, GFP_KERNEL);
385 if (!buf)
386 return -ENOMEM;
387
388 mutex_lock(&mvm->mutex);
389
390 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
391
392 BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
393 BT_MBOX_PRINT(0, LE_PROF1, false);
394 BT_MBOX_PRINT(0, LE_PROF2, false);
395 BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
396 BT_MBOX_PRINT(0, CHL_SEQ_N, false);
397 BT_MBOX_PRINT(0, INBAND_S, false);
398 BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
399 BT_MBOX_PRINT(0, LE_SCAN, false);
400 BT_MBOX_PRINT(0, LE_ADV, false);
401 BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
402 BT_MBOX_PRINT(0, OPEN_CON_1, true);
403
404 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
405
406 BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
407 BT_MBOX_PRINT(1, IP_SR, false);
408 BT_MBOX_PRINT(1, LE_MSTR, false);
409 BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
410 BT_MBOX_PRINT(1, MSG_TYPE, false);
411 BT_MBOX_PRINT(1, SSN, true);
412
413 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
414
415 BT_MBOX_PRINT(2, SNIFF_ACT, false);
416 BT_MBOX_PRINT(2, PAG, false);
417 BT_MBOX_PRINT(2, INQUIRY, false);
418 BT_MBOX_PRINT(2, CONN, false);
419 BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
420 BT_MBOX_PRINT(2, DISC, false);
421 BT_MBOX_PRINT(2, SCO_TX_ACT, false);
422 BT_MBOX_PRINT(2, SCO_RX_ACT, false);
423 BT_MBOX_PRINT(2, ESCO_RE_TX, false);
424 BT_MBOX_PRINT(2, SCO_DURATION, true);
425
426 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
427
428 BT_MBOX_PRINT(3, SCO_STATE, false);
429 BT_MBOX_PRINT(3, SNIFF_STATE, false);
430 BT_MBOX_PRINT(3, A2DP_STATE, false);
431 BT_MBOX_PRINT(3, ACL_STATE, false);
432 BT_MBOX_PRINT(3, MSTR_STATE, false);
433 BT_MBOX_PRINT(3, OBX_STATE, false);
434 BT_MBOX_PRINT(3, OPEN_CON_2, false);
435 BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
436 BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
437 BT_MBOX_PRINT(3, INBAND_P, false);
438 BT_MBOX_PRINT(3, MSG_TYPE_2, false);
439 BT_MBOX_PRINT(3, SSN_2, false);
440 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
441
442 pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
443 notif->bt_status);
444 pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
445 notif->bt_open_conn);
446 pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
447 notif->bt_traffic_load);
448 pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
449 notif->bt_agg_traffic_load);
450 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
451 notif->bt_ci_compliance);
452
453 mutex_unlock(&mvm->mutex);
454
455 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
456 kfree(buf);
457
458 return ret;
459}
460#undef BT_MBOX_PRINT
461
462static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
463 const char __user *user_buf,
464 size_t count, loff_t *ppos)
465{
466 struct iwl_mvm *mvm = file->private_data;
467 bool restart_fw = iwlwifi_mod_params.restart_fw;
468 int ret;
469
470 iwlwifi_mod_params.restart_fw = true;
471
472 mutex_lock(&mvm->mutex);
473
474 /* take the return value to make compiler happy - it will fail anyway */
475 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
476
477 mutex_unlock(&mvm->mutex);
478
479 iwlwifi_mod_params.restart_fw = restart_fw;
480
481 return count;
482}
483
309#define MVM_DEBUGFS_READ_FILE_OPS(name) \ 484#define MVM_DEBUGFS_READ_FILE_OPS(name) \
310static const struct file_operations iwl_dbgfs_##name##_ops = { \ 485static const struct file_operations iwl_dbgfs_##name##_ops = { \
311 .read = iwl_dbgfs_##name##_read, \ 486 .read = iwl_dbgfs_##name##_read, \
312 .open = iwl_dbgfs_open_file_generic, \ 487 .open = simple_open, \
313 .llseek = generic_file_llseek, \ 488 .llseek = generic_file_llseek, \
314} 489}
315 490
@@ -317,14 +492,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
317static const struct file_operations iwl_dbgfs_##name##_ops = { \ 492static const struct file_operations iwl_dbgfs_##name##_ops = { \
318 .write = iwl_dbgfs_##name##_write, \ 493 .write = iwl_dbgfs_##name##_write, \
319 .read = iwl_dbgfs_##name##_read, \ 494 .read = iwl_dbgfs_##name##_read, \
320 .open = iwl_dbgfs_open_file_generic, \ 495 .open = simple_open, \
321 .llseek = generic_file_llseek, \ 496 .llseek = generic_file_llseek, \
322}; 497};
323 498
324#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \ 499#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
325static const struct file_operations iwl_dbgfs_##name##_ops = { \ 500static const struct file_operations iwl_dbgfs_##name##_ops = { \
326 .write = iwl_dbgfs_##name##_write, \ 501 .write = iwl_dbgfs_##name##_write, \
327 .open = iwl_dbgfs_open_file_generic, \ 502 .open = simple_open, \
328 .llseek = generic_file_llseek, \ 503 .llseek = generic_file_llseek, \
329}; 504};
330 505
@@ -345,8 +520,13 @@ MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
345MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain); 520MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
346MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram); 521MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
347MVM_DEBUGFS_READ_FILE_OPS(stations); 522MVM_DEBUGFS_READ_FILE_OPS(stations);
523MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
348MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); 524MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
349MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); 525MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
526MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
527
528/* Interface specific debugfs entries */
529MVM_DEBUGFS_READ_FILE_OPS(mac_params);
350 530
351int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) 531int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
352{ 532{
@@ -358,8 +538,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
358 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); 538 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
359 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); 539 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
360 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); 540 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
541 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
361 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); 542 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
362 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); 543 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
544 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
363 545
364 /* 546 /*
365 * Create a symlink with mac80211. It will be removed when mac80211 547 * Create a symlink with mac80211. It will be removed when mac80211
@@ -376,3 +558,58 @@ err:
376 IWL_ERR(mvm, "Can't create the mvm debugfs directory\n"); 558 IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
377 return -ENOMEM; 559 return -ENOMEM;
378} 560}
561
562void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
563{
564 struct dentry *dbgfs_dir = vif->debugfs_dir;
565 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
566 char buf[100];
567
568 if (!dbgfs_dir)
569 return;
570
571 mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
572 mvmvif->dbgfs_data = mvm;
573
574 if (!mvmvif->dbgfs_dir) {
575 IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
576 dbgfs_dir->d_name.name);
577 return;
578 }
579
580 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
581 S_IRUSR);
582
583 /*
584 * Create symlink for convenience pointing to interface specific
585 * debugfs entries for the driver. For example, under
586 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
587 * find
588 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
589 */
590 snprintf(buf, 100, "../../../%s/%s/%s/%s",
591 dbgfs_dir->d_parent->d_parent->d_name.name,
592 dbgfs_dir->d_parent->d_name.name,
593 dbgfs_dir->d_name.name,
594 mvmvif->dbgfs_dir->d_name.name);
595
596 mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
597 mvm->debugfs_dir, buf);
598 if (!mvmvif->dbgfs_slink)
599 IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
600 dbgfs_dir->d_name.name);
601 return;
602err:
603 IWL_ERR(mvm, "Can't create debugfs entity\n");
604}
605
606void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
607{
608 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
609
610 debugfs_remove(mvmvif->dbgfs_slink);
611 mvmvif->dbgfs_slink = NULL;
612
613 debugfs_remove_recursive(mvmvif->dbgfs_dir);
614 mvmvif->dbgfs_dir = NULL;
615}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
new file mode 100644
index 000000000000..05c61d6f384e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -0,0 +1,319 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __fw_api_bt_coex_h__
64#define __fw_api_bt_coex_h__
65
66#include <linux/types.h>
67#include <linux/bitops.h>
68
69#define BITS(nb) (BIT(nb) - 1)
70
71/**
72 * enum iwl_bt_coex_flags - flags for BT_COEX command
73 * @BT_CH_PRIMARY_EN:
74 * @BT_CH_SECONDARY_EN:
75 * @BT_NOTIF_COEX_OFF:
76 * @BT_COEX_MODE_POS:
77 * @BT_COEX_MODE_MSK:
78 * @BT_COEX_DISABLE:
79 * @BT_COEX_2W:
80 * @BT_COEX_3W:
81 * @BT_COEX_NW:
82 * @BT_USE_DEFAULTS:
83 * @BT_SYNC_2_BT_DISABLE:
84 * @BT_COEX_CORUNNING_TBL_EN:
85 */
86enum iwl_bt_coex_flags {
87 BT_CH_PRIMARY_EN = BIT(0),
88 BT_CH_SECONDARY_EN = BIT(1),
89 BT_NOTIF_COEX_OFF = BIT(2),
90 BT_COEX_MODE_POS = 3,
91 BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
92 BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
93 BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
94 BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
95 BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
96 BT_USE_DEFAULTS = BIT(6),
97 BT_SYNC_2_BT_DISABLE = BIT(7),
98 /*
99 * For future use - when the flags will be enlarged
100 * BT_COEX_CORUNNING_TBL_EN = BIT(8),
101 */
102};
103
104/*
105 * indicates what has changed in the BT_COEX command.
106 */
107enum iwl_bt_coex_valid_bit_msk {
108 BT_VALID_ENABLE = BIT(0),
109 BT_VALID_BT_PRIO_BOOST = BIT(1),
110 BT_VALID_MAX_KILL = BIT(2),
111 BT_VALID_3W_TMRS = BIT(3),
112 BT_VALID_KILL_ACK = BIT(4),
113 BT_VALID_KILL_CTS = BIT(5),
114 BT_VALID_REDUCED_TX_POWER = BIT(6),
115 BT_VALID_LUT = BIT(7),
116 BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
117 BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
118 BT_VALID_MULTI_PRIO_LUT = BIT(10),
119 BT_VALID_TRM_KICK_FILTER = BIT(11),
120 BT_VALID_CORUN_LUT_20 = BIT(12),
121 BT_VALID_CORUN_LUT_40 = BIT(13),
122 BT_VALID_ANT_ISOLATION = BIT(14),
123 BT_VALID_ANT_ISOLATION_THRS = BIT(15),
124 /*
125 * For future use - when the valid flags will be enlarged
126 * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
127 * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
128 */
129};
130
131/**
132 * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
133 * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
134 * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
135 *
136 * This mechanism allows to have BT and WiFi run concurrently. Since WiFi
137 * reduces its Tx power, it can work along with BT, hence reducing the amount
138 * of WiFi frames being killed by BT.
139 */
140enum iwl_bt_reduced_tx_power {
141 BT_REDUCED_TX_POWER_CTL = BIT(0),
142 BT_REDUCED_TX_POWER_DATA = BIT(1),
143};
144
145#define BT_COEX_LUT_SIZE (12)
146
147/**
148 * struct iwl_bt_coex_cmd - bt coex configuration command
149 * @flags:&enum iwl_bt_coex_flags
150 * @lead_time:
151 * @max_kill:
152 * @bt3_time_t7_value:
153 * @kill_ack_msk:
154 * @kill_cts_msk:
155 * @bt3_prio_sample_time:
156 * @bt3_timer_t2_value:
157 * @bt4_reaction_time:
158 * @decision_lut[12]:
159 * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
160 * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
161 * @bt_prio_boost: values for PTA boost register
162 * @wifi_tx_prio_boost: SW boost of wifi tx priority
163 * @wifi_rx_prio_boost: SW boost of wifi rx priority
164 *
165 * The structure is used for the BT_COEX command.
166 */
167struct iwl_bt_coex_cmd {
168 u8 flags;
169 u8 lead_time;
170 u8 max_kill;
171 u8 bt3_time_t7_value;
172 __le32 kill_ack_msk;
173 __le32 kill_cts_msk;
174 u8 bt3_prio_sample_time;
175 u8 bt3_timer_t2_value;
176 __le16 bt4_reaction_time;
177 __le32 decision_lut[BT_COEX_LUT_SIZE];
178 u8 bt_reduced_tx_power;
179 u8 reserved;
180 __le16 valid_bit_msk;
181 __le32 bt_prio_boost;
182 u8 reserved2;
183 u8 wifi_tx_prio_boost;
184 __le16 wifi_rx_prio_boost;
185} __packed; /* BT_COEX_CMD_API_S_VER_3 */
186
187#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
188 BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
189 BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
190
191enum iwl_bt_mxbox_dw0 {
192 BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
193 BT_MBOX(0, LE_PROF1, 3, 1),
194 BT_MBOX(0, LE_PROF2, 4, 1),
195 BT_MBOX(0, LE_PROF_OTHER, 5, 1),
196 BT_MBOX(0, CHL_SEQ_N, 8, 4),
197 BT_MBOX(0, INBAND_S, 13, 1),
198 BT_MBOX(0, LE_MIN_RSSI, 16, 4),
199 BT_MBOX(0, LE_SCAN, 20, 1),
200 BT_MBOX(0, LE_ADV, 21, 1),
201 BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
202 BT_MBOX(0, OPEN_CON_1, 28, 2),
203};
204
205enum iwl_bt_mxbox_dw1 {
206 BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
207 BT_MBOX(1, IP_SR, 4, 1),
208 BT_MBOX(1, LE_MSTR, 5, 1),
209 BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
210 BT_MBOX(1, MSG_TYPE, 16, 3),
211 BT_MBOX(1, SSN, 19, 2),
212};
213
214enum iwl_bt_mxbox_dw2 {
215 BT_MBOX(2, SNIFF_ACT, 0, 3),
216 BT_MBOX(2, PAG, 3, 1),
217 BT_MBOX(2, INQUIRY, 4, 1),
218 BT_MBOX(2, CONN, 5, 1),
219 BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
220 BT_MBOX(2, DISC, 13, 1),
221 BT_MBOX(2, SCO_TX_ACT, 16, 2),
222 BT_MBOX(2, SCO_RX_ACT, 18, 2),
223 BT_MBOX(2, ESCO_RE_TX, 20, 2),
224 BT_MBOX(2, SCO_DURATION, 24, 6),
225};
226
227enum iwl_bt_mxbox_dw3 {
228 BT_MBOX(3, SCO_STATE, 0, 1),
229 BT_MBOX(3, SNIFF_STATE, 1, 1),
230 BT_MBOX(3, A2DP_STATE, 2, 1),
231 BT_MBOX(3, ACL_STATE, 3, 1),
232 BT_MBOX(3, MSTR_STATE, 4, 1),
233 BT_MBOX(3, OBX_STATE, 5, 1),
234 BT_MBOX(3, OPEN_CON_2, 8, 2),
235 BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
236 BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
237 BT_MBOX(3, INBAND_P, 13, 1),
238 BT_MBOX(3, MSG_TYPE_2, 16, 3),
239 BT_MBOX(3, SSN_2, 19, 2),
240 BT_MBOX(3, UPDATE_REQUEST, 21, 1),
241};
242
243#define BT_MBOX_MSG(_notif, _num, _field) \
244 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
245 >> BT_MBOX##_num##_##_field##_POS)
246
247/**
248 * struct iwl_bt_coex_profile_notif - notification about BT coex
249 * @mbox_msg: message from BT to WiFi
250 * @:bt_status: 0 - off, 1 - on
251 * @:bt_open_conn: number of BT connections open
252 * @:bt_traffic_load: load of BT traffic
253 * @:bt_agg_traffic_load: aggregated load of BT traffic
254 * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
255 */
256struct iwl_bt_coex_profile_notif {
257 __le32 mbox_msg[4];
258 u8 bt_status;
259 u8 bt_open_conn;
260 u8 bt_traffic_load;
261 u8 bt_agg_traffic_load;
262 u8 bt_ci_compliance;
263 u8 reserved[3];
264} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
265
266enum iwl_bt_coex_prio_table_event {
267 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
268 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
269 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
270 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
271 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
272 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
273 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
274 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
275 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
276 BT_COEX_PRIO_TBL_EVT_IDLE = 9,
277 BT_COEX_PRIO_TBL_EVT_MAX = 16,
278}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
279
280enum iwl_bt_coex_prio_table_prio {
281 BT_COEX_PRIO_TBL_DISABLED = 0,
282 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
283 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
284 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
285 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
286 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
287 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
288 BT_COEX_PRIO_TBL_MAX = 8,
289}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
290
291#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
292#define BT_COEX_PRIO_TBL_PRIO_POS (1)
293#define BT_COEX_PRIO_TBL_RESERVED_POS (4)
294
295/**
296 * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
297 * @prio_tbl:
298 */
299struct iwl_bt_coex_prio_tbl_cmd {
300 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
301} __packed;
302
303enum iwl_bt_coex_env_action {
304 BT_COEX_ENV_CLOSE = 0,
305 BT_COEX_ENV_OPEN = 1,
306}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
307
308/**
309 * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
310 * @action: enum %iwl_bt_coex_env_action
311 * @type: enum %iwl_bt_coex_prio_table_event
312 */
313struct iwl_bt_coex_prot_env_cmd {
314 u8 action; /* 0 = closed, 1 = open */
315 u8 type; /* 0 .. 15 */
316 u8 reserved[2];
317} __packed;
318
319#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index cf6f9a02fb74..51e015d1dfb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -258,7 +258,7 @@ enum iwl_wowlan_wakeup_reason {
258 IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), 258 IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
259 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), 259 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
260 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), 260 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
261 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), 261 /* BIT(11) reserved */
262 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), 262 IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
263}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ 263}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
264 264
@@ -277,6 +277,55 @@ struct iwl_wowlan_status {
277 u8 wake_packet[]; /* can be truncated from _length to _bufsize */ 277 u8 wake_packet[]; /* can be truncated from _length to _bufsize */
278} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */ 278} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
279 279
280#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
281#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
282#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
283
284struct iwl_tcp_packet_info {
285 __le16 tcp_pseudo_header_checksum;
286 __le16 tcp_payload_length;
287} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
288
289struct iwl_tcp_packet {
290 struct iwl_tcp_packet_info info;
291 u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
292 u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
293} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
294
295struct iwl_remote_wake_packet {
296 struct iwl_tcp_packet_info info;
297 u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
298 u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
299} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
300
301struct iwl_wowlan_remote_wake_config {
302 __le32 connection_max_time; /* unused */
303 /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
304 u8 max_syn_retries;
305 u8 max_data_retries;
306 u8 tcp_syn_ack_timeout;
307 u8 tcp_ack_timeout;
308
309 struct iwl_tcp_packet syn_tx;
310 struct iwl_tcp_packet synack_rx;
311 struct iwl_tcp_packet keepalive_ack_rx;
312 struct iwl_tcp_packet fin_tx;
313
314 struct iwl_remote_wake_packet keepalive_tx;
315 struct iwl_remote_wake_packet wake_rx;
316
317 /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
318 u8 sequence_number_offset;
319 u8 sequence_number_length;
320 u8 token_offset;
321 u8 token_length;
322 /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
323 __le32 initial_sequence_number;
324 __le16 keepalive_interval;
325 __le16 num_tokens;
326 u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
327} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
328
280/* TODO: NetDetect API */ 329/* TODO: NetDetect API */
281 330
282#endif /* __fw_api_d3_h__ */ 331#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index ae39b7dfda7b..d68640ea41d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index be36b7604b7f..81fe45f46be7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -68,73 +68,53 @@
68 68
69/** 69/**
70 * enum iwl_scan_flags - masks for power table command flags 70 * enum iwl_scan_flags - masks for power table command flags
71 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
72 * receiver and transmitter. '0' - does not allow.
71 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, 73 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
72 * '1' Driver enables PM (use rest of parameters) 74 * '1' Driver enables PM (use rest of parameters)
73 * @POWER_FLAGS_SLEEP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, 75 * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM,
74 * '1' PM could sleep over DTIM till listen Interval. 76 * '1' PM could sleep over DTIM till listen Interval.
75 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
76 * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
77 * access categories are both delivery and trigger enabled.
78 * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
79 * PBW Snoozing enabled
80 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask 77 * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
78 * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
81*/ 79*/
82enum iwl_power_flags { 80enum iwl_power_flags {
83 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(0), 81 POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
84 POWER_FLAGS_SLEEP_OVER_DTIM_MSK = BIT(1), 82 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
85 POWER_FLAGS_LPRX_ENA_MSK = BIT(2), 83 POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
86 POWER_FLAGS_SNOOZE_ENA_MSK = BIT(3), 84 POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
87 POWER_FLAGS_BT_SCO_ENA = BIT(4), 85 POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
88 POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(5)
89}; 86};
90 87
88#define IWL_POWER_VEC_SIZE 5
89
91/** 90/**
92 * struct iwl_powertable_cmd - Power Table Command 91 * struct iwl_powertable_cmd - Power Table Command
93 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 92 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
94 * 93 *
95 * @id_and_color: MAC contex identifier
96 * @action: Action on context - no action, add new,
97 * modify existent, remove
98 * @flags: Power table command flags from POWER_FLAGS_* 94 * @flags: Power table command flags from POWER_FLAGS_*
99 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. 95 * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
100 * Minimum allowed:- 3 * DTIM 96 * Minimum allowed:- 3 * DTIM. Keep alive period must be
97 * set regardless of power scheme or current power state.
98 * FW use this value also when PM is disabled.
101 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to 99 * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
102 * PSM transition - legacy PM 100 * PSM transition - legacy PM
103 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to 101 * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
104 * PSM transition - legacy PM 102 * PSM transition - legacy PM
105 * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to 103 * @sleep_interval: not in use
106 * PSM transition - uAPSD 104 * @keep_alive_beacons: not in use
107 * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
108 * PSM transition - uAPSD
109 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. 105 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
110 * Default: 80dbm 106 * Default: 80dbm
111 * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
112 * @snooze_interval: TBD
113 * @snooze_window: TBD
114 * @snooze_step: TBD
115 * @qndp_tid: TBD
116 * @uapsd_ac_flags: TBD
117 * @uapsd_max_sp: TBD
118 */ 107 */
119struct iwl_powertable_cmd { 108struct iwl_powertable_cmd {
120 /* COMMON_INDEX_HDR_API_S_VER_1 */ 109 /* PM_POWER_TABLE_CMD_API_S_VER_5 */
121 __le32 id_and_color;
122 __le32 action;
123 __le16 flags; 110 __le16 flags;
124 u8 reserved; 111 u8 keep_alive_seconds;
125 __le16 keep_alive_seconds; 112 u8 debug_flags;
126 __le32 rx_data_timeout; 113 __le32 rx_data_timeout;
127 __le32 tx_data_timeout; 114 __le32 tx_data_timeout;
128 __le32 rx_data_timeout_uapsd; 115 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
129 __le32 tx_data_timeout_uapsd; 116 __le32 keep_alive_beacons;
130 u8 lprx_rssi_threshold; 117 __le32 lprx_rssi_threshold;
131 u8 num_skip_dtim;
132 __le16 snooze_interval;
133 __le16 snooze_window;
134 u8 snooze_step;
135 u8 qndp_tid;
136 u8 uapsd_ac_flags;
137 u8 uapsd_max_sp;
138} __packed; 118} __packed;
139 119
140#endif 120#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index aa3474d08231..fdd33bc0a594 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 670ac8f95e26..b60d14151721 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 0acb53dda22d..a30691a8a85b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 2677914bf0a6..007a93b25bd7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -537,6 +537,12 @@ struct iwl_mac_beacon_cmd {
537 struct ieee80211_hdr frame[0]; 537 struct ieee80211_hdr frame[0];
538} __packed; 538} __packed;
539 539
540struct iwl_beacon_notif {
541 struct iwl_mvm_tx_resp beacon_notify_hdr;
542 __le64 tsf;
543 __le32 ibss_mgr_status;
544} __packed;
545
540/** 546/**
541 * enum iwl_dump_control - dump (flush) control flags 547 * enum iwl_dump_control - dump (flush) control flags
542 * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty 548 * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 2adb61f103f4..191dcae8ba47 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -70,6 +70,7 @@
70#include "fw-api-mac.h" 70#include "fw-api-mac.h"
71#include "fw-api-power.h" 71#include "fw-api-power.h"
72#include "fw-api-d3.h" 72#include "fw-api-d3.h"
73#include "fw-api-bt-coex.h"
73 74
74/* queue and FIFO numbers by usage */ 75/* queue and FIFO numbers by usage */
75enum { 76enum {
@@ -150,8 +151,10 @@ enum {
150 151
151 SET_CALIB_DEFAULT_CMD = 0x8e, 152 SET_CALIB_DEFAULT_CMD = 0x8e,
152 153
154 BEACON_NOTIFICATION = 0x90,
153 BEACON_TEMPLATE_CMD = 0x91, 155 BEACON_TEMPLATE_CMD = 0x91,
154 TX_ANT_CONFIGURATION_CMD = 0x98, 156 TX_ANT_CONFIGURATION_CMD = 0x98,
157 BT_CONFIG = 0x9b,
155 STATISTICS_NOTIFICATION = 0x9d, 158 STATISTICS_NOTIFICATION = 0x9d,
156 159
157 /* RF-KILL commands and notifications */ 160 /* RF-KILL commands and notifications */
@@ -162,6 +165,11 @@ enum {
162 REPLY_RX_MPDU_CMD = 0xc1, 165 REPLY_RX_MPDU_CMD = 0xc1,
163 BA_NOTIF = 0xc5, 166 BA_NOTIF = 0xc5,
164 167
168 /* BT Coex */
169 BT_COEX_PRIO_TABLE = 0xcc,
170 BT_COEX_PROT_ENV = 0xcd,
171 BT_PROFILE_NOTIFICATION = 0xce,
172
165 REPLY_DEBUG_CMD = 0xf0, 173 REPLY_DEBUG_CMD = 0xf0,
166 DEBUG_LOG_MSG = 0xf7, 174 DEBUG_LOG_MSG = 0xf7,
167 175
@@ -271,38 +279,7 @@ enum {
271 NVM_ACCESS_TARGET_EEPROM = 2, 279 NVM_ACCESS_TARGET_EEPROM = 2,
272}; 280};
273 281
274/** 282/* Section types for NVM_ACCESS_CMD */
275 * struct iwl_nvm_access_cmd_ver1 - Request the device to send the NVM.
276 * @op_code: 0 - read, 1 - write.
277 * @target: NVM_ACCESS_TARGET_*. should be 0 for read.
278 * @cache_refresh: 0 - None, 1- NVM.
279 * @offset: offset in the nvm data.
280 * @length: of the chunk.
281 * @data: empty on read, the NVM chunk on write
282 */
283struct iwl_nvm_access_cmd_ver1 {
284 u8 op_code;
285 u8 target;
286 u8 cache_refresh;
287 u8 reserved;
288 __le16 offset;
289 __le16 length;
290 u8 data[];
291} __packed; /* NVM_ACCESS_CMD_API_S_VER_1 */
292
293/**
294 * struct iwl_nvm_access_resp_ver1 - response to NVM_ACCESS_CMD
295 * @offset: the offset in the nvm data
296 * @length: of the chunk
297 * @data: the nvm chunk on when NVM_ACCESS_CMD was read, nothing on write
298 */
299struct iwl_nvm_access_resp_ver1 {
300 __le16 offset;
301 __le16 length;
302 u8 data[];
303} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_1 */
304
305/* Section types for NVM_ACCESS_CMD version 2 */
306enum { 283enum {
307 NVM_SECTION_TYPE_HW = 0, 284 NVM_SECTION_TYPE_HW = 0,
308 NVM_SECTION_TYPE_SW, 285 NVM_SECTION_TYPE_SW,
@@ -323,7 +300,7 @@ enum {
323 * @length: in bytes, to read/write 300 * @length: in bytes, to read/write
324 * @data: if write operation, the data to write. On read its empty 301 * @data: if write operation, the data to write. On read its empty
325 */ 302 */
326struct iwl_nvm_access_cmd_ver2 { 303struct iwl_nvm_access_cmd {
327 u8 op_code; 304 u8 op_code;
328 u8 target; 305 u8 target;
329 __le16 type; 306 __le16 type;
@@ -340,7 +317,7 @@ struct iwl_nvm_access_cmd_ver2 {
340 * @status: 0 for success, fail otherwise 317 * @status: 0 for success, fail otherwise
341 * @data: if read operation, the data returned. Empty on write. 318 * @data: if read operation, the data returned. Empty on write.
342 */ 319 */
343struct iwl_nvm_access_resp_ver2 { 320struct iwl_nvm_access_resp {
344 __le16 offset; 321 __le16 offset;
345 __le16 length; 322 __le16 length;
346 __le16 type; 323 __le16 type;
@@ -503,15 +480,34 @@ enum {
503 TE_DEP_TSF = 2, 480 TE_DEP_TSF = 2,
504 TE_EVENT_SOCIOPATHIC = 4, 481 TE_EVENT_SOCIOPATHIC = 4,
505}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ 482}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
506 483/*
507/* When to send Time Event notifications and to whom (internal = FW) */ 484 * Supported Time event notifications configuration.
485 * A notification (both event and fragment) includes a status indicating weather
486 * the FW was able to schedule the event or not. For fragment start/end
487 * notification the status is always success. There is no start/end fragment
488 * notification for monolithic events.
489 *
490 * @TE_NOTIF_NONE: no notifications
491 * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
492 * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
493 * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
494 * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
495 * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
496 * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
497 * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
498 * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
499 */
508enum { 500enum {
509 TE_NOTIF_NONE = 0, 501 TE_NOTIF_NONE = 0,
510 TE_NOTIF_HOST_START = 0x1, 502 TE_NOTIF_HOST_EVENT_START = 0x1,
511 TE_NOTIF_HOST_END = 0x2, 503 TE_NOTIF_HOST_EVENT_END = 0x2,
512 TE_NOTIF_INTERNAL_START = 0x4, 504 TE_NOTIF_INTERNAL_EVENT_START = 0x4,
513 TE_NOTIF_INTERNAL_END = 0x8 505 TE_NOTIF_INTERNAL_EVENT_END = 0x8,
514}; /* MAC_EVENT_ACTION_API_E_VER_1 */ 506 TE_NOTIF_HOST_FRAG_START = 0x10,
507 TE_NOTIF_HOST_FRAG_END = 0x20,
508 TE_NOTIF_INTERNAL_FRAG_START = 0x40,
509 TE_NOTIF_INTERNAL_FRAG_END = 0x80
510}; /* MAC_EVENT_ACTION_API_E_VER_2 */
515 511
516/* 512/*
517 * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed. 513 * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -794,6 +790,7 @@ struct iwl_phy_context_cmd {
794 * @byte_count: frame's byte-count 790 * @byte_count: frame's byte-count
795 * @frame_time: frame's time on the air, based on byte count and frame rate 791 * @frame_time: frame's time on the air, based on byte count and frame rate
796 * calculation 792 * calculation
793 * @mac_active_msk: what MACs were active when the frame was received
797 * 794 *
798 * Before each Rx, the device sends this data. It contains PHY information 795 * Before each Rx, the device sends this data. It contains PHY information
799 * about the reception of the packet. 796 * about the reception of the packet.
@@ -811,7 +808,7 @@ struct iwl_rx_phy_info {
811 __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; 808 __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
812 __le32 rate_n_flags; 809 __le32 rate_n_flags;
813 __le32 byte_count; 810 __le32 byte_count;
814 __le16 reserved2; 811 __le16 mac_active_msk;
815 __le16 frame_time; 812 __le16 frame_time;
816} __packed; 813} __packed;
817 814
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 500f818dba04..e18c92dd60ec 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -114,7 +114,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
114 .valid = cpu_to_le32(valid_tx_ant), 114 .valid = cpu_to_le32(valid_tx_ant),
115 }; 115 };
116 116
117 IWL_DEBUG_HC(mvm, "select valid tx ant: %u\n", valid_tx_ant); 117 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
118 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC, 118 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
119 sizeof(tx_ant_cmd), &tx_ant_cmd); 119 sizeof(tx_ant_cmd), &tx_ant_cmd);
120} 120}
@@ -134,9 +134,10 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
134 alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr); 134 alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
135 135
136 alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK; 136 alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
137 IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", 137 IWL_DEBUG_FW(mvm,
138 "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
138 le16_to_cpu(palive->status), palive->ver_type, 139 le16_to_cpu(palive->status), palive->ver_type,
139 palive->ver_subtype); 140 palive->ver_subtype, palive->flags);
140 141
141 return true; 142 return true;
142} 143}
@@ -309,6 +310,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
309 goto error; 310 goto error;
310 } 311 }
311 312
313 ret = iwl_send_bt_prio_tbl(mvm);
314 if (ret)
315 goto error;
316
312 if (read_nvm) { 317 if (read_nvm) {
313 /* Read nvm */ 318 /* Read nvm */
314 ret = iwl_nvm_init(mvm); 319 ret = iwl_nvm_init(mvm);
@@ -322,16 +327,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
322 WARN_ON(ret); 327 WARN_ON(ret);
323 328
324 /* Send TX valid antennas before triggering calibrations */ 329 /* Send TX valid antennas before triggering calibrations */
325 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); 330 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
326 if (ret) 331 if (ret)
327 goto error; 332 goto error;
328 333
329 /* WkP doesn't have all calibrations, need to set default values */ 334 /* need to set default values */
330 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 335 ret = iwl_set_default_calibrations(mvm);
331 ret = iwl_set_default_calibrations(mvm); 336 if (ret)
332 if (ret) 337 goto error;
333 goto error;
334 }
335 338
336 /* 339 /*
337 * Send phy configurations command to init uCode 340 * Send phy configurations command to init uCode
@@ -410,7 +413,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
410 goto error; 413 goto error;
411 } 414 }
412 415
413 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); 416 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
417 if (ret)
418 goto error;
419
420 ret = iwl_send_bt_prio_tbl(mvm);
421 if (ret)
422 goto error;
423
424 ret = iwl_send_bt_init_conf(mvm);
414 if (ret) 425 if (ret)
415 goto error; 426 goto error;
416 427
@@ -456,7 +467,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
456 goto error; 467 goto error;
457 } 468 }
458 469
459 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant); 470 ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
460 if (ret) 471 if (ret)
461 goto error; 472 goto error;
462 473
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 011906e73a05..2269a9e5cc67 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 341dbc0237ea..e6eca4d66f6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -196,7 +196,7 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
196 u32 qmask, ac; 196 u32 qmask, ac;
197 197
198 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 198 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
199 return BIT(IWL_OFFCHANNEL_QUEUE); 199 return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
200 200
201 qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ? 201 qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
202 BIT(vif->cab_queue) : 0; 202 BIT(vif->cab_queue) : 0;
@@ -553,9 +553,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
553 if (vif->bss_conf.qos) 553 if (vif->bss_conf.qos)
554 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 554 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
555 555
556 /* Don't use cts to self as the fw doesn't support it currently. */
556 if (vif->bss_conf.use_cts_prot) 557 if (vif->bss_conf.use_cts_prot)
557 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT | 558 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
558 MAC_PROT_FLG_SELF_CTS_EN);
559 559
560 /* 560 /*
561 * I think that we should enable these 2 flags regardless the HT PROT 561 * I think that we should enable these 2 flags regardless the HT PROT
@@ -651,6 +651,13 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
651 /* Fill the common data for all mac context types */ 651 /* Fill the common data for all mac context types */
652 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 652 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
653 653
654 /* Allow beacons to pass through as long as we are not associated,or we
655 * do not have dtim period information */
656 if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
657 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
658 else
659 cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
660
654 /* Fill the data specific for station mode */ 661 /* Fill the data specific for station mode */
655 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); 662 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
656 663
@@ -662,6 +669,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
662 u32 action) 669 u32 action)
663{ 670{
664 struct iwl_mac_ctx_cmd cmd = {}; 671 struct iwl_mac_ctx_cmd cmd = {};
672 struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
665 673
666 WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p); 674 WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
667 675
@@ -671,7 +679,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
671 /* Fill the data specific for station mode */ 679 /* Fill the data specific for station mode */
672 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); 680 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
673 681
674 cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow); 682 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
683 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
675 684
676 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 685 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
677} 686}
@@ -685,7 +694,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
685 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); 694 WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
686 695
687 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 696 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
688 /* No other data to be filled */ 697
698 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
699 MAC_FILTER_IN_CONTROL_AND_MGMT |
700 MAC_FILTER_IN_BEACON |
701 MAC_FILTER_IN_PROBE_REQUEST);
702
689 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 703 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
690} 704}
691 705
@@ -714,7 +728,9 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
714 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 728 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
715 729
716 cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 730 cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
717 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC); 731
732 /* Override the filter flags to accept only probe requests */
733 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
718 734
719 /* 735 /*
720 * This flag should be set to true when the P2P Device is 736 * This flag should be set to true when the P2P Device is
@@ -789,7 +805,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
789 TX_CMD_FLG_TSF); 805 TX_CMD_FLG_TSF);
790 806
791 mvm->mgmt_last_antenna_idx = 807 mvm->mgmt_last_antenna_idx =
792 iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, 808 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
793 mvm->mgmt_last_antenna_idx); 809 mvm->mgmt_last_antenna_idx);
794 810
795 beacon_cmd.tx.rate_n_flags = 811 beacon_cmd.tx.rate_n_flags =
@@ -846,10 +862,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
846 */ 862 */
847static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, 863static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
848 struct ieee80211_vif *vif, 864 struct ieee80211_vif *vif,
849 struct iwl_mac_data_ap *ctxt_ap) 865 struct iwl_mac_data_ap *ctxt_ap,
866 bool add)
850{ 867{
851 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 868 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
852 u32 curr_dev_time;
853 869
854 ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); 870 ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
855 ctxt_ap->bi_reciprocal = 871 ctxt_ap->bi_reciprocal =
@@ -861,10 +877,19 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
861 vif->bss_conf.dtim_period)); 877 vif->bss_conf.dtim_period));
862 878
863 ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); 879 ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
864 curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
865 ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
866 880
867 ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time); 881 /*
882 * Only read the system time when the MAC is being added, when we
883 * just modify the MAC then we should keep the time -- the firmware
884 * can otherwise have a "jumping" TBTT.
885 */
886 if (add)
887 mvmvif->ap_beacon_time =
888 iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
889
890 ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
891
892 ctxt_ap->beacon_tsf = 0; /* unused */
868 893
869 /* TODO: Assume that the beacon id == mac context id */ 894 /* TODO: Assume that the beacon id == mac context id */
870 ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); 895 ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
@@ -881,8 +906,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
881 /* Fill the common data for all mac context types */ 906 /* Fill the common data for all mac context types */
882 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 907 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
883 908
909 /* Also enable probe requests to pass */
910 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
911
884 /* Fill the data specific for ap mode */ 912 /* Fill the data specific for ap mode */
885 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap); 913 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
914 action == FW_CTXT_ACTION_ADD);
886 915
887 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 916 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
888} 917}
@@ -892,6 +921,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
892 u32 action) 921 u32 action)
893{ 922{
894 struct iwl_mac_ctx_cmd cmd = {}; 923 struct iwl_mac_ctx_cmd cmd = {};
924 struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
895 925
896 WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); 926 WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
897 927
@@ -899,10 +929,14 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
899 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 929 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
900 930
901 /* Fill the data specific for GO mode */ 931 /* Fill the data specific for GO mode */
902 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap); 932 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
933 action == FW_CTXT_ACTION_ADD);
903 934
904 cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow); 935 cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
905 cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps); 936 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
937 cmd.go.opp_ps_enabled =
938 cpu_to_le32(!!(noa->oppps_ctwindow &
939 IEEE80211_P2P_OPPPS_ENABLE_BIT));
906 940
907 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 941 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
908} 942}
@@ -990,3 +1024,22 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
990 mvmvif->uploaded = false; 1024 mvmvif->uploaded = false;
991 return 0; 1025 return 0;
992} 1026}
1027
1028int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
1029 struct iwl_rx_cmd_buffer *rxb,
1030 struct iwl_device_cmd *cmd)
1031{
1032 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1033 struct iwl_beacon_notif *beacon = (void *)pkt->data;
1034 u16 status __maybe_unused =
1035 le16_to_cpu(beacon->beacon_notify_hdr.status.status);
1036 u32 rate __maybe_unused =
1037 le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
1038
1039 IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
1040 status & TX_STATUS_MSK,
1041 beacon->beacon_notify_hdr.failure_frame,
1042 le64_to_cpu(beacon->tsf),
1043 rate);
1044 return 0;
1045}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7e169b085afe..dd158ec571fb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,7 +65,9 @@
65#include <linux/skbuff.h> 65#include <linux/skbuff.h>
66#include <linux/netdevice.h> 66#include <linux/netdevice.h>
67#include <linux/etherdevice.h> 67#include <linux/etherdevice.h>
68#include <linux/ip.h>
68#include <net/mac80211.h> 69#include <net/mac80211.h>
70#include <net/tcp.h>
69 71
70#include "iwl-op-mode.h" 72#include "iwl-op-mode.h"
71#include "iwl-io.h" 73#include "iwl-io.h"
@@ -102,10 +104,33 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
102 }, 104 },
103}; 105};
104 106
107#ifdef CONFIG_PM_SLEEP
108static const struct nl80211_wowlan_tcp_data_token_feature
109iwl_mvm_wowlan_tcp_token_feature = {
110 .min_len = 0,
111 .max_len = 255,
112 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
113};
114
115static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
116 .tok = &iwl_mvm_wowlan_tcp_token_feature,
117 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
118 sizeof(struct ethhdr) -
119 sizeof(struct iphdr) -
120 sizeof(struct tcphdr),
121 .data_interval_max = 65535, /* __le16 in API */
122 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
123 sizeof(struct ethhdr) -
124 sizeof(struct iphdr) -
125 sizeof(struct tcphdr),
126 .seq = true,
127};
128#endif
129
105int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 130int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
106{ 131{
107 struct ieee80211_hw *hw = mvm->hw; 132 struct ieee80211_hw *hw = mvm->hw;
108 int num_mac, ret; 133 int num_mac, ret, i;
109 134
110 /* Tell mac80211 our characteristics */ 135 /* Tell mac80211 our characteristics */
111 hw->flags = IEEE80211_HW_SIGNAL_DBM | 136 hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -118,8 +143,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
118 IEEE80211_HW_AMPDU_AGGREGATION | 143 IEEE80211_HW_AMPDU_AGGREGATION |
119 IEEE80211_HW_TIMING_BEACON_ONLY; 144 IEEE80211_HW_TIMING_BEACON_ONLY;
120 145
121 hw->queues = IWL_FIRST_AMPDU_QUEUE; 146 hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
122 hw->offchannel_tx_hw_queue = IWL_OFFCHANNEL_QUEUE; 147 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
123 hw->rate_control_algorithm = "iwl-mvm-rs"; 148 hw->rate_control_algorithm = "iwl-mvm-rs";
124 149
125 /* 150 /*
@@ -149,18 +174,22 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
149 hw->wiphy->n_iface_combinations = 174 hw->wiphy->n_iface_combinations =
150 ARRAY_SIZE(iwl_mvm_iface_combinations); 175 ARRAY_SIZE(iwl_mvm_iface_combinations);
151 176
152 hw->wiphy->max_remain_on_channel_duration = 500; 177 hw->wiphy->max_remain_on_channel_duration = 10000;
153 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 178 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
154 179
155 /* Extract MAC address */ 180 /* Extract MAC address */
156 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 181 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
157 hw->wiphy->addresses = mvm->addresses; 182 hw->wiphy->addresses = mvm->addresses;
158 hw->wiphy->n_addresses = 1; 183 hw->wiphy->n_addresses = 1;
159 num_mac = mvm->nvm_data->n_hw_addrs; 184
160 if (num_mac > 1) { 185 /* Extract additional MAC addresses if available */
161 memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr, 186 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
187 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
188
189 for (i = 1; i < num_mac; i++) {
190 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
162 ETH_ALEN); 191 ETH_ALEN);
163 mvm->addresses[1].addr[5]++; 192 mvm->addresses[i].addr[5]++;
164 hw->wiphy->n_addresses++; 193 hw->wiphy->n_addresses++;
165 } 194 }
166 195
@@ -178,7 +207,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
178 207
179 hw->wiphy->hw_version = mvm->trans->hw_id; 208 hw->wiphy->hw_version = mvm->trans->hw_id;
180 209
181 if (iwlwifi_mod_params.power_save) 210 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
182 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 211 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
183 else 212 else
184 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 213 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -206,6 +235,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
206 hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 235 hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
207 hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 236 hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
208 hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 237 hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
238 hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
209 } 239 }
210#endif 240#endif
211 241
@@ -227,7 +257,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
227 goto drop; 257 goto drop;
228 } 258 }
229 259
230 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_OFFCHANNEL_QUEUE && 260 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
231 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) 261 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
232 goto drop; 262 goto drop;
233 263
@@ -273,12 +303,18 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
273 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 303 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
274 break; 304 break;
275 case IEEE80211_AMPDU_TX_START: 305 case IEEE80211_AMPDU_TX_START:
306 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
307 ret = -EINVAL;
308 break;
309 }
276 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 310 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
277 break; 311 break;
278 case IEEE80211_AMPDU_TX_STOP_CONT: 312 case IEEE80211_AMPDU_TX_STOP_CONT:
313 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
314 break;
279 case IEEE80211_AMPDU_TX_STOP_FLUSH: 315 case IEEE80211_AMPDU_TX_STOP_FLUSH:
280 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 316 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
281 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 317 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
282 break; 318 break;
283 case IEEE80211_AMPDU_TX_OPERATIONAL: 319 case IEEE80211_AMPDU_TX_OPERATIONAL:
284 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); 320 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
@@ -466,11 +502,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
466 /* 502 /*
467 * TODO: remove this temporary code. 503 * TODO: remove this temporary code.
468 * Currently MVM FW supports power management only on single MAC. 504 * Currently MVM FW supports power management only on single MAC.
469 * Iterate and disable PM on all active interfaces. 505 * If new interface added, disable PM on existing interface.
506 * P2P device is a special case, since it is handled by FW similary to
507 * scan. If P2P deviced is added, PM remains enabled on existing
508 * interface.
470 * Note: the method below does not count the new interface being added 509 * Note: the method below does not count the new interface being added
471 * at this moment. 510 * at this moment.
472 */ 511 */
473 mvm->vif_count++; 512 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
513 mvm->vif_count++;
474 if (mvm->vif_count > 1) { 514 if (mvm->vif_count > 1) {
475 IWL_DEBUG_MAC80211(mvm, 515 IWL_DEBUG_MAC80211(mvm,
476 "Disable power on existing interfaces\n"); 516 "Disable power on existing interfaces\n");
@@ -526,6 +566,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
526 mvm->p2p_device_vif = vif; 566 mvm->p2p_device_vif = vif;
527 } 567 }
528 568
569 iwl_mvm_vif_dbgfs_register(mvm, vif);
529 goto out_unlock; 570 goto out_unlock;
530 571
531 out_unbind: 572 out_unbind:
@@ -539,10 +580,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
539 /* 580 /*
540 * TODO: remove this temporary code. 581 * TODO: remove this temporary code.
541 * Currently MVM FW supports power management only on single MAC. 582 * Currently MVM FW supports power management only on single MAC.
542 * Check if only one additional interface remains after rereasing 583 * Check if only one additional interface remains after releasing
543 * current one. Update power mode on the remaining interface. 584 * current one. Update power mode on the remaining interface.
544 */ 585 */
545 mvm->vif_count--; 586 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
587 mvm->vif_count--;
546 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", 588 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
547 mvm->vif_count); 589 mvm->vif_count);
548 if (mvm->vif_count == 1) { 590 if (mvm->vif_count == 1) {
@@ -604,6 +646,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
604 646
605 mutex_lock(&mvm->mutex); 647 mutex_lock(&mvm->mutex);
606 648
649 iwl_mvm_vif_dbgfs_clean(mvm, vif);
650
607 /* 651 /*
608 * For AP/GO interface, the tear down of the resources allocated to the 652 * For AP/GO interface, the tear down of the resources allocated to the
609 * interface is be handled as part of the stop_ap flow. 653 * interface is be handled as part of the stop_ap flow.
@@ -627,7 +671,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
627 * Check if only one additional interface remains after removing 671 * Check if only one additional interface remains after removing
628 * current one. Update power mode on the remaining interface. 672 * current one. Update power mode on the remaining interface.
629 */ 673 */
630 if (mvm->vif_count) 674 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
631 mvm->vif_count--; 675 mvm->vif_count--;
632 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n", 676 IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
633 mvm->vif_count); 677 mvm->vif_count);
@@ -677,6 +721,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
677 IWL_ERR(mvm, "failed to update quotas\n"); 721 IWL_ERR(mvm, "failed to update quotas\n");
678 return; 722 return;
679 } 723 }
724 iwl_mvm_bt_coex_vif_assoc(mvm, vif);
680 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 725 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
681 /* remove AP station now that the MAC is unassoc */ 726 /* remove AP station now that the MAC is unassoc */
682 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); 727 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -895,7 +940,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
895 */ 940 */
896 break; 941 break;
897 case STA_NOTIFY_AWAKE: 942 case STA_NOTIFY_AWAKE:
898 if (WARN_ON(mvmsta->sta_id == IWL_INVALID_STATION)) 943 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
899 break; 944 break;
900 iwl_mvm_sta_modify_ps_wake(mvm, sta); 945 iwl_mvm_sta_modify_ps_wake(mvm, sta);
901 break; 946 break;
@@ -1051,6 +1096,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
1051 1096
1052 switch (cmd) { 1097 switch (cmd) {
1053 case SET_KEY: 1098 case SET_KEY:
1099 if (vif->type == NL80211_IFTYPE_AP && !sta) {
1100 /* GTK on AP interface is a TX-only key, return 0 */
1101 ret = 0;
1102 key->hw_key_idx = STA_KEY_IDX_INVALID;
1103 break;
1104 }
1105
1054 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 1106 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
1055 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false); 1107 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
1056 if (ret) { 1108 if (ret) {
@@ -1059,11 +1111,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
1059 * can't add key for RX, but we don't need it 1111 * can't add key for RX, but we don't need it
1060 * in the device for TX so still return 0 1112 * in the device for TX so still return 0
1061 */ 1113 */
1114 key->hw_key_idx = STA_KEY_IDX_INVALID;
1062 ret = 0; 1115 ret = 0;
1063 } 1116 }
1064 1117
1065 break; 1118 break;
1066 case DISABLE_KEY: 1119 case DISABLE_KEY:
1120 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
1121 ret = 0;
1122 break;
1123 }
1124
1067 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 1125 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
1068 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 1126 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
1069 break; 1127 break;
@@ -1090,7 +1148,8 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
1090static int iwl_mvm_roc(struct ieee80211_hw *hw, 1148static int iwl_mvm_roc(struct ieee80211_hw *hw,
1091 struct ieee80211_vif *vif, 1149 struct ieee80211_vif *vif,
1092 struct ieee80211_channel *channel, 1150 struct ieee80211_channel *channel,
1093 int duration) 1151 int duration,
1152 enum ieee80211_roc_type type)
1094{ 1153{
1095 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1154 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1096 struct cfg80211_chan_def chandef; 1155 struct cfg80211_chan_def chandef;
@@ -1101,8 +1160,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
1101 return -EINVAL; 1160 return -EINVAL;
1102 } 1161 }
1103 1162
1104 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value, 1163 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
1105 duration); 1164 duration, type);
1106 1165
1107 mutex_lock(&mvm->mutex); 1166 mutex_lock(&mvm->mutex);
1108 1167
@@ -1111,7 +1170,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
1111 &chandef, 1, 1); 1170 &chandef, 1, 1);
1112 1171
1113 /* Schedule the time events */ 1172 /* Schedule the time events */
1114 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration); 1173 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
1115 1174
1116 mutex_unlock(&mvm->mutex); 1175 mutex_unlock(&mvm->mutex);
1117 IWL_DEBUG_MAC80211(mvm, "leave\n"); 1176 IWL_DEBUG_MAC80211(mvm, "leave\n");
@@ -1215,6 +1274,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
1215 * will handle quota settings. 1274 * will handle quota settings.
1216 */ 1275 */
1217 if (vif->type == NL80211_IFTYPE_MONITOR) { 1276 if (vif->type == NL80211_IFTYPE_MONITOR) {
1277 mvmvif->monitor_active = true;
1218 ret = iwl_mvm_update_quotas(mvm, vif); 1278 ret = iwl_mvm_update_quotas(mvm, vif);
1219 if (ret) 1279 if (ret)
1220 goto out_remove_binding; 1280 goto out_remove_binding;
@@ -1245,15 +1305,16 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
1245 if (vif->type == NL80211_IFTYPE_AP) 1305 if (vif->type == NL80211_IFTYPE_AP)
1246 goto out_unlock; 1306 goto out_unlock;
1247 1307
1248 iwl_mvm_binding_remove_vif(mvm, vif);
1249 switch (vif->type) { 1308 switch (vif->type) {
1250 case NL80211_IFTYPE_MONITOR: 1309 case NL80211_IFTYPE_MONITOR:
1251 iwl_mvm_update_quotas(mvm, vif); 1310 mvmvif->monitor_active = false;
1311 iwl_mvm_update_quotas(mvm, NULL);
1252 break; 1312 break;
1253 default: 1313 default:
1254 break; 1314 break;
1255 } 1315 }
1256 1316
1317 iwl_mvm_binding_remove_vif(mvm, vif);
1257out_unlock: 1318out_unlock:
1258 mvmvif->phy_ctxt = NULL; 1319 mvmvif->phy_ctxt = NULL;
1259 mutex_unlock(&mvm->mutex); 1320 mutex_unlock(&mvm->mutex);
@@ -1274,6 +1335,15 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
1274 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 1335 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
1275} 1336}
1276 1337
1338static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
1339 struct ieee80211_vif *vif,
1340 enum ieee80211_rssi_event rssi_event)
1341{
1342 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1343
1344 iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
1345}
1346
1277struct ieee80211_ops iwl_mvm_hw_ops = { 1347struct ieee80211_ops iwl_mvm_hw_ops = {
1278 .tx = iwl_mvm_mac_tx, 1348 .tx = iwl_mvm_mac_tx,
1279 .ampdu_action = iwl_mvm_mac_ampdu_action, 1349 .ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -1297,6 +1367,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
1297 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 1367 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
1298 .remain_on_channel = iwl_mvm_roc, 1368 .remain_on_channel = iwl_mvm_roc,
1299 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 1369 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
1370 .rssi_callback = iwl_mvm_mac_rssi_callback,
1300 1371
1301 .add_chanctx = iwl_mvm_add_chanctx, 1372 .add_chanctx = iwl_mvm_add_chanctx,
1302 .remove_chanctx = iwl_mvm_remove_chanctx, 1373 .remove_chanctx = iwl_mvm_remove_chanctx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index bdae700c769e..8269bc562951 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -79,7 +79,7 @@
79#include "fw-api.h" 79#include "fw-api.h"
80 80
81#define IWL_INVALID_MAC80211_QUEUE 0xff 81#define IWL_INVALID_MAC80211_QUEUE 0xff
82#define IWL_MVM_MAX_ADDRESSES 2 82#define IWL_MVM_MAX_ADDRESSES 5
83/* RSSI offset for WkP */ 83/* RSSI offset for WkP */
84#define IWL_RSSI_OFFSET 50 84#define IWL_RSSI_OFFSET 50
85 85
@@ -90,10 +90,6 @@ enum iwl_mvm_tx_fifo {
90 IWL_MVM_TX_FIFO_VO, 90 IWL_MVM_TX_FIFO_VO,
91}; 91};
92 92
93/* Placeholder */
94#define IWL_OFFCHANNEL_QUEUE 8
95#define IWL_FIRST_AMPDU_QUEUE 11
96
97extern struct ieee80211_ops iwl_mvm_hw_ops; 93extern struct ieee80211_ops iwl_mvm_hw_ops;
98/** 94/**
99 * struct iwl_mvm_mod_params - module parameters for iwlmvm 95 * struct iwl_mvm_mod_params - module parameters for iwlmvm
@@ -161,6 +157,8 @@ enum iwl_power_scheme {
161 * @uploaded: indicates the MAC context has been added to the device 157 * @uploaded: indicates the MAC context has been added to the device
162 * @ap_active: indicates that ap context is configured, and that the interface 158 * @ap_active: indicates that ap context is configured, and that the interface
163 * should get quota etc. 159 * should get quota etc.
160 * @monitor_active: indicates that monitor context is configured, and that the
161 * interface should get quota etc.
164 * @queue_params: QoS params for this MAC 162 * @queue_params: QoS params for this MAC
165 * @bcast_sta: station used for broadcast packets. Used by the following 163 * @bcast_sta: station used for broadcast packets. Used by the following
166 * vifs: P2P_DEVICE, GO and AP. 164 * vifs: P2P_DEVICE, GO and AP.
@@ -173,6 +171,9 @@ struct iwl_mvm_vif {
173 171
174 bool uploaded; 172 bool uploaded;
175 bool ap_active; 173 bool ap_active;
174 bool monitor_active;
175
176 u32 ap_beacon_time;
176 177
177 enum iwl_tsf_id tsf_id; 178 enum iwl_tsf_id tsf_id;
178 179
@@ -211,6 +212,7 @@ struct iwl_mvm_vif {
211 212
212#ifdef CONFIG_IWLWIFI_DEBUGFS 213#ifdef CONFIG_IWLWIFI_DEBUGFS
213 struct dentry *dbgfs_dir; 214 struct dentry *dbgfs_dir;
215 struct dentry *dbgfs_slink;
214 void *dbgfs_data; 216 void *dbgfs_data;
215#endif 217#endif
216}; 218};
@@ -279,10 +281,7 @@ struct iwl_mvm {
279 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; 281 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
280 282
281 struct iwl_nvm_data *nvm_data; 283 struct iwl_nvm_data *nvm_data;
282 /* eeprom blob for debugfs/testmode */ 284 /* NVM sections */
283 u8 *eeprom_blob;
284 size_t eeprom_blob_size;
285 /* NVM sections for 7000 family */
286 struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS]; 285 struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
287 286
288 /* EEPROM MAC addresses */ 287 /* EEPROM MAC addresses */
@@ -323,6 +322,13 @@ struct iwl_mvm {
323 * can hold 16 keys at most. Reflect this fact. 322 * can hold 16 keys at most. Reflect this fact.
324 */ 323 */
325 unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; 324 unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
325
326 /*
327 * This counter of created interfaces is referenced only in conjunction
328 * with FW limitation related to power management. Currently PM is
329 * supported only on a single interface.
330 * IMPORTANT: this variable counts all interfaces except P2P device.
331 */
326 u8 vif_count; 332 u8 vif_count;
327 333
328 struct led_classdev led; 334 struct led_classdev led;
@@ -332,6 +338,10 @@ struct iwl_mvm {
332#ifdef CONFIG_PM_SLEEP 338#ifdef CONFIG_PM_SLEEP
333 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; 339 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
334#endif 340#endif
341
342 /* BT-Coex */
343 u8 bt_kill_msk;
344 struct iwl_bt_coex_profile_notif last_bt_notif;
335}; 345};
336 346
337/* Extract MVM priv from op_mode and _hw */ 347/* Extract MVM priv from op_mode and _hw */
@@ -445,6 +455,9 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
445 struct ieee80211_vif *vif); 455 struct ieee80211_vif *vif);
446int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, 456int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
447 struct ieee80211_vif *vif); 457 struct ieee80211_vif *vif);
458int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
459 struct iwl_rx_cmd_buffer *rxb,
460 struct iwl_device_cmd *cmd);
448 461
449/* Bindings */ 462/* Bindings */
450int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 463int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -466,16 +479,22 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
466/* MVM debugfs */ 479/* MVM debugfs */
467#ifdef CONFIG_IWLWIFI_DEBUGFS 480#ifdef CONFIG_IWLWIFI_DEBUGFS
468int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); 481int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
469int iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 482void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
470 struct dentry *dbgfs_dir); 483void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
471void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
472 struct iwl_powertable_cmd *cmd);
473#else 484#else
474static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, 485static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
475 struct dentry *dbgfs_dir) 486 struct dentry *dbgfs_dir)
476{ 487{
477 return 0; 488 return 0;
478} 489}
490static inline void
491iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
492{
493}
494static inline void
495iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
496{
497}
479#endif /* CONFIG_IWLWIFI_DEBUGFS */ 498#endif /* CONFIG_IWLWIFI_DEBUGFS */
480 499
481/* rate scaling */ 500/* rate scaling */
@@ -485,6 +504,8 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
485/* power managment */ 504/* power managment */
486int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 505int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
487int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 506int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
507void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
508 struct iwl_powertable_cmd *cmd);
488 509
489int iwl_mvm_leds_init(struct iwl_mvm *mvm); 510int iwl_mvm_leds_init(struct iwl_mvm *mvm);
490void iwl_mvm_leds_exit(struct iwl_mvm *mvm); 511void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -502,4 +523,14 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
502void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, 523void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
503 struct ieee80211_vif *vif, int idx); 524 struct ieee80211_vif *vif, int idx);
504 525
526/* BT Coex */
527int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
528int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
529int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
530 struct iwl_rx_cmd_buffer *rxb,
531 struct iwl_device_cmd *cmd);
532void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
533 enum ieee80211_rssi_event rssi_event);
534void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
535
505#endif /* __IWL_MVM_H__ */ 536#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 20016bcbdeab..b8ec02f89acc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -74,26 +74,11 @@ static const int nvm_to_read[] = {
74 NVM_SECTION_TYPE_PRODUCTION, 74 NVM_SECTION_TYPE_PRODUCTION,
75}; 75};
76 76
77/* used to simplify the shared operations on NCM_ACCESS_CMD versions */ 77/* Default NVM size to read */
78union iwl_nvm_access_cmd { 78#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024);
79 struct iwl_nvm_access_cmd_ver1 ver1;
80 struct iwl_nvm_access_cmd_ver2 ver2;
81};
82union iwl_nvm_access_resp {
83 struct iwl_nvm_access_resp_ver1 ver1;
84 struct iwl_nvm_access_resp_ver2 ver2;
85};
86
87static inline void iwl_nvm_fill_read_ver1(struct iwl_nvm_access_cmd_ver1 *cmd,
88 u16 offset, u16 length)
89{
90 cmd->offset = cpu_to_le16(offset);
91 cmd->length = cpu_to_le16(length);
92 cmd->cache_refresh = 1;
93}
94 79
95static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd, 80static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd,
96 u16 offset, u16 length, u16 section) 81 u16 offset, u16 length, u16 section)
97{ 82{
98 cmd->offset = cpu_to_le16(offset); 83 cmd->offset = cpu_to_le16(offset);
99 cmd->length = cpu_to_le16(length); 84 cmd->length = cpu_to_le16(length);
@@ -103,8 +88,8 @@ static inline void iwl_nvm_fill_read_ver2(struct iwl_nvm_access_cmd_ver2 *cmd,
103static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, 88static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
104 u16 offset, u16 length, u8 *data) 89 u16 offset, u16 length, u8 *data)
105{ 90{
106 union iwl_nvm_access_cmd nvm_access_cmd; 91 struct iwl_nvm_access_cmd nvm_access_cmd = {};
107 union iwl_nvm_access_resp *nvm_resp; 92 struct iwl_nvm_access_resp *nvm_resp;
108 struct iwl_rx_packet *pkt; 93 struct iwl_rx_packet *pkt;
109 struct iwl_host_cmd cmd = { 94 struct iwl_host_cmd cmd = {
110 .id = NVM_ACCESS_CMD, 95 .id = NVM_ACCESS_CMD,
@@ -114,18 +99,8 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
114 int ret, bytes_read, offset_read; 99 int ret, bytes_read, offset_read;
115 u8 *resp_data; 100 u8 *resp_data;
116 101
117 memset(&nvm_access_cmd, 0, sizeof(nvm_access_cmd)); 102 iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section);
118 103 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
119 /* TODO: not sure family should be the decider, maybe FW version? */
120 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
121 iwl_nvm_fill_read_ver2(&(nvm_access_cmd.ver2),
122 offset, length, section);
123 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver2);
124 } else {
125 iwl_nvm_fill_read_ver1(&(nvm_access_cmd.ver1),
126 offset, length);
127 cmd.len[0] = sizeof(struct iwl_nvm_access_cmd_ver1);
128 }
129 104
130 ret = iwl_mvm_send_cmd(mvm, &cmd); 105 ret = iwl_mvm_send_cmd(mvm, &cmd);
131 if (ret) 106 if (ret)
@@ -141,17 +116,10 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
141 116
142 /* Extract NVM response */ 117 /* Extract NVM response */
143 nvm_resp = (void *)pkt->data; 118 nvm_resp = (void *)pkt->data;
144 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 119 ret = le16_to_cpu(nvm_resp->status);
145 ret = le16_to_cpu(nvm_resp->ver2.status); 120 bytes_read = le16_to_cpu(nvm_resp->length);
146 bytes_read = le16_to_cpu(nvm_resp->ver2.length); 121 offset_read = le16_to_cpu(nvm_resp->offset);
147 offset_read = le16_to_cpu(nvm_resp->ver2.offset); 122 resp_data = nvm_resp->data;
148 resp_data = nvm_resp->ver2.data;
149 } else {
150 ret = le16_to_cpu(nvm_resp->ver1.length) <= 0;
151 bytes_read = le16_to_cpu(nvm_resp->ver1.length);
152 offset_read = le16_to_cpu(nvm_resp->ver1.offset);
153 resp_data = nvm_resp->ver1.data;
154 }
155 if (ret) { 123 if (ret) {
156 IWL_ERR(mvm, 124 IWL_ERR(mvm,
157 "NVM access command failed with status %d (device: %s)\n", 125 "NVM access command failed with status %d (device: %s)\n",
@@ -191,17 +159,10 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
191{ 159{
192 u16 length, offset = 0; 160 u16 length, offset = 0;
193 int ret; 161 int ret;
194 bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
195 162
196 length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024)) 163 /* Set nvm section read length */
197 - sizeof(union iwl_nvm_access_cmd) 164 length = IWL_NVM_DEFAULT_CHUNK_SIZE;
198 - sizeof(struct iwl_rx_packet); 165
199 /*
200 * if length is greater than EEPROM size, truncate it because uCode
201 * doesn't check it by itself, and exit the loop when reached.
202 */
203 if (old_eeprom && length > mvm->cfg->base_params->eeprom_size)
204 length = mvm->cfg->base_params->eeprom_size;
205 ret = length; 166 ret = length;
206 167
207 /* Read the NVM until exhausted (reading less than requested) */ 168 /* Read the NVM until exhausted (reading less than requested) */
@@ -214,8 +175,6 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
214 return ret; 175 return ret;
215 } 176 }
216 offset += ret; 177 offset += ret;
217 if (old_eeprom && offset == mvm->cfg->base_params->eeprom_size)
218 break;
219 } 178 }
220 179
221 IWL_INFO(mvm, "NVM section %d read completed\n", section); 180 IWL_INFO(mvm, "NVM section %d read completed\n", section);
@@ -249,63 +208,31 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
249 int ret, i, section; 208 int ret, i, section;
250 u8 *nvm_buffer, *temp; 209 u8 *nvm_buffer, *temp;
251 210
252 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 211 /* TODO: find correct NVM max size for a section */
253 /* TODO: find correct NVM max size for a section */ 212 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
254 nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, 213 GFP_KERNEL);
255 GFP_KERNEL); 214 if (!nvm_buffer)
256 if (!nvm_buffer) 215 return -ENOMEM;
257 return -ENOMEM; 216 for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
258 for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) { 217 section = nvm_to_read[i];
259 section = nvm_to_read[i]; 218 /* we override the constness for initial read */
260 /* we override the constness for initial read */ 219 ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
261 ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
262 if (ret < 0)
263 break;
264 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
265 if (!temp) {
266 ret = -ENOMEM;
267 break;
268 }
269 mvm->nvm_sections[section].data = temp;
270 mvm->nvm_sections[section].length = ret;
271 }
272 kfree(nvm_buffer);
273 if (ret < 0) 220 if (ret < 0)
274 return ret; 221 break;
275 } else { 222 temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
276 /* allocate eeprom */ 223 if (!temp) {
277 mvm->eeprom_blob_size = mvm->cfg->base_params->eeprom_size; 224 ret = -ENOMEM;
278 IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM size = %zd\n", 225 break;
279 mvm->eeprom_blob_size);
280 mvm->eeprom_blob = kzalloc(mvm->eeprom_blob_size, GFP_KERNEL);
281 if (!mvm->eeprom_blob)
282 return -ENOMEM;
283
284 ret = iwl_nvm_read_section(mvm, 0, mvm->eeprom_blob);
285 if (ret != mvm->eeprom_blob_size) {
286 IWL_ERR(mvm, "Read partial NVM %d/%zd\n",
287 ret, mvm->eeprom_blob_size);
288 kfree(mvm->eeprom_blob);
289 mvm->eeprom_blob = NULL;
290 return -EINVAL;
291 } 226 }
227 mvm->nvm_sections[section].data = temp;
228 mvm->nvm_sections[section].length = ret;
292 } 229 }
230 kfree(nvm_buffer);
231 if (ret < 0)
232 return ret;
293 233
294 ret = 0; 234 ret = 0;
295 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) 235 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
296 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
297 else
298 mvm->nvm_data =
299 iwl_parse_eeprom_data(mvm->trans->dev,
300 mvm->cfg,
301 mvm->eeprom_blob,
302 mvm->eeprom_blob_size);
303
304 if (!mvm->nvm_data) {
305 kfree(mvm->eeprom_blob);
306 mvm->eeprom_blob = NULL;
307 ret = -ENOMEM;
308 }
309 236
310 return ret; 237 return ret;
311} 238}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d0f9c1e0475e..fe031d304d1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -143,21 +143,12 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
143 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; 143 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
144 u32 reg_val = 0; 144 u32 reg_val = 0;
145 145
146 /* 146 radio_cfg_type = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >>
147 * We can't upload the correct value to the INIT image 147 FW_PHY_CFG_RADIO_TYPE_POS;
148 * as we don't have nvm_data by that time. 148 radio_cfg_step = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >>
149 * 149 FW_PHY_CFG_RADIO_STEP_POS;
150 * TODO: Figure out what we should do here 150 radio_cfg_dash = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >>
151 */ 151 FW_PHY_CFG_RADIO_DASH_POS;
152 if (mvm->nvm_data) {
153 radio_cfg_type = mvm->nvm_data->radio_cfg_type;
154 radio_cfg_step = mvm->nvm_data->radio_cfg_step;
155 radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
156 } else {
157 radio_cfg_type = 0;
158 radio_cfg_step = 0;
159 radio_cfg_dash = 0;
160 }
161 152
162 /* SKU control */ 153 /* SKU control */
163 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << 154 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
@@ -175,7 +166,6 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
175 166
176 /* silicon bits */ 167 /* silicon bits */
177 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; 168 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
178 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
179 169
180 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, 170 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
181 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | 171 CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
@@ -230,6 +220,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
230 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), 220 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
231 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false), 221 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
232 222
223 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
224 RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
225
233 RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), 226 RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
234 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), 227 RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
235 228
@@ -274,6 +267,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
274 CMD(WEP_KEY), 267 CMD(WEP_KEY),
275 CMD(REPLY_RX_PHY_CMD), 268 CMD(REPLY_RX_PHY_CMD),
276 CMD(REPLY_RX_MPDU_CMD), 269 CMD(REPLY_RX_MPDU_CMD),
270 CMD(BEACON_NOTIFICATION),
277 CMD(BEACON_TEMPLATE_CMD), 271 CMD(BEACON_TEMPLATE_CMD),
278 CMD(STATISTICS_NOTIFICATION), 272 CMD(STATISTICS_NOTIFICATION),
279 CMD(TX_ANT_CONFIGURATION_CMD), 273 CMD(TX_ANT_CONFIGURATION_CMD),
@@ -293,6 +287,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
293 CMD(NET_DETECT_PROFILES_CMD), 287 CMD(NET_DETECT_PROFILES_CMD),
294 CMD(NET_DETECT_HOTSPOTS_CMD), 288 CMD(NET_DETECT_HOTSPOTS_CMD),
295 CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), 289 CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
290 CMD(CARD_STATE_NOTIFICATION),
291 CMD(BT_COEX_PRIO_TABLE),
292 CMD(BT_COEX_PROT_ENV),
293 CMD(BT_PROFILE_NOTIFICATION),
294 CMD(BT_CONFIG),
296}; 295};
297#undef CMD 296#undef CMD
298 297
@@ -312,16 +311,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
312 }; 311 };
313 int err, scan_size; 312 int err, scan_size;
314 313
315 switch (cfg->device_family) {
316 case IWL_DEVICE_FAMILY_6030:
317 case IWL_DEVICE_FAMILY_6005:
318 case IWL_DEVICE_FAMILY_7000:
319 break;
320 default:
321 IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
322 return NULL;
323 }
324
325 /******************************** 314 /********************************
326 * 1. Allocating and configuring HW data 315 * 1. Allocating and configuring HW data
327 ********************************/ 316 ********************************/
@@ -363,8 +352,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
363 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); 352 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
364 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; 353 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
365 354
366 /* TODO: this should really be a TLV */ 355 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
367 if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
368 trans_cfg.bc_table_dword = true; 356 trans_cfg.bc_table_dword = true;
369 357
370 if (!iwlwifi_mod_params.wd_disable) 358 if (!iwlwifi_mod_params.wd_disable)
@@ -438,7 +426,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
438 out_free: 426 out_free:
439 iwl_phy_db_free(mvm->phy_db); 427 iwl_phy_db_free(mvm->phy_db);
440 kfree(mvm->scan_cmd); 428 kfree(mvm->scan_cmd);
441 kfree(mvm->eeprom_blob);
442 iwl_trans_stop_hw(trans, true); 429 iwl_trans_stop_hw(trans, true);
443 ieee80211_free_hw(mvm->hw); 430 ieee80211_free_hw(mvm->hw);
444 return NULL; 431 return NULL;
@@ -460,7 +447,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
460 iwl_phy_db_free(mvm->phy_db); 447 iwl_phy_db_free(mvm->phy_db);
461 mvm->phy_db = NULL; 448 mvm->phy_db = NULL;
462 449
463 kfree(mvm->eeprom_blob);
464 iwl_free_nvm_data(mvm->nvm_data); 450 iwl_free_nvm_data(mvm->nvm_data);
465 for (i = 0; i < NVM_NUM_OF_SECTIONS; i++) 451 for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
466 kfree(mvm->nvm_sections[i].data); 452 kfree(mvm->nvm_sections[i].data);
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b428448f8ddf..a28a1d1f23eb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -142,7 +142,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
142 struct cfg80211_chan_def *chandef, 142 struct cfg80211_chan_def *chandef,
143 u8 chains_static, u8 chains_dynamic) 143 u8 chains_static, u8 chains_dynamic)
144{ 144{
145 u8 valid_rx_chains, active_cnt, idle_cnt; 145 u8 active_cnt, idle_cnt;
146 146
147 /* Set the channel info data */ 147 /* Set the channel info data */
148 cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ? 148 cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
@@ -153,22 +153,16 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
153 cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); 153 cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
154 154
155 /* Set rx the chains */ 155 /* Set rx the chains */
156
157 /* TODO:
158 * Need to add on chain noise calibration limitations, and
159 * BT coex considerations.
160 */
161 valid_rx_chains = mvm->nvm_data->valid_rx_ant;
162 idle_cnt = chains_static; 156 idle_cnt = chains_static;
163 active_cnt = chains_dynamic; 157 active_cnt = chains_dynamic;
164 158
165 cmd->rxchain_info = cpu_to_le32(valid_rx_chains << 159 cmd->rxchain_info = cpu_to_le32(iwl_fw_valid_rx_ant(mvm->fw) <<
166 PHY_RX_CHAIN_VALID_POS); 160 PHY_RX_CHAIN_VALID_POS);
167 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); 161 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
168 cmd->rxchain_info |= cpu_to_le32(active_cnt << 162 cmd->rxchain_info |= cpu_to_le32(active_cnt <<
169 PHY_RX_CHAIN_MIMO_CNT_POS); 163 PHY_RX_CHAIN_MIMO_CNT_POS);
170 164
171 cmd->txchain_info = cpu_to_le32(mvm->nvm_data->valid_tx_ant); 165 cmd->txchain_info = cpu_to_le32(iwl_fw_valid_tx_ant(mvm->fw));
172} 166}
173 167
174/* 168/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 5a92a4978795..ed77e437aac4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -75,23 +75,48 @@
75 75
76#define POWER_KEEP_ALIVE_PERIOD_SEC 25 76#define POWER_KEEP_ALIVE_PERIOD_SEC 25
77 77
78static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 78static void iwl_mvm_power_log(struct iwl_mvm *mvm,
79 struct iwl_powertable_cmd *cmd) 79 struct iwl_powertable_cmd *cmd)
80{
81 IWL_DEBUG_POWER(mvm,
82 "Sending power table command for power level %d, flags = 0x%X\n",
83 iwlmvm_mod_params.power_scheme,
84 le16_to_cpu(cmd->flags));
85 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
86
87 if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
88 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
89 le32_to_cpu(cmd->rx_data_timeout));
90 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
91 le32_to_cpu(cmd->tx_data_timeout));
92 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
93 cmd->lprx_rssi_threshold);
94 }
95}
96
97void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
98 struct iwl_powertable_cmd *cmd)
80{ 99{
81 struct ieee80211_hw *hw = mvm->hw; 100 struct ieee80211_hw *hw = mvm->hw;
82 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
83 struct ieee80211_chanctx_conf *chanctx_conf; 101 struct ieee80211_chanctx_conf *chanctx_conf;
84 struct ieee80211_channel *chan; 102 struct ieee80211_channel *chan;
85 int dtimper, dtimper_msec; 103 int dtimper, dtimper_msec;
86 int keep_alive; 104 int keep_alive;
87 bool radar_detect = false; 105 bool radar_detect = false;
88 106
89 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 107 /*
90 mvmvif->color)); 108 * Regardless of power management state the driver must set
91 cmd->action = cpu_to_le32(FW_CTXT_ACTION_MODIFY); 109 * keep alive period. FW will use it for sending keep alive NDPs
110 * immediately after association.
111 */
112 cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
92 113
93 if ((!vif->bss_conf.ps) || 114 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
94 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)) 115 return;
116
117 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
118
119 if (!vif->bss_conf.ps)
95 return; 120 return;
96 121
97 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 122 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -110,63 +135,29 @@ static void iwl_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
110 135
111 /* Check skip over DTIM conditions */ 136 /* Check skip over DTIM conditions */
112 if (!radar_detect && (dtimper <= 10) && 137 if (!radar_detect && (dtimper <= 10) &&
113 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) { 138 (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP))
114 cmd->flags |= cpu_to_le16(POWER_FLAGS_SLEEP_OVER_DTIM_MSK); 139 cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
115 cmd->num_skip_dtim = 2;
116 }
117 140
118 /* Check that keep alive period is at least 3 * DTIM */ 141 /* Check that keep alive period is at least 3 * DTIM */
119 dtimper_msec = dtimper * vif->bss_conf.beacon_int; 142 dtimper_msec = dtimper * vif->bss_conf.beacon_int;
120 keep_alive = max_t(int, 3 * dtimper_msec, 143 keep_alive = max_t(int, 3 * dtimper_msec,
121 MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); 144 MSEC_PER_SEC * cmd->keep_alive_seconds);
122 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); 145 keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
146 cmd->keep_alive_seconds = keep_alive;
123 147
124 cmd->keep_alive_seconds = cpu_to_le16(keep_alive); 148 cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
125 149 cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
126 if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP) {
127 /* TODO: Also for D3 (device sleep / WoWLAN) */
128 cmd->rx_data_timeout = cpu_to_le32(10);
129 cmd->tx_data_timeout = cpu_to_le32(10);
130 } else {
131 cmd->rx_data_timeout = cpu_to_le32(50);
132 cmd->tx_data_timeout = cpu_to_le32(50);
133 }
134} 150}
135 151
136int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 152int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
137{ 153{
138 struct iwl_powertable_cmd cmd = {}; 154 struct iwl_powertable_cmd cmd = {};
139 155
140 if (!iwlwifi_mod_params.power_save) {
141 IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
142 return 0;
143 }
144
145 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 156 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
146 return 0; 157 return 0;
147 158
148 iwl_power_build_cmd(mvm, vif, &cmd); 159 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
149 160 iwl_mvm_power_log(mvm, &cmd);
150 IWL_DEBUG_POWER(mvm,
151 "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
152 cmd.id_and_color, iwlmvm_mod_params.power_scheme,
153 le16_to_cpu(cmd.flags));
154
155 if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
156 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
157 le16_to_cpu(cmd.keep_alive_seconds));
158 IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
159 le32_to_cpu(cmd.rx_data_timeout));
160 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
161 le32_to_cpu(cmd.tx_data_timeout));
162 IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
163 le32_to_cpu(cmd.rx_data_timeout_uapsd));
164 IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
165 le32_to_cpu(cmd.tx_data_timeout_uapsd));
166 IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
167 cmd.lprx_rssi_threshold);
168 IWL_DEBUG_POWER(mvm, "DTIMs to skip = %u\n", cmd.num_skip_dtim);
169 }
170 161
171 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, 162 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
172 sizeof(cmd), &cmd); 163 sizeof(cmd), &cmd);
@@ -175,33 +166,15 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
175int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 166int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
176{ 167{
177 struct iwl_powertable_cmd cmd = {}; 168 struct iwl_powertable_cmd cmd = {};
178 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
179
180 if (!iwlwifi_mod_params.power_save) {
181 IWL_DEBUG_POWER(mvm, "Power management is not allowed\n");
182 return 0;
183 }
184 169
185 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 170 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
186 return 0; 171 return 0;
187 172
188 cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 173 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
189 mvmvif->color)); 174 cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
190 cmd.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY);
191 175
192 IWL_DEBUG_POWER(mvm, 176 iwl_mvm_power_log(mvm, &cmd);
193 "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
194 cmd.id_and_color, iwlmvm_mod_params.power_scheme,
195 le16_to_cpu(cmd.flags));
196 177
197 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, 178 return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
198 sizeof(cmd), &cmd); 179 sizeof(cmd), &cmd);
199} 180}
200
201#ifdef CONFIG_IWLWIFI_DEBUGFS
202void iwl_power_get_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
203 struct iwl_powertable_cmd *cmd)
204{
205 iwl_power_build_cmd(mvm, vif, cmd);
206}
207#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 925628468146..a1e3e923ea3e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -114,7 +114,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
114 data->n_interfaces[id]++; 114 data->n_interfaces[id]++;
115 break; 115 break;
116 case NL80211_IFTYPE_MONITOR: 116 case NL80211_IFTYPE_MONITOR:
117 data->n_interfaces[id]++; 117 if (mvmvif->monitor_active)
118 data->n_interfaces[id]++;
118 break; 119 break;
119 case NL80211_IFTYPE_P2P_DEVICE: 120 case NL80211_IFTYPE_P2P_DEVICE:
120 break; 121 break;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 56b636d9ab30..55334d542e26 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -680,12 +680,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
680 */ 680 */
681static bool rs_use_green(struct ieee80211_sta *sta) 681static bool rs_use_green(struct ieee80211_sta *sta)
682{ 682{
683 struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv; 683 /*
684 684 * There's a bug somewhere in this code that causes the
685 bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode & 685 * scaling to get stuck because GF+SGI can't be combined
686 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); 686 * in SISO rates. Until we find that bug, disable GF, it
687 687 * has only limited benefit and we still interoperate with
688 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green; 688 * GF APs since we can always receive GF transmissions.
689 */
690 return false;
689} 691}
690 692
691/** 693/**
@@ -791,7 +793,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
791 793
792 if (num_of_ant(tbl->ant_type) > 1) 794 if (num_of_ant(tbl->ant_type) > 1)
793 tbl->ant_type = 795 tbl->ant_type =
794 first_antenna(mvm->nvm_data->valid_tx_ant); 796 first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
795 797
796 tbl->is_ht40 = 0; 798 tbl->is_ht40 = 0;
797 tbl->is_SGI = 0; 799 tbl->is_SGI = 0;
@@ -1233,7 +1235,7 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
1233 return -1; 1235 return -1;
1234 1236
1235 /* Need both Tx chains/antennas to support MIMO */ 1237 /* Need both Tx chains/antennas to support MIMO */
1236 if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 2) 1238 if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
1237 return -1; 1239 return -1;
1238 1240
1239 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n"); 1241 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
@@ -1285,7 +1287,7 @@ static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
1285 return -1; 1287 return -1;
1286 1288
1287 /* Need both Tx chains/antennas to support MIMO */ 1289 /* Need both Tx chains/antennas to support MIMO */
1288 if (num_of_ant(mvm->nvm_data->valid_tx_ant) < 3) 1290 if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
1289 return -1; 1291 return -1;
1290 1292
1291 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n"); 1293 IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
@@ -1379,7 +1381,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
1379 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1381 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1380 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1382 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1381 u8 start_action; 1383 u8 start_action;
1382 u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 1384 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
1383 u8 tx_chains_num = num_of_ant(valid_tx_ant); 1385 u8 tx_chains_num = num_of_ant(valid_tx_ant);
1384 int ret; 1386 int ret;
1385 u8 update_search_tbl_counter = 0; 1387 u8 update_search_tbl_counter = 0;
@@ -1512,7 +1514,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
1512 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1514 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1513 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1515 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1514 u8 start_action; 1516 u8 start_action;
1515 u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 1517 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
1516 u8 tx_chains_num = num_of_ant(valid_tx_ant); 1518 u8 tx_chains_num = num_of_ant(valid_tx_ant);
1517 u8 update_search_tbl_counter = 0; 1519 u8 update_search_tbl_counter = 0;
1518 int ret; 1520 int ret;
@@ -1647,7 +1649,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
1647 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1649 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1648 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1650 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1649 u8 start_action; 1651 u8 start_action;
1650 u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 1652 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
1651 u8 tx_chains_num = num_of_ant(valid_tx_ant); 1653 u8 tx_chains_num = num_of_ant(valid_tx_ant);
1652 u8 update_search_tbl_counter = 0; 1654 u8 update_search_tbl_counter = 0;
1653 int ret; 1655 int ret;
@@ -1784,7 +1786,7 @@ static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
1784 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1786 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1785 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1787 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1786 u8 start_action; 1788 u8 start_action;
1787 u8 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 1789 u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
1788 u8 tx_chains_num = num_of_ant(valid_tx_ant); 1790 u8 tx_chains_num = num_of_ant(valid_tx_ant);
1789 int ret; 1791 int ret;
1790 u8 update_search_tbl_counter = 0; 1792 u8 update_search_tbl_counter = 0;
@@ -2447,7 +2449,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2447 2449
2448 i = lq_sta->last_txrate_idx; 2450 i = lq_sta->last_txrate_idx;
2449 2451
2450 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 2452 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
2451 2453
2452 if (!lq_sta->search_better_tbl) 2454 if (!lq_sta->search_better_tbl)
2453 active_tbl = lq_sta->active_tbl; 2455 active_tbl = lq_sta->active_tbl;
@@ -2637,15 +2639,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2637 2639
2638 /* These values will be overridden later */ 2640 /* These values will be overridden later */
2639 lq_sta->lq.single_stream_ant_msk = 2641 lq_sta->lq.single_stream_ant_msk =
2640 first_antenna(mvm->nvm_data->valid_tx_ant); 2642 first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
2641 lq_sta->lq.dual_stream_ant_msk = 2643 lq_sta->lq.dual_stream_ant_msk =
2642 mvm->nvm_data->valid_tx_ant & 2644 iwl_fw_valid_tx_ant(mvm->fw) &
2643 ~first_antenna(mvm->nvm_data->valid_tx_ant); 2645 ~first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
2644 if (!lq_sta->lq.dual_stream_ant_msk) { 2646 if (!lq_sta->lq.dual_stream_ant_msk) {
2645 lq_sta->lq.dual_stream_ant_msk = ANT_AB; 2647 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
2646 } else if (num_of_ant(mvm->nvm_data->valid_tx_ant) == 2) { 2648 } else if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) == 2) {
2647 lq_sta->lq.dual_stream_ant_msk = 2649 lq_sta->lq.dual_stream_ant_msk =
2648 mvm->nvm_data->valid_tx_ant; 2650 iwl_fw_valid_tx_ant(mvm->fw);
2649 } 2651 }
2650 2652
2651 /* as default allow aggregation for all tids */ 2653 /* as default allow aggregation for all tids */
@@ -2706,7 +2708,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
2706 index++; 2708 index++;
2707 repeat_rate--; 2709 repeat_rate--;
2708 if (mvm) 2710 if (mvm)
2709 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 2711 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
2710 2712
2711 /* Fill rest of rate table */ 2713 /* Fill rest of rate table */
2712 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2714 while (index < LINK_QUAL_MAX_RETRY_NUM) {
@@ -2811,7 +2813,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2811 u8 ant_sel_tx; 2813 u8 ant_sel_tx;
2812 2814
2813 mvm = lq_sta->drv; 2815 mvm = lq_sta->drv;
2814 valid_tx_ant = mvm->nvm_data->valid_tx_ant; 2816 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
2815 if (lq_sta->dbg_fixed_rate) { 2817 if (lq_sta->dbg_fixed_rate) {
2816 ant_sel_tx = 2818 ant_sel_tx =
2817 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 2819 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -2882,9 +2884,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2882 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2884 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2883 lq_sta->dbg_fixed_rate); 2885 lq_sta->dbg_fixed_rate);
2884 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 2886 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2885 (mvm->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "", 2887 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_A) ? "ANT_A," : "",
2886 (mvm->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "", 2888 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
2887 (mvm->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : ""); 2889 (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
2888 desc += sprintf(buff+desc, "lq type %s\n", 2890 desc += sprintf(buff+desc, "lq type %s\n",
2889 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 2891 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2890 if (is_Ht(tbl->lq_type)) { 2892 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index b0b190d0ec23..4dfc21a3e83e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9b21b92aa8d1..2157b0f8ced5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -74,7 +74,7 @@
74static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) 74static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
75{ 75{
76 u16 rx_chain; 76 u16 rx_chain;
77 u8 rx_ant = mvm->nvm_data->valid_rx_ant; 77 u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
78 78
79 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; 79 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
80 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; 80 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
@@ -115,7 +115,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
115 u32 tx_ant; 115 u32 tx_ant;
116 116
117 mvm->scan_last_antenna_idx = 117 mvm->scan_last_antenna_idx =
118 iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, 118 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
119 mvm->scan_last_antenna_idx); 119 mvm->scan_last_antenna_idx);
120 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; 120 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
121 121
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 274f44e2ef60..0fd96e4da461 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -101,8 +101,55 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
101 } 101 }
102 add_sta_cmd.add_modify = update ? 1 : 0; 102 add_sta_cmd.add_modify = update ? 1 : 0;
103 103
104 /* STA_FLG_FAT_EN_MSK ? */ 104 add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_FAT_EN_MSK |
105 /* STA_FLG_MIMO_EN_MSK ? */ 105 STA_FLG_MIMO_EN_MSK);
106
107 switch (sta->bandwidth) {
108 case IEEE80211_STA_RX_BW_160:
109 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
110 /* fall through */
111 case IEEE80211_STA_RX_BW_80:
112 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
113 /* fall through */
114 case IEEE80211_STA_RX_BW_40:
115 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
116 /* fall through */
117 case IEEE80211_STA_RX_BW_20:
118 if (sta->ht_cap.ht_supported)
119 add_sta_cmd.station_flags |=
120 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
121 break;
122 }
123
124 switch (sta->rx_nss) {
125 case 1:
126 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
127 break;
128 case 2:
129 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
130 break;
131 case 3 ... 8:
132 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
133 break;
134 }
135
136 switch (sta->smps_mode) {
137 case IEEE80211_SMPS_AUTOMATIC:
138 case IEEE80211_SMPS_NUM_MODES:
139 WARN_ON(1);
140 break;
141 case IEEE80211_SMPS_STATIC:
142 /* override NSS */
143 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
144 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
145 break;
146 case IEEE80211_SMPS_DYNAMIC:
147 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
148 break;
149 case IEEE80211_SMPS_OFF:
150 /* nothing */
151 break;
152 }
106 153
107 if (sta->ht_cap.ht_supported) { 154 if (sta->ht_cap.ht_supported) {
108 add_sta_cmd.station_flags_msk |= 155 add_sta_cmd.station_flags_msk |=
@@ -340,6 +387,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
340 387
341 if (vif->type == NL80211_IFTYPE_STATION && 388 if (vif->type == NL80211_IFTYPE_STATION &&
342 mvmvif->ap_sta_id == mvm_sta->sta_id) { 389 mvmvif->ap_sta_id == mvm_sta->sta_id) {
390 /* flush its queues here since we are freeing mvm_sta */
391 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
392
343 /* 393 /*
344 * Put a non-NULL since the fw station isn't removed. 394 * Put a non-NULL since the fw station isn't removed.
345 * It will be removed after the MAC will be set as 395 * It will be removed after the MAC will be set as
@@ -348,9 +398,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
348 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 398 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
349 ERR_PTR(-EINVAL)); 399 ERR_PTR(-EINVAL));
350 400
351 /* flush its queues here since we are freeing mvm_sta */
352 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
353
354 /* if we are associated - we can't remove the AP STA now */ 401 /* if we are associated - we can't remove the AP STA now */
355 if (vif->bss_conf.assoc) 402 if (vif->bss_conf.assoc)
356 return ret; 403 return ret;
@@ -686,7 +733,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
686 733
687 spin_lock_bh(&mvmsta->lock); 734 spin_lock_bh(&mvmsta->lock);
688 tid_data = &mvmsta->tid_data[tid]; 735 tid_data = &mvmsta->tid_data[tid];
689 tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); 736 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
690 tid_data->txq_id = txq_id; 737 tid_data->txq_id = txq_id;
691 *ssn = tid_data->ssn; 738 *ssn = tid_data->ssn;
692 739
@@ -789,7 +836,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
789 836
790 switch (tid_data->state) { 837 switch (tid_data->state) {
791 case IWL_AGG_ON: 838 case IWL_AGG_ON:
792 tid_data->ssn = SEQ_TO_SN(tid_data->seq_number); 839 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
793 840
794 IWL_DEBUG_TX_QUEUES(mvm, 841 IWL_DEBUG_TX_QUEUES(mvm,
795 "ssn = %d, next_recl = %d\n", 842 "ssn = %d, next_recl = %d\n",
@@ -834,6 +881,34 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
834 return err; 881 return err;
835} 882}
836 883
884int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
885 struct ieee80211_sta *sta, u16 tid)
886{
887 struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
888 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
889 u16 txq_id;
890
891 /*
892 * First set the agg state to OFF to avoid calling
893 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
894 */
895 spin_lock_bh(&mvmsta->lock);
896 txq_id = tid_data->txq_id;
897 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
898 mvmsta->sta_id, tid, txq_id, tid_data->state);
899 tid_data->state = IWL_AGG_OFF;
900 spin_unlock_bh(&mvmsta->lock);
901
902 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
903 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
904
905 iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
906 mvm->queue_to_mac80211[tid_data->txq_id] =
907 IWL_INVALID_MAC80211_QUEUE;
908
909 return 0;
910}
911
837static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 912static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
838{ 913{
839 int i; 914 int i;
@@ -870,7 +945,7 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
870 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) 945 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
871 return mvmvif->ap_sta_id; 946 return mvmvif->ap_sta_id;
872 947
873 return IWL_INVALID_STATION; 948 return IWL_MVM_STATION_COUNT;
874} 949}
875 950
876static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 951static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
@@ -1018,7 +1093,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1018 1093
1019 /* Get the station id from the mvm local station table */ 1094 /* Get the station id from the mvm local station table */
1020 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1095 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1021 if (sta_id == IWL_INVALID_STATION) { 1096 if (sta_id == IWL_MVM_STATION_COUNT) {
1022 IWL_ERR(mvm, "Failed to find station id\n"); 1097 IWL_ERR(mvm, "Failed to find station id\n");
1023 return -EINVAL; 1098 return -EINVAL;
1024 } 1099 }
@@ -1113,7 +1188,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1113 return -ENOENT; 1188 return -ENOENT;
1114 } 1189 }
1115 1190
1116 if (sta_id == IWL_INVALID_STATION) { 1191 if (sta_id == IWL_MVM_STATION_COUNT) {
1117 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); 1192 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
1118 return 0; 1193 return 0;
1119 } 1194 }
@@ -1179,7 +1254,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1179 struct iwl_mvm_sta *mvm_sta; 1254 struct iwl_mvm_sta *mvm_sta;
1180 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1255 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
1181 1256
1182 if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION)) 1257 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
1183 return; 1258 return;
1184 1259
1185 rcu_read_lock(); 1260 rcu_read_lock();
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 896f88ac8145..12abd2d71835 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -271,6 +271,7 @@ struct iwl_mvm_tid_data {
271 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for 271 * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
272 * tid. 272 * tid.
273 * @max_agg_bufsize: the maximal size of the AGG buffer for this station 273 * @max_agg_bufsize: the maximal size of the AGG buffer for this station
274 * @bt_reduced_txpower: is reduced tx power enabled for this station
274 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx 275 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
275 * and from Tx response flow, it needs a spinlock. 276 * and from Tx response flow, it needs a spinlock.
276 * @pending_frames: number of frames for this STA on the shared Tx queues. 277 * @pending_frames: number of frames for this STA on the shared Tx queues.
@@ -287,6 +288,7 @@ struct iwl_mvm_sta {
287 u32 mac_id_n_color; 288 u32 mac_id_n_color;
288 u16 tid_disable_agg; 289 u16 tid_disable_agg;
289 u8 max_agg_bufsize; 290 u8 max_agg_bufsize;
291 bool bt_reduced_txpower;
290 spinlock_t lock; 292 spinlock_t lock;
291 atomic_t pending_frames; 293 atomic_t pending_frames;
292 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; 294 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
@@ -348,6 +350,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
348 struct ieee80211_sta *sta, u16 tid, u8 buf_size); 350 struct ieee80211_sta *sta, u16 tid, u8 buf_size);
349int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 351int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
350 struct ieee80211_sta *sta, u16 tid); 352 struct ieee80211_sta *sta, u16 tid);
353int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
354 struct ieee80211_sta *sta, u16 tid);
351 355
352int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); 356int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
353int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, 357int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index e437e02c7149..ad9bbca99213 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -76,14 +76,12 @@
76#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024)) 76#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024))
77#define MSEC_TO_TU(_msec) (_msec*1000/1024) 77#define MSEC_TO_TU(_msec) (_msec*1000/1024)
78 78
79/* For ROC use a TE type which has priority high enough to be scheduled when 79/*
80 * there is a concurrent BSS or GO/AP. Currently, use a TE type that has 80 * For the high priority TE use a time event type that has similar priority to
81 * priority similar to the TE priority used for action scans by the FW. 81 * the FW's action scan priority.
82 * TODO: This needs to be changed, based on the reason for the ROC, i.e., use
83 * TE_P2P_DEVICE_DISCOVERABLE for remain on channel without mgmt skb, and use
84 * TE_P2P_DEVICE_ACTION_SCAN
85 */ 82 */
86#define IWL_MVM_ROC_TE_TYPE TE_P2P_DEVICE_ACTION_SCAN 83#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
84#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
87 85
88void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, 86void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
89 struct iwl_mvm_time_event_data *te_data) 87 struct iwl_mvm_time_event_data *te_data)
@@ -116,7 +114,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
116 * issue as it will have to complete before the next command is 114 * issue as it will have to complete before the next command is
117 * executed, and a new time event means a new command. 115 * executed, and a new time event means a new command.
118 */ 116 */
119 iwl_mvm_flush_tx_path(mvm, BIT(IWL_OFFCHANNEL_QUEUE), false); 117 iwl_mvm_flush_tx_path(mvm, BIT(IWL_MVM_OFFCHANNEL_QUEUE), false);
120} 118}
121 119
122static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) 120static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -168,7 +166,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
168 WARN_ONCE(!le32_to_cpu(notif->status), 166 WARN_ONCE(!le32_to_cpu(notif->status),
169 "Failed to schedule time event\n"); 167 "Failed to schedule time event\n");
170 168
171 if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_END) { 169 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
172 IWL_DEBUG_TE(mvm, 170 IWL_DEBUG_TE(mvm,
173 "TE ended - current time %lu, estimated end %lu\n", 171 "TE ended - current time %lu, estimated end %lu\n",
174 jiffies, te_data->end_jiffies); 172 jiffies, te_data->end_jiffies);
@@ -191,7 +189,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
191 } 189 }
192 190
193 iwl_mvm_te_clear_data(mvm, te_data); 191 iwl_mvm_te_clear_data(mvm, te_data);
194 } else if (le32_to_cpu(notif->action) == TE_NOTIF_HOST_START) { 192 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
195 te_data->running = true; 193 te_data->running = true;
196 te_data->end_jiffies = jiffies + 194 te_data->end_jiffies = jiffies +
197 TU_TO_JIFFIES(te_data->duration); 195 TU_TO_JIFFIES(te_data->duration);
@@ -370,7 +368,8 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
370 time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1)); 368 time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
371 time_cmd.duration = cpu_to_le32(duration); 369 time_cmd.duration = cpu_to_le32(duration);
372 time_cmd.repeat = cpu_to_le32(1); 370 time_cmd.repeat = cpu_to_le32(1);
373 time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END); 371 time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
372 TE_NOTIF_HOST_EVENT_END);
374 373
375 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 374 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
376} 375}
@@ -438,7 +437,7 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
438} 437}
439 438
440int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 439int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
441 int duration) 440 int duration, enum ieee80211_roc_type type)
442{ 441{
443 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 442 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
444 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 443 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -459,27 +458,36 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
459 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 458 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
460 time_cmd.id_and_color = 459 time_cmd.id_and_color =
461 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 460 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
462 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE); 461
462 switch (type) {
463 case IEEE80211_ROC_TYPE_NORMAL:
464 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
465 break;
466 case IEEE80211_ROC_TYPE_MGMT_TX:
467 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
468 break;
469 default:
470 WARN_ONCE(1, "Got an invalid ROC type\n");
471 return -EINVAL;
472 }
463 473
464 time_cmd.apply_time = cpu_to_le32(0); 474 time_cmd.apply_time = cpu_to_le32(0);
465 time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT); 475 time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
466 time_cmd.is_present = cpu_to_le32(1); 476 time_cmd.is_present = cpu_to_le32(1);
467
468 time_cmd.interval = cpu_to_le32(1); 477 time_cmd.interval = cpu_to_le32(1);
469 478
470 /* 479 /*
471 * IWL_MVM_ROC_TE_TYPE can have lower priority than other events 480 * The P2P Device TEs can have lower priority than other events
472 * that are being scheduled by the driver/fw, and thus it might not be 481 * that are being scheduled by the driver/fw, and thus it might not be
473 * scheduled. To improve the chances of it being scheduled, allow it to 482 * scheduled. To improve the chances of it being scheduled, allow them
474 * be fragmented. 483 * to be fragmented, and in addition allow them to be delayed.
475 * In addition, for the same reasons, allow to delay the scheduling of
476 * the time event.
477 */ 484 */
478 time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20); 485 time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
479 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); 486 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
480 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); 487 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
481 time_cmd.repeat = cpu_to_le32(1); 488 time_cmd.repeat = cpu_to_le32(1);
482 time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_START | TE_NOTIF_HOST_END); 489 time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
490 TE_NOTIF_HOST_EVENT_END);
483 491
484 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 492 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
485} 493}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index 64fb57a5ab43..f86c51065ed3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -162,6 +162,7 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
162 * that the vif type is NL80211_IFTYPE_P2P_DEVICE 162 * that the vif type is NL80211_IFTYPE_P2P_DEVICE
163 * @duration: the requested duration in millisecond for the fw to be on the 163 * @duration: the requested duration in millisecond for the fw to be on the
164 * channel that is bound to the vif. 164 * channel that is bound to the vif.
165 * @type: the remain on channel request type
165 * 166 *
166 * This function can be used to issue a remain on channel session, 167 * This function can be used to issue a remain on channel session,
167 * which means that the fw will stay in the channel for the request %duration 168 * which means that the fw will stay in the channel for the request %duration
@@ -172,7 +173,7 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
172 * another notification to the driver. 173 * another notification to the driver.
173 */ 174 */
174int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 175int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
175 int duration); 176 int duration, enum ieee80211_roc_type type);
176 177
177/** 178/**
178 * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity 179 * iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6645efe5c03e..479074303bd7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -205,7 +205,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
205 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); 205 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
206 206
207 mvm->mgmt_last_antenna_idx = 207 mvm->mgmt_last_antenna_idx =
208 iwl_mvm_next_antenna(mvm, mvm->nvm_data->valid_tx_ant, 208 iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
209 mvm->mgmt_last_antenna_idx); 209 mvm->mgmt_last_antenna_idx);
210 rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 210 rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
211 211
@@ -365,7 +365,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
365 if (WARN_ON_ONCE(!mvmsta)) 365 if (WARN_ON_ONCE(!mvmsta))
366 return -1; 366 return -1;
367 367
368 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_INVALID_STATION)) 368 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
369 return -1; 369 return -1;
370 370
371 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id); 371 dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
417 spin_unlock(&mvmsta->lock); 417 spin_unlock(&mvmsta->lock);
418 418
419 if (mvmsta->vif->type == NL80211_IFTYPE_AP && 419 if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
420 txq_id < IWL_FIRST_AMPDU_QUEUE) 420 txq_id < IWL_MVM_FIRST_AGG_QUEUE)
421 atomic_inc(&mvmsta->pending_frames); 421 atomic_inc(&mvmsta->pending_frames);
422 422
423 return 0; 423 return 0;
@@ -606,7 +606,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
606 info); 606 info);
607 607
608 /* Single frame failure in an AMPDU queue => send BAR */ 608 /* Single frame failure in an AMPDU queue => send BAR */
609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE && 609 if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
610 !(info->flags & IEEE80211_TX_STAT_ACK)) 610 !(info->flags & IEEE80211_TX_STAT_ACK))
611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
612 612
@@ -619,7 +619,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
619 ieee80211_tx_status_ni(mvm->hw, skb); 619 ieee80211_tx_status_ni(mvm->hw, skb);
620 } 620 }
621 621
622 if (txq_id >= IWL_FIRST_AMPDU_QUEUE) { 622 if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
623 /* If this is an aggregation queue, we use the ssn since: 623 /* If this is an aggregation queue, we use the ssn since:
624 * ssn = wifi seq_num % 256. 624 * ssn = wifi seq_num % 256.
625 * The seq_ctl is the sequence control of the packet to which 625 * The seq_ctl is the sequence control of the packet to which
@@ -637,14 +637,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
637 next_reclaimed = ssn; 637 next_reclaimed = ssn;
638 } else { 638 } else {
639 /* The next packet to be reclaimed is the one after this one */ 639 /* The next packet to be reclaimed is the one after this one */
640 next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10); 640 next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
641 } 641 }
642 642
643 IWL_DEBUG_TX_REPLY(mvm, 643 IWL_DEBUG_TX_REPLY(mvm,
644 "TXQ %d status %s (0x%08x)\n\t\t\t\tinitial_rate 0x%x " 644 "TXQ %d status %s (0x%08x)\n",
645 "retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", 645 txq_id, iwl_mvm_get_tx_fail_reason(status), status);
646 txq_id, iwl_mvm_get_tx_fail_reason(status), 646
647 status, le32_to_cpu(tx_resp->initial_rate), 647 IWL_DEBUG_TX_REPLY(mvm,
648 "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
649 le32_to_cpu(tx_resp->initial_rate),
648 tx_resp->failure_frame, SEQ_TO_INDEX(sequence), 650 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
649 ssn, next_reclaimed, seq_ctl); 651 ssn, next_reclaimed, seq_ctl);
650 652
@@ -681,7 +683,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
681 * If there are no pending frames for this STA, notify mac80211 that 683 * If there are no pending frames for this STA, notify mac80211 that
682 * this station can go to sleep in its STA table. 684 * this station can go to sleep in its STA table.
683 */ 685 */
684 if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta && 686 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta &&
685 !WARN_ON(skb_freed > 1) && 687 !WARN_ON(skb_freed > 1) &&
686 mvmsta->vif->type == NL80211_IFTYPE_AP && 688 mvmsta->vif->type == NL80211_IFTYPE_AP &&
687 atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { 689 atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
@@ -750,7 +752,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
750 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 752 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
751 struct ieee80211_sta *sta; 753 struct ieee80211_sta *sta;
752 754
753 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_FIRST_AMPDU_QUEUE)) 755 if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
754 return; 756 return;
755 757
756 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) 758 if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 000e842c2edd..687b34e387ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -253,8 +253,9 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
253u8 first_antenna(u8 mask) 253u8 first_antenna(u8 mask)
254{ 254{
255 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ 255 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
256 WARN_ON_ONCE(!mask); /* ffs will return 0 if mask is zeroed */ 256 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
257 return (u8)(BIT(ffs(mask))); 257 return BIT(0);
258 return BIT(ffs(mask) - 1);
258} 259}
259 260
260/* 261/*
@@ -462,7 +463,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
462 .data = { lq, }, 463 .data = { lq, },
463 }; 464 };
464 465
465 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 466 if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
466 return -EINVAL; 467 return -EINVAL;
467 468
468 if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) 469 if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
deleted file mode 100644
index c6f8e83c3551..000000000000
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_pci_h__
64#define __iwl_pci_h__
65
66
67/*
68 * This file declares the config structures for all devices.
69 */
70
71extern const struct iwl_cfg iwl5300_agn_cfg;
72extern const struct iwl_cfg iwl5100_agn_cfg;
73extern const struct iwl_cfg iwl5350_agn_cfg;
74extern const struct iwl_cfg iwl5100_bgn_cfg;
75extern const struct iwl_cfg iwl5100_abg_cfg;
76extern const struct iwl_cfg iwl5150_agn_cfg;
77extern const struct iwl_cfg iwl5150_abg_cfg;
78extern const struct iwl_cfg iwl6005_2agn_cfg;
79extern const struct iwl_cfg iwl6005_2abg_cfg;
80extern const struct iwl_cfg iwl6005_2bg_cfg;
81extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
82extern const struct iwl_cfg iwl6005_2agn_d_cfg;
83extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
84extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
85extern const struct iwl_cfg iwl1030_bgn_cfg;
86extern const struct iwl_cfg iwl1030_bg_cfg;
87extern const struct iwl_cfg iwl6030_2agn_cfg;
88extern const struct iwl_cfg iwl6030_2abg_cfg;
89extern const struct iwl_cfg iwl6030_2bgn_cfg;
90extern const struct iwl_cfg iwl6030_2bg_cfg;
91extern const struct iwl_cfg iwl6000i_2agn_cfg;
92extern const struct iwl_cfg iwl6000i_2abg_cfg;
93extern const struct iwl_cfg iwl6000i_2bg_cfg;
94extern const struct iwl_cfg iwl6000_3agn_cfg;
95extern const struct iwl_cfg iwl6050_2agn_cfg;
96extern const struct iwl_cfg iwl6050_2abg_cfg;
97extern const struct iwl_cfg iwl6150_bgn_cfg;
98extern const struct iwl_cfg iwl6150_bg_cfg;
99extern const struct iwl_cfg iwl1000_bgn_cfg;
100extern const struct iwl_cfg iwl1000_bg_cfg;
101extern const struct iwl_cfg iwl100_bgn_cfg;
102extern const struct iwl_cfg iwl100_bg_cfg;
103extern const struct iwl_cfg iwl130_bgn_cfg;
104extern const struct iwl_cfg iwl130_bg_cfg;
105extern const struct iwl_cfg iwl2000_2bgn_cfg;
106extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
107extern const struct iwl_cfg iwl2030_2bgn_cfg;
108extern const struct iwl_cfg iwl6035_2agn_cfg;
109extern const struct iwl_cfg iwl105_bgn_cfg;
110extern const struct iwl_cfg iwl105_bgn_d_cfg;
111extern const struct iwl_cfg iwl135_bgn_cfg;
112extern const struct iwl_cfg iwl7260_2ac_cfg;
113extern const struct iwl_cfg iwl3160_ac_cfg;
114
115#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7bc0fb9128dd..8cb53ec2b77b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,8 +69,6 @@
69 69
70#include "iwl-trans.h" 70#include "iwl-trans.h"
71#include "iwl-drv.h" 71#include "iwl-drv.h"
72
73#include "cfg.h"
74#include "internal.h" 72#include "internal.h"
75 73
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 74#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -243,6 +241,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
243 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, 241 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
244 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, 242 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, 243 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
244 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
246 245
247/* 105 Series */ 246/* 105 Series */
248 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, 247 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
@@ -257,6 +256,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
257 256
258/* 7000 Series */ 257/* 7000 Series */
259 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 258 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
259 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)},
260 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 260 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
261 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, 261 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
262 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, 262 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 12c4f31ca8fb..50ba0a468f94 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -728,7 +728,8 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
728 728
729static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 729static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
730{ 730{
731 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 731 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
732 ((reg & 0x000FFFFF) | (3 << 24)));
732 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 733 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
733} 734}
734 735
@@ -736,7 +737,7 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
736 u32 val) 737 u32 val)
737{ 738{
738 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 739 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
739 ((addr & 0x0000FFFF) | (3 << 24))); 740 ((addr & 0x000FFFFF) | (3 << 24)));
740 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 741 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
741} 742}
742 743
@@ -1383,28 +1384,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1383 return ret; 1384 return ret;
1384} 1385}
1385 1386
1386static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
1387 const char __user *user_buf,
1388 size_t count, loff_t *ppos)
1389{
1390 struct iwl_trans *trans = file->private_data;
1391
1392 if (!trans->op_mode)
1393 return -EAGAIN;
1394
1395 local_bh_disable();
1396 iwl_op_mode_nic_error(trans->op_mode);
1397 local_bh_enable();
1398
1399 return count;
1400}
1401
1402DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 1387DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
1403DEBUGFS_READ_FILE_OPS(fh_reg); 1388DEBUGFS_READ_FILE_OPS(fh_reg);
1404DEBUGFS_READ_FILE_OPS(rx_queue); 1389DEBUGFS_READ_FILE_OPS(rx_queue);
1405DEBUGFS_READ_FILE_OPS(tx_queue); 1390DEBUGFS_READ_FILE_OPS(tx_queue);
1406DEBUGFS_WRITE_FILE_OPS(csr); 1391DEBUGFS_WRITE_FILE_OPS(csr);
1407DEBUGFS_WRITE_FILE_OPS(fw_restart);
1408 1392
1409/* 1393/*
1410 * Create the debugfs files and directories 1394 * Create the debugfs files and directories
@@ -1418,7 +1402,6 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1418 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 1402 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
1419 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 1403 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
1420 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 1404 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
1421 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
1422 return 0; 1405 return 0;
1423 1406
1424err: 1407err:
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index cb5c6792e3a8..c5e30294c5ac 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -501,10 +501,8 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
501 * shared with device */ 501 * shared with device */
502 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 502 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
503 &txq->q.dma_addr, GFP_KERNEL); 503 &txq->q.dma_addr, GFP_KERNEL);
504 if (!txq->tfds) { 504 if (!txq->tfds)
505 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
506 goto error; 505 goto error;
507 }
508 506
509 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); 507 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
510 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 508 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
@@ -1063,7 +1061,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1063 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); 1061 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
1064 1062
1065 /* If this queue is mapped to a certain station: it is an AGG queue */ 1063 /* If this queue is mapped to a certain station: it is an AGG queue */
1066 if (sta_id != IWL_INVALID_STATION) { 1064 if (sta_id >= 0) {
1067 u16 ra_tid = BUILD_RAxTID(sta_id, tid); 1065 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
1068 1066
1069 /* Map receiver-address / traffic-ID to this queue */ 1067 /* Map receiver-address / traffic-ID to this queue */
@@ -1566,8 +1564,11 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1566 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) 1564 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
1567 return -EIO; 1565 return -EIO;
1568 1566
1569 if (test_bit(STATUS_RFKILL, &trans_pcie->status)) 1567 if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
1568 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1569 cmd->id);
1570 return -ERFKILL; 1570 return -ERFKILL;
1571 }
1571 1572
1572 if (cmd->flags & CMD_ASYNC) 1573 if (cmd->flags & CMD_ASYNC)
1573 return iwl_pcie_send_hcmd_async(trans, cmd); 1574 return iwl_pcie_send_hcmd_async(trans, cmd);
@@ -1609,7 +1610,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1609 * Check here that the packets are in the right place on the ring. 1610 * Check here that the packets are in the right place on the ring.
1610 */ 1611 */
1611#ifdef CONFIG_IWLWIFI_DEBUG 1612#ifdef CONFIG_IWLWIFI_DEBUG
1612 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1613 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1613 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && 1614 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1614 ((wifi_seq & 0xff) != q->write_ptr), 1615 ((wifi_seq & 0xff) != q->write_ptr),
1615 "Q: %d WiFi Seq %d tfdNum %d", 1616 "Q: %d WiFi Seq %d tfdNum %d",
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7001856241e6..088de9d25c39 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -412,9 +412,9 @@ static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
412 struct ieee80211_conf *conf = &hw->conf; 412 struct ieee80211_conf *conf = &hw->conf;
413 lbtf_deb_enter(LBTF_DEB_MACOPS); 413 lbtf_deb_enter(LBTF_DEB_MACOPS);
414 414
415 if (conf->channel->center_freq != priv->cur_freq) { 415 if (conf->chandef.chan->center_freq != priv->cur_freq) {
416 priv->cur_freq = conf->channel->center_freq; 416 priv->cur_freq = conf->chandef.chan->center_freq;
417 lbtf_set_channel(priv, conf->channel->hw_value); 417 lbtf_set_channel(priv, conf->chandef.chan->hw_value);
418 } 418 }
419 lbtf_deb_leave(LBTF_DEB_MACOPS); 419 lbtf_deb_leave(LBTF_DEB_MACOPS);
420 return 0; 420 return 0;
@@ -537,7 +537,7 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
537 if (idx != 0) 537 if (idx != 0)
538 return -ENOENT; 538 return -ENOENT;
539 539
540 survey->channel = conf->channel; 540 survey->channel = conf->chandef.chan;
541 survey->filled = SURVEY_INFO_NOISE_DBM; 541 survey->filled = SURVEY_INFO_NOISE_DBM;
542 survey->noise = priv->noise; 542 survey->noise = priv->noise;
543 543
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cffdf4fbf161..b878a32e7a98 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -25,6 +25,7 @@
25#include <linux/if_arp.h> 25#include <linux/if_arp.h>
26#include <linux/rtnetlink.h> 26#include <linux/rtnetlink.h>
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include <linux/platform_device.h>
28#include <linux/debugfs.h> 29#include <linux/debugfs.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/ktime.h> 31#include <linux/ktime.h>
@@ -52,6 +53,10 @@ static bool paged_rx = false;
52module_param(paged_rx, bool, 0644); 53module_param(paged_rx, bool, 0644);
53MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); 54MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones");
54 55
56static bool rctbl = false;
57module_param(rctbl, bool, 0444);
58MODULE_PARM_DESC(rctbl, "Handle rate control table");
59
55/** 60/**
56 * enum hwsim_regtest - the type of regulatory tests we offer 61 * enum hwsim_regtest - the type of regulatory tests we offer
57 * 62 *
@@ -717,9 +722,17 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
717 rx_status.flag |= RX_FLAG_MACTIME_START; 722 rx_status.flag |= RX_FLAG_MACTIME_START;
718 rx_status.freq = chan->center_freq; 723 rx_status.freq = chan->center_freq;
719 rx_status.band = chan->band; 724 rx_status.band = chan->band;
720 rx_status.rate_idx = info->control.rates[0].idx; 725 if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
721 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) 726 rx_status.rate_idx =
722 rx_status.flag |= RX_FLAG_HT; 727 ieee80211_rate_get_vht_mcs(&info->control.rates[0]);
728 rx_status.vht_nss =
729 ieee80211_rate_get_vht_nss(&info->control.rates[0]);
730 rx_status.flag |= RX_FLAG_VHT;
731 } else {
732 rx_status.rate_idx = info->control.rates[0].idx;
733 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
734 rx_status.flag |= RX_FLAG_HT;
735 }
723 if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 736 if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
724 rx_status.flag |= RX_FLAG_40MHZ; 737 rx_status.flag |= RX_FLAG_40MHZ;
725 if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) 738 if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -886,8 +899,12 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
886 if (control->sta) 899 if (control->sta)
887 hwsim_check_sta_magic(control->sta); 900 hwsim_check_sta_magic(control->sta);
888 901
889 txi->rate_driver_data[0] = channel; 902 if (rctbl)
903 ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
904 txi->control.rates,
905 ARRAY_SIZE(txi->control.rates));
890 906
907 txi->rate_driver_data[0] = channel;
891 mac80211_hwsim_monitor_rx(hw, skb, channel); 908 mac80211_hwsim_monitor_rx(hw, skb, channel);
892 909
893 /* wmediumd mode check */ 910 /* wmediumd mode check */
@@ -964,6 +981,12 @@ static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
964 newtype, vif->addr); 981 newtype, vif->addr);
965 hwsim_check_magic(vif); 982 hwsim_check_magic(vif);
966 983
984 /*
985 * interface may change from non-AP to AP in
986 * which case this needs to be set up again
987 */
988 vif->cab_queue = 0;
989
967 return 0; 990 return 0;
968} 991}
969 992
@@ -983,6 +1006,13 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
983{ 1006{
984 u32 _pid = ACCESS_ONCE(wmediumd_portid); 1007 u32 _pid = ACCESS_ONCE(wmediumd_portid);
985 1008
1009 if (rctbl) {
1010 struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
1011 ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
1012 txi->control.rates,
1013 ARRAY_SIZE(txi->control.rates));
1014 }
1015
986 mac80211_hwsim_monitor_rx(hw, skb, chan); 1016 mac80211_hwsim_monitor_rx(hw, skb, chan);
987 1017
988 if (_pid) 1018 if (_pid)
@@ -1013,6 +1043,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
1013 if (skb == NULL) 1043 if (skb == NULL)
1014 return; 1044 return;
1015 info = IEEE80211_SKB_CB(skb); 1045 info = IEEE80211_SKB_CB(skb);
1046 if (rctbl)
1047 ieee80211_get_tx_rates(vif, NULL, skb,
1048 info->control.rates,
1049 ARRAY_SIZE(info->control.rates));
1050
1016 txrate = ieee80211_get_tx_rate(hw, info); 1051 txrate = ieee80211_get_tx_rate(hw, info);
1017 1052
1018 mgmt = (struct ieee80211_mgmt *) skb->data; 1053 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1056,11 +1091,13 @@ out:
1056 return HRTIMER_NORESTART; 1091 return HRTIMER_NORESTART;
1057} 1092}
1058 1093
1059static const char *hwsim_chantypes[] = { 1094static const char * const hwsim_chanwidths[] = {
1060 [NL80211_CHAN_NO_HT] = "noht", 1095 [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
1061 [NL80211_CHAN_HT20] = "ht20", 1096 [NL80211_CHAN_WIDTH_20] = "ht20",
1062 [NL80211_CHAN_HT40MINUS] = "ht40-", 1097 [NL80211_CHAN_WIDTH_40] = "ht40",
1063 [NL80211_CHAN_HT40PLUS] = "ht40+", 1098 [NL80211_CHAN_WIDTH_80] = "vht80",
1099 [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
1100 [NL80211_CHAN_WIDTH_160] = "vht160",
1064}; 1101};
1065 1102
1066static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) 1103static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
@@ -1074,18 +1111,28 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
1074 [IEEE80211_SMPS_DYNAMIC] = "dynamic", 1111 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
1075 }; 1112 };
1076 1113
1077 wiphy_debug(hw->wiphy, 1114 if (conf->chandef.chan)
1078 "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", 1115 wiphy_debug(hw->wiphy,
1079 __func__, 1116 "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n",
1080 conf->channel ? conf->channel->center_freq : 0, 1117 __func__,
1081 hwsim_chantypes[conf->channel_type], 1118 conf->chandef.chan->center_freq,
1082 !!(conf->flags & IEEE80211_CONF_IDLE), 1119 conf->chandef.center_freq1,
1083 !!(conf->flags & IEEE80211_CONF_PS), 1120 conf->chandef.center_freq2,
1084 smps_modes[conf->smps_mode]); 1121 hwsim_chanwidths[conf->chandef.width],
1122 !!(conf->flags & IEEE80211_CONF_IDLE),
1123 !!(conf->flags & IEEE80211_CONF_PS),
1124 smps_modes[conf->smps_mode]);
1125 else
1126 wiphy_debug(hw->wiphy,
1127 "%s (freq=0 idle=%d ps=%d smps=%s)\n",
1128 __func__,
1129 !!(conf->flags & IEEE80211_CONF_IDLE),
1130 !!(conf->flags & IEEE80211_CONF_PS),
1131 smps_modes[conf->smps_mode]);
1085 1132
1086 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1133 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1087 1134
1088 data->channel = conf->channel; 1135 data->channel = conf->chandef.chan;
1089 1136
1090 WARN_ON(data->channel && channels > 1); 1137 WARN_ON(data->channel && channels > 1);
1091 1138
@@ -1271,7 +1318,7 @@ static int mac80211_hwsim_get_survey(
1271 return -ENOENT; 1318 return -ENOENT;
1272 1319
1273 /* Current channel */ 1320 /* Current channel */
1274 survey->channel = conf->channel; 1321 survey->channel = conf->chandef.chan;
1275 1322
1276 /* 1323 /*
1277 * Magically conjured noise level --- this is only ok for simulated hardware. 1324 * Magically conjured noise level --- this is only ok for simulated hardware.
@@ -1389,7 +1436,7 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
1389 return 0; 1436 return 0;
1390} 1437}
1391 1438
1392static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop) 1439static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1393{ 1440{
1394 /* Not implemented, queues only on kernel side */ 1441 /* Not implemented, queues only on kernel side */
1395} 1442}
@@ -1535,7 +1582,8 @@ static void hw_roc_done(struct work_struct *work)
1535static int mac80211_hwsim_roc(struct ieee80211_hw *hw, 1582static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
1536 struct ieee80211_vif *vif, 1583 struct ieee80211_vif *vif,
1537 struct ieee80211_channel *chan, 1584 struct ieee80211_channel *chan,
1538 int duration) 1585 int duration,
1586 enum ieee80211_roc_type type)
1539{ 1587{
1540 struct mac80211_hwsim_data *hwsim = hw->priv; 1588 struct mac80211_hwsim_data *hwsim = hw->priv;
1541 1589
@@ -1668,6 +1716,7 @@ static void mac80211_hwsim_free(void)
1668 debugfs_remove(data->debugfs_ps); 1716 debugfs_remove(data->debugfs_ps);
1669 debugfs_remove(data->debugfs); 1717 debugfs_remove(data->debugfs);
1670 ieee80211_unregister_hw(data->hw); 1718 ieee80211_unregister_hw(data->hw);
1719 device_release_driver(data->dev);
1671 device_unregister(data->dev); 1720 device_unregister(data->dev);
1672 ieee80211_free_hw(data->hw); 1721 ieee80211_free_hw(data->hw);
1673 } 1722 }
@@ -1676,7 +1725,9 @@ static void mac80211_hwsim_free(void)
1676 1725
1677 1726
1678static struct device_driver mac80211_hwsim_driver = { 1727static struct device_driver mac80211_hwsim_driver = {
1679 .name = "mac80211_hwsim" 1728 .name = "mac80211_hwsim",
1729 .bus = &platform_bus_type,
1730 .owner = THIS_MODULE,
1680}; 1731};
1681 1732
1682static const struct net_device_ops hwsim_netdev_ops = { 1733static const struct net_device_ops hwsim_netdev_ops = {
@@ -2168,9 +2219,15 @@ static int __init init_mac80211_hwsim(void)
2168 spin_lock_init(&hwsim_radio_lock); 2219 spin_lock_init(&hwsim_radio_lock);
2169 INIT_LIST_HEAD(&hwsim_radios); 2220 INIT_LIST_HEAD(&hwsim_radios);
2170 2221
2222 err = driver_register(&mac80211_hwsim_driver);
2223 if (err)
2224 return err;
2225
2171 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); 2226 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
2172 if (IS_ERR(hwsim_class)) 2227 if (IS_ERR(hwsim_class)) {
2173 return PTR_ERR(hwsim_class); 2228 err = PTR_ERR(hwsim_class);
2229 goto failed_unregister_driver;
2230 }
2174 2231
2175 memset(addr, 0, ETH_ALEN); 2232 memset(addr, 0, ETH_ALEN);
2176 addr[0] = 0x02; 2233 addr[0] = 0x02;
@@ -2192,12 +2249,20 @@ static int __init init_mac80211_hwsim(void)
2192 "hwsim%d", i); 2249 "hwsim%d", i);
2193 if (IS_ERR(data->dev)) { 2250 if (IS_ERR(data->dev)) {
2194 printk(KERN_DEBUG 2251 printk(KERN_DEBUG
2195 "mac80211_hwsim: device_create " 2252 "mac80211_hwsim: device_create failed (%ld)\n",
2196 "failed (%ld)\n", PTR_ERR(data->dev)); 2253 PTR_ERR(data->dev));
2197 err = -ENOMEM; 2254 err = -ENOMEM;
2198 goto failed_drvdata; 2255 goto failed_drvdata;
2199 } 2256 }
2200 data->dev->driver = &mac80211_hwsim_driver; 2257 data->dev->driver = &mac80211_hwsim_driver;
2258 err = device_bind_driver(data->dev);
2259 if (err != 0) {
2260 printk(KERN_DEBUG
2261 "mac80211_hwsim: device_bind_driver failed (%d)\n",
2262 err);
2263 goto failed_hw;
2264 }
2265
2201 skb_queue_head_init(&data->pending); 2266 skb_queue_head_init(&data->pending);
2202 2267
2203 SET_IEEE80211_DEV(hw, data->dev); 2268 SET_IEEE80211_DEV(hw, data->dev);
@@ -2240,6 +2305,8 @@ static int __init init_mac80211_hwsim(void)
2240 IEEE80211_HW_AMPDU_AGGREGATION | 2305 IEEE80211_HW_AMPDU_AGGREGATION |
2241 IEEE80211_HW_WANT_MONITOR_VIF | 2306 IEEE80211_HW_WANT_MONITOR_VIF |
2242 IEEE80211_HW_QUEUE_CONTROL; 2307 IEEE80211_HW_QUEUE_CONTROL;
2308 if (rctbl)
2309 hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
2243 2310
2244 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 2311 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
2245 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 2312 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -2291,9 +2358,6 @@ static int __init init_mac80211_hwsim(void)
2291 2358
2292 hw->wiphy->bands[band] = sband; 2359 hw->wiphy->bands[band] = sband;
2293 2360
2294 if (channels == 1)
2295 continue;
2296
2297 sband->vht_cap.vht_supported = true; 2361 sband->vht_cap.vht_supported = true;
2298 sband->vht_cap.cap = 2362 sband->vht_cap.cap =
2299 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | 2363 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
@@ -2499,6 +2563,8 @@ failed_drvdata:
2499 ieee80211_free_hw(hw); 2563 ieee80211_free_hw(hw);
2500failed: 2564failed:
2501 mac80211_hwsim_free(); 2565 mac80211_hwsim_free();
2566failed_unregister_driver:
2567 driver_unregister(&mac80211_hwsim_driver);
2502 return err; 2568 return err;
2503} 2569}
2504module_init(init_mac80211_hwsim); 2570module_init(init_mac80211_hwsim);
@@ -2511,5 +2577,6 @@ static void __exit exit_mac80211_hwsim(void)
2511 2577
2512 mac80211_hwsim_free(); 2578 mac80211_hwsim_free();
2513 unregister_netdev(hwsim_mon); 2579 unregister_netdev(hwsim_mon);
2580 driver_unregister(&mac80211_hwsim_driver);
2514} 2581}
2515module_exit(exit_mac80211_hwsim); 2582module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index cf43b3c29250..5e0eec4d71c7 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -200,7 +200,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
200 200
201 /* VHT Operation IE */ 201 /* VHT Operation IE */
202 if (bss_desc->bcn_vht_oper) { 202 if (bss_desc->bcn_vht_oper) {
203 if (priv->bss_mode == HostCmd_BSS_MODE_IBSS) { 203 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
204 vht_op = (struct mwifiex_ie_types_vht_oper *)*buffer; 204 vht_op = (struct mwifiex_ie_types_vht_oper *)*buffer;
205 memset(vht_op, 0, sizeof(*vht_op)); 205 memset(vht_op, 0, sizeof(*vht_op));
206 vht_op->header.type = 206 vht_op->header.type =
@@ -259,3 +259,44 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
259 259
260 return ret_len; 260 return ret_len;
261} 261}
262
263int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
264 struct host_cmd_ds_command *cmd, u16 cmd_action,
265 struct mwifiex_11ac_vht_cfg *cfg)
266{
267 struct host_cmd_11ac_vht_cfg *vhtcfg = &cmd->params.vht_cfg;
268
269 cmd->command = cpu_to_le16(HostCmd_CMD_11AC_CFG);
270 cmd->size = cpu_to_le16(sizeof(struct host_cmd_11ac_vht_cfg) +
271 S_DS_GEN);
272 vhtcfg->action = cpu_to_le16(cmd_action);
273 vhtcfg->band_config = cfg->band_config;
274 vhtcfg->misc_config = cfg->misc_config;
275 vhtcfg->cap_info = cpu_to_le32(cfg->cap_info);
276 vhtcfg->mcs_tx_set = cpu_to_le32(cfg->mcs_tx_set);
277 vhtcfg->mcs_rx_set = cpu_to_le32(cfg->mcs_rx_set);
278
279 return 0;
280}
281
282/* This function initializes the BlockACK setup information for given
283 * mwifiex_private structure for 11ac enabled networks.
284 */
285void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
286{
287 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
288
289 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
290 priv->add_ba_param.tx_win_size =
291 MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE;
292 priv->add_ba_param.rx_win_size =
293 MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE;
294 } else {
295 priv->add_ba_param.tx_win_size =
296 MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
297 priv->add_ba_param.rx_win_size =
298 MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
299 }
300
301 return;
302}
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 80fd1ba46200..7c2c69b5b3eb 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -20,7 +20,24 @@
20#ifndef _MWIFIEX_11AC_H_ 20#ifndef _MWIFIEX_11AC_H_
21#define _MWIFIEX_11AC_H_ 21#define _MWIFIEX_11AC_H_
22 22
23#define VHT_CFG_2GHZ BIT(0)
24#define VHT_CFG_5GHZ BIT(1)
25
26enum vht_cfg_misc_config {
27 VHT_CAP_TX_OPERATION = 1,
28 VHT_CAP_ASSOCIATION,
29 VHT_CAP_UAP_ONLY
30};
31
32#define DEFAULT_VHT_MCS_SET 0xfffa
33#define DISABLE_VHT_MCS_SET 0xffff
34
35#define VHT_BW_80_160_80P80 BIT(2)
36
23int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, 37int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
24 struct mwifiex_bssdescriptor *bss_desc, 38 struct mwifiex_bssdescriptor *bss_desc,
25 u8 **buffer); 39 u8 **buffer);
40int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
41 struct host_cmd_ds_command *cmd, u16 cmd_action,
42 struct mwifiex_11ac_vht_cfg *cfg);
26#endif /* _MWIFIEX_11AC_H_ */ 43#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 45f19716687e..41e9d25a2d8e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -679,3 +679,25 @@ void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
679 679
680 return; 680 return;
681} 681}
682
683/* This function initializes the BlockACK setup information for given
684 * mwifiex_private structure.
685 */
686void mwifiex_set_ba_params(struct mwifiex_private *priv)
687{
688 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
689
690 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
691 priv->add_ba_param.tx_win_size =
692 MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE;
693 priv->add_ba_param.rx_win_size =
694 MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE;
695 } else {
696 priv->add_ba_param.tx_win_size =
697 MWIFIEX_STA_AMPDU_DEF_TXWINSIZE;
698 priv->add_ba_param.rx_win_size =
699 MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
700 }
701
702 return;
703}
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index af8fe6352eed..a78e0651409c 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -296,19 +296,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
296 break; 296 break;
297 } 297 }
298 if (ret != -EBUSY) { 298 if (ret != -EBUSY) {
299 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 299 mwifiex_rotate_priolists(priv, pra_list, ptrindex);
300 if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
301 priv->wmm.packets_out[ptrindex]++;
302 priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
303 }
304 /* Now bss_prio_cur pointer points to next node */
305 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
306 list_first_entry(
307 &adapter->bss_prio_tbl[priv->bss_priority]
308 .bss_prio_cur->list,
309 struct mwifiex_bss_prio_node, list);
310 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
311 ra_list_flags);
312 } 300 }
313 301
314 return 0; 302 return 0;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 5e796f847088..ada809f576fe 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -447,7 +447,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
447 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); 447 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
448 del_timer(&tbl->timer_context.timer); 448 del_timer(&tbl->timer_context.timer);
449 mod_timer(&tbl->timer_context.timer, 449 mod_timer(&tbl->timer_context.timer,
450 jiffies + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000); 450 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
451 451
452 /* 452 /*
453 * If seq_num is less then starting win then ignore and drop the 453 * If seq_num is less then starting win then ignore and drop the
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 97b245cbafd8..ecf28464367f 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -39,6 +39,7 @@ mwifiex-y += sta_tx.o
39mwifiex-y += sta_rx.o 39mwifiex-y += sta_rx.o
40mwifiex-y += uap_txrx.o 40mwifiex-y += uap_txrx.o
41mwifiex-y += cfg80211.o 41mwifiex-y += cfg80211.o
42mwifiex-y += ethtool.o
42mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 43mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
43obj-$(CONFIG_MWIFIEX) += mwifiex.o 44obj-$(CONFIG_MWIFIEX) += mwifiex.o
44 45
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8aaf56ade4d9..a0cb0770d319 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1374,6 +1374,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1374 } 1374 }
1375 1375
1376 mwifiex_set_ht_params(priv, bss_cfg, params); 1376 mwifiex_set_ht_params(priv, bss_cfg, params);
1377
1378 if (priv->adapter->is_hw_11ac_capable) {
1379 mwifiex_set_vht_params(priv, bss_cfg, params);
1380 mwifiex_set_vht_width(priv, params->chandef.width,
1381 priv->ap_11ac_enabled);
1382 }
1383
1384 if (priv->ap_11ac_enabled)
1385 mwifiex_set_11ac_ba_params(priv);
1386 else
1387 mwifiex_set_ba_params(priv);
1388
1377 mwifiex_set_wmm_params(priv, bss_cfg, params); 1389 mwifiex_set_wmm_params(priv, bss_cfg, params);
1378 1390
1379 if (params->inactivity_timeout > 0) { 1391 if (params->inactivity_timeout > 0) {
@@ -1654,17 +1666,13 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1654 struct cfg80211_connect_params *sme) 1666 struct cfg80211_connect_params *sme)
1655{ 1667{
1656 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1668 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1657 int ret = 0; 1669 int ret;
1658
1659 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
1660 wiphy_err(wiphy, "received infra assoc request "
1661 "when station is in ibss mode\n");
1662 goto done;
1663 }
1664 1670
1665 if (priv->bss_mode == NL80211_IFTYPE_AP) { 1671 if (priv->bss_mode != NL80211_IFTYPE_STATION) {
1666 wiphy_err(wiphy, "skip association request for AP interface\n"); 1672 wiphy_err(wiphy,
1667 goto done; 1673 "%s: reject infra assoc request in non-STA mode\n",
1674 dev->name);
1675 return -EINVAL;
1668 } 1676 }
1669 1677
1670 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", 1678 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
@@ -1672,7 +1680,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1672 1680
1673 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, 1681 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
1674 priv->bss_mode, sme->channel, sme, 0); 1682 priv->bss_mode, sme->channel, sme, 0);
1675done:
1676 if (!ret) { 1683 if (!ret) {
1677 cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, 1684 cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
1678 NULL, 0, WLAN_STATUS_SUCCESS, 1685 NULL, 0, WLAN_STATUS_SUCCESS,
@@ -1933,66 +1940,10 @@ static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
1933 struct mwifiex_private *priv) 1940 struct mwifiex_private *priv)
1934{ 1941{
1935 struct mwifiex_adapter *adapter = priv->adapter; 1942 struct mwifiex_adapter *adapter = priv->adapter;
1936 u32 vht_cap = 0, cap = adapter->hw_dot_11ac_dev_cap;
1937 1943
1938 vht_info->vht_supported = true; 1944 vht_info->vht_supported = true;
1939 1945
1940 switch (GET_VHTCAP_MAXMPDULEN(cap)) { 1946 vht_info->cap = adapter->hw_dot_11ac_dev_cap;
1941 case 0x00:
1942 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
1943 break;
1944 case 0x01:
1945 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
1946 break;
1947 case 0x10:
1948 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
1949 break;
1950 default:
1951 dev_err(adapter->dev, "unsupported MAX MPDU len\n");
1952 break;
1953 }
1954
1955 if (ISSUPP_11ACVHTHTCVHT(cap))
1956 vht_cap |= IEEE80211_VHT_CAP_HTC_VHT;
1957
1958 if (ISSUPP_11ACVHTTXOPPS(cap))
1959 vht_cap |= IEEE80211_VHT_CAP_VHT_TXOP_PS;
1960
1961 if (ISSUPP_11ACMURXBEAMFORMEE(cap))
1962 vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
1963
1964 if (ISSUPP_11ACMUTXBEAMFORMEE(cap))
1965 vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
1966
1967 if (ISSUPP_11ACSUBEAMFORMER(cap))
1968 vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
1969
1970 if (ISSUPP_11ACSUBEAMFORMEE(cap))
1971 vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
1972
1973 if (ISSUPP_11ACRXSTBC(cap))
1974 vht_cap |= IEEE80211_VHT_CAP_RXSTBC_1;
1975
1976 if (ISSUPP_11ACTXSTBC(cap))
1977 vht_cap |= IEEE80211_VHT_CAP_TXSTBC;
1978
1979 if (ISSUPP_11ACSGI160(cap))
1980 vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
1981
1982 if (ISSUPP_11ACSGI80(cap))
1983 vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
1984
1985 if (ISSUPP_11ACLDPC(cap))
1986 vht_cap |= IEEE80211_VHT_CAP_RXLDPC;
1987
1988 if (ISSUPP_11ACBW8080(cap))
1989 vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
1990
1991 if (ISSUPP_11ACBW160(cap))
1992 vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
1993
1994 vht_info->cap = vht_cap;
1995
1996 /* Update MCS support for VHT */ 1947 /* Update MCS support for VHT */
1997 vht_info->vht_mcs.rx_mcs_map = cpu_to_le16( 1948 vht_info->vht_mcs.rx_mcs_map = cpu_to_le16(
1998 adapter->hw_dot_11ac_mcs_support & 0xFFFF); 1949 adapter->hw_dot_11ac_mcs_support & 0xFFFF);
@@ -2180,10 +2131,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2180 2131
2181 /* At start-up, wpa_supplicant tries to change the interface 2132 /* At start-up, wpa_supplicant tries to change the interface
2182 * to NL80211_IFTYPE_STATION if it is not managed mode. 2133 * to NL80211_IFTYPE_STATION if it is not managed mode.
2183 * So, we initialize it to STA mode.
2184 */ 2134 */
2185 wdev->iftype = NL80211_IFTYPE_STATION; 2135 wdev->iftype = NL80211_IFTYPE_P2P_CLIENT;
2186 priv->bss_mode = NL80211_IFTYPE_STATION; 2136 priv->bss_mode = NL80211_IFTYPE_P2P_CLIENT;
2187 2137
2188 /* Setting bss_type to P2P tells firmware that this interface 2138 /* Setting bss_type to P2P tells firmware that this interface
2189 * is receiving P2P peers found during find phase and doing 2139 * is receiving P2P peers found during find phase and doing
@@ -2197,6 +2147,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2197 priv->bss_started = 0; 2147 priv->bss_started = 0;
2198 priv->bss_num = 0; 2148 priv->bss_num = 0;
2199 2149
2150 if (mwifiex_cfg80211_init_p2p_client(priv))
2151 return ERR_PTR(-EFAULT);
2152
2200 break; 2153 break;
2201 default: 2154 default:
2202 wiphy_err(wiphy, "type not supported\n"); 2155 wiphy_err(wiphy, "type not supported\n");
@@ -2236,6 +2189,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2236 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 2189 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
2237 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT; 2190 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
2238 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN; 2191 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
2192 dev->ethtool_ops = &mwifiex_ethtool_ops;
2239 2193
2240 mdev_priv = netdev_priv(dev); 2194 mdev_priv = netdev_priv(dev);
2241 *((unsigned long *) mdev_priv) = (unsigned long) priv; 2195 *((unsigned long *) mdev_priv) = (unsigned long) priv;
@@ -2294,6 +2248,152 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2294} 2248}
2295EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); 2249EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
2296 2250
2251#ifdef CONFIG_PM
2252static bool
2253mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
2254 s8 *byte_seq)
2255{
2256 int j, k, valid_byte_cnt = 0;
2257 bool dont_care_byte = false;
2258
2259 for (j = 0; j < DIV_ROUND_UP(pat->pattern_len, 8); j++) {
2260 for (k = 0; k < 8; k++) {
2261 if (pat->mask[j] & 1 << k) {
2262 memcpy(byte_seq + valid_byte_cnt,
2263 &pat->pattern[j * 8 + k], 1);
2264 valid_byte_cnt++;
2265 if (dont_care_byte)
2266 return false;
2267 } else {
2268 if (valid_byte_cnt)
2269 dont_care_byte = true;
2270 }
2271
2272 if (valid_byte_cnt > MAX_BYTESEQ)
2273 return false;
2274 }
2275 }
2276
2277 byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
2278
2279 return true;
2280}
2281
2282static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2283 struct cfg80211_wowlan *wowlan)
2284{
2285 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2286 struct mwifiex_ds_mef_cfg mef_cfg;
2287 struct mwifiex_mef_entry *mef_entry;
2288 int i, filt_num = 0, ret;
2289 bool first_pat = true;
2290 u8 byte_seq[MAX_BYTESEQ + 1];
2291 const u8 ipv4_mc_mac[] = {0x33, 0x33};
2292 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
2293 struct mwifiex_private *priv =
2294 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
2295
2296 if (!wowlan) {
2297 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
2298 return 0;
2299 }
2300
2301 if (!priv->media_connected) {
2302 dev_warn(adapter->dev,
2303 "Can not configure WOWLAN in disconnected state\n");
2304 return 0;
2305 }
2306
2307 mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
2308 if (!mef_entry)
2309 return -ENOMEM;
2310
2311 memset(&mef_cfg, 0, sizeof(mef_cfg));
2312 mef_cfg.num_entries = 1;
2313 mef_cfg.mef_entry = mef_entry;
2314 mef_entry->mode = MEF_MODE_HOST_SLEEP;
2315 mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
2316
2317 for (i = 0; i < wowlan->n_patterns; i++) {
2318 memset(byte_seq, 0, sizeof(byte_seq));
2319 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
2320 byte_seq)) {
2321 wiphy_err(wiphy, "Pattern not supported\n");
2322 kfree(mef_entry);
2323 return -EOPNOTSUPP;
2324 }
2325
2326 if (!wowlan->patterns[i].pkt_offset) {
2327 if (!(byte_seq[0] & 0x01) &&
2328 (byte_seq[MAX_BYTESEQ] == 1)) {
2329 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2330 continue;
2331 } else if (is_broadcast_ether_addr(byte_seq)) {
2332 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
2333 continue;
2334 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
2335 (byte_seq[MAX_BYTESEQ] == 2)) ||
2336 (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
2337 (byte_seq[MAX_BYTESEQ] == 3))) {
2338 mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
2339 continue;
2340 }
2341 }
2342
2343 mef_entry->filter[filt_num].repeat = 1;
2344 mef_entry->filter[filt_num].offset =
2345 wowlan->patterns[i].pkt_offset;
2346 memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq,
2347 sizeof(byte_seq));
2348 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2349
2350 if (first_pat)
2351 first_pat = false;
2352 else
2353 mef_entry->filter[filt_num].filt_action = TYPE_AND;
2354
2355 filt_num++;
2356 }
2357
2358 if (wowlan->magic_pkt) {
2359 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2360 mef_entry->filter[filt_num].repeat = 16;
2361 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2362 ETH_ALEN);
2363 mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
2364 mef_entry->filter[filt_num].offset = 14;
2365 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2366 if (filt_num)
2367 mef_entry->filter[filt_num].filt_action = TYPE_OR;
2368 }
2369
2370 if (!mef_cfg.criteria)
2371 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
2372 MWIFIEX_CRITERIA_UNICAST |
2373 MWIFIEX_CRITERIA_MULTICAST;
2374
2375 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
2376 HostCmd_ACT_GEN_SET, 0,
2377 &mef_cfg);
2378
2379 kfree(mef_entry);
2380 return ret;
2381}
2382
2383static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
2384{
2385 return 0;
2386}
2387
2388static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
2389 bool enabled)
2390{
2391 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2392
2393 device_set_wakeup_enable(adapter->dev, enabled);
2394}
2395#endif
2396
2297/* station cfg80211 operations */ 2397/* station cfg80211 operations */
2298static struct cfg80211_ops mwifiex_cfg80211_ops = { 2398static struct cfg80211_ops mwifiex_cfg80211_ops = {
2299 .add_virtual_intf = mwifiex_add_virtual_intf, 2399 .add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2322,6 +2422,11 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2322 .change_beacon = mwifiex_cfg80211_change_beacon, 2422 .change_beacon = mwifiex_cfg80211_change_beacon,
2323 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, 2423 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
2324 .set_antenna = mwifiex_cfg80211_set_antenna, 2424 .set_antenna = mwifiex_cfg80211_set_antenna,
2425#ifdef CONFIG_PM
2426 .suspend = mwifiex_cfg80211_suspend,
2427 .resume = mwifiex_cfg80211_resume,
2428 .set_wakeup = mwifiex_cfg80211_set_wakeup,
2429#endif
2325}; 2430};
2326 2431
2327/* 2432/*
@@ -2380,6 +2485,14 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2380 2485
2381 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); 2486 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
2382 2487
2488#ifdef CONFIG_PM
2489 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
2490 wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
2491 wiphy->wowlan.pattern_min_len = 1;
2492 wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
2493 wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
2494#endif
2495
2383 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 2496 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
2384 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 2497 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
2385 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 2498 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index b5c8b962ce12..74db0d24a579 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -153,7 +153,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
153 " or cmd size is 0, not sending\n"); 153 " or cmd size is 0, not sending\n");
154 if (cmd_node->wait_q_enabled) 154 if (cmd_node->wait_q_enabled)
155 adapter->cmd_wait_q.status = -1; 155 adapter->cmd_wait_q.status = -1;
156 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 156 mwifiex_recycle_cmd_node(adapter, cmd_node);
157 return -1; 157 return -1;
158 } 158 }
159 159
@@ -167,7 +167,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
167 "DNLD_CMD: FW in reset state, ignore cmd %#x\n", 167 "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
168 cmd_code); 168 cmd_code);
169 mwifiex_complete_cmd(adapter, cmd_node); 169 mwifiex_complete_cmd(adapter, cmd_node);
170 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 170 mwifiex_recycle_cmd_node(adapter, cmd_node);
171 return -1; 171 return -1;
172 } 172 }
173 173
@@ -228,7 +228,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
228 adapter->cmd_sent = false; 228 adapter->cmd_sent = false;
229 if (cmd_node->wait_q_enabled) 229 if (cmd_node->wait_q_enabled)
230 adapter->cmd_wait_q.status = -1; 230 adapter->cmd_wait_q.status = -1;
231 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 231 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
232 232
233 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 233 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
234 adapter->curr_cmd = NULL; 234 adapter->curr_cmd = NULL;
@@ -250,7 +250,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
250 250
251 /* Setup the timer after transmit command */ 251 /* Setup the timer after transmit command */
252 mod_timer(&adapter->cmd_timer, 252 mod_timer(&adapter->cmd_timer,
253 jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000); 253 jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
254 254
255 return 0; 255 return 0;
256} 256}
@@ -632,6 +632,20 @@ mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
632 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); 632 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
633} 633}
634 634
635/* This function reuses a command node. */
636void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
637 struct cmd_ctrl_node *cmd_node)
638{
639 struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
640
641 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
642
643 atomic_dec(&adapter->cmd_pending);
644 dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
645 le16_to_cpu(host_cmd->command),
646 atomic_read(&adapter->cmd_pending));
647}
648
635/* 649/*
636 * This function queues a command to the command pending queue. 650 * This function queues a command to the command pending queue.
637 * 651 *
@@ -673,7 +687,9 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
673 list_add(&cmd_node->list, &adapter->cmd_pending_q); 687 list_add(&cmd_node->list, &adapter->cmd_pending_q);
674 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); 688 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
675 689
676 dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x is queued\n", command); 690 atomic_inc(&adapter->cmd_pending);
691 dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
692 command, atomic_read(&adapter->cmd_pending));
677} 693}
678 694
679/* 695/*
@@ -783,7 +799,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
783 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { 799 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
784 dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n", 800 dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
785 le16_to_cpu(resp->command)); 801 le16_to_cpu(resp->command));
786 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 802 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
787 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 803 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
788 adapter->curr_cmd = NULL; 804 adapter->curr_cmd = NULL;
789 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 805 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -833,7 +849,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
833 if (adapter->curr_cmd->wait_q_enabled) 849 if (adapter->curr_cmd->wait_q_enabled)
834 adapter->cmd_wait_q.status = -1; 850 adapter->cmd_wait_q.status = -1;
835 851
836 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 852 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
837 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 853 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
838 adapter->curr_cmd = NULL; 854 adapter->curr_cmd = NULL;
839 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 855 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -865,8 +881,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
865 if (adapter->curr_cmd->wait_q_enabled) 881 if (adapter->curr_cmd->wait_q_enabled)
866 adapter->cmd_wait_q.status = ret; 882 adapter->cmd_wait_q.status = ret;
867 883
868 /* Clean up and put current command back to cmd_free_q */ 884 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
869 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
870 885
871 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 886 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
872 adapter->curr_cmd = NULL; 887 adapter->curr_cmd = NULL;
@@ -993,7 +1008,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
993 mwifiex_complete_cmd(adapter, cmd_node); 1008 mwifiex_complete_cmd(adapter, cmd_node);
994 cmd_node->wait_q_enabled = false; 1009 cmd_node->wait_q_enabled = false;
995 } 1010 }
996 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 1011 mwifiex_recycle_cmd_node(adapter, cmd_node);
997 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); 1012 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
998 } 1013 }
999 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); 1014 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
@@ -1040,7 +1055,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
1040 cmd_node = adapter->curr_cmd; 1055 cmd_node = adapter->curr_cmd;
1041 cmd_node->wait_q_enabled = false; 1056 cmd_node->wait_q_enabled = false;
1042 cmd_node->cmd_flag |= CMD_F_CANCELED; 1057 cmd_node->cmd_flag |= CMD_F_CANCELED;
1043 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 1058 mwifiex_recycle_cmd_node(adapter, cmd_node);
1044 mwifiex_complete_cmd(adapter, adapter->curr_cmd); 1059 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
1045 adapter->curr_cmd = NULL; 1060 adapter->curr_cmd = NULL;
1046 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 1061 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
@@ -1149,7 +1164,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1149 phs_cfg->params.hs_config.gpio, 1164 phs_cfg->params.hs_config.gpio,
1150 phs_cfg->params.hs_config.gap); 1165 phs_cfg->params.hs_config.gap);
1151 } 1166 }
1152 if (conditions != HOST_SLEEP_CFG_CANCEL) { 1167 if (conditions != HS_CFG_CANCEL) {
1153 adapter->is_hs_configured = true; 1168 adapter->is_hs_configured = true;
1154 if (adapter->iface_type == MWIFIEX_USB || 1169 if (adapter->iface_type == MWIFIEX_USB ||
1155 adapter->iface_type == MWIFIEX_PCIE) 1170 adapter->iface_type == MWIFIEX_PCIE)
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e8a569aaa2e8..94cc09d48444 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -41,8 +41,15 @@
41#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 41#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
42#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 42#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
43 43
44#define MWIFIEX_AMPDU_DEF_TXWINSIZE 32 44#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16
45#define MWIFIEX_AMPDU_DEF_RXWINSIZE 16 45#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32
46#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
47#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
48#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32
49#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48
50#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
51#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
52
46#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff 53#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff
47 54
48#define MWIFIEX_RATE_BITMAP_MCS0 32 55#define MWIFIEX_RATE_BITMAP_MCS0 32
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
new file mode 100644
index 000000000000..bfb39908b2c6
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -0,0 +1,70 @@
1/*
2 * Marvell Wireless LAN device driver: ethtool
3 *
4 * Copyright (C) 2013, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21
22static void mwifiex_ethtool_get_wol(struct net_device *dev,
23 struct ethtool_wolinfo *wol)
24{
25 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
26 u32 conditions = le32_to_cpu(priv->adapter->hs_cfg.conditions);
27
28 wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
29
30 if (conditions == HS_CFG_COND_DEF)
31 return;
32
33 if (conditions & HS_CFG_COND_UNICAST_DATA)
34 wol->wolopts |= WAKE_UCAST;
35 if (conditions & HS_CFG_COND_MULTICAST_DATA)
36 wol->wolopts |= WAKE_MCAST;
37 if (conditions & HS_CFG_COND_BROADCAST_DATA)
38 wol->wolopts |= WAKE_BCAST;
39 if (conditions & HS_CFG_COND_MAC_EVENT)
40 wol->wolopts |= WAKE_PHY;
41}
42
43static int mwifiex_ethtool_set_wol(struct net_device *dev,
44 struct ethtool_wolinfo *wol)
45{
46 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
47 u32 conditions = 0;
48
49 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
50 return -EOPNOTSUPP;
51
52 if (wol->wolopts & WAKE_UCAST)
53 conditions |= HS_CFG_COND_UNICAST_DATA;
54 if (wol->wolopts & WAKE_MCAST)
55 conditions |= HS_CFG_COND_MULTICAST_DATA;
56 if (wol->wolopts & WAKE_BCAST)
57 conditions |= HS_CFG_COND_BROADCAST_DATA;
58 if (wol->wolopts & WAKE_PHY)
59 conditions |= HS_CFG_COND_MAC_EVENT;
60 if (wol->wolopts == 0)
61 conditions |= HS_CFG_COND_DEF;
62 priv->adapter->hs_cfg.conditions = cpu_to_le32(conditions);
63
64 return 0;
65}
66
67const struct ethtool_ops mwifiex_ethtool_ops = {
68 .get_wol = mwifiex_ethtool_get_wol,
69 .set_wol = mwifiex_ethtool_set_wol,
70};
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 25acb0682c56..1f7578d553ec 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -230,40 +230,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
230 230
231#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14))) 231#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
232 232
233#define GET_VHTCAP_MAXMPDULEN(vht_cap_info) (vht_cap_info & 0x3)
234#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3) 233#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
235#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3) 234#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
236#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \ 235#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
237 (2 * (nss - 1))) 236 (2 * (nss - 1)))
238#define NO_NSS_SUPPORT 0x3 237#define NO_NSS_SUPPORT 0x3
239 238
240/* HW_SPEC: HTC-VHT supported */
241#define ISSUPP_11ACVHTHTCVHT(Dot11acDevCap) (Dot11acDevCap & BIT(22))
242/* HW_SPEC: VHT TXOP PS support */
243#define ISSUPP_11ACVHTTXOPPS(Dot11acDevCap) (Dot11acDevCap & BIT(21))
244/* HW_SPEC: MU RX beamformee support */
245#define ISSUPP_11ACMURXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(20))
246/* HW_SPEC: MU TX beamformee support */
247#define ISSUPP_11ACMUTXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(19))
248/* HW_SPEC: SU Beamformee support */
249#define ISSUPP_11ACSUBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(10))
250/* HW_SPEC: SU Beamformer support */
251#define ISSUPP_11ACSUBEAMFORMER(Dot11acDevCap) (Dot11acDevCap & BIT(9))
252/* HW_SPEC: Rx STBC support */
253#define ISSUPP_11ACRXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(8))
254/* HW_SPEC: Tx STBC support */
255#define ISSUPP_11ACTXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(7))
256/* HW_SPEC: Short GI support for 160MHz BW */
257#define ISSUPP_11ACSGI160(Dot11acDevCap) (Dot11acDevCap & BIT(6))
258/* HW_SPEC: Short GI support for 80MHz BW */
259#define ISSUPP_11ACSGI80(Dot11acDevCap) (Dot11acDevCap & BIT(5))
260/* HW_SPEC: LDPC coding support */
261#define ISSUPP_11ACLDPC(Dot11acDevCap) (Dot11acDevCap & BIT(4))
262/* HW_SPEC: Channel BW 20/40/80/160/80+80 MHz support */
263#define ISSUPP_11ACBW8080(Dot11acDevCap) (Dot11acDevCap & BIT(3))
264/* HW_SPEC: Channel BW 20/40/80/160 MHz support */
265#define ISSUPP_11ACBW160(Dot11acDevCap) (Dot11acDevCap & BIT(2))
266
267#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16) 239#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
268#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF) 240#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
269 241
@@ -300,6 +272,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
300#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f 272#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
301#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 273#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
302#define HostCmd_CMD_VERSION_EXT 0x0097 274#define HostCmd_CMD_VERSION_EXT 0x0097
275#define HostCmd_CMD_MEF_CFG 0x009a
303#define HostCmd_CMD_RSSI_INFO 0x00a4 276#define HostCmd_CMD_RSSI_INFO 0x00a4
304#define HostCmd_CMD_FUNC_INIT 0x00a9 277#define HostCmd_CMD_FUNC_INIT 0x00a9
305#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa 278#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
@@ -322,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
322#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa 295#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
323#define HostCmd_CMD_MGMT_FRAME_REG 0x010c 296#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
324#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 297#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
298#define HostCmd_CMD_11AC_CFG 0x0112
325 299
326#define PROTOCOL_NO_SECURITY 0x01 300#define PROTOCOL_NO_SECURITY 0x01
327#define PROTOCOL_STATIC_WEP 0x02 301#define PROTOCOL_STATIC_WEP 0x02
@@ -376,10 +350,14 @@ enum P2P_MODES {
376#define HostCmd_SCAN_RADIO_TYPE_BG 0 350#define HostCmd_SCAN_RADIO_TYPE_BG 0
377#define HostCmd_SCAN_RADIO_TYPE_A 1 351#define HostCmd_SCAN_RADIO_TYPE_A 1
378 352
379#define HOST_SLEEP_CFG_CANCEL 0xffffffff 353#define HS_CFG_CANCEL 0xffffffff
380#define HOST_SLEEP_CFG_COND_DEF 0x00000000 354#define HS_CFG_COND_DEF 0x00000000
381#define HOST_SLEEP_CFG_GPIO_DEF 0xff 355#define HS_CFG_GPIO_DEF 0xff
382#define HOST_SLEEP_CFG_GAP_DEF 0 356#define HS_CFG_GAP_DEF 0
357#define HS_CFG_COND_BROADCAST_DATA 0x00000001
358#define HS_CFG_COND_UNICAST_DATA 0x00000002
359#define HS_CFG_COND_MAC_EVENT 0x00000004
360#define HS_CFG_COND_MULTICAST_DATA 0x00000008
383 361
384#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc 362#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc
385#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2 363#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2
@@ -469,6 +447,23 @@ enum P2P_MODES {
469#define EVENT_GET_BSS_TYPE(event_cause) \ 447#define EVENT_GET_BSS_TYPE(event_cause) \
470 (((event_cause) >> 24) & 0x00ff) 448 (((event_cause) >> 24) & 0x00ff)
471 449
450#define MWIFIEX_MAX_PATTERN_LEN 20
451#define MWIFIEX_MAX_OFFSET_LEN 50
452#define STACK_NBYTES 100
453#define TYPE_DNUM 1
454#define TYPE_BYTESEQ 2
455#define MAX_OPERAND 0x40
456#define TYPE_EQ (MAX_OPERAND+1)
457#define TYPE_EQ_DNUM (MAX_OPERAND+2)
458#define TYPE_EQ_BIT (MAX_OPERAND+3)
459#define TYPE_AND (MAX_OPERAND+4)
460#define TYPE_OR (MAX_OPERAND+5)
461#define MEF_MODE_HOST_SLEEP 1
462#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3
463#define MWIFIEX_CRITERIA_BROADCAST BIT(0)
464#define MWIFIEX_CRITERIA_UNICAST BIT(1)
465#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
466
472struct mwifiex_ie_types_header { 467struct mwifiex_ie_types_header {
473 __le16 type; 468 __le16 type;
474 __le16 len; 469 __le16 len;
@@ -1369,6 +1364,15 @@ struct host_cmd_ds_sys_config {
1369 u8 tlv[0]; 1364 u8 tlv[0];
1370}; 1365};
1371 1366
1367struct host_cmd_11ac_vht_cfg {
1368 __le16 action;
1369 u8 band_config;
1370 u8 misc_config;
1371 __le32 cap_info;
1372 __le32 mcs_tx_set;
1373 __le32 mcs_rx_set;
1374} __packed;
1375
1372struct host_cmd_tlv_akmp { 1376struct host_cmd_tlv_akmp {
1373 struct host_cmd_tlv tlv; 1377 struct host_cmd_tlv tlv;
1374 __le16 key_mgmt; 1378 __le16 key_mgmt;
@@ -1499,6 +1503,19 @@ struct host_cmd_ds_802_11_ibss_status {
1499 __le16 use_g_rate_protect; 1503 __le16 use_g_rate_protect;
1500} __packed; 1504} __packed;
1501 1505
1506struct mwifiex_fw_mef_entry {
1507 u8 mode;
1508 u8 action;
1509 __le16 exprsize;
1510 u8 expr[0];
1511} __packed;
1512
1513struct host_cmd_ds_mef_cfg {
1514 __le32 criteria;
1515 __le16 num_entries;
1516 struct mwifiex_fw_mef_entry mef_entry[0];
1517} __packed;
1518
1502#define CONNECTION_TYPE_INFRA 0 1519#define CONNECTION_TYPE_INFRA 0
1503#define CONNECTION_TYPE_ADHOC 1 1520#define CONNECTION_TYPE_ADHOC 1
1504#define CONNECTION_TYPE_AP 2 1521#define CONNECTION_TYPE_AP 2
@@ -1603,6 +1620,7 @@ struct host_cmd_ds_command {
1603 struct host_cmd_ds_remain_on_chan roc_cfg; 1620 struct host_cmd_ds_remain_on_chan roc_cfg;
1604 struct host_cmd_ds_p2p_mode_cfg mode_cfg; 1621 struct host_cmd_ds_p2p_mode_cfg mode_cfg;
1605 struct host_cmd_ds_802_11_ibss_status ibss_coalescing; 1622 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1623 struct host_cmd_ds_mef_cfg mef_cfg;
1606 struct host_cmd_ds_mac_reg_access mac_reg; 1624 struct host_cmd_ds_mac_reg_access mac_reg;
1607 struct host_cmd_ds_bbp_reg_access bbp_reg; 1625 struct host_cmd_ds_bbp_reg_access bbp_reg;
1608 struct host_cmd_ds_rf_reg_access rf_reg; 1626 struct host_cmd_ds_rf_reg_access rf_reg;
@@ -1612,6 +1630,7 @@ struct host_cmd_ds_command {
1612 struct host_cmd_ds_802_11_eeprom_access eeprom; 1630 struct host_cmd_ds_802_11_eeprom_access eeprom;
1613 struct host_cmd_ds_802_11_subsc_evt subsc_evt; 1631 struct host_cmd_ds_802_11_subsc_evt subsc_evt;
1614 struct host_cmd_ds_sys_config uap_sys_config; 1632 struct host_cmd_ds_sys_config uap_sys_config;
1633 struct host_cmd_11ac_vht_cfg vht_cfg;
1615 } params; 1634 } params;
1616} __packed; 1635} __packed;
1617 1636
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 0ff4c37ab42a..9f44fda19db9 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -44,8 +44,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
44 44
45 bss_prio->priv = priv; 45 bss_prio->priv = priv;
46 INIT_LIST_HEAD(&bss_prio->list); 46 INIT_LIST_HEAD(&bss_prio->list);
47 if (!tbl[priv->bss_priority].bss_prio_cur)
48 tbl[priv->bss_priority].bss_prio_cur = bss_prio;
49 47
50 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); 48 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
51 list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head); 49 list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
@@ -318,9 +316,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
318 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; 316 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
319 317
320 adapter->is_hs_configured = false; 318 adapter->is_hs_configured = false;
321 adapter->hs_cfg.conditions = cpu_to_le32(HOST_SLEEP_CFG_COND_DEF); 319 adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
322 adapter->hs_cfg.gpio = HOST_SLEEP_CFG_GPIO_DEF; 320 adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
323 adapter->hs_cfg.gap = HOST_SLEEP_CFG_GAP_DEF; 321 adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
324 adapter->hs_activated = false; 322 adapter->hs_activated = false;
325 323
326 memset(adapter->event_body, 0, sizeof(adapter->event_body)); 324 memset(adapter->event_body, 0, sizeof(adapter->event_body));
@@ -525,7 +523,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
525 523
526 for (i = 0; i < adapter->priv_num; ++i) { 524 for (i = 0; i < adapter->priv_num; ++i) {
527 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); 525 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
528 adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
529 spin_lock_init(&adapter->bss_prio_tbl[i].bss_prio_lock); 526 spin_lock_init(&adapter->bss_prio_tbl[i].bss_prio_lock);
530 } 527 }
531 528
@@ -533,10 +530,8 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
533 if (!adapter->priv[i]) 530 if (!adapter->priv[i])
534 continue; 531 continue;
535 priv = adapter->priv[i]; 532 priv = adapter->priv[i];
536 for (j = 0; j < MAX_NUM_TID; ++j) { 533 for (j = 0; j < MAX_NUM_TID; ++j)
537 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list); 534 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list);
538 spin_lock_init(&priv->wmm.tid_tbl_ptr[j].tid_tbl_lock);
539 }
540 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); 535 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
541 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 536 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
542 INIT_LIST_HEAD(&priv->sta_list); 537 INIT_LIST_HEAD(&priv->sta_list);
@@ -627,42 +622,36 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
627{ 622{
628 int i; 623 int i;
629 struct mwifiex_adapter *adapter = priv->adapter; 624 struct mwifiex_adapter *adapter = priv->adapter;
630 struct mwifiex_bss_prio_node *bssprio_node, *tmp_node, **cur; 625 struct mwifiex_bss_prio_node *bssprio_node, *tmp_node;
631 struct list_head *head; 626 struct list_head *head;
632 spinlock_t *lock; /* bss priority lock */ 627 spinlock_t *lock; /* bss priority lock */
633 unsigned long flags; 628 unsigned long flags;
634 629
635 for (i = 0; i < adapter->priv_num; ++i) { 630 for (i = 0; i < adapter->priv_num; ++i) {
636 head = &adapter->bss_prio_tbl[i].bss_prio_head; 631 head = &adapter->bss_prio_tbl[i].bss_prio_head;
637 cur = &adapter->bss_prio_tbl[i].bss_prio_cur;
638 lock = &adapter->bss_prio_tbl[i].bss_prio_lock; 632 lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
639 dev_dbg(adapter->dev, "info: delete BSS priority table," 633 dev_dbg(adapter->dev, "info: delete BSS priority table,"
640 " bss_type = %d, bss_num = %d, i = %d," 634 " bss_type = %d, bss_num = %d, i = %d,"
641 " head = %p, cur = %p\n", 635 " head = %p\n",
642 priv->bss_type, priv->bss_num, i, head, *cur); 636 priv->bss_type, priv->bss_num, i, head);
643 if (*cur) { 637
638 {
644 spin_lock_irqsave(lock, flags); 639 spin_lock_irqsave(lock, flags);
645 if (list_empty(head)) { 640 if (list_empty(head)) {
646 spin_unlock_irqrestore(lock, flags); 641 spin_unlock_irqrestore(lock, flags);
647 continue; 642 continue;
648 } 643 }
649 bssprio_node = list_first_entry(head,
650 struct mwifiex_bss_prio_node, list);
651 spin_unlock_irqrestore(lock, flags);
652
653 list_for_each_entry_safe(bssprio_node, tmp_node, head, 644 list_for_each_entry_safe(bssprio_node, tmp_node, head,
654 list) { 645 list) {
655 if (bssprio_node->priv == priv) { 646 if (bssprio_node->priv == priv) {
656 dev_dbg(adapter->dev, "info: Delete " 647 dev_dbg(adapter->dev, "info: Delete "
657 "node %p, next = %p\n", 648 "node %p, next = %p\n",
658 bssprio_node, tmp_node); 649 bssprio_node, tmp_node);
659 spin_lock_irqsave(lock, flags);
660 list_del(&bssprio_node->list); 650 list_del(&bssprio_node->list);
661 spin_unlock_irqrestore(lock, flags);
662 kfree(bssprio_node); 651 kfree(bssprio_node);
663 } 652 }
664 } 653 }
665 *cur = (struct mwifiex_bss_prio_node *)head; 654 spin_unlock_irqrestore(lock, flags);
666 } 655 }
667 } 656 }
668} 657}
@@ -713,7 +702,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
713 if (adapter->curr_cmd) { 702 if (adapter->curr_cmd) {
714 dev_warn(adapter->dev, "curr_cmd is still in processing\n"); 703 dev_warn(adapter->dev, "curr_cmd is still in processing\n");
715 del_timer(&adapter->cmd_timer); 704 del_timer(&adapter->cmd_timer);
716 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 705 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
717 adapter->curr_cmd = NULL; 706 adapter->curr_cmd = NULL;
718 } 707 }
719 708
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d85e6eb1f58a..7f27e45680b5 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -272,6 +272,14 @@ struct mwifiex_ds_pm_cfg {
272 } param; 272 } param;
273}; 273};
274 274
275struct mwifiex_11ac_vht_cfg {
276 u8 band_config;
277 u8 misc_config;
278 u32 cap_info;
279 u32 mcs_tx_set;
280 u32 mcs_rx_set;
281};
282
275struct mwifiex_ds_11n_tx_cfg { 283struct mwifiex_ds_11n_tx_cfg {
276 u16 tx_htcap; 284 u16 tx_htcap;
277 u16 tx_htinfo; 285 u16 tx_htinfo;
@@ -354,6 +362,29 @@ struct mwifiex_ds_misc_subsc_evt {
354 struct subsc_evt_cfg bcn_h_rssi_cfg; 362 struct subsc_evt_cfg bcn_h_rssi_cfg;
355}; 363};
356 364
365#define MAX_BYTESEQ 6 /* non-adjustable */
366#define MWIFIEX_MAX_FILTERS 10
367
368struct mwifiex_mef_filter {
369 u16 repeat;
370 u16 offset;
371 s8 byte_seq[MAX_BYTESEQ + 1];
372 u8 filt_type;
373 u8 filt_action;
374};
375
376struct mwifiex_mef_entry {
377 u8 mode;
378 u8 action;
379 struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
380};
381
382struct mwifiex_ds_mef_cfg {
383 u32 criteria;
384 u16 num_entries;
385 struct mwifiex_mef_entry *mef_entry;
386};
387
357#define MWIFIEX_MAX_VSIE_LEN (256) 388#define MWIFIEX_MAX_VSIE_LEN (256)
358#define MWIFIEX_MAX_VSIE_NUM (8) 389#define MWIFIEX_MAX_VSIE_NUM (8)
359#define MWIFIEX_VSIE_MASK_CLEAR 0x00 390#define MWIFIEX_VSIE_MASK_CLEAR 0x00
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 2fe0ceba4400..6bcb66e6e97c 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1295,6 +1295,14 @@ int mwifiex_associate(struct mwifiex_private *priv,
1295 (bss_desc->bss_mode != NL80211_IFTYPE_STATION)) 1295 (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
1296 return -1; 1296 return -1;
1297 1297
1298 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1299 !bss_desc->disable_11n && !bss_desc->disable_11ac &&
1300 (priv->adapter->config_bands & BAND_GAC ||
1301 priv->adapter->config_bands & BAND_AAC))
1302 mwifiex_set_11ac_ba_params(priv);
1303 else
1304 mwifiex_set_ba_params(priv);
1305
1298 memcpy(&current_bssid, 1306 memcpy(&current_bssid,
1299 &priv->curr_bss_params.bss_descriptor.mac_address, 1307 &priv->curr_bss_params.bss_descriptor.mac_address,
1300 sizeof(current_bssid)); 1308 sizeof(current_bssid));
@@ -1323,6 +1331,13 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
1323 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n", 1331 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
1324 priv->curr_bss_params.band); 1332 priv->curr_bss_params.band);
1325 1333
1334 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1335 (priv->adapter->config_bands & BAND_GAC ||
1336 priv->adapter->config_bands & BAND_AAC))
1337 mwifiex_set_11ac_ba_params(priv);
1338 else
1339 mwifiex_set_ba_params(priv);
1340
1326 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START, 1341 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
1327 HostCmd_ACT_GEN_SET, 0, adhoc_ssid); 1342 HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
1328} 1343}
@@ -1356,6 +1371,14 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
1356 return -1; 1371 return -1;
1357 } 1372 }
1358 1373
1374 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
1375 !bss_desc->disable_11n && !bss_desc->disable_11ac &&
1376 (priv->adapter->config_bands & BAND_GAC ||
1377 priv->adapter->config_bands & BAND_AAC))
1378 mwifiex_set_11ac_ba_params(priv);
1379 else
1380 mwifiex_set_ba_params(priv);
1381
1359 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n", 1382 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
1360 priv->curr_bss_params.bss_descriptor.channel); 1383 priv->curr_bss_params.bss_descriptor.channel);
1361 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n", 1384 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c802ede9c3b..121443a0f2a1 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -588,10 +588,19 @@ mwifiex_tx_timeout(struct net_device *dev)
588{ 588{
589 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 589 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
590 590
591 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_type-num = %d-%d\n",
592 jiffies, priv->bss_type, priv->bss_num);
593 mwifiex_set_trans_start(dev);
594 priv->num_tx_timeout++; 591 priv->num_tx_timeout++;
592 priv->tx_timeout_cnt++;
593 dev_err(priv->adapter->dev,
594 "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
595 jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
596 mwifiex_set_trans_start(dev);
597
598 if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
599 priv->adapter->if_ops.card_reset) {
600 dev_err(priv->adapter->dev,
601 "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
602 priv->adapter->if_ops.card_reset(priv->adapter);
603 }
595} 604}
596 605
597/* 606/*
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 7035ade9af74..4ef67fca06d3 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -130,6 +130,9 @@ enum {
130#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE 130#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE
131#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE 131#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE
132 132
133/* Threshold for tx_timeout_cnt before we trigger a card reset */
134#define TX_TIMEOUT_THRESHOLD 6
135
133struct mwifiex_dbg { 136struct mwifiex_dbg {
134 u32 num_cmd_host_to_card_failure; 137 u32 num_cmd_host_to_card_failure;
135 u32 num_cmd_sleep_cfm_host_to_card_failure; 138 u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -210,15 +213,11 @@ struct mwifiex_ra_list_tbl {
210 213
211struct mwifiex_tid_tbl { 214struct mwifiex_tid_tbl {
212 struct list_head ra_list; 215 struct list_head ra_list;
213 /* spin lock for tid table */
214 spinlock_t tid_tbl_lock;
215 struct mwifiex_ra_list_tbl *ra_list_curr;
216}; 216};
217 217
218#define WMM_HIGHEST_PRIORITY 7 218#define WMM_HIGHEST_PRIORITY 7
219#define HIGH_PRIO_TID 7 219#define HIGH_PRIO_TID 7
220#define LOW_PRIO_TID 0 220#define LOW_PRIO_TID 0
221#define NO_PKT_PRIO_TID (-1)
222 221
223struct mwifiex_wmm_desc { 222struct mwifiex_wmm_desc {
224 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; 223 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
@@ -394,6 +393,8 @@ struct mwifiex_private {
394 u8 curr_addr[ETH_ALEN]; 393 u8 curr_addr[ETH_ALEN];
395 u8 media_connected; 394 u8 media_connected;
396 u32 num_tx_timeout; 395 u32 num_tx_timeout;
396 /* track consecutive timeout */
397 u8 tx_timeout_cnt;
397 struct net_device *netdev; 398 struct net_device *netdev;
398 struct net_device_stats stats; 399 struct net_device_stats stats;
399 u16 curr_pkt_filter; 400 u16 curr_pkt_filter;
@@ -793,6 +794,8 @@ void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
793 794
794void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter, 795void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
795 struct cmd_ctrl_node *cmd_node); 796 struct cmd_ctrl_node *cmd_node);
797void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
798 struct cmd_ctrl_node *cmd_node);
796 799
797void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, 800void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
798 struct cmd_ctrl_node *cmd_node, 801 struct cmd_ctrl_node *cmd_node,
@@ -907,12 +910,20 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
907void mwifiex_set_ht_params(struct mwifiex_private *priv, 910void mwifiex_set_ht_params(struct mwifiex_private *priv,
908 struct mwifiex_uap_bss_param *bss_cfg, 911 struct mwifiex_uap_bss_param *bss_cfg,
909 struct cfg80211_ap_settings *params); 912 struct cfg80211_ap_settings *params);
913void mwifiex_set_vht_params(struct mwifiex_private *priv,
914 struct mwifiex_uap_bss_param *bss_cfg,
915 struct cfg80211_ap_settings *params);
910void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, 916void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
911 struct cfg80211_ap_settings *params); 917 struct cfg80211_ap_settings *params);
918void mwifiex_set_vht_width(struct mwifiex_private *priv,
919 enum nl80211_chan_width width,
920 bool ap_11ac_disable);
912void 921void
913mwifiex_set_wmm_params(struct mwifiex_private *priv, 922mwifiex_set_wmm_params(struct mwifiex_private *priv,
914 struct mwifiex_uap_bss_param *bss_cfg, 923 struct mwifiex_uap_bss_param *bss_cfg,
915 struct cfg80211_ap_settings *params); 924 struct cfg80211_ap_settings *params);
925void mwifiex_set_ba_params(struct mwifiex_private *priv);
926void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
916 927
917/* 928/*
918 * This function checks if the queuing is RA based or not. 929 * This function checks if the queuing is RA based or not.
@@ -1098,11 +1109,15 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
1098 1109
1099void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config); 1110void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
1100 1111
1112int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter);
1113
1101int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, 1114int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
1102 struct cfg80211_beacon_data *data); 1115 struct cfg80211_beacon_data *data);
1103int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); 1116int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
1104u8 *mwifiex_11d_code_2_region(u8 code); 1117u8 *mwifiex_11d_code_2_region(u8 code);
1105 1118
1119extern const struct ethtool_ops mwifiex_ethtool_ops;
1120
1106#ifdef CONFIG_DEBUG_FS 1121#ifdef CONFIG_DEBUG_FS
1107void mwifiex_debugfs_init(void); 1122void mwifiex_debugfs_init(void);
1108void mwifiex_debugfs_remove(void); 1123void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index feb204613397..20c9c4c7b0b2 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -36,8 +36,6 @@ static u8 user_rmmod;
36static struct mwifiex_if_ops pcie_ops; 36static struct mwifiex_if_ops pcie_ops;
37 37
38static struct semaphore add_remove_card_sem; 38static struct semaphore add_remove_card_sem;
39static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter);
40static int mwifiex_pcie_resume(struct pci_dev *pdev);
41 39
42static int 40static int
43mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, 41mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@@ -78,6 +76,82 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
78 return false; 76 return false;
79} 77}
80 78
79#ifdef CONFIG_PM
80/*
81 * Kernel needs to suspend all functions separately. Therefore all
82 * registered functions must have drivers with suspend and resume
83 * methods. Failing that the kernel simply removes the whole card.
84 *
85 * If already not suspended, this function allocates and sends a host
86 * sleep activate request to the firmware and turns off the traffic.
87 */
88static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
89{
90 struct mwifiex_adapter *adapter;
91 struct pcie_service_card *card;
92 int hs_actived;
93
94 if (pdev) {
95 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
96 if (!card || !card->adapter) {
97 pr_err("Card or adapter structure is not valid\n");
98 return 0;
99 }
100 } else {
101 pr_err("PCIE device is not specified\n");
102 return 0;
103 }
104
105 adapter = card->adapter;
106
107 hs_actived = mwifiex_enable_hs(adapter);
108
109 /* Indicate device suspended */
110 adapter->is_suspended = true;
111
112 return 0;
113}
114
115/*
116 * Kernel needs to suspend all functions separately. Therefore all
117 * registered functions must have drivers with suspend and resume
118 * methods. Failing that the kernel simply removes the whole card.
119 *
120 * If already not resumed, this function turns on the traffic and
121 * sends a host sleep cancel request to the firmware.
122 */
123static int mwifiex_pcie_resume(struct pci_dev *pdev)
124{
125 struct mwifiex_adapter *adapter;
126 struct pcie_service_card *card;
127
128 if (pdev) {
129 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
130 if (!card || !card->adapter) {
131 pr_err("Card or adapter structure is not valid\n");
132 return 0;
133 }
134 } else {
135 pr_err("PCIE device is not specified\n");
136 return 0;
137 }
138
139 adapter = card->adapter;
140
141 if (!adapter->is_suspended) {
142 dev_warn(adapter->dev, "Device already resumed\n");
143 return 0;
144 }
145
146 adapter->is_suspended = false;
147
148 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
149 MWIFIEX_ASYNC_CMD);
150
151 return 0;
152}
153#endif
154
81/* 155/*
82 * This function probes an mwifiex device and registers it. It allocates 156 * This function probes an mwifiex device and registers it. It allocates
83 * the card structure, enables PCIE function number and initiates the 157 * the card structure, enables PCIE function number and initiates the
@@ -159,80 +233,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
159 kfree(card); 233 kfree(card);
160} 234}
161 235
162/*
163 * Kernel needs to suspend all functions separately. Therefore all
164 * registered functions must have drivers with suspend and resume
165 * methods. Failing that the kernel simply removes the whole card.
166 *
167 * If already not suspended, this function allocates and sends a host
168 * sleep activate request to the firmware and turns off the traffic.
169 */
170static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
171{
172 struct mwifiex_adapter *adapter;
173 struct pcie_service_card *card;
174 int hs_actived;
175
176 if (pdev) {
177 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
178 if (!card || !card->adapter) {
179 pr_err("Card or adapter structure is not valid\n");
180 return 0;
181 }
182 } else {
183 pr_err("PCIE device is not specified\n");
184 return 0;
185 }
186
187 adapter = card->adapter;
188
189 hs_actived = mwifiex_enable_hs(adapter);
190
191 /* Indicate device suspended */
192 adapter->is_suspended = true;
193
194 return 0;
195}
196
197/*
198 * Kernel needs to suspend all functions separately. Therefore all
199 * registered functions must have drivers with suspend and resume
200 * methods. Failing that the kernel simply removes the whole card.
201 *
202 * If already not resumed, this function turns on the traffic and
203 * sends a host sleep cancel request to the firmware.
204 */
205static int mwifiex_pcie_resume(struct pci_dev *pdev)
206{
207 struct mwifiex_adapter *adapter;
208 struct pcie_service_card *card;
209
210 if (pdev) {
211 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
212 if (!card || !card->adapter) {
213 pr_err("Card or adapter structure is not valid\n");
214 return 0;
215 }
216 } else {
217 pr_err("PCIE device is not specified\n");
218 return 0;
219 }
220
221 adapter = card->adapter;
222
223 if (!adapter->is_suspended) {
224 dev_warn(adapter->dev, "Device already resumed\n");
225 return 0;
226 }
227
228 adapter->is_suspended = false;
229
230 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
231 MWIFIEX_ASYNC_CMD);
232
233 return 0;
234}
235
236static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { 236static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
237 { 237 {
238 PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, 238 PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -287,18 +287,13 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
287} 287}
288 288
289/* 289/*
290 * This function wakes up the card. 290 * This function adds delay loop to ensure FW is awake before proceeding.
291 *
292 * A host power up command is written to the card configuration
293 * register to wake up the card.
294 */ 291 */
295static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) 292static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
296{ 293{
297 int i = 0; 294 int i = 0;
298 struct pcie_service_card *card = adapter->card;
299 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
300 295
301 while (reg->sleep_cookie && mwifiex_pcie_ok_to_access_hw(adapter)) { 296 while (mwifiex_pcie_ok_to_access_hw(adapter)) {
302 i++; 297 i++;
303 usleep_range(10, 20); 298 usleep_range(10, 20);
304 /* 50ms max wait */ 299 /* 50ms max wait */
@@ -306,16 +301,32 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
306 break; 301 break;
307 } 302 }
308 303
304 return;
305}
306
307/* This function wakes up the card by reading fw_status register. */
308static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
309{
310 u32 fw_status;
311 struct pcie_service_card *card = adapter->card;
312 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
313
309 dev_dbg(adapter->dev, "event: Wakeup device...\n"); 314 dev_dbg(adapter->dev, "event: Wakeup device...\n");
310 315
311 /* Enable interrupts or any chip access will wakeup device */ 316 if (reg->sleep_cookie)
312 if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK)) { 317 mwifiex_pcie_dev_wakeup_delay(adapter);
313 dev_warn(adapter->dev, "Enable host interrupt failed\n"); 318
319 /* Reading fw_status register will wakeup device */
320 if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
321 dev_warn(adapter->dev, "Reading fw_status register failed\n");
314 return -1; 322 return -1;
315 } 323 }
316 324
317 dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n"); 325 if (reg->sleep_cookie) {
318 adapter->ps_state = PS_STATE_AWAKE; 326 mwifiex_pcie_dev_wakeup_delay(adapter);
327 dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
328 adapter->ps_state = PS_STATE_AWAKE;
329 }
319 330
320 return 0; 331 return 0;
321} 332}
@@ -561,7 +572,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
561 if (card->rx_buf_list[i]) { 572 if (card->rx_buf_list[i]) {
562 skb = card->rx_buf_list[i]; 573 skb = card->rx_buf_list[i];
563 pci_unmap_single(card->dev, desc2->paddr, 574 pci_unmap_single(card->dev, desc2->paddr,
564 skb->len, PCI_DMA_TODEVICE); 575 skb->len, PCI_DMA_FROMDEVICE);
565 dev_kfree_skb_any(skb); 576 dev_kfree_skb_any(skb);
566 } 577 }
567 memset(desc2, 0, sizeof(*desc2)); 578 memset(desc2, 0, sizeof(*desc2));
@@ -570,7 +581,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
570 if (card->rx_buf_list[i]) { 581 if (card->rx_buf_list[i]) {
571 skb = card->rx_buf_list[i]; 582 skb = card->rx_buf_list[i];
572 pci_unmap_single(card->dev, desc->paddr, 583 pci_unmap_single(card->dev, desc->paddr,
573 skb->len, PCI_DMA_TODEVICE); 584 skb->len, PCI_DMA_FROMDEVICE);
574 dev_kfree_skb_any(skb); 585 dev_kfree_skb_any(skb);
575 } 586 }
576 memset(desc, 0, sizeof(*desc)); 587 memset(desc, 0, sizeof(*desc));
@@ -850,9 +861,8 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
850 861
851 if (card && card->cmd_buf) { 862 if (card && card->cmd_buf) {
852 MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa); 863 MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
853 pci_unmap_single(card->dev, buf_pa, MWIFIEX_SIZE_OF_CMD_BUFFER, 864 pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
854 PCI_DMA_TODEVICE); 865 PCI_DMA_TODEVICE);
855 dev_kfree_skb_any(card->cmd_buf);
856 } 866 }
857 return 0; 867 return 0;
858} 868}
@@ -1030,8 +1040,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
1030 u32 wrindx, num_tx_buffs, rx_val; 1040 u32 wrindx, num_tx_buffs, rx_val;
1031 int ret; 1041 int ret;
1032 dma_addr_t buf_pa; 1042 dma_addr_t buf_pa;
1033 struct mwifiex_pcie_buf_desc *desc; 1043 struct mwifiex_pcie_buf_desc *desc = NULL;
1034 struct mwifiex_pfu_buf_desc *desc2; 1044 struct mwifiex_pfu_buf_desc *desc2 = NULL;
1035 __le16 *tmp; 1045 __le16 *tmp;
1036 1046
1037 if (!(skb->data && skb->len)) { 1047 if (!(skb->data && skb->len)) {
@@ -1562,7 +1572,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
1562 skb_tmp = card->cmd_buf; 1572 skb_tmp = card->cmd_buf;
1563 if (skb_tmp) { 1573 if (skb_tmp) {
1564 MWIFIEX_SKB_PACB(skb_tmp, &buf_pa); 1574 MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
1565 pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE, 1575 pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
1566 PCI_DMA_FROMDEVICE); 1576 PCI_DMA_FROMDEVICE);
1567 card->cmd_buf = NULL; 1577 card->cmd_buf = NULL;
1568 } 1578 }
@@ -1984,12 +1994,13 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
1984 } 1994 }
1985 } 1995 }
1986 } else if (!adapter->pps_uapsd_mode && 1996 } else if (!adapter->pps_uapsd_mode &&
1987 adapter->ps_state == PS_STATE_SLEEP) { 1997 adapter->ps_state == PS_STATE_SLEEP &&
1998 mwifiex_pcie_ok_to_access_hw(adapter)) {
1988 /* Potentially for PCIe we could get other 1999 /* Potentially for PCIe we could get other
1989 * interrupts like shared. Don't change power 2000 * interrupts like shared. Don't change power
1990 * state until cookie is set */ 2001 * state until cookie is set */
1991 if (mwifiex_pcie_ok_to_access_hw(adapter)) 2002 adapter->ps_state = PS_STATE_AWAKE;
1992 adapter->ps_state = PS_STATE_AWAKE; 2003 adapter->pm_wakeup_fw_try = false;
1993 } 2004 }
1994 } 2005 }
1995} 2006}
@@ -2112,7 +2123,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
2112 } 2123 }
2113 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", 2124 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
2114 adapter->cmd_sent, adapter->data_sent); 2125 adapter->cmd_sent, adapter->data_sent);
2115 mwifiex_pcie_enable_host_int(adapter); 2126 if (adapter->ps_state != PS_STATE_SLEEP)
2127 mwifiex_pcie_enable_host_int(adapter);
2116 2128
2117 return 0; 2129 return 0;
2118} 2130}
@@ -2281,9 +2293,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
2281 if (pdev) { 2293 if (pdev) {
2282 pci_iounmap(pdev, card->pci_mmap); 2294 pci_iounmap(pdev, card->pci_mmap);
2283 pci_iounmap(pdev, card->pci_mmap1); 2295 pci_iounmap(pdev, card->pci_mmap1);
2284
2285 pci_release_regions(pdev);
2286 pci_disable_device(pdev); 2296 pci_disable_device(pdev);
2297 pci_release_region(pdev, 2);
2298 pci_release_region(pdev, 0);
2287 pci_set_drvdata(pdev, NULL); 2299 pci_set_drvdata(pdev, NULL);
2288 } 2300 }
2289} 2301}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index e7f6deaf715e..9cf5d8f07df8 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1500,43 +1500,22 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
1500 if (ret) 1500 if (ret)
1501 goto done; 1501 goto done;
1502 1502
1503 /* Update current bss descriptor parameters */
1504 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags); 1503 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
1505 priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL;
1506 priv->curr_bss_params.bss_descriptor.wpa_offset = 0;
1507 priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL;
1508 priv->curr_bss_params.bss_descriptor.rsn_offset = 0;
1509 priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
1510 priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
1511 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
1512 priv->curr_bss_params.bss_descriptor.ht_cap_offset = 0;
1513 priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL;
1514 priv->curr_bss_params.bss_descriptor.ht_info_offset = 0;
1515 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = NULL;
1516 priv->curr_bss_params.bss_descriptor.bss_co_2040_offset = 0;
1517 priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
1518 priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
1519 priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
1520 priv->curr_bss_params.bss_descriptor.beacon_buf_size = 0;
1521 priv->curr_bss_params.bss_descriptor.bcn_vht_cap = NULL;
1522 priv->curr_bss_params.bss_descriptor.vht_cap_offset = 0;
1523 priv->curr_bss_params.bss_descriptor.bcn_vht_oper = NULL;
1524 priv->curr_bss_params.bss_descriptor.vht_info_offset = 0;
1525 priv->curr_bss_params.bss_descriptor.oper_mode = NULL;
1526 priv->curr_bss_params.bss_descriptor.oper_mode_offset = 0;
1527
1528 /* Disable 11ac by default. Enable it only where there
1529 * exist VHT_CAP IE in AP beacon
1530 */
1531 priv->curr_bss_params.bss_descriptor.disable_11ac = true;
1532
1533 /* Make a copy of current BSSID descriptor */ 1504 /* Make a copy of current BSSID descriptor */
1534 memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc, 1505 memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
1535 sizeof(priv->curr_bss_params.bss_descriptor)); 1506 sizeof(priv->curr_bss_params.bss_descriptor));
1507
1508 /* The contents of beacon_ie will be copied to its own buffer
1509 * in mwifiex_save_curr_bcn()
1510 */
1536 mwifiex_save_curr_bcn(priv); 1511 mwifiex_save_curr_bcn(priv);
1537 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags); 1512 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
1538 1513
1539done: 1514done:
1515 /* beacon_ie buffer was allocated in function
1516 * mwifiex_fill_new_bss_desc(). Free it now.
1517 */
1518 kfree(bss_desc->beacon_buf);
1540 kfree(bss_desc); 1519 kfree(bss_desc);
1541 return 0; 1520 return 0;
1542} 1521}
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c55c5bb93134..b193e25977d2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -24,6 +24,7 @@
24#include "main.h" 24#include "main.h"
25#include "wmm.h" 25#include "wmm.h"
26#include "11n.h" 26#include "11n.h"
27#include "11ac.h"
27 28
28/* 29/*
29 * This function prepares command to set/get RSSI information. 30 * This function prepares command to set/get RSSI information.
@@ -334,7 +335,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
334 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH); 335 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
335 336
336 if (!hs_activate && 337 if (!hs_activate &&
337 (hscfg_param->conditions != cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) && 338 (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
338 ((adapter->arp_filter_size > 0) && 339 ((adapter->arp_filter_size > 0) &&
339 (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) { 340 (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
340 dev_dbg(adapter->dev, 341 dev_dbg(adapter->dev,
@@ -1059,6 +1060,80 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
1059 return 0; 1060 return 0;
1060} 1061}
1061 1062
1063static int
1064mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
1065 struct mwifiex_mef_entry *mef_entry,
1066 u8 **buffer)
1067{
1068 struct mwifiex_mef_filter *filter = mef_entry->filter;
1069 int i, byte_len;
1070 u8 *stack_ptr = *buffer;
1071
1072 for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
1073 filter = &mef_entry->filter[i];
1074 if (!filter->filt_type)
1075 break;
1076 *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat);
1077 stack_ptr += 4;
1078 *stack_ptr = TYPE_DNUM;
1079 stack_ptr += 1;
1080
1081 byte_len = filter->byte_seq[MAX_BYTESEQ];
1082 memcpy(stack_ptr, filter->byte_seq, byte_len);
1083 stack_ptr += byte_len;
1084 *stack_ptr = byte_len;
1085 stack_ptr += 1;
1086 *stack_ptr = TYPE_BYTESEQ;
1087 stack_ptr += 1;
1088
1089 *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset);
1090 stack_ptr += 4;
1091 *stack_ptr = TYPE_DNUM;
1092 stack_ptr += 1;
1093
1094 *stack_ptr = filter->filt_type;
1095 stack_ptr += 1;
1096
1097 if (filter->filt_action) {
1098 *stack_ptr = filter->filt_action;
1099 stack_ptr += 1;
1100 }
1101
1102 if (stack_ptr - *buffer > STACK_NBYTES)
1103 return -1;
1104 }
1105
1106 *buffer = stack_ptr;
1107 return 0;
1108}
1109
1110static int
1111mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
1112 struct host_cmd_ds_command *cmd,
1113 struct mwifiex_ds_mef_cfg *mef)
1114{
1115 struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
1116 u8 *pos = (u8 *)mef_cfg;
1117
1118 cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
1119
1120 mef_cfg->criteria = cpu_to_le32(mef->criteria);
1121 mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
1122 pos += sizeof(*mef_cfg);
1123 mef_cfg->mef_entry->mode = mef->mef_entry->mode;
1124 mef_cfg->mef_entry->action = mef->mef_entry->action;
1125 pos += sizeof(*(mef_cfg->mef_entry));
1126
1127 if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
1128 return -1;
1129
1130 mef_cfg->mef_entry->exprsize =
1131 cpu_to_le16(pos - mef_cfg->mef_entry->expr);
1132 cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
1133
1134 return 0;
1135}
1136
1062/* 1137/*
1063 * This function prepares the commands before sending them to the firmware. 1138 * This function prepares the commands before sending them to the firmware.
1064 * 1139 *
@@ -1184,6 +1259,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1184 cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) + 1259 cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
1185 S_DS_GEN); 1260 S_DS_GEN);
1186 break; 1261 break;
1262 case HostCmd_CMD_11AC_CFG:
1263 ret = mwifiex_cmd_11ac_cfg(priv, cmd_ptr, cmd_action, data_buf);
1264 break;
1187 case HostCmd_CMD_P2P_MODE_CFG: 1265 case HostCmd_CMD_P2P_MODE_CFG:
1188 cmd_ptr->command = cpu_to_le16(cmd_no); 1266 cmd_ptr->command = cpu_to_le16(cmd_no);
1189 cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action); 1267 cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
@@ -1273,6 +1351,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1273 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: 1351 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
1274 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf); 1352 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
1275 break; 1353 break;
1354 case HostCmd_CMD_MEF_CFG:
1355 ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
1356 break;
1276 default: 1357 default:
1277 dev_err(priv->adapter->dev, 1358 dev_err(priv->adapter->dev,
1278 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1359 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4669f8d9389f..9f990e14966e 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -95,7 +95,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
95 break; 95 break;
96 } 96 }
97 /* Handling errors here */ 97 /* Handling errors here */
98 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 98 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
99 99
100 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 100 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
101 adapter->curr_cmd = NULL; 101 adapter->curr_cmd = NULL;
@@ -907,6 +907,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
907 case HostCmd_CMD_REMAIN_ON_CHAN: 907 case HostCmd_CMD_REMAIN_ON_CHAN:
908 ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf); 908 ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
909 break; 909 break;
910 case HostCmd_CMD_11AC_CFG:
911 break;
910 case HostCmd_CMD_P2P_MODE_CFG: 912 case HostCmd_CMD_P2P_MODE_CFG:
911 ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf); 913 ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
912 break; 914 break;
@@ -976,6 +978,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
976 case HostCmd_CMD_UAP_BSS_STOP: 978 case HostCmd_CMD_UAP_BSS_STOP:
977 priv->bss_started = 0; 979 priv->bss_started = 0;
978 break; 980 break;
981 case HostCmd_CMD_MEF_CFG:
982 break;
979 default: 983 default:
980 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 984 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
981 resp->command); 985 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 13100f8de3db..311d0b26b81c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -59,9 +59,6 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
59{ 59{
60 int status; 60 int status;
61 61
62 dev_dbg(adapter->dev, "cmd pending\n");
63 atomic_inc(&adapter->cmd_pending);
64
65 /* Wait for completion */ 62 /* Wait for completion */
66 status = wait_event_interruptible(adapter->cmd_wait_q.wait, 63 status = wait_event_interruptible(adapter->cmd_wait_q.wait,
67 *(cmd_queued->condition)); 64 *(cmd_queued->condition));
@@ -143,12 +140,13 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
143/* 140/*
144 * This function fills bss descriptor structure using provided 141 * This function fills bss descriptor structure using provided
145 * information. 142 * information.
143 * beacon_ie buffer is allocated in this function. It is caller's
144 * responsibility to free the memory.
146 */ 145 */
147int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 146int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
148 struct cfg80211_bss *bss, 147 struct cfg80211_bss *bss,
149 struct mwifiex_bssdescriptor *bss_desc) 148 struct mwifiex_bssdescriptor *bss_desc)
150{ 149{
151 int ret;
152 u8 *beacon_ie; 150 u8 *beacon_ie;
153 size_t beacon_ie_len; 151 size_t beacon_ie_len;
154 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; 152 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
@@ -168,6 +166,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
168 166
169 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN); 167 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN);
170 bss_desc->rssi = bss->signal; 168 bss_desc->rssi = bss->signal;
169 /* The caller of this function will free beacon_ie */
171 bss_desc->beacon_buf = beacon_ie; 170 bss_desc->beacon_buf = beacon_ie;
172 bss_desc->beacon_buf_size = beacon_ie_len; 171 bss_desc->beacon_buf_size = beacon_ie_len;
173 bss_desc->beacon_period = bss->beacon_interval; 172 bss_desc->beacon_period = bss->beacon_interval;
@@ -185,10 +184,12 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
185 else 184 else
186 bss_desc->bss_mode = NL80211_IFTYPE_STATION; 185 bss_desc->bss_mode = NL80211_IFTYPE_STATION;
187 186
188 ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); 187 /* Disable 11ac by default. Enable it only where there
188 * exist VHT_CAP IE in AP beacon
189 */
190 bss_desc->disable_11ac = true;
189 191
190 kfree(beacon_ie); 192 return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
191 return ret;
192} 193}
193 194
194static int mwifiex_process_country_ie(struct mwifiex_private *priv, 195static int mwifiex_process_country_ie(struct mwifiex_private *priv,
@@ -352,6 +353,11 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
352 } 353 }
353 354
354done: 355done:
356 /* beacon_ie buffer was allocated in function
357 * mwifiex_fill_new_bss_desc(). Free it now.
358 */
359 if (bss_desc)
360 kfree(bss_desc->beacon_buf);
355 kfree(bss_desc); 361 kfree(bss_desc);
356 return ret; 362 return ret;
357} 363}
@@ -382,7 +388,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
382 break; 388 break;
383 } 389 }
384 if (hs_cfg->is_invoke_hostcmd) { 390 if (hs_cfg->is_invoke_hostcmd) {
385 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) { 391 if (hs_cfg->conditions == HS_CFG_CANCEL) {
386 if (!adapter->is_hs_configured) 392 if (!adapter->is_hs_configured)
387 /* Already cancelled */ 393 /* Already cancelled */
388 break; 394 break;
@@ -397,8 +403,8 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
397 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio; 403 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
398 if (hs_cfg->gap) 404 if (hs_cfg->gap)
399 adapter->hs_cfg.gap = (u8)hs_cfg->gap; 405 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
400 } else if (adapter->hs_cfg.conditions 406 } else if (adapter->hs_cfg.conditions ==
401 == cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) { 407 cpu_to_le32(HS_CFG_CANCEL)) {
402 /* Return failure if no parameters for HS 408 /* Return failure if no parameters for HS
403 enable */ 409 enable */
404 status = -1; 410 status = -1;
@@ -414,7 +420,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
414 HostCmd_CMD_802_11_HS_CFG_ENH, 420 HostCmd_CMD_802_11_HS_CFG_ENH,
415 HostCmd_ACT_GEN_SET, 0, 421 HostCmd_ACT_GEN_SET, 0,
416 &adapter->hs_cfg); 422 &adapter->hs_cfg);
417 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) 423 if (hs_cfg->conditions == HS_CFG_CANCEL)
418 /* Restore previous condition */ 424 /* Restore previous condition */
419 adapter->hs_cfg.conditions = 425 adapter->hs_cfg.conditions =
420 cpu_to_le32(prev_cond); 426 cpu_to_le32(prev_cond);
@@ -448,7 +454,7 @@ int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type)
448{ 454{
449 struct mwifiex_ds_hs_cfg hscfg; 455 struct mwifiex_ds_hs_cfg hscfg;
450 456
451 hscfg.conditions = HOST_SLEEP_CFG_CANCEL; 457 hscfg.conditions = HS_CFG_CANCEL;
452 hscfg.is_invoke_hostcmd = true; 458 hscfg.is_invoke_hostcmd = true;
453 459
454 return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, 460 return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 296faec14365..8f923d0d2ba6 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -169,6 +169,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
169 if (!status) { 169 if (!status) {
170 priv->stats.tx_packets++; 170 priv->stats.tx_packets++;
171 priv->stats.tx_bytes += skb->len; 171 priv->stats.tx_bytes += skb->len;
172 if (priv->tx_timeout_cnt)
173 priv->tx_timeout_cnt = 0;
172 } else { 174 } else {
173 priv->stats.tx_errors++; 175 priv->stats.tx_errors++;
174 } 176 }
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 6e76a15a8950..b04b1db29100 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "main.h" 20#include "main.h"
21#include "11ac.h"
21 22
22/* This function parses security related parameters from cfg80211_ap_settings 23/* This function parses security related parameters from cfg80211_ap_settings
23 * and sets into FW understandable bss_config structure. 24 * and sets into FW understandable bss_config structure.
@@ -177,6 +178,60 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
177 return; 178 return;
178} 179}
179 180
181/* This function updates 11ac related parameters from IE
182 * and sets them into bss_config structure.
183 */
184void mwifiex_set_vht_params(struct mwifiex_private *priv,
185 struct mwifiex_uap_bss_param *bss_cfg,
186 struct cfg80211_ap_settings *params)
187{
188 const u8 *vht_ie;
189
190 vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail,
191 params->beacon.tail_len);
192 if (vht_ie) {
193 memcpy(&bss_cfg->vht_cap, vht_ie + 2,
194 sizeof(struct ieee80211_vht_cap));
195 priv->ap_11ac_enabled = 1;
196 } else {
197 priv->ap_11ac_enabled = 0;
198 }
199
200 return;
201}
202
203/* Enable VHT only when cfg80211_ap_settings has VHT IE.
204 * Otherwise disable VHT.
205 */
206void mwifiex_set_vht_width(struct mwifiex_private *priv,
207 enum nl80211_chan_width width,
208 bool ap_11ac_enable)
209{
210 struct mwifiex_adapter *adapter = priv->adapter;
211 struct mwifiex_11ac_vht_cfg vht_cfg;
212
213 vht_cfg.band_config = VHT_CFG_5GHZ;
214 vht_cfg.cap_info = adapter->hw_dot_11ac_dev_cap;
215
216 if (!ap_11ac_enable) {
217 vht_cfg.mcs_tx_set = DISABLE_VHT_MCS_SET;
218 vht_cfg.mcs_rx_set = DISABLE_VHT_MCS_SET;
219 } else {
220 vht_cfg.mcs_tx_set = DEFAULT_VHT_MCS_SET;
221 vht_cfg.mcs_rx_set = DEFAULT_VHT_MCS_SET;
222 }
223
224 vht_cfg.misc_config = VHT_CAP_UAP_ONLY;
225
226 if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
227 vht_cfg.misc_config |= VHT_BW_80_160_80P80;
228
229 mwifiex_send_cmd_sync(priv, HostCmd_CMD_11AC_CFG,
230 HostCmd_ACT_GEN_SET, 0, &vht_cfg);
231
232 return;
233}
234
180/* This function finds supported rates IE from beacon parameter and sets 235/* This function finds supported rates IE from beacon parameter and sets
181 * these rates into bss_config structure. 236 * these rates into bss_config structure.
182 */ 237 */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 21553976b550..e57ac0dd3ab5 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -195,7 +195,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
195 skb->protocol = eth_type_trans(skb, priv->netdev); 195 skb->protocol = eth_type_trans(skb, priv->netdev);
196 skb->ip_summed = CHECKSUM_NONE; 196 skb->ip_summed = CHECKSUM_NONE;
197 197
198 /* This is required only in case of 11n and USB as we alloc 198 /* This is required only in case of 11n and USB/PCIE as we alloc
199 * a buffer of 4K only if its 11N (to be able to receive 4K 199 * a buffer of 4K only if its 11N (to be able to receive 4K
200 * AMSDU packets). In case of SD we allocate buffers based 200 * AMSDU packets). In case of SD we allocate buffers based
201 * on the size of packet and hence this is not needed. 201 * on the size of packet and hence this is not needed.
@@ -212,7 +212,8 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
212 * fragments. Currently we fail the Filesndl-ht.scr script 212 * fragments. Currently we fail the Filesndl-ht.scr script
213 * for UDP, hence this fix 213 * for UDP, hence this fix
214 */ 214 */
215 if ((priv->adapter->iface_type == MWIFIEX_USB) && 215 if ((priv->adapter->iface_type == MWIFIEX_USB ||
216 priv->adapter->iface_type == MWIFIEX_PCIE) &&
216 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) 217 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
217 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); 218 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
218 219
@@ -238,7 +239,6 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
238int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, 239int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
239 struct cmd_ctrl_node *cmd_node) 240 struct cmd_ctrl_node *cmd_node)
240{ 241{
241 atomic_dec(&adapter->cmd_pending);
242 dev_dbg(adapter->dev, "cmd completed: status=%d\n", 242 dev_dbg(adapter->dev, "cmd completed: status=%d\n",
243 adapter->cmd_wait_q.status); 243 adapter->cmd_wait_q.status);
244 244
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 32adc878041d..4be3d33ceae8 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -191,9 +191,6 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
191 } 191 }
192 list_add_tail(&ra_list->list, 192 list_add_tail(&ra_list->list,
193 &priv->wmm.tid_tbl_ptr[i].ra_list); 193 &priv->wmm.tid_tbl_ptr[i].ra_list);
194
195 if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
196 priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
197 } 194 }
198} 195}
199 196
@@ -424,7 +421,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
424 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i]; 421 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
425 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i]; 422 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
426 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i]; 423 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
427 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
428 } 424 }
429 425
430 priv->aggr_prio_tbl[6].amsdu 426 priv->aggr_prio_tbl[6].amsdu
@@ -436,10 +432,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
436 = priv->aggr_prio_tbl[7].ampdu_user 432 = priv->aggr_prio_tbl[7].ampdu_user
437 = BA_STREAM_NOT_ALLOWED; 433 = BA_STREAM_NOT_ALLOWED;
438 434
439 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT; 435 mwifiex_set_ba_params(priv);
440 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
441 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
442
443 mwifiex_reset_11n_rx_seq_num(priv); 436 mwifiex_reset_11n_rx_seq_num(priv);
444 437
445 atomic_set(&priv->wmm.tx_pkts_queued, 0); 438 atomic_set(&priv->wmm.tx_pkts_queued, 0);
@@ -533,8 +526,6 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
533 } 526 }
534 527
535 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list); 528 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
536
537 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
538 } 529 }
539} 530}
540 531
@@ -688,13 +679,13 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
688 ra_list->total_pkts_size += skb->len; 679 ra_list->total_pkts_size += skb->len;
689 ra_list->pkt_count++; 680 ra_list->pkt_count++;
690 681
691 atomic_inc(&priv->wmm.tx_pkts_queued);
692
693 if (atomic_read(&priv->wmm.highest_queued_prio) < 682 if (atomic_read(&priv->wmm.highest_queued_prio) <
694 tos_to_tid_inv[tid_down]) 683 tos_to_tid_inv[tid_down])
695 atomic_set(&priv->wmm.highest_queued_prio, 684 atomic_set(&priv->wmm.highest_queued_prio,
696 tos_to_tid_inv[tid_down]); 685 tos_to_tid_inv[tid_down]);
697 686
687 atomic_inc(&priv->wmm.tx_pkts_queued);
688
698 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 689 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
699} 690}
700 691
@@ -886,128 +877,65 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
886 struct mwifiex_private **priv, int *tid) 877 struct mwifiex_private **priv, int *tid)
887{ 878{
888 struct mwifiex_private *priv_tmp; 879 struct mwifiex_private *priv_tmp;
889 struct mwifiex_ra_list_tbl *ptr, *head; 880 struct mwifiex_ra_list_tbl *ptr;
890 struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
891 struct mwifiex_tid_tbl *tid_ptr; 881 struct mwifiex_tid_tbl *tid_ptr;
892 atomic_t *hqp; 882 atomic_t *hqp;
893 int is_list_empty; 883 unsigned long flags_bss, flags_ra;
894 unsigned long flags;
895 int i, j; 884 int i, j;
896 885
886 /* check the BSS with highest priority first */
897 for (j = adapter->priv_num - 1; j >= 0; --j) { 887 for (j = adapter->priv_num - 1; j >= 0; --j) {
898 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock, 888 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
899 flags); 889 flags_bss);
900 is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
901 .bss_prio_head);
902 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
903 flags);
904 if (is_list_empty)
905 continue;
906 890
907 if (adapter->bss_prio_tbl[j].bss_prio_cur == 891 /* iterate over BSS with the equal priority */
908 (struct mwifiex_bss_prio_node *) 892 list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
909 &adapter->bss_prio_tbl[j].bss_prio_head) { 893 &adapter->bss_prio_tbl[j].bss_prio_head,
910 adapter->bss_prio_tbl[j].bss_prio_cur = 894 list) {
911 list_first_entry(&adapter->bss_prio_tbl[j]
912 .bss_prio_head,
913 struct mwifiex_bss_prio_node,
914 list);
915 }
916 895
917 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur; 896 priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
918 bssprio_head = bssprio_node;
919 897
920 do { 898 if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
921 priv_tmp = bssprio_node->priv; 899 continue;
922 hqp = &priv_tmp->wmm.highest_queued_prio;
923 900
901 /* iterate over the WMM queues of the BSS */
902 hqp = &priv_tmp->wmm.highest_queued_prio;
924 for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) { 903 for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
925 904
905 spin_lock_irqsave(&priv_tmp->wmm.
906 ra_list_spinlock, flags_ra);
907
926 tid_ptr = &(priv_tmp)->wmm. 908 tid_ptr = &(priv_tmp)->wmm.
927 tid_tbl_ptr[tos_to_tid[i]]; 909 tid_tbl_ptr[tos_to_tid[i]];
928 910
929 /* For non-STA ra_list_curr may be NULL */ 911 /* iterate over receiver addresses */
930 if (!tid_ptr->ra_list_curr) 912 list_for_each_entry(ptr, &tid_ptr->ra_list,
931 continue; 913 list) {
932
933 spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
934 flags);
935 is_list_empty =
936 list_empty(&adapter->bss_prio_tbl[j]
937 .bss_prio_head);
938 spin_unlock_irqrestore(&tid_ptr->tid_tbl_lock,
939 flags);
940 if (is_list_empty)
941 continue;
942
943 /*
944 * Always choose the next ra we transmitted
945 * last time, this way we pick the ra's in
946 * round robin fashion.
947 */
948 ptr = list_first_entry(
949 &tid_ptr->ra_list_curr->list,
950 struct mwifiex_ra_list_tbl,
951 list);
952
953 head = ptr;
954 if (ptr == (struct mwifiex_ra_list_tbl *)
955 &tid_ptr->ra_list) {
956 /* Get next ra */
957 ptr = list_first_entry(&ptr->list,
958 struct mwifiex_ra_list_tbl, list);
959 head = ptr;
960 }
961
962 do {
963 is_list_empty =
964 skb_queue_empty(&ptr->skb_head);
965 914
966 if (!is_list_empty) 915 if (!skb_queue_empty(&ptr->skb_head))
916 /* holds both locks */
967 goto found; 917 goto found;
918 }
968 919
969 /* Get next ra */ 920 spin_unlock_irqrestore(&priv_tmp->wmm.
970 ptr = list_first_entry(&ptr->list, 921 ra_list_spinlock,
971 struct mwifiex_ra_list_tbl, 922 flags_ra);
972 list);
973 if (ptr ==
974 (struct mwifiex_ra_list_tbl *)
975 &tid_ptr->ra_list)
976 ptr = list_first_entry(
977 &ptr->list,
978 struct mwifiex_ra_list_tbl,
979 list);
980 } while (ptr != head);
981 } 923 }
924 }
982 925
983 /* No packet at any TID for this priv. Mark as such 926 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
984 * to skip checking TIDs for this priv (until pkt is 927 flags_bss);
985 * added).
986 */
987 atomic_set(hqp, NO_PKT_PRIO_TID);
988
989 /* Get next bss priority node */
990 bssprio_node = list_first_entry(&bssprio_node->list,
991 struct mwifiex_bss_prio_node,
992 list);
993
994 if (bssprio_node ==
995 (struct mwifiex_bss_prio_node *)
996 &adapter->bss_prio_tbl[j].bss_prio_head)
997 /* Get next bss priority node */
998 bssprio_node = list_first_entry(
999 &bssprio_node->list,
1000 struct mwifiex_bss_prio_node,
1001 list);
1002 } while (bssprio_node != bssprio_head);
1003 } 928 }
929
1004 return NULL; 930 return NULL;
1005 931
1006found: 932found:
1007 spin_lock_irqsave(&priv_tmp->wmm.ra_list_spinlock, flags); 933 /* holds bss_prio_lock / ra_list_spinlock */
1008 if (atomic_read(hqp) > i) 934 if (atomic_read(hqp) > i)
1009 atomic_set(hqp, i); 935 atomic_set(hqp, i);
1010 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags); 936 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
937 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
938 flags_bss);
1011 939
1012 *priv = priv_tmp; 940 *priv = priv_tmp;
1013 *tid = tos_to_tid[i]; 941 *tid = tos_to_tid[i];
@@ -1015,6 +943,42 @@ found:
1015 return ptr; 943 return ptr;
1016} 944}
1017 945
946/* This functions rotates ra and bss lists so packets are picked round robin.
947 *
948 * After a packet is successfully transmitted, rotate the ra list, so the ra
949 * next to the one transmitted, will come first in the list. This way we pick
950 * the ra' in a round robin fashion. Same applies to bss nodes of equal
951 * priority.
952 *
953 * Function also increments wmm.packets_out counter.
954 */
955void mwifiex_rotate_priolists(struct mwifiex_private *priv,
956 struct mwifiex_ra_list_tbl *ra,
957 int tid)
958{
959 struct mwifiex_adapter *adapter = priv->adapter;
960 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
961 struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
962 unsigned long flags;
963
964 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
965 /*
966 * dirty trick: we remove 'head' temporarily and reinsert it after
967 * curr bss node. imagine list to stay fixed while head is moved
968 */
969 list_move(&tbl[priv->bss_priority].bss_prio_head,
970 &tbl[priv->bss_priority].bss_prio_cur->list);
971 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
972
973 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
974 if (mwifiex_is_ralist_valid(priv, ra, tid)) {
975 priv->wmm.packets_out[tid]++;
976 /* same as above */
977 list_move(&tid_ptr->ra_list, &ra->list);
978 }
979 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
980}
981
1018/* 982/*
1019 * This function checks if 11n aggregation is possible. 983 * This function checks if 11n aggregation is possible.
1020 */ 984 */
@@ -1101,20 +1065,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1101 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1065 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1102 ra_list_flags); 1066 ra_list_flags);
1103 } else { 1067 } else {
1104 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 1068 mwifiex_rotate_priolists(priv, ptr, ptr_index);
1105 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1106 priv->wmm.packets_out[ptr_index]++;
1107 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1108 }
1109 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1110 list_first_entry(
1111 &adapter->bss_prio_tbl[priv->bss_priority]
1112 .bss_prio_cur->list,
1113 struct mwifiex_bss_prio_node,
1114 list);
1115 atomic_dec(&priv->wmm.tx_pkts_queued); 1069 atomic_dec(&priv->wmm.tx_pkts_queued);
1116 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1117 ra_list_flags);
1118 } 1070 }
1119} 1071}
1120 1072
@@ -1218,20 +1170,8 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1218 break; 1170 break;
1219 } 1171 }
1220 if (ret != -EBUSY) { 1172 if (ret != -EBUSY) {
1221 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 1173 mwifiex_rotate_priolists(priv, ptr, ptr_index);
1222 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1223 priv->wmm.packets_out[ptr_index]++;
1224 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1225 }
1226 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1227 list_first_entry(
1228 &adapter->bss_prio_tbl[priv->bss_priority]
1229 .bss_prio_cur->list,
1230 struct mwifiex_bss_prio_node,
1231 list);
1232 atomic_dec(&priv->wmm.tx_pkts_queued); 1174 atomic_dec(&priv->wmm.tx_pkts_queued);
1233 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1234 ra_list_flags);
1235 } 1175 }
1236} 1176}
1237 1177
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index b92f39d8963b..644d6e0c51cc 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -85,6 +85,9 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
85void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, 85void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
86 struct sk_buff *skb); 86 struct sk_buff *skb);
87void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra); 87void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
88void mwifiex_rotate_priolists(struct mwifiex_private *priv,
89 struct mwifiex_ra_list_tbl *ra,
90 int tid);
88 91
89int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter); 92int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
90void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter); 93void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 091d9a64080a..6820fce4016b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -193,10 +193,10 @@ struct mwl8k_priv {
193 struct rxd_ops *rxd_ops; 193 struct rxd_ops *rxd_ops;
194 struct ieee80211_supported_band band_24; 194 struct ieee80211_supported_band band_24;
195 struct ieee80211_channel channels_24[14]; 195 struct ieee80211_channel channels_24[14];
196 struct ieee80211_rate rates_24[14]; 196 struct ieee80211_rate rates_24[13];
197 struct ieee80211_supported_band band_50; 197 struct ieee80211_supported_band band_50;
198 struct ieee80211_channel channels_50[4]; 198 struct ieee80211_channel channels_50[4];
199 struct ieee80211_rate rates_50[9]; 199 struct ieee80211_rate rates_50[8];
200 u32 ap_macids_supported; 200 u32 ap_macids_supported;
201 u32 sta_macids_supported; 201 u32 sta_macids_supported;
202 202
@@ -232,6 +232,7 @@ struct mwl8k_priv {
232 u16 num_mcaddrs; 232 u16 num_mcaddrs;
233 u8 hw_rev; 233 u8 hw_rev;
234 u32 fw_rev; 234 u32 fw_rev;
235 u32 caps;
235 236
236 /* 237 /*
237 * Running count of TX packets in flight, to avoid 238 * Running count of TX packets in flight, to avoid
@@ -284,6 +285,7 @@ struct mwl8k_priv {
284 unsigned fw_state; 285 unsigned fw_state;
285 char *fw_pref; 286 char *fw_pref;
286 char *fw_alt; 287 char *fw_alt;
288 bool is_8764;
287 struct completion firmware_loading_complete; 289 struct completion firmware_loading_complete;
288 290
289 /* bitmap of running BSSes */ 291 /* bitmap of running BSSes */
@@ -364,7 +366,6 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
364 { .bitrate = 360, .hw_value = 72, }, 366 { .bitrate = 360, .hw_value = 72, },
365 { .bitrate = 480, .hw_value = 96, }, 367 { .bitrate = 480, .hw_value = 96, },
366 { .bitrate = 540, .hw_value = 108, }, 368 { .bitrate = 540, .hw_value = 108, },
367 { .bitrate = 720, .hw_value = 144, },
368}; 369};
369 370
370static const struct ieee80211_channel mwl8k_channels_50[] = { 371static const struct ieee80211_channel mwl8k_channels_50[] = {
@@ -383,7 +384,6 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
383 { .bitrate = 360, .hw_value = 72, }, 384 { .bitrate = 360, .hw_value = 72, },
384 { .bitrate = 480, .hw_value = 96, }, 385 { .bitrate = 480, .hw_value = 96, },
385 { .bitrate = 540, .hw_value = 108, }, 386 { .bitrate = 540, .hw_value = 108, },
386 { .bitrate = 720, .hw_value = 144, },
387}; 387};
388 388
389/* Set or get info from Firmware */ 389/* Set or get info from Firmware */
@@ -600,13 +600,18 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
600 loops = 1000; 600 loops = 1000;
601 do { 601 do {
602 u32 int_code; 602 u32 int_code;
603 603 if (priv->is_8764) {
604 int_code = ioread32(regs + MWL8K_HIU_INT_CODE); 604 int_code = ioread32(regs +
605 if (int_code == MWL8K_INT_CODE_CMD_FINISHED) { 605 MWL8K_HIU_H2A_INTERRUPT_STATUS);
606 iowrite32(0, regs + MWL8K_HIU_INT_CODE); 606 if (int_code == 0)
607 break; 607 break;
608 } else {
609 int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
610 if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
611 iowrite32(0, regs + MWL8K_HIU_INT_CODE);
612 break;
613 }
608 } 614 }
609
610 cond_resched(); 615 cond_resched();
611 udelay(1); 616 udelay(1);
612 } while (--loops); 617 } while (--loops);
@@ -724,7 +729,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
724 int rc; 729 int rc;
725 int loops; 730 int loops;
726 731
727 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 732 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4) && !priv->is_8764) {
728 const struct firmware *helper = priv->fw_helper; 733 const struct firmware *helper = priv->fw_helper;
729 734
730 if (helper == NULL) { 735 if (helper == NULL) {
@@ -743,7 +748,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
743 748
744 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size); 749 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
745 } else { 750 } else {
746 rc = mwl8k_load_fw_image(priv, fw->data, fw->size); 751 if (priv->is_8764)
752 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
753 else
754 rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
747 } 755 }
748 756
749 if (rc) { 757 if (rc) {
@@ -908,9 +916,9 @@ static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv,
908} 916}
909 917
910/* 918/*
911 * Packet reception for 88w8366 AP firmware. 919 * Packet reception for 88w8366/88w8764 AP firmware.
912 */ 920 */
913struct mwl8k_rxd_8366_ap { 921struct mwl8k_rxd_ap {
914 __le16 pkt_len; 922 __le16 pkt_len;
915 __u8 sq2; 923 __u8 sq2;
916 __u8 rate; 924 __u8 rate;
@@ -928,30 +936,30 @@ struct mwl8k_rxd_8366_ap {
928 __u8 rx_ctrl; 936 __u8 rx_ctrl;
929} __packed; 937} __packed;
930 938
931#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 939#define MWL8K_AP_RATE_INFO_MCS_FORMAT 0x80
932#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 940#define MWL8K_AP_RATE_INFO_40MHZ 0x40
933#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f) 941#define MWL8K_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
934 942
935#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 943#define MWL8K_AP_RX_CTRL_OWNED_BY_HOST 0x80
936 944
937/* 8366 AP rx_status bits */ 945/* 8366/8764 AP rx_status bits */
938#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80 946#define MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
939#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF 947#define MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
940#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02 948#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
941#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04 949#define MWL8K_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
942#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08 950#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
943 951
944static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) 952static void mwl8k_rxd_ap_init(void *_rxd, dma_addr_t next_dma_addr)
945{ 953{
946 struct mwl8k_rxd_8366_ap *rxd = _rxd; 954 struct mwl8k_rxd_ap *rxd = _rxd;
947 955
948 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 956 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
949 rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST; 957 rxd->rx_ctrl = MWL8K_AP_RX_CTRL_OWNED_BY_HOST;
950} 958}
951 959
952static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len) 960static void mwl8k_rxd_ap_refill(void *_rxd, dma_addr_t addr, int len)
953{ 961{
954 struct mwl8k_rxd_8366_ap *rxd = _rxd; 962 struct mwl8k_rxd_ap *rxd = _rxd;
955 963
956 rxd->pkt_len = cpu_to_le16(len); 964 rxd->pkt_len = cpu_to_le16(len);
957 rxd->pkt_phys_addr = cpu_to_le32(addr); 965 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -960,12 +968,12 @@ static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
960} 968}
961 969
962static int 970static int
963mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, 971mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
964 __le16 *qos, s8 *noise) 972 __le16 *qos, s8 *noise)
965{ 973{
966 struct mwl8k_rxd_8366_ap *rxd = _rxd; 974 struct mwl8k_rxd_ap *rxd = _rxd;
967 975
968 if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST)) 976 if (!(rxd->rx_ctrl & MWL8K_AP_RX_CTRL_OWNED_BY_HOST))
969 return -1; 977 return -1;
970 rmb(); 978 rmb();
971 979
@@ -974,11 +982,11 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
974 status->signal = -rxd->rssi; 982 status->signal = -rxd->rssi;
975 *noise = -rxd->noise_floor; 983 *noise = -rxd->noise_floor;
976 984
977 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { 985 if (rxd->rate & MWL8K_AP_RATE_INFO_MCS_FORMAT) {
978 status->flag |= RX_FLAG_HT; 986 status->flag |= RX_FLAG_HT;
979 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ) 987 if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ)
980 status->flag |= RX_FLAG_40MHZ; 988 status->flag |= RX_FLAG_40MHZ;
981 status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate); 989 status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate);
982 } else { 990 } else {
983 int i; 991 int i;
984 992
@@ -1002,19 +1010,19 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
1002 1010
1003 *qos = rxd->qos_control; 1011 *qos = rxd->qos_control;
1004 1012
1005 if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) && 1013 if ((rxd->rx_status != MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
1006 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) && 1014 (rxd->rx_status & MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK) &&
1007 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR)) 1015 (rxd->rx_status & MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
1008 status->flag |= RX_FLAG_MMIC_ERROR; 1016 status->flag |= RX_FLAG_MMIC_ERROR;
1009 1017
1010 return le16_to_cpu(rxd->pkt_len); 1018 return le16_to_cpu(rxd->pkt_len);
1011} 1019}
1012 1020
1013static struct rxd_ops rxd_8366_ap_ops = { 1021static struct rxd_ops rxd_ap_ops = {
1014 .rxd_size = sizeof(struct mwl8k_rxd_8366_ap), 1022 .rxd_size = sizeof(struct mwl8k_rxd_ap),
1015 .rxd_init = mwl8k_rxd_8366_ap_init, 1023 .rxd_init = mwl8k_rxd_ap_init,
1016 .rxd_refill = mwl8k_rxd_8366_ap_refill, 1024 .rxd_refill = mwl8k_rxd_ap_refill,
1017 .rxd_process = mwl8k_rxd_8366_ap_process, 1025 .rxd_process = mwl8k_rxd_ap_process,
1018}; 1026};
1019 1027
1020/* 1028/*
@@ -2401,6 +2409,9 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
2401{ 2409{
2402 struct mwl8k_priv *priv = hw->priv; 2410 struct mwl8k_priv *priv = hw->priv;
2403 2411
2412 if (priv->caps)
2413 return;
2414
2404 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) { 2415 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
2405 mwl8k_setup_2ghz_band(hw); 2416 mwl8k_setup_2ghz_band(hw);
2406 if (caps & MWL8K_CAP_MIMO) 2417 if (caps & MWL8K_CAP_MIMO)
@@ -2412,6 +2423,8 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
2412 if (caps & MWL8K_CAP_MIMO) 2423 if (caps & MWL8K_CAP_MIMO)
2413 mwl8k_set_ht_caps(hw, &priv->band_50, caps); 2424 mwl8k_set_ht_caps(hw, &priv->band_50, caps);
2414 } 2425 }
2426
2427 priv->caps = caps;
2415} 2428}
2416 2429
2417static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) 2430static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
@@ -2837,7 +2850,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2837 struct ieee80211_conf *conf, 2850 struct ieee80211_conf *conf,
2838 unsigned short pwr) 2851 unsigned short pwr)
2839{ 2852{
2840 struct ieee80211_channel *channel = conf->channel; 2853 struct ieee80211_channel *channel = conf->chandef.chan;
2854 enum nl80211_channel_type channel_type =
2855 cfg80211_get_chandef_type(&conf->chandef);
2841 struct mwl8k_cmd_tx_power *cmd; 2856 struct mwl8k_cmd_tx_power *cmd;
2842 int rc; 2857 int rc;
2843 int i; 2858 int i;
@@ -2857,14 +2872,14 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2857 2872
2858 cmd->channel = cpu_to_le16(channel->hw_value); 2873 cmd->channel = cpu_to_le16(channel->hw_value);
2859 2874
2860 if (conf->channel_type == NL80211_CHAN_NO_HT || 2875 if (channel_type == NL80211_CHAN_NO_HT ||
2861 conf->channel_type == NL80211_CHAN_HT20) { 2876 channel_type == NL80211_CHAN_HT20) {
2862 cmd->bw = cpu_to_le16(0x2); 2877 cmd->bw = cpu_to_le16(0x2);
2863 } else { 2878 } else {
2864 cmd->bw = cpu_to_le16(0x4); 2879 cmd->bw = cpu_to_le16(0x4);
2865 if (conf->channel_type == NL80211_CHAN_HT40MINUS) 2880 if (channel_type == NL80211_CHAN_HT40MINUS)
2866 cmd->sub_ch = cpu_to_le16(0x3); 2881 cmd->sub_ch = cpu_to_le16(0x3);
2867 else if (conf->channel_type == NL80211_CHAN_HT40PLUS) 2882 else if (channel_type == NL80211_CHAN_HT40PLUS)
2868 cmd->sub_ch = cpu_to_le16(0x1); 2883 cmd->sub_ch = cpu_to_le16(0x1);
2869 } 2884 }
2870 2885
@@ -3008,7 +3023,9 @@ struct mwl8k_cmd_set_rf_channel {
3008static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, 3023static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
3009 struct ieee80211_conf *conf) 3024 struct ieee80211_conf *conf)
3010{ 3025{
3011 struct ieee80211_channel *channel = conf->channel; 3026 struct ieee80211_channel *channel = conf->chandef.chan;
3027 enum nl80211_channel_type channel_type =
3028 cfg80211_get_chandef_type(&conf->chandef);
3012 struct mwl8k_cmd_set_rf_channel *cmd; 3029 struct mwl8k_cmd_set_rf_channel *cmd;
3013 int rc; 3030 int rc;
3014 3031
@@ -3026,12 +3043,12 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
3026 else if (channel->band == IEEE80211_BAND_5GHZ) 3043 else if (channel->band == IEEE80211_BAND_5GHZ)
3027 cmd->channel_flags |= cpu_to_le32(0x00000004); 3044 cmd->channel_flags |= cpu_to_le32(0x00000004);
3028 3045
3029 if (conf->channel_type == NL80211_CHAN_NO_HT || 3046 if (channel_type == NL80211_CHAN_NO_HT ||
3030 conf->channel_type == NL80211_CHAN_HT20) 3047 channel_type == NL80211_CHAN_HT20)
3031 cmd->channel_flags |= cpu_to_le32(0x00000080); 3048 cmd->channel_flags |= cpu_to_le32(0x00000080);
3032 else if (conf->channel_type == NL80211_CHAN_HT40MINUS) 3049 else if (channel_type == NL80211_CHAN_HT40MINUS)
3033 cmd->channel_flags |= cpu_to_le32(0x000001900); 3050 cmd->channel_flags |= cpu_to_le32(0x000001900);
3034 else if (conf->channel_type == NL80211_CHAN_HT40PLUS) 3051 else if (channel_type == NL80211_CHAN_HT40PLUS)
3035 cmd->channel_flags |= cpu_to_le32(0x000000900); 3052 cmd->channel_flags |= cpu_to_le32(0x000000900);
3036 3053
3037 rc = mwl8k_post_cmd(hw, &cmd->header); 3054 rc = mwl8k_post_cmd(hw, &cmd->header);
@@ -3064,11 +3081,11 @@ static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
3064 int j; 3081 int j;
3065 3082
3066 /* 3083 /*
3067 * Clear nonstandard rates 4 and 13. 3084 * Clear nonstandard rate 4.
3068 */ 3085 */
3069 mask &= 0x1fef; 3086 mask &= 0x1fef;
3070 3087
3071 for (i = 0, j = 0; i < 14; i++) { 3088 for (i = 0, j = 0; i < 13; i++) {
3072 if (mask & (1 << i)) 3089 if (mask & (1 << i))
3073 rates[j++] = mwl8k_rates_24[i].hw_value; 3090 rates[j++] = mwl8k_rates_24[i].hw_value;
3074 } 3091 }
@@ -3950,7 +3967,7 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
3950 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN); 3967 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
3951 cmd->stn_id = cpu_to_le16(sta->aid); 3968 cmd->stn_id = cpu_to_le16(sta->aid);
3952 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD); 3969 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
3953 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) 3970 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
3954 rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; 3971 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
3955 else 3972 else
3956 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; 3973 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
@@ -4385,7 +4402,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
4385 p->ht_caps = cpu_to_le16(sta->ht_cap.cap); 4402 p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
4386 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | 4403 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
4387 ((sta->ht_cap.ampdu_density & 7) << 2); 4404 ((sta->ht_cap.ampdu_density & 7) << 2);
4388 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) 4405 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
4389 rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; 4406 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
4390 else 4407 else
4391 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; 4408 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
@@ -4792,16 +4809,14 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
4792 struct mwl8k_priv *priv = hw->priv; 4809 struct mwl8k_priv *priv = hw->priv;
4793 int rc; 4810 int rc;
4794 4811
4795 if (conf->flags & IEEE80211_CONF_IDLE) {
4796 mwl8k_cmd_radio_disable(hw);
4797 return 0;
4798 }
4799
4800 rc = mwl8k_fw_lock(hw); 4812 rc = mwl8k_fw_lock(hw);
4801 if (rc) 4813 if (rc)
4802 return rc; 4814 return rc;
4803 4815
4804 rc = mwl8k_cmd_radio_enable(hw); 4816 if (conf->flags & IEEE80211_CONF_IDLE)
4817 rc = mwl8k_cmd_radio_disable(hw);
4818 else
4819 rc = mwl8k_cmd_radio_enable(hw);
4805 if (rc) 4820 if (rc)
4806 goto out; 4821 goto out;
4807 4822
@@ -4868,7 +4883,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4868 goto out; 4883 goto out;
4869 } 4884 }
4870 4885
4871 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) { 4886 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
4872 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ]; 4887 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
4873 } else { 4888 } else {
4874 ap_legacy_rates = 4889 ap_legacy_rates =
@@ -4900,7 +4915,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4900 if (idx) 4915 if (idx)
4901 idx--; 4916 idx--;
4902 4917
4903 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) 4918 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
4904 rate = mwl8k_rates_24[idx].hw_value; 4919 rate = mwl8k_rates_24[idx].hw_value;
4905 else 4920 else
4906 rate = mwl8k_rates_50[idx].hw_value; 4921 rate = mwl8k_rates_50[idx].hw_value;
@@ -4973,7 +4988,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4973 if (idx) 4988 if (idx)
4974 idx--; 4989 idx--;
4975 4990
4976 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) 4991 if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
4977 rate = mwl8k_rates_24[idx].hw_value; 4992 rate = mwl8k_rates_24[idx].hw_value;
4978 else 4993 else
4979 rate = mwl8k_rates_50[idx].hw_value; 4994 rate = mwl8k_rates_50[idx].hw_value;
@@ -5246,7 +5261,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
5246 if (idx != 0) 5261 if (idx != 0)
5247 return -ENOENT; 5262 return -ENOENT;
5248 5263
5249 survey->channel = conf->channel; 5264 survey->channel = conf->chandef.chan;
5250 survey->filled = SURVEY_INFO_NOISE_DBM; 5265 survey->filled = SURVEY_INFO_NOISE_DBM;
5251 survey->noise = priv->noise; 5266 survey->noise = priv->noise;
5252 5267
@@ -5429,12 +5444,17 @@ enum {
5429 MWL8363 = 0, 5444 MWL8363 = 0,
5430 MWL8687, 5445 MWL8687,
5431 MWL8366, 5446 MWL8366,
5447 MWL8764,
5432}; 5448};
5433 5449
5434#define MWL8K_8366_AP_FW_API 3 5450#define MWL8K_8366_AP_FW_API 3
5435#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" 5451#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
5436#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) 5452#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
5437 5453
5454#define MWL8K_8764_AP_FW_API 1
5455#define _MWL8K_8764_AP_FW(api) "mwl8k/fmimage_8764_ap-" #api ".fw"
5456#define MWL8K_8764_AP_FW(api) _MWL8K_8764_AP_FW(api)
5457
5438static struct mwl8k_device_info mwl8k_info_tbl[] = { 5458static struct mwl8k_device_info mwl8k_info_tbl[] = {
5439 [MWL8363] = { 5459 [MWL8363] = {
5440 .part_name = "88w8363", 5460 .part_name = "88w8363",
@@ -5452,7 +5472,13 @@ static struct mwl8k_device_info mwl8k_info_tbl[] = {
5452 .fw_image_sta = "mwl8k/fmimage_8366.fw", 5472 .fw_image_sta = "mwl8k/fmimage_8366.fw",
5453 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API), 5473 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
5454 .fw_api_ap = MWL8K_8366_AP_FW_API, 5474 .fw_api_ap = MWL8K_8366_AP_FW_API,
5455 .ap_rxd_ops = &rxd_8366_ap_ops, 5475 .ap_rxd_ops = &rxd_ap_ops,
5476 },
5477 [MWL8764] = {
5478 .part_name = "88w8764",
5479 .fw_image_ap = MWL8K_8764_AP_FW(MWL8K_8764_AP_FW_API),
5480 .fw_api_ap = MWL8K_8764_AP_FW_API,
5481 .ap_rxd_ops = &rxd_ap_ops,
5456 }, 5482 },
5457}; 5483};
5458 5484
@@ -5474,6 +5500,7 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
5474 { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, }, 5500 { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
5475 { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, }, 5501 { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
5476 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, }, 5502 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
5503 { PCI_VDEVICE(MARVELL, 0x2b36), .driver_data = MWL8764, },
5477 { }, 5504 { },
5478}; 5505};
5479MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 5506MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -5995,6 +6022,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
5995 priv->pdev = pdev; 6022 priv->pdev = pdev;
5996 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 6023 priv->device_info = &mwl8k_info_tbl[id->driver_data];
5997 6024
6025 if (id->driver_data == MWL8764)
6026 priv->is_8764 = true;
5998 6027
5999 priv->sram = pci_iomap(pdev, 0, 0x10000); 6028 priv->sram = pci_iomap(pdev, 0, 0x10000);
6000 if (priv->sram == NULL) { 6029 if (priv->sram == NULL) {
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7744f42de1ea..1f9cb55c3360 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1584,7 +1584,7 @@ static int ezusb_probe(struct usb_interface *interface,
1584 struct ezusb_priv *upriv = NULL; 1584 struct ezusb_priv *upriv = NULL;
1585 struct usb_interface_descriptor *iface_desc; 1585 struct usb_interface_descriptor *iface_desc;
1586 struct usb_endpoint_descriptor *ep; 1586 struct usb_endpoint_descriptor *ep;
1587 const struct firmware *fw_entry; 1587 const struct firmware *fw_entry = NULL;
1588 int retval = 0; 1588 int retval = 0;
1589 int i; 1589 int i;
1590 1590
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 9ba85106eec0..b3879fbf5368 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -402,7 +402,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
402 struct p54_rssi_db_entry *rssi_data; 402 struct p54_rssi_db_entry *rssi_data;
403 unsigned int i; 403 unsigned int i;
404 void *entry; 404 void *entry;
405 __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq); 405 __le16 freq = cpu_to_le16(priv->hw->conf.chandef.chan->center_freq);
406 406
407 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + 407 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
408 2 + sizeof(*iq_autocal) + sizeof(*body) + 408 2 + sizeof(*iq_autocal) + sizeof(*body) +
@@ -532,7 +532,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
532err: 532err:
533 wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n", 533 wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n",
534 ieee80211_frequency_to_channel( 534 ieee80211_frequency_to_channel(
535 priv->hw->conf.channel->center_freq)); 535 priv->hw->conf.chandef.chan->center_freq));
536 536
537 dev_kfree_skb_any(skb); 537 dev_kfree_skb_any(skb);
538 return -EINVAL; 538 return -EINVAL;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index aadda99989c0..067e6f2fd050 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -340,7 +340,7 @@ static int p54_config(struct ieee80211_hw *dev, u32 changed)
340 * TODO: Use the LM_SCAN_TRAP to determine the current 340 * TODO: Use the LM_SCAN_TRAP to determine the current
341 * operating channel. 341 * operating channel.
342 */ 342 */
343 priv->curchan = priv->hw->conf.channel; 343 priv->curchan = priv->hw->conf.chandef.chan;
344 p54_reset_stats(priv); 344 p54_reset_stats(priv);
345 WARN_ON(p54_fetch_statistics(priv)); 345 WARN_ON(p54_fetch_statistics(priv));
346 } 346 }
@@ -480,7 +480,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
480 p54_set_edcf(priv); 480 p54_set_edcf(priv);
481 } 481 }
482 if (changed & BSS_CHANGED_BASIC_RATES) { 482 if (changed & BSS_CHANGED_BASIC_RATES) {
483 if (dev->conf.channel->band == IEEE80211_BAND_5GHZ) 483 if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
484 priv->basic_rate_mask = (info->basic_rates << 4); 484 priv->basic_rate_mask = (info->basic_rates << 4);
485 else 485 else
486 priv->basic_rate_mask = info->basic_rates; 486 priv->basic_rate_mask = info->basic_rates;
@@ -670,7 +670,7 @@ static unsigned int p54_flush_count(struct p54_common *priv)
670 return total; 670 return total;
671} 671}
672 672
673static void p54_flush(struct ieee80211_hw *dev, bool drop) 673static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
674{ 674{
675 struct p54_common *priv = dev->priv; 675 struct p54_common *priv = dev->priv;
676 unsigned int total, i; 676 unsigned int total, i;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 4fd49a007b51..978e7eb26567 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -396,7 +396,7 @@ static int p54spi_rx(struct p54s_priv *priv)
396static irqreturn_t p54spi_interrupt(int irq, void *config) 396static irqreturn_t p54spi_interrupt(int irq, void *config)
397{ 397{
398 struct spi_device *spi = config; 398 struct spi_device *spi = config;
399 struct p54s_priv *priv = dev_get_drvdata(&spi->dev); 399 struct p54s_priv *priv = spi_get_drvdata(spi);
400 400
401 ieee80211_queue_work(priv->hw, &priv->work); 401 ieee80211_queue_work(priv->hw, &priv->work);
402 402
@@ -609,7 +609,7 @@ static int p54spi_probe(struct spi_device *spi)
609 609
610 priv = hw->priv; 610 priv = hw->priv;
611 priv->hw = hw; 611 priv->hw = hw;
612 dev_set_drvdata(&spi->dev, priv); 612 spi_set_drvdata(spi, priv);
613 priv->spi = spi; 613 priv->spi = spi;
614 614
615 spi->bits_per_word = 16; 615 spi->bits_per_word = 16;
@@ -685,7 +685,7 @@ err_free:
685 685
686static int p54spi_remove(struct spi_device *spi) 686static int p54spi_remove(struct spi_device *spi)
687{ 687{
688 struct p54s_priv *priv = dev_get_drvdata(&spi->dev); 688 struct p54s_priv *priv = spi_get_drvdata(spi);
689 689
690 p54_unregister_common(priv->hw); 690 p54_unregister_common(priv->hw);
691 691
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 12f0a34477f2..f95de0d16216 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -354,13 +354,13 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
354 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); 354 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
355 if (hdr->rate & 0x10) 355 if (hdr->rate & 0x10)
356 rx_status->flag |= RX_FLAG_SHORTPRE; 356 rx_status->flag |= RX_FLAG_SHORTPRE;
357 if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) 357 if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
358 rx_status->rate_idx = (rate < 4) ? 0 : rate - 4; 358 rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
359 else 359 else
360 rx_status->rate_idx = rate; 360 rx_status->rate_idx = rate;
361 361
362 rx_status->freq = freq; 362 rx_status->freq = freq;
363 rx_status->band = priv->hw->conf.channel->band; 363 rx_status->band = priv->hw->conf.chandef.chan->band;
364 rx_status->antenna = hdr->antenna; 364 rx_status->antenna = hdr->antenna;
365 365
366 tsf32 = le32_to_cpu(hdr->tsf32); 366 tsf32 = le32_to_cpu(hdr->tsf32);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 3109c0db66e1..ebada812b3a5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -144,7 +144,7 @@ static int psm;
144static char *essid; 144static char *essid;
145 145
146/* Default to encapsulation unless translation requested */ 146/* Default to encapsulation unless translation requested */
147static int translate = 1; 147static bool translate = 1;
148 148
149static int country = USA; 149static int country = USA;
150 150
@@ -178,7 +178,7 @@ module_param(hop_dwell, int, 0);
178module_param(beacon_period, int, 0); 178module_param(beacon_period, int, 0);
179module_param(psm, int, 0); 179module_param(psm, int, 0);
180module_param(essid, charp, 0); 180module_param(essid, charp, 0);
181module_param(translate, int, 0); 181module_param(translate, bool, 0);
182module_param(country, int, 0); 182module_param(country, int, 0);
183module_param(sniffer, int, 0); 183module_param(sniffer, int, 0);
184module_param(bc, int, 0); 184module_param(bc, int, 0);
@@ -953,7 +953,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
953 unsigned char *data, int len) 953 unsigned char *data, int len)
954{ 954{
955 __be16 proto = ((struct ethhdr *)data)->h_proto; 955 __be16 proto = ((struct ethhdr *)data)->h_proto;
956 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ 956 if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */
957 pr_debug("ray_cs translate_frame DIX II\n"); 957 pr_debug("ray_cs translate_frame DIX II\n");
958 /* Copy LLC header to card buffer */ 958 /* Copy LLC header to card buffer */
959 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); 959 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
@@ -1353,7 +1353,7 @@ static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
1353static int ray_set_framing(struct net_device *dev, struct iw_request_info *info, 1353static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
1354 union iwreq_data *wrqu, char *extra) 1354 union iwreq_data *wrqu, char *extra)
1355{ 1355{
1356 translate = *(extra); /* Set framing mode */ 1356 translate = !!*(extra); /* Set framing mode */
1357 1357
1358 return 0; 1358 return 0;
1359} 1359}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 525fd7521dff..8169a85c4498 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2,7 +2,7 @@
2 * Driver for RNDIS based wireless USB devices. 2 * Driver for RNDIS based wireless USB devices.
3 * 3 *
4 * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net> 4 * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
5 * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -2839,8 +2839,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2839 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2839 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2840 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2840 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
2841 2841
2842 if (info != NULL) 2842 kfree(info);
2843 kfree(info);
2844 2843
2845 priv->connected = true; 2844 priv->connected = true;
2846 memcpy(priv->bssid, bssid, ETH_ALEN); 2845 memcpy(priv->bssid, bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 76cd47eb901e..9b915d3a44be 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -173,6 +173,13 @@ config RT2800USB_RT53XX
173 rt2800usb driver. 173 rt2800usb driver.
174 Supported chips: RT5370 174 Supported chips: RT5370
175 175
176config RT2800USB_RT55XX
177 bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
178 ---help---
179 This adds support for rt55xx wireless chipset family to the
180 rt2800usb driver.
181 Supported chips: RT5572
182
176config RT2800USB_UNKNOWN 183config RT2800USB_UNKNOWN
177 bool "rt2800usb - Include support for unknown (USB) devices" 184 bool "rt2800usb - Include support for unknown (USB) devices"
178 default n 185 default n
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index dcfb54e0c516..f7143733d7e9 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -41,7 +41,7 @@
41/* 41/*
42 * Register access. 42 * Register access.
43 * All access to the CSR registers will go through the methods 43 * All access to the CSR registers will go through the methods
44 * rt2x00pci_register_read and rt2x00pci_register_write. 44 * rt2x00mmio_register_read and rt2x00mmio_register_write.
45 * BBP and RF register require indirect register access, 45 * BBP and RF register require indirect register access,
46 * and use the CSR registers BBPCSR and RFCSR to achieve this. 46 * and use the CSR registers BBPCSR and RFCSR to achieve this.
47 * These indirect registers work with busy bits, 47 * These indirect registers work with busy bits,
@@ -52,9 +52,9 @@
52 * and we will print an error. 52 * and we will print an error.
53 */ 53 */
54#define WAIT_FOR_BBP(__dev, __reg) \ 54#define WAIT_FOR_BBP(__dev, __reg) \
55 rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) 55 rt2x00mmio_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg))
56#define WAIT_FOR_RF(__dev, __reg) \ 56#define WAIT_FOR_RF(__dev, __reg) \
57 rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) 57 rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
58 58
59static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev, 59static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev,
60 const unsigned int word, const u8 value) 60 const unsigned int word, const u8 value)
@@ -74,7 +74,7 @@ static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev,
74 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 74 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
75 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); 75 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
76 76
77 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 77 rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg);
78 } 78 }
79 79
80 mutex_unlock(&rt2x00dev->csr_mutex); 80 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -101,7 +101,7 @@ static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
101 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 101 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
102 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); 102 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
103 103
104 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 104 rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg);
105 105
106 WAIT_FOR_BBP(rt2x00dev, &reg); 106 WAIT_FOR_BBP(rt2x00dev, &reg);
107 } 107 }
@@ -129,7 +129,7 @@ static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev,
129 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); 129 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
130 rt2x00_set_field32(&reg, RFCSR_BUSY, 1); 130 rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
131 131
132 rt2x00pci_register_write(rt2x00dev, RFCSR, reg); 132 rt2x00mmio_register_write(rt2x00dev, RFCSR, reg);
133 rt2x00_rf_write(rt2x00dev, word, value); 133 rt2x00_rf_write(rt2x00dev, word, value);
134 } 134 }
135 135
@@ -141,7 +141,7 @@ static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
141 struct rt2x00_dev *rt2x00dev = eeprom->data; 141 struct rt2x00_dev *rt2x00dev = eeprom->data;
142 u32 reg; 142 u32 reg;
143 143
144 rt2x00pci_register_read(rt2x00dev, CSR21, &reg); 144 rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
145 145
146 eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); 146 eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN);
147 eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); 147 eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT);
@@ -163,15 +163,15 @@ static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
163 rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, 163 rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT,
164 !!eeprom->reg_chip_select); 164 !!eeprom->reg_chip_select);
165 165
166 rt2x00pci_register_write(rt2x00dev, CSR21, reg); 166 rt2x00mmio_register_write(rt2x00dev, CSR21, reg);
167} 167}
168 168
169#ifdef CONFIG_RT2X00_LIB_DEBUGFS 169#ifdef CONFIG_RT2X00_LIB_DEBUGFS
170static const struct rt2x00debug rt2400pci_rt2x00debug = { 170static const struct rt2x00debug rt2400pci_rt2x00debug = {
171 .owner = THIS_MODULE, 171 .owner = THIS_MODULE,
172 .csr = { 172 .csr = {
173 .read = rt2x00pci_register_read, 173 .read = rt2x00mmio_register_read,
174 .write = rt2x00pci_register_write, 174 .write = rt2x00mmio_register_write,
175 .flags = RT2X00DEBUGFS_OFFSET, 175 .flags = RT2X00DEBUGFS_OFFSET,
176 .word_base = CSR_REG_BASE, 176 .word_base = CSR_REG_BASE,
177 .word_size = sizeof(u32), 177 .word_size = sizeof(u32),
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205{ 205{
206 u32 reg; 206 u32 reg;
207 207
208 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 208 rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
209 return rt2x00_get_field32(reg, GPIOCSR_VAL0); 209 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
210} 210}
211 211
@@ -218,14 +218,14 @@ static void rt2400pci_brightness_set(struct led_classdev *led_cdev,
218 unsigned int enabled = brightness != LED_OFF; 218 unsigned int enabled = brightness != LED_OFF;
219 u32 reg; 219 u32 reg;
220 220
221 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); 221 rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
222 222
223 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) 223 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
224 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled); 224 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
225 else if (led->type == LED_TYPE_ACTIVITY) 225 else if (led->type == LED_TYPE_ACTIVITY)
226 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled); 226 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled);
227 227
228 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); 228 rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
229} 229}
230 230
231static int rt2400pci_blink_set(struct led_classdev *led_cdev, 231static int rt2400pci_blink_set(struct led_classdev *led_cdev,
@@ -236,10 +236,10 @@ static int rt2400pci_blink_set(struct led_classdev *led_cdev,
236 container_of(led_cdev, struct rt2x00_led, led_dev); 236 container_of(led_cdev, struct rt2x00_led, led_dev);
237 u32 reg; 237 u32 reg;
238 238
239 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); 239 rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
240 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on); 240 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
241 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off); 241 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
242 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); 242 rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
243 243
244 return 0; 244 return 0;
245} 245}
@@ -269,7 +269,7 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
269 * Note that the version error will always be dropped 269 * Note that the version error will always be dropped
270 * since there is no filter for it at this time. 270 * since there is no filter for it at this time.
271 */ 271 */
272 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 272 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
273 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, 273 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
274 !(filter_flags & FIF_FCSFAIL)); 274 !(filter_flags & FIF_FCSFAIL));
275 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, 275 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
@@ -282,7 +282,7 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
282 !(filter_flags & FIF_PROMISC_IN_BSS) && 282 !(filter_flags & FIF_PROMISC_IN_BSS) &&
283 !rt2x00dev->intf_ap_count); 283 !rt2x00dev->intf_ap_count);
284 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); 284 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
285 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 285 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
286} 286}
287 287
288static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev, 288static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
@@ -298,25 +298,26 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
298 * Enable beacon config 298 * Enable beacon config
299 */ 299 */
300 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); 300 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
301 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 301 rt2x00mmio_register_read(rt2x00dev, BCNCSR1, &reg);
302 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); 302 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
303 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); 303 rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg);
304 304
305 /* 305 /*
306 * Enable synchronisation. 306 * Enable synchronisation.
307 */ 307 */
308 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 308 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
309 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 309 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
310 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 310 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
311 } 311 }
312 312
313 if (flags & CONFIG_UPDATE_MAC) 313 if (flags & CONFIG_UPDATE_MAC)
314 rt2x00pci_register_multiwrite(rt2x00dev, CSR3, 314 rt2x00mmio_register_multiwrite(rt2x00dev, CSR3,
315 conf->mac, sizeof(conf->mac)); 315 conf->mac, sizeof(conf->mac));
316 316
317 if (flags & CONFIG_UPDATE_BSSID) 317 if (flags & CONFIG_UPDATE_BSSID)
318 rt2x00pci_register_multiwrite(rt2x00dev, CSR5, 318 rt2x00mmio_register_multiwrite(rt2x00dev, CSR5,
319 conf->bssid, sizeof(conf->bssid)); 319 conf->bssid,
320 sizeof(conf->bssid));
320} 321}
321 322
322static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, 323static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
@@ -332,68 +333,68 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
332 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 333 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
333 preamble_mask = erp->short_preamble << 3; 334 preamble_mask = erp->short_preamble << 3;
334 335
335 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 336 rt2x00mmio_register_read(rt2x00dev, TXCSR1, &reg);
336 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff); 337 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff);
337 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a); 338 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a);
338 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 339 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
339 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 340 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
340 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 341 rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg);
341 342
342 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 343 rt2x00mmio_register_read(rt2x00dev, ARCSR2, &reg);
343 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 344 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
344 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 345 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
345 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 346 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
346 GET_DURATION(ACK_SIZE, 10)); 347 GET_DURATION(ACK_SIZE, 10));
347 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 348 rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg);
348 349
349 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 350 rt2x00mmio_register_read(rt2x00dev, ARCSR3, &reg);
350 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 351 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
351 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 352 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
352 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 353 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
353 GET_DURATION(ACK_SIZE, 20)); 354 GET_DURATION(ACK_SIZE, 20));
354 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 355 rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg);
355 356
356 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 357 rt2x00mmio_register_read(rt2x00dev, ARCSR4, &reg);
357 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 358 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
358 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 359 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
359 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 360 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
360 GET_DURATION(ACK_SIZE, 55)); 361 GET_DURATION(ACK_SIZE, 55));
361 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 362 rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg);
362 363
363 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 364 rt2x00mmio_register_read(rt2x00dev, ARCSR5, &reg);
364 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 365 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
365 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 366 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
366 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 367 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
367 GET_DURATION(ACK_SIZE, 110)); 368 GET_DURATION(ACK_SIZE, 110));
368 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 369 rt2x00mmio_register_write(rt2x00dev, ARCSR5, reg);
369 } 370 }
370 371
371 if (changed & BSS_CHANGED_BASIC_RATES) 372 if (changed & BSS_CHANGED_BASIC_RATES)
372 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 373 rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
373 374
374 if (changed & BSS_CHANGED_ERP_SLOT) { 375 if (changed & BSS_CHANGED_ERP_SLOT) {
375 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 376 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
376 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 377 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
377 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 378 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
378 379
379 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 380 rt2x00mmio_register_read(rt2x00dev, CSR18, &reg);
380 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 381 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
381 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 382 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
382 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 383 rt2x00mmio_register_write(rt2x00dev, CSR18, reg);
383 384
384 rt2x00pci_register_read(rt2x00dev, CSR19, &reg); 385 rt2x00mmio_register_read(rt2x00dev, CSR19, &reg);
385 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); 386 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
386 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); 387 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
387 rt2x00pci_register_write(rt2x00dev, CSR19, reg); 388 rt2x00mmio_register_write(rt2x00dev, CSR19, reg);
388 } 389 }
389 390
390 if (changed & BSS_CHANGED_BEACON_INT) { 391 if (changed & BSS_CHANGED_BEACON_INT) {
391 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 392 rt2x00mmio_register_read(rt2x00dev, CSR12, &reg);
392 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, 393 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
393 erp->beacon_int * 16); 394 erp->beacon_int * 16);
394 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, 395 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
395 erp->beacon_int * 16); 396 erp->beacon_int * 16);
396 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 397 rt2x00mmio_register_write(rt2x00dev, CSR12, reg);
397 } 398 }
398} 399}
399 400
@@ -497,7 +498,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
497 /* 498 /*
498 * Clear false CRC during channel switch. 499 * Clear false CRC during channel switch.
499 */ 500 */
500 rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); 501 rt2x00mmio_register_read(rt2x00dev, CNT0, &rf->rf1);
501} 502}
502 503
503static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower) 504static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower)
@@ -510,12 +511,12 @@ static void rt2400pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
510{ 511{
511 u32 reg; 512 u32 reg;
512 513
513 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 514 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
514 rt2x00_set_field32(&reg, CSR11_LONG_RETRY, 515 rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
515 libconf->conf->long_frame_max_tx_count); 516 libconf->conf->long_frame_max_tx_count);
516 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, 517 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
517 libconf->conf->short_frame_max_tx_count); 518 libconf->conf->short_frame_max_tx_count);
518 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 519 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
519} 520}
520 521
521static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev, 522static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
@@ -527,7 +528,7 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
527 u32 reg; 528 u32 reg;
528 529
529 if (state == STATE_SLEEP) { 530 if (state == STATE_SLEEP) {
530 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 531 rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
531 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, 532 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
532 (rt2x00dev->beacon_int - 20) * 16); 533 (rt2x00dev->beacon_int - 20) * 16);
533 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, 534 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
@@ -535,14 +536,14 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
535 536
536 /* We must first disable autowake before it can be enabled */ 537 /* We must first disable autowake before it can be enabled */
537 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); 538 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
538 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 539 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
539 540
540 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 541 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
541 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 542 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
542 } else { 543 } else {
543 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 544 rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
544 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); 545 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
545 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 546 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
546 } 547 }
547 548
548 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 549 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -568,10 +569,10 @@ static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev,
568{ 569{
569 u32 reg; 570 u32 reg;
570 571
571 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 572 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
572 rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min); 573 rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min);
573 rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max); 574 rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max);
574 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 575 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
575} 576}
576 577
577/* 578/*
@@ -586,7 +587,7 @@ static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev,
586 /* 587 /*
587 * Update FCS error count from register. 588 * Update FCS error count from register.
588 */ 589 */
589 rt2x00pci_register_read(rt2x00dev, CNT0, &reg); 590 rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
590 qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); 591 qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR);
591 592
592 /* 593 /*
@@ -641,16 +642,16 @@ static void rt2400pci_start_queue(struct data_queue *queue)
641 642
642 switch (queue->qid) { 643 switch (queue->qid) {
643 case QID_RX: 644 case QID_RX:
644 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 645 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
645 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0); 646 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
646 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 647 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
647 break; 648 break;
648 case QID_BEACON: 649 case QID_BEACON:
649 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 650 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
650 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 651 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
651 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 652 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
652 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 653 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
653 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 654 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
654 break; 655 break;
655 default: 656 default:
656 break; 657 break;
@@ -664,19 +665,19 @@ static void rt2400pci_kick_queue(struct data_queue *queue)
664 665
665 switch (queue->qid) { 666 switch (queue->qid) {
666 case QID_AC_VO: 667 case QID_AC_VO:
667 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 668 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
668 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1); 669 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
669 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 670 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
670 break; 671 break;
671 case QID_AC_VI: 672 case QID_AC_VI:
672 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 673 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
673 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1); 674 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
674 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 675 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
675 break; 676 break;
676 case QID_ATIM: 677 case QID_ATIM:
677 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 678 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
678 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1); 679 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
679 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 680 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
680 break; 681 break;
681 default: 682 default:
682 break; 683 break;
@@ -692,21 +693,21 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
692 case QID_AC_VO: 693 case QID_AC_VO:
693 case QID_AC_VI: 694 case QID_AC_VI:
694 case QID_ATIM: 695 case QID_ATIM:
695 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 696 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
696 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1); 697 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
697 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 698 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
698 break; 699 break;
699 case QID_RX: 700 case QID_RX:
700 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 701 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
701 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1); 702 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
702 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 703 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
703 break; 704 break;
704 case QID_BEACON: 705 case QID_BEACON:
705 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 706 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
706 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); 707 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
707 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 708 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
708 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 709 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
709 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 710 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
710 711
711 /* 712 /*
712 * Wait for possibly running tbtt tasklets. 713 * Wait for possibly running tbtt tasklets.
@@ -723,7 +724,7 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
723 */ 724 */
724static bool rt2400pci_get_entry_state(struct queue_entry *entry) 725static bool rt2400pci_get_entry_state(struct queue_entry *entry)
725{ 726{
726 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 727 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
727 u32 word; 728 u32 word;
728 729
729 if (entry->queue->qid == QID_RX) { 730 if (entry->queue->qid == QID_RX) {
@@ -740,7 +741,7 @@ static bool rt2400pci_get_entry_state(struct queue_entry *entry)
740 741
741static void rt2400pci_clear_entry(struct queue_entry *entry) 742static void rt2400pci_clear_entry(struct queue_entry *entry)
742{ 743{
743 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 744 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
744 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 745 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
745 u32 word; 746 u32 word;
746 747
@@ -766,53 +767,53 @@ static void rt2400pci_clear_entry(struct queue_entry *entry)
766 767
767static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) 768static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
768{ 769{
769 struct queue_entry_priv_pci *entry_priv; 770 struct queue_entry_priv_mmio *entry_priv;
770 u32 reg; 771 u32 reg;
771 772
772 /* 773 /*
773 * Initialize registers. 774 * Initialize registers.
774 */ 775 */
775 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 776 rt2x00mmio_register_read(rt2x00dev, TXCSR2, &reg);
776 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 777 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
777 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 778 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
778 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); 779 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
779 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 780 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
780 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 781 rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg);
781 782
782 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; 783 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
783 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 784 rt2x00mmio_register_read(rt2x00dev, TXCSR3, &reg);
784 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 785 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
785 entry_priv->desc_dma); 786 entry_priv->desc_dma);
786 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 787 rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg);
787 788
788 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; 789 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
789 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 790 rt2x00mmio_register_read(rt2x00dev, TXCSR5, &reg);
790 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 791 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
791 entry_priv->desc_dma); 792 entry_priv->desc_dma);
792 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 793 rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg);
793 794
794 entry_priv = rt2x00dev->atim->entries[0].priv_data; 795 entry_priv = rt2x00dev->atim->entries[0].priv_data;
795 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 796 rt2x00mmio_register_read(rt2x00dev, TXCSR4, &reg);
796 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 797 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
797 entry_priv->desc_dma); 798 entry_priv->desc_dma);
798 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 799 rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg);
799 800
800 entry_priv = rt2x00dev->bcn->entries[0].priv_data; 801 entry_priv = rt2x00dev->bcn->entries[0].priv_data;
801 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 802 rt2x00mmio_register_read(rt2x00dev, TXCSR6, &reg);
802 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 803 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
803 entry_priv->desc_dma); 804 entry_priv->desc_dma);
804 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 805 rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg);
805 806
806 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 807 rt2x00mmio_register_read(rt2x00dev, RXCSR1, &reg);
807 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); 808 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
808 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 809 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
809 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 810 rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg);
810 811
811 entry_priv = rt2x00dev->rx->entries[0].priv_data; 812 entry_priv = rt2x00dev->rx->entries[0].priv_data;
812 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 813 rt2x00mmio_register_read(rt2x00dev, RXCSR2, &reg);
813 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, 814 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
814 entry_priv->desc_dma); 815 entry_priv->desc_dma);
815 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 816 rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg);
816 817
817 return 0; 818 return 0;
818} 819}
@@ -821,23 +822,23 @@ static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev)
821{ 822{
822 u32 reg; 823 u32 reg;
823 824
824 rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); 825 rt2x00mmio_register_write(rt2x00dev, PSCSR0, 0x00020002);
825 rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); 826 rt2x00mmio_register_write(rt2x00dev, PSCSR1, 0x00000002);
826 rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00023f20); 827 rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00023f20);
827 rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); 828 rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002);
828 829
829 rt2x00pci_register_read(rt2x00dev, TIMECSR, &reg); 830 rt2x00mmio_register_read(rt2x00dev, TIMECSR, &reg);
830 rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33); 831 rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
831 rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63); 832 rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
832 rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0); 833 rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
833 rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); 834 rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg);
834 835
835 rt2x00pci_register_read(rt2x00dev, CSR9, &reg); 836 rt2x00mmio_register_read(rt2x00dev, CSR9, &reg);
836 rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT, 837 rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
837 (rt2x00dev->rx->data_size / 128)); 838 (rt2x00dev->rx->data_size / 128));
838 rt2x00pci_register_write(rt2x00dev, CSR9, reg); 839 rt2x00mmio_register_write(rt2x00dev, CSR9, reg);
839 840
840 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 841 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
841 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); 842 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
842 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); 843 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
843 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 844 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
@@ -846,63 +847,63 @@ static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev)
846 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 847 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
847 rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); 848 rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
848 rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); 849 rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
849 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 850 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
850 851
851 rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000); 852 rt2x00mmio_register_write(rt2x00dev, CNT3, 0x3f080000);
852 853
853 rt2x00pci_register_read(rt2x00dev, ARCSR0, &reg); 854 rt2x00mmio_register_read(rt2x00dev, ARCSR0, &reg);
854 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA0, 133); 855 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA0, 133);
855 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID0, 134); 856 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID0, 134);
856 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA1, 136); 857 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA1, 136);
857 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID1, 135); 858 rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID1, 135);
858 rt2x00pci_register_write(rt2x00dev, ARCSR0, reg); 859 rt2x00mmio_register_write(rt2x00dev, ARCSR0, reg);
859 860
860 rt2x00pci_register_read(rt2x00dev, RXCSR3, &reg); 861 rt2x00mmio_register_read(rt2x00dev, RXCSR3, &reg);
861 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 3); /* Tx power.*/ 862 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 3); /* Tx power.*/
862 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1); 863 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
863 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 32); /* Signal */ 864 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 32); /* Signal */
864 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1); 865 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1);
865 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 36); /* Rssi */ 866 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 36); /* Rssi */
866 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1); 867 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1);
867 rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); 868 rt2x00mmio_register_write(rt2x00dev, RXCSR3, reg);
868 869
869 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); 870 rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100);
870 871
871 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 872 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
872 return -EBUSY; 873 return -EBUSY;
873 874
874 rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00217223); 875 rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00217223);
875 rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); 876 rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518);
876 877
877 rt2x00pci_register_read(rt2x00dev, MACCSR2, &reg); 878 rt2x00mmio_register_read(rt2x00dev, MACCSR2, &reg);
878 rt2x00_set_field32(&reg, MACCSR2_DELAY, 64); 879 rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
879 rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); 880 rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg);
880 881
881 rt2x00pci_register_read(rt2x00dev, RALINKCSR, &reg); 882 rt2x00mmio_register_read(rt2x00dev, RALINKCSR, &reg);
882 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17); 883 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
883 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 154); 884 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 154);
884 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0); 885 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
885 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 154); 886 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 154);
886 rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); 887 rt2x00mmio_register_write(rt2x00dev, RALINKCSR, reg);
887 888
888 rt2x00pci_register_read(rt2x00dev, CSR1, &reg); 889 rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
889 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1); 890 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
890 rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0); 891 rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0);
891 rt2x00_set_field32(&reg, CSR1_HOST_READY, 0); 892 rt2x00_set_field32(&reg, CSR1_HOST_READY, 0);
892 rt2x00pci_register_write(rt2x00dev, CSR1, reg); 893 rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
893 894
894 rt2x00pci_register_read(rt2x00dev, CSR1, &reg); 895 rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
895 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0); 896 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0);
896 rt2x00_set_field32(&reg, CSR1_HOST_READY, 1); 897 rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
897 rt2x00pci_register_write(rt2x00dev, CSR1, reg); 898 rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
898 899
899 /* 900 /*
900 * We must clear the FCS and FIFO error count. 901 * We must clear the FCS and FIFO error count.
901 * These registers are cleared on read, 902 * These registers are cleared on read,
902 * so we may pass a useless variable to store the value. 903 * so we may pass a useless variable to store the value.
903 */ 904 */
904 rt2x00pci_register_read(rt2x00dev, CNT0, &reg); 905 rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
905 rt2x00pci_register_read(rt2x00dev, CNT4, &reg); 906 rt2x00mmio_register_read(rt2x00dev, CNT4, &reg);
906 907
907 return 0; 908 return 0;
908} 909}
@@ -919,7 +920,7 @@ static int rt2400pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
919 udelay(REGISTER_BUSY_DELAY); 920 udelay(REGISTER_BUSY_DELAY);
920 } 921 }
921 922
922 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 923 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
923 return -EACCES; 924 return -EACCES;
924} 925}
925 926
@@ -976,8 +977,8 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
976 * should clear the register to assure a clean state. 977 * should clear the register to assure a clean state.
977 */ 978 */
978 if (state == STATE_RADIO_IRQ_ON) { 979 if (state == STATE_RADIO_IRQ_ON) {
979 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 980 rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
980 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 981 rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
981 } 982 }
982 983
983 /* 984 /*
@@ -986,13 +987,13 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
986 */ 987 */
987 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 988 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
988 989
989 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 990 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
990 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 991 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
991 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 992 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
992 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask); 993 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask);
993 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 994 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
994 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 995 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
995 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 996 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
996 997
997 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 998 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
998 999
@@ -1025,7 +1026,7 @@ static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1025 /* 1026 /*
1026 * Disable power 1027 * Disable power
1027 */ 1028 */
1028 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); 1029 rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0);
1029} 1030}
1030 1031
1031static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 1032static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1039,12 +1040,12 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
1039 1040
1040 put_to_sleep = (state != STATE_AWAKE); 1041 put_to_sleep = (state != STATE_AWAKE);
1041 1042
1042 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 1043 rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg);
1043 rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1); 1044 rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1);
1044 rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state); 1045 rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state);
1045 rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state); 1046 rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state);
1046 rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); 1047 rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep);
1047 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1048 rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg);
1048 1049
1049 /* 1050 /*
1050 * Device is not guaranteed to be in the requested state yet. 1051 * Device is not guaranteed to be in the requested state yet.
@@ -1052,12 +1053,12 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
1052 * device has entered the correct state. 1053 * device has entered the correct state.
1053 */ 1054 */
1054 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1055 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1055 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 1056 rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg2);
1056 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 1057 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
1057 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 1058 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
1058 if (bbp_state == state && rf_state == state) 1059 if (bbp_state == state && rf_state == state)
1059 return 0; 1060 return 0;
1060 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1061 rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg);
1061 msleep(10); 1062 msleep(10);
1062 } 1063 }
1063 1064
@@ -1092,8 +1093,8 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1092 } 1093 }
1093 1094
1094 if (unlikely(retval)) 1095 if (unlikely(retval))
1095 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 1096 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
1096 state, retval); 1097 state, retval);
1097 1098
1098 return retval; 1099 return retval;
1099} 1100}
@@ -1105,7 +1106,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
1105 struct txentry_desc *txdesc) 1106 struct txentry_desc *txdesc)
1106{ 1107{
1107 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1108 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1108 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1109 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1109 __le32 *txd = entry_priv->desc; 1110 __le32 *txd = entry_priv->desc;
1110 u32 word; 1111 u32 word;
1111 1112
@@ -1182,12 +1183,12 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1182 * Disable beaconing while we are reloading the beacon data, 1183 * Disable beaconing while we are reloading the beacon data,
1183 * otherwise we might be sending out invalid data. 1184 * otherwise we might be sending out invalid data.
1184 */ 1185 */
1185 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1186 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
1186 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1187 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1187 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1188 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
1188 1189
1189 if (rt2x00queue_map_txskb(entry)) { 1190 if (rt2x00queue_map_txskb(entry)) {
1190 ERROR(rt2x00dev, "Fail to map beacon, aborting\n"); 1191 rt2x00_err(rt2x00dev, "Fail to map beacon, aborting\n");
1191 goto out; 1192 goto out;
1192 } 1193 }
1193 /* 1194 /*
@@ -1208,7 +1209,7 @@ out:
1208 * Enable beaconing again. 1209 * Enable beaconing again.
1209 */ 1210 */
1210 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1211 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1211 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1212 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
1212} 1213}
1213 1214
1214/* 1215/*
@@ -1218,7 +1219,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1218 struct rxdone_entry_desc *rxdesc) 1219 struct rxdone_entry_desc *rxdesc)
1219{ 1220{
1220 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1221 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1221 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1222 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1222 u32 word0; 1223 u32 word0;
1223 u32 word2; 1224 u32 word2;
1224 u32 word3; 1225 u32 word3;
@@ -1276,7 +1277,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1276 const enum data_queue_qid queue_idx) 1277 const enum data_queue_qid queue_idx)
1277{ 1278{
1278 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 1279 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
1279 struct queue_entry_priv_pci *entry_priv; 1280 struct queue_entry_priv_mmio *entry_priv;
1280 struct queue_entry *entry; 1281 struct queue_entry *entry;
1281 struct txdone_entry_desc txdesc; 1282 struct txdone_entry_desc txdesc;
1282 u32 word; 1283 u32 word;
@@ -1322,9 +1323,9 @@ static inline void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1322 */ 1323 */
1323 spin_lock_irq(&rt2x00dev->irqmask_lock); 1324 spin_lock_irq(&rt2x00dev->irqmask_lock);
1324 1325
1325 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1326 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1326 rt2x00_set_field32(&reg, irq_field, 0); 1327 rt2x00_set_field32(&reg, irq_field, 0);
1327 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1328 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1328 1329
1329 spin_unlock_irq(&rt2x00dev->irqmask_lock); 1330 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1330} 1331}
@@ -1347,11 +1348,11 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
1347 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) { 1348 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
1348 spin_lock_irq(&rt2x00dev->irqmask_lock); 1349 spin_lock_irq(&rt2x00dev->irqmask_lock);
1349 1350
1350 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1351 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1351 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); 1352 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1352 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0); 1353 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1353 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); 1354 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1354 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1355 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1355 1356
1356 spin_unlock_irq(&rt2x00dev->irqmask_lock); 1357 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1357 } 1358 }
@@ -1368,7 +1369,7 @@ static void rt2400pci_tbtt_tasklet(unsigned long data)
1368static void rt2400pci_rxdone_tasklet(unsigned long data) 1369static void rt2400pci_rxdone_tasklet(unsigned long data)
1369{ 1370{
1370 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1371 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1371 if (rt2x00pci_rxdone(rt2x00dev)) 1372 if (rt2x00mmio_rxdone(rt2x00dev))
1372 tasklet_schedule(&rt2x00dev->rxdone_tasklet); 1373 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1373 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1374 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1374 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); 1375 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
@@ -1383,8 +1384,8 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1383 * Get the interrupt sources & saved to local variable. 1384 * Get the interrupt sources & saved to local variable.
1384 * Write register value back to clear pending interrupts. 1385 * Write register value back to clear pending interrupts.
1385 */ 1386 */
1386 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 1387 rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
1387 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 1388 rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
1388 1389
1389 if (!reg) 1390 if (!reg)
1390 return IRQ_NONE; 1391 return IRQ_NONE;
@@ -1421,9 +1422,9 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1421 */ 1422 */
1422 spin_lock(&rt2x00dev->irqmask_lock); 1423 spin_lock(&rt2x00dev->irqmask_lock);
1423 1424
1424 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1425 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1425 reg |= mask; 1426 reg |= mask;
1426 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1427 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1427 1428
1428 spin_unlock(&rt2x00dev->irqmask_lock); 1429 spin_unlock(&rt2x00dev->irqmask_lock);
1429 1430
@@ -1442,7 +1443,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1442 u16 word; 1443 u16 word;
1443 u8 *mac; 1444 u8 *mac;
1444 1445
1445 rt2x00pci_register_read(rt2x00dev, CSR21, &reg); 1446 rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
1446 1447
1447 eeprom.data = rt2x00dev; 1448 eeprom.data = rt2x00dev;
1448 eeprom.register_read = rt2400pci_eepromregister_read; 1449 eeprom.register_read = rt2400pci_eepromregister_read;
@@ -1463,12 +1464,12 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1463 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1464 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1464 if (!is_valid_ether_addr(mac)) { 1465 if (!is_valid_ether_addr(mac)) {
1465 eth_random_addr(mac); 1466 eth_random_addr(mac);
1466 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1467 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
1467 } 1468 }
1468 1469
1469 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1470 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
1470 if (word == 0xffff) { 1471 if (word == 0xffff) {
1471 ERROR(rt2x00dev, "Invalid EEPROM data detected.\n"); 1472 rt2x00_err(rt2x00dev, "Invalid EEPROM data detected\n");
1472 return -EINVAL; 1473 return -EINVAL;
1473 } 1474 }
1474 1475
@@ -1490,12 +1491,12 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1490 * Identify RF chipset. 1491 * Identify RF chipset.
1491 */ 1492 */
1492 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1493 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1493 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1494 rt2x00mmio_register_read(rt2x00dev, CSR0, &reg);
1494 rt2x00_set_chip(rt2x00dev, RT2460, value, 1495 rt2x00_set_chip(rt2x00dev, RT2460, value,
1495 rt2x00_get_field32(reg, CSR0_REVISION)); 1496 rt2x00_get_field32(reg, CSR0_REVISION));
1496 1497
1497 if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) { 1498 if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
1498 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1499 rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
1499 return -ENODEV; 1500 return -ENODEV;
1500 } 1501 }
1501 1502
@@ -1635,9 +1636,9 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1635 * Enable rfkill polling by setting GPIO direction of the 1636 * Enable rfkill polling by setting GPIO direction of the
1636 * rfkill switch GPIO pin correctly. 1637 * rfkill switch GPIO pin correctly.
1637 */ 1638 */
1638 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 1639 rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
1639 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1); 1640 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
1640 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); 1641 rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg);
1641 1642
1642 /* 1643 /*
1643 * Initialize hw specifications. 1644 * Initialize hw specifications.
@@ -1697,9 +1698,9 @@ static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw,
1697 u64 tsf; 1698 u64 tsf;
1698 u32 reg; 1699 u32 reg;
1699 1700
1700 rt2x00pci_register_read(rt2x00dev, CSR17, &reg); 1701 rt2x00mmio_register_read(rt2x00dev, CSR17, &reg);
1701 tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; 1702 tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32;
1702 rt2x00pci_register_read(rt2x00dev, CSR16, &reg); 1703 rt2x00mmio_register_read(rt2x00dev, CSR16, &reg);
1703 tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); 1704 tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER);
1704 1705
1705 return tsf; 1706 return tsf;
@@ -1710,7 +1711,7 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
1710 struct rt2x00_dev *rt2x00dev = hw->priv; 1711 struct rt2x00_dev *rt2x00dev = hw->priv;
1711 u32 reg; 1712 u32 reg;
1712 1713
1713 rt2x00pci_register_read(rt2x00dev, CSR15, &reg); 1714 rt2x00mmio_register_read(rt2x00dev, CSR15, &reg);
1714 return rt2x00_get_field32(reg, CSR15_BEACON_SENT); 1715 return rt2x00_get_field32(reg, CSR15_BEACON_SENT);
1715} 1716}
1716 1717
@@ -1743,8 +1744,8 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1743 .tbtt_tasklet = rt2400pci_tbtt_tasklet, 1744 .tbtt_tasklet = rt2400pci_tbtt_tasklet,
1744 .rxdone_tasklet = rt2400pci_rxdone_tasklet, 1745 .rxdone_tasklet = rt2400pci_rxdone_tasklet,
1745 .probe_hw = rt2400pci_probe_hw, 1746 .probe_hw = rt2400pci_probe_hw,
1746 .initialize = rt2x00pci_initialize, 1747 .initialize = rt2x00mmio_initialize,
1747 .uninitialize = rt2x00pci_uninitialize, 1748 .uninitialize = rt2x00mmio_uninitialize,
1748 .get_entry_state = rt2400pci_get_entry_state, 1749 .get_entry_state = rt2400pci_get_entry_state,
1749 .clear_entry = rt2400pci_clear_entry, 1750 .clear_entry = rt2400pci_clear_entry,
1750 .set_device_state = rt2400pci_set_device_state, 1751 .set_device_state = rt2400pci_set_device_state,
@@ -1755,7 +1756,7 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1755 .start_queue = rt2400pci_start_queue, 1756 .start_queue = rt2400pci_start_queue,
1756 .kick_queue = rt2400pci_kick_queue, 1757 .kick_queue = rt2400pci_kick_queue,
1757 .stop_queue = rt2400pci_stop_queue, 1758 .stop_queue = rt2400pci_stop_queue,
1758 .flush_queue = rt2x00pci_flush_queue, 1759 .flush_queue = rt2x00mmio_flush_queue,
1759 .write_tx_desc = rt2400pci_write_tx_desc, 1760 .write_tx_desc = rt2400pci_write_tx_desc,
1760 .write_beacon = rt2400pci_write_beacon, 1761 .write_beacon = rt2400pci_write_beacon,
1761 .fill_rxdone = rt2400pci_fill_rxdone, 1762 .fill_rxdone = rt2400pci_fill_rxdone,
@@ -1770,28 +1771,28 @@ static const struct data_queue_desc rt2400pci_queue_rx = {
1770 .entry_num = 24, 1771 .entry_num = 24,
1771 .data_size = DATA_FRAME_SIZE, 1772 .data_size = DATA_FRAME_SIZE,
1772 .desc_size = RXD_DESC_SIZE, 1773 .desc_size = RXD_DESC_SIZE,
1773 .priv_size = sizeof(struct queue_entry_priv_pci), 1774 .priv_size = sizeof(struct queue_entry_priv_mmio),
1774}; 1775};
1775 1776
1776static const struct data_queue_desc rt2400pci_queue_tx = { 1777static const struct data_queue_desc rt2400pci_queue_tx = {
1777 .entry_num = 24, 1778 .entry_num = 24,
1778 .data_size = DATA_FRAME_SIZE, 1779 .data_size = DATA_FRAME_SIZE,
1779 .desc_size = TXD_DESC_SIZE, 1780 .desc_size = TXD_DESC_SIZE,
1780 .priv_size = sizeof(struct queue_entry_priv_pci), 1781 .priv_size = sizeof(struct queue_entry_priv_mmio),
1781}; 1782};
1782 1783
1783static const struct data_queue_desc rt2400pci_queue_bcn = { 1784static const struct data_queue_desc rt2400pci_queue_bcn = {
1784 .entry_num = 1, 1785 .entry_num = 1,
1785 .data_size = MGMT_FRAME_SIZE, 1786 .data_size = MGMT_FRAME_SIZE,
1786 .desc_size = TXD_DESC_SIZE, 1787 .desc_size = TXD_DESC_SIZE,
1787 .priv_size = sizeof(struct queue_entry_priv_pci), 1788 .priv_size = sizeof(struct queue_entry_priv_mmio),
1788}; 1789};
1789 1790
1790static const struct data_queue_desc rt2400pci_queue_atim = { 1791static const struct data_queue_desc rt2400pci_queue_atim = {
1791 .entry_num = 8, 1792 .entry_num = 8,
1792 .data_size = DATA_FRAME_SIZE, 1793 .data_size = DATA_FRAME_SIZE,
1793 .desc_size = TXD_DESC_SIZE, 1794 .desc_size = TXD_DESC_SIZE,
1794 .priv_size = sizeof(struct queue_entry_priv_pci), 1795 .priv_size = sizeof(struct queue_entry_priv_mmio),
1795}; 1796};
1796 1797
1797static const struct rt2x00_ops rt2400pci_ops = { 1798static const struct rt2x00_ops rt2400pci_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index e1d2dc9ed28a..77e45b223d15 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -41,7 +41,7 @@
41/* 41/*
42 * Register access. 42 * Register access.
43 * All access to the CSR registers will go through the methods 43 * All access to the CSR registers will go through the methods
44 * rt2x00pci_register_read and rt2x00pci_register_write. 44 * rt2x00mmio_register_read and rt2x00mmio_register_write.
45 * BBP and RF register require indirect register access, 45 * BBP and RF register require indirect register access,
46 * and use the CSR registers BBPCSR and RFCSR to achieve this. 46 * and use the CSR registers BBPCSR and RFCSR to achieve this.
47 * These indirect registers work with busy bits, 47 * These indirect registers work with busy bits,
@@ -52,9 +52,9 @@
52 * and we will print an error. 52 * and we will print an error.
53 */ 53 */
54#define WAIT_FOR_BBP(__dev, __reg) \ 54#define WAIT_FOR_BBP(__dev, __reg) \
55 rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) 55 rt2x00mmio_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg))
56#define WAIT_FOR_RF(__dev, __reg) \ 56#define WAIT_FOR_RF(__dev, __reg) \
57 rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) 57 rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
58 58
59static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev, 59static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev,
60 const unsigned int word, const u8 value) 60 const unsigned int word, const u8 value)
@@ -74,7 +74,7 @@ static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev,
74 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 74 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
75 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); 75 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
76 76
77 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 77 rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg);
78 } 78 }
79 79
80 mutex_unlock(&rt2x00dev->csr_mutex); 80 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -101,7 +101,7 @@ static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
101 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 101 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
102 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); 102 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
103 103
104 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 104 rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg);
105 105
106 WAIT_FOR_BBP(rt2x00dev, &reg); 106 WAIT_FOR_BBP(rt2x00dev, &reg);
107 } 107 }
@@ -129,7 +129,7 @@ static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev,
129 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); 129 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
130 rt2x00_set_field32(&reg, RFCSR_BUSY, 1); 130 rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
131 131
132 rt2x00pci_register_write(rt2x00dev, RFCSR, reg); 132 rt2x00mmio_register_write(rt2x00dev, RFCSR, reg);
133 rt2x00_rf_write(rt2x00dev, word, value); 133 rt2x00_rf_write(rt2x00dev, word, value);
134 } 134 }
135 135
@@ -141,7 +141,7 @@ static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
141 struct rt2x00_dev *rt2x00dev = eeprom->data; 141 struct rt2x00_dev *rt2x00dev = eeprom->data;
142 u32 reg; 142 u32 reg;
143 143
144 rt2x00pci_register_read(rt2x00dev, CSR21, &reg); 144 rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
145 145
146 eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); 146 eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN);
147 eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); 147 eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT);
@@ -163,15 +163,15 @@ static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
163 rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, 163 rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT,
164 !!eeprom->reg_chip_select); 164 !!eeprom->reg_chip_select);
165 165
166 rt2x00pci_register_write(rt2x00dev, CSR21, reg); 166 rt2x00mmio_register_write(rt2x00dev, CSR21, reg);
167} 167}
168 168
169#ifdef CONFIG_RT2X00_LIB_DEBUGFS 169#ifdef CONFIG_RT2X00_LIB_DEBUGFS
170static const struct rt2x00debug rt2500pci_rt2x00debug = { 170static const struct rt2x00debug rt2500pci_rt2x00debug = {
171 .owner = THIS_MODULE, 171 .owner = THIS_MODULE,
172 .csr = { 172 .csr = {
173 .read = rt2x00pci_register_read, 173 .read = rt2x00mmio_register_read,
174 .write = rt2x00pci_register_write, 174 .write = rt2x00mmio_register_write,
175 .flags = RT2X00DEBUGFS_OFFSET, 175 .flags = RT2X00DEBUGFS_OFFSET,
176 .word_base = CSR_REG_BASE, 176 .word_base = CSR_REG_BASE,
177 .word_size = sizeof(u32), 177 .word_size = sizeof(u32),
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
205{ 205{
206 u32 reg; 206 u32 reg;
207 207
208 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 208 rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
209 return rt2x00_get_field32(reg, GPIOCSR_VAL0); 209 return rt2x00_get_field32(reg, GPIOCSR_VAL0);
210} 210}
211 211
@@ -218,14 +218,14 @@ static void rt2500pci_brightness_set(struct led_classdev *led_cdev,
218 unsigned int enabled = brightness != LED_OFF; 218 unsigned int enabled = brightness != LED_OFF;
219 u32 reg; 219 u32 reg;
220 220
221 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); 221 rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
222 222
223 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) 223 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
224 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled); 224 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
225 else if (led->type == LED_TYPE_ACTIVITY) 225 else if (led->type == LED_TYPE_ACTIVITY)
226 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled); 226 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled);
227 227
228 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); 228 rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
229} 229}
230 230
231static int rt2500pci_blink_set(struct led_classdev *led_cdev, 231static int rt2500pci_blink_set(struct led_classdev *led_cdev,
@@ -236,10 +236,10 @@ static int rt2500pci_blink_set(struct led_classdev *led_cdev,
236 container_of(led_cdev, struct rt2x00_led, led_dev); 236 container_of(led_cdev, struct rt2x00_led, led_dev);
237 u32 reg; 237 u32 reg;
238 238
239 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); 239 rt2x00mmio_register_read(led->rt2x00dev, LEDCSR, &reg);
240 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on); 240 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
241 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off); 241 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
242 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); 242 rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg);
243 243
244 return 0; 244 return 0;
245} 245}
@@ -270,7 +270,7 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
270 * and broadcast frames will always be accepted since 270 * and broadcast frames will always be accepted since
271 * there is no filter for it at this time. 271 * there is no filter for it at this time.
272 */ 272 */
273 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 273 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
274 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, 274 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
275 !(filter_flags & FIF_FCSFAIL)); 275 !(filter_flags & FIF_FCSFAIL));
276 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, 276 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
@@ -286,7 +286,7 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
286 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 286 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
287 !(filter_flags & FIF_ALLMULTI)); 287 !(filter_flags & FIF_ALLMULTI));
288 rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0); 288 rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0);
289 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 289 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
290} 290}
291 291
292static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev, 292static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
@@ -303,25 +303,25 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
303 * Enable beacon config 303 * Enable beacon config
304 */ 304 */
305 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); 305 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
306 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 306 rt2x00mmio_register_read(rt2x00dev, BCNCSR1, &reg);
307 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); 307 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
308 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min); 308 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
309 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); 309 rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg);
310 310
311 /* 311 /*
312 * Enable synchronisation. 312 * Enable synchronisation.
313 */ 313 */
314 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 314 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
315 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 315 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
316 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 316 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
317 } 317 }
318 318
319 if (flags & CONFIG_UPDATE_MAC) 319 if (flags & CONFIG_UPDATE_MAC)
320 rt2x00pci_register_multiwrite(rt2x00dev, CSR3, 320 rt2x00mmio_register_multiwrite(rt2x00dev, CSR3,
321 conf->mac, sizeof(conf->mac)); 321 conf->mac, sizeof(conf->mac));
322 322
323 if (flags & CONFIG_UPDATE_BSSID) 323 if (flags & CONFIG_UPDATE_BSSID)
324 rt2x00pci_register_multiwrite(rt2x00dev, CSR5, 324 rt2x00mmio_register_multiwrite(rt2x00dev, CSR5,
325 conf->bssid, sizeof(conf->bssid)); 325 conf->bssid, sizeof(conf->bssid));
326} 326}
327 327
@@ -338,68 +338,68 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
338 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 338 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
339 preamble_mask = erp->short_preamble << 3; 339 preamble_mask = erp->short_preamble << 3;
340 340
341 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 341 rt2x00mmio_register_read(rt2x00dev, TXCSR1, &reg);
342 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162); 342 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162);
343 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2); 343 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2);
344 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 344 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
345 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 345 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
346 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 346 rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg);
347 347
348 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 348 rt2x00mmio_register_read(rt2x00dev, ARCSR2, &reg);
349 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 349 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
350 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 350 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
351 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 351 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
352 GET_DURATION(ACK_SIZE, 10)); 352 GET_DURATION(ACK_SIZE, 10));
353 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 353 rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg);
354 354
355 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 355 rt2x00mmio_register_read(rt2x00dev, ARCSR3, &reg);
356 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 356 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
357 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 357 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
358 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 358 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
359 GET_DURATION(ACK_SIZE, 20)); 359 GET_DURATION(ACK_SIZE, 20));
360 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 360 rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg);
361 361
362 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 362 rt2x00mmio_register_read(rt2x00dev, ARCSR4, &reg);
363 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 363 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
364 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 364 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
365 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 365 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
366 GET_DURATION(ACK_SIZE, 55)); 366 GET_DURATION(ACK_SIZE, 55));
367 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 367 rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg);
368 368
369 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 369 rt2x00mmio_register_read(rt2x00dev, ARCSR5, &reg);
370 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 370 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
371 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 371 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
372 rt2x00_set_field32(&reg, ARCSR2_LENGTH, 372 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
373 GET_DURATION(ACK_SIZE, 110)); 373 GET_DURATION(ACK_SIZE, 110));
374 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 374 rt2x00mmio_register_write(rt2x00dev, ARCSR5, reg);
375 } 375 }
376 376
377 if (changed & BSS_CHANGED_BASIC_RATES) 377 if (changed & BSS_CHANGED_BASIC_RATES)
378 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 378 rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
379 379
380 if (changed & BSS_CHANGED_ERP_SLOT) { 380 if (changed & BSS_CHANGED_ERP_SLOT) {
381 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 381 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
382 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 382 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
383 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 383 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
384 384
385 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 385 rt2x00mmio_register_read(rt2x00dev, CSR18, &reg);
386 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 386 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
387 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 387 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
388 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 388 rt2x00mmio_register_write(rt2x00dev, CSR18, reg);
389 389
390 rt2x00pci_register_read(rt2x00dev, CSR19, &reg); 390 rt2x00mmio_register_read(rt2x00dev, CSR19, &reg);
391 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); 391 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
392 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); 392 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
393 rt2x00pci_register_write(rt2x00dev, CSR19, reg); 393 rt2x00mmio_register_write(rt2x00dev, CSR19, reg);
394 } 394 }
395 395
396 if (changed & BSS_CHANGED_BEACON_INT) { 396 if (changed & BSS_CHANGED_BEACON_INT) {
397 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 397 rt2x00mmio_register_read(rt2x00dev, CSR12, &reg);
398 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, 398 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
399 erp->beacon_int * 16); 399 erp->beacon_int * 16);
400 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, 400 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
401 erp->beacon_int * 16); 401 erp->beacon_int * 16);
402 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 402 rt2x00mmio_register_write(rt2x00dev, CSR12, reg);
403 } 403 }
404 404
405} 405}
@@ -418,7 +418,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
418 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || 418 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
419 ant->tx == ANTENNA_SW_DIVERSITY); 419 ant->tx == ANTENNA_SW_DIVERSITY);
420 420
421 rt2x00pci_register_read(rt2x00dev, BBPCSR1, &reg); 421 rt2x00mmio_register_read(rt2x00dev, BBPCSR1, &reg);
422 rt2500pci_bbp_read(rt2x00dev, 14, &r14); 422 rt2500pci_bbp_read(rt2x00dev, 14, &r14);
423 rt2500pci_bbp_read(rt2x00dev, 2, &r2); 423 rt2500pci_bbp_read(rt2x00dev, 2, &r2);
424 424
@@ -470,7 +470,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
470 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 0); 470 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 0);
471 } 471 }
472 472
473 rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg); 473 rt2x00mmio_register_write(rt2x00dev, BBPCSR1, reg);
474 rt2500pci_bbp_write(rt2x00dev, 14, r14); 474 rt2500pci_bbp_write(rt2x00dev, 14, r14);
475 rt2500pci_bbp_write(rt2x00dev, 2, r2); 475 rt2500pci_bbp_write(rt2x00dev, 2, r2);
476} 476}
@@ -541,7 +541,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
541 /* 541 /*
542 * Clear false CRC during channel switch. 542 * Clear false CRC during channel switch.
543 */ 543 */
544 rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); 544 rt2x00mmio_register_read(rt2x00dev, CNT0, &rf->rf1);
545} 545}
546 546
547static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev, 547static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev,
@@ -559,12 +559,12 @@ static void rt2500pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
559{ 559{
560 u32 reg; 560 u32 reg;
561 561
562 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 562 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
563 rt2x00_set_field32(&reg, CSR11_LONG_RETRY, 563 rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
564 libconf->conf->long_frame_max_tx_count); 564 libconf->conf->long_frame_max_tx_count);
565 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, 565 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
566 libconf->conf->short_frame_max_tx_count); 566 libconf->conf->short_frame_max_tx_count);
567 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 567 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
568} 568}
569 569
570static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev, 570static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
@@ -576,7 +576,7 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
576 u32 reg; 576 u32 reg;
577 577
578 if (state == STATE_SLEEP) { 578 if (state == STATE_SLEEP) {
579 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 579 rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
580 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, 580 rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN,
581 (rt2x00dev->beacon_int - 20) * 16); 581 (rt2x00dev->beacon_int - 20) * 16);
582 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, 582 rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP,
@@ -584,14 +584,14 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
584 584
585 /* We must first disable autowake before it can be enabled */ 585 /* We must first disable autowake before it can be enabled */
586 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); 586 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
587 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 587 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
588 588
589 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 589 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
590 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 590 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
591 } else { 591 } else {
592 rt2x00pci_register_read(rt2x00dev, CSR20, &reg); 592 rt2x00mmio_register_read(rt2x00dev, CSR20, &reg);
593 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); 593 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
594 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 594 rt2x00mmio_register_write(rt2x00dev, CSR20, reg);
595 } 595 }
596 596
597 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 597 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -625,13 +625,13 @@ static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev,
625 /* 625 /*
626 * Update FCS error count from register. 626 * Update FCS error count from register.
627 */ 627 */
628 rt2x00pci_register_read(rt2x00dev, CNT0, &reg); 628 rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
629 qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); 629 qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR);
630 630
631 /* 631 /*
632 * Update False CCA count from register. 632 * Update False CCA count from register.
633 */ 633 */
634 rt2x00pci_register_read(rt2x00dev, CNT3, &reg); 634 rt2x00mmio_register_read(rt2x00dev, CNT3, &reg);
635 qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA); 635 qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA);
636} 636}
637 637
@@ -731,16 +731,16 @@ static void rt2500pci_start_queue(struct data_queue *queue)
731 731
732 switch (queue->qid) { 732 switch (queue->qid) {
733 case QID_RX: 733 case QID_RX:
734 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 734 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
735 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0); 735 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
736 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 736 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
737 break; 737 break;
738 case QID_BEACON: 738 case QID_BEACON:
739 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 739 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
740 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 740 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
741 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 741 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
742 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 742 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
743 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 743 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
744 break; 744 break;
745 default: 745 default:
746 break; 746 break;
@@ -754,19 +754,19 @@ static void rt2500pci_kick_queue(struct data_queue *queue)
754 754
755 switch (queue->qid) { 755 switch (queue->qid) {
756 case QID_AC_VO: 756 case QID_AC_VO:
757 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 757 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
758 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1); 758 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
759 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 759 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
760 break; 760 break;
761 case QID_AC_VI: 761 case QID_AC_VI:
762 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 762 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
763 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1); 763 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
764 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 764 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
765 break; 765 break;
766 case QID_ATIM: 766 case QID_ATIM:
767 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 767 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
768 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1); 768 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
769 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 769 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
770 break; 770 break;
771 default: 771 default:
772 break; 772 break;
@@ -782,21 +782,21 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
782 case QID_AC_VO: 782 case QID_AC_VO:
783 case QID_AC_VI: 783 case QID_AC_VI:
784 case QID_ATIM: 784 case QID_ATIM:
785 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 785 rt2x00mmio_register_read(rt2x00dev, TXCSR0, &reg);
786 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1); 786 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
787 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 787 rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg);
788 break; 788 break;
789 case QID_RX: 789 case QID_RX:
790 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); 790 rt2x00mmio_register_read(rt2x00dev, RXCSR0, &reg);
791 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1); 791 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
792 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 792 rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
793 break; 793 break;
794 case QID_BEACON: 794 case QID_BEACON:
795 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 795 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
796 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); 796 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
797 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 797 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
798 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 798 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
799 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 799 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
800 800
801 /* 801 /*
802 * Wait for possibly running tbtt tasklets. 802 * Wait for possibly running tbtt tasklets.
@@ -813,7 +813,7 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
813 */ 813 */
814static bool rt2500pci_get_entry_state(struct queue_entry *entry) 814static bool rt2500pci_get_entry_state(struct queue_entry *entry)
815{ 815{
816 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 816 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
817 u32 word; 817 u32 word;
818 818
819 if (entry->queue->qid == QID_RX) { 819 if (entry->queue->qid == QID_RX) {
@@ -830,7 +830,7 @@ static bool rt2500pci_get_entry_state(struct queue_entry *entry)
830 830
831static void rt2500pci_clear_entry(struct queue_entry *entry) 831static void rt2500pci_clear_entry(struct queue_entry *entry)
832{ 832{
833 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 833 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
834 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 834 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
835 u32 word; 835 u32 word;
836 836
@@ -852,53 +852,53 @@ static void rt2500pci_clear_entry(struct queue_entry *entry)
852 852
853static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) 853static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
854{ 854{
855 struct queue_entry_priv_pci *entry_priv; 855 struct queue_entry_priv_mmio *entry_priv;
856 u32 reg; 856 u32 reg;
857 857
858 /* 858 /*
859 * Initialize registers. 859 * Initialize registers.
860 */ 860 */
861 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 861 rt2x00mmio_register_read(rt2x00dev, TXCSR2, &reg);
862 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 862 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
863 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 863 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
864 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); 864 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
865 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 865 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
866 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 866 rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg);
867 867
868 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; 868 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
869 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 869 rt2x00mmio_register_read(rt2x00dev, TXCSR3, &reg);
870 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 870 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
871 entry_priv->desc_dma); 871 entry_priv->desc_dma);
872 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 872 rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg);
873 873
874 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; 874 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
875 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 875 rt2x00mmio_register_read(rt2x00dev, TXCSR5, &reg);
876 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 876 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
877 entry_priv->desc_dma); 877 entry_priv->desc_dma);
878 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 878 rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg);
879 879
880 entry_priv = rt2x00dev->atim->entries[0].priv_data; 880 entry_priv = rt2x00dev->atim->entries[0].priv_data;
881 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 881 rt2x00mmio_register_read(rt2x00dev, TXCSR4, &reg);
882 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 882 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
883 entry_priv->desc_dma); 883 entry_priv->desc_dma);
884 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 884 rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg);
885 885
886 entry_priv = rt2x00dev->bcn->entries[0].priv_data; 886 entry_priv = rt2x00dev->bcn->entries[0].priv_data;
887 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 887 rt2x00mmio_register_read(rt2x00dev, TXCSR6, &reg);
888 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 888 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
889 entry_priv->desc_dma); 889 entry_priv->desc_dma);
890 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 890 rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg);
891 891
892 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 892 rt2x00mmio_register_read(rt2x00dev, RXCSR1, &reg);
893 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); 893 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
894 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 894 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
895 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 895 rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg);
896 896
897 entry_priv = rt2x00dev->rx->entries[0].priv_data; 897 entry_priv = rt2x00dev->rx->entries[0].priv_data;
898 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 898 rt2x00mmio_register_read(rt2x00dev, RXCSR2, &reg);
899 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, 899 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
900 entry_priv->desc_dma); 900 entry_priv->desc_dma);
901 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 901 rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg);
902 902
903 return 0; 903 return 0;
904} 904}
@@ -907,30 +907,30 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
907{ 907{
908 u32 reg; 908 u32 reg;
909 909
910 rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); 910 rt2x00mmio_register_write(rt2x00dev, PSCSR0, 0x00020002);
911 rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); 911 rt2x00mmio_register_write(rt2x00dev, PSCSR1, 0x00000002);
912 rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00020002); 912 rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00020002);
913 rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); 913 rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002);
914 914
915 rt2x00pci_register_read(rt2x00dev, TIMECSR, &reg); 915 rt2x00mmio_register_read(rt2x00dev, TIMECSR, &reg);
916 rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33); 916 rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
917 rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63); 917 rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
918 rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0); 918 rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
919 rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); 919 rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg);
920 920
921 rt2x00pci_register_read(rt2x00dev, CSR9, &reg); 921 rt2x00mmio_register_read(rt2x00dev, CSR9, &reg);
922 rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT, 922 rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
923 rt2x00dev->rx->data_size / 128); 923 rt2x00dev->rx->data_size / 128);
924 rt2x00pci_register_write(rt2x00dev, CSR9, reg); 924 rt2x00mmio_register_write(rt2x00dev, CSR9, reg);
925 925
926 /* 926 /*
927 * Always use CWmin and CWmax set in descriptor. 927 * Always use CWmin and CWmax set in descriptor.
928 */ 928 */
929 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 929 rt2x00mmio_register_read(rt2x00dev, CSR11, &reg);
930 rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0); 930 rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0);
931 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 931 rt2x00mmio_register_write(rt2x00dev, CSR11, reg);
932 932
933 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 933 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
934 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); 934 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
935 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); 935 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0);
936 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 936 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
@@ -939,11 +939,11 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
939 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 939 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
940 rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); 940 rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0);
941 rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); 941 rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0);
942 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 942 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
943 943
944 rt2x00pci_register_write(rt2x00dev, CNT3, 0); 944 rt2x00mmio_register_write(rt2x00dev, CNT3, 0);
945 945
946 rt2x00pci_register_read(rt2x00dev, TXCSR8, &reg); 946 rt2x00mmio_register_read(rt2x00dev, TXCSR8, &reg);
947 rt2x00_set_field32(&reg, TXCSR8_BBP_ID0, 10); 947 rt2x00_set_field32(&reg, TXCSR8_BBP_ID0, 10);
948 rt2x00_set_field32(&reg, TXCSR8_BBP_ID0_VALID, 1); 948 rt2x00_set_field32(&reg, TXCSR8_BBP_ID0_VALID, 1);
949 rt2x00_set_field32(&reg, TXCSR8_BBP_ID1, 11); 949 rt2x00_set_field32(&reg, TXCSR8_BBP_ID1, 11);
@@ -952,30 +952,30 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
952 rt2x00_set_field32(&reg, TXCSR8_BBP_ID2_VALID, 1); 952 rt2x00_set_field32(&reg, TXCSR8_BBP_ID2_VALID, 1);
953 rt2x00_set_field32(&reg, TXCSR8_BBP_ID3, 12); 953 rt2x00_set_field32(&reg, TXCSR8_BBP_ID3, 12);
954 rt2x00_set_field32(&reg, TXCSR8_BBP_ID3_VALID, 1); 954 rt2x00_set_field32(&reg, TXCSR8_BBP_ID3_VALID, 1);
955 rt2x00pci_register_write(rt2x00dev, TXCSR8, reg); 955 rt2x00mmio_register_write(rt2x00dev, TXCSR8, reg);
956 956
957 rt2x00pci_register_read(rt2x00dev, ARTCSR0, &reg); 957 rt2x00mmio_register_read(rt2x00dev, ARTCSR0, &reg);
958 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_1MBS, 112); 958 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_1MBS, 112);
959 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_2MBS, 56); 959 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_2MBS, 56);
960 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_5_5MBS, 20); 960 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_5_5MBS, 20);
961 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_11MBS, 10); 961 rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_11MBS, 10);
962 rt2x00pci_register_write(rt2x00dev, ARTCSR0, reg); 962 rt2x00mmio_register_write(rt2x00dev, ARTCSR0, reg);
963 963
964 rt2x00pci_register_read(rt2x00dev, ARTCSR1, &reg); 964 rt2x00mmio_register_read(rt2x00dev, ARTCSR1, &reg);
965 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_6MBS, 45); 965 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_6MBS, 45);
966 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_9MBS, 37); 966 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_9MBS, 37);
967 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_12MBS, 33); 967 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_12MBS, 33);
968 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_18MBS, 29); 968 rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_18MBS, 29);
969 rt2x00pci_register_write(rt2x00dev, ARTCSR1, reg); 969 rt2x00mmio_register_write(rt2x00dev, ARTCSR1, reg);
970 970
971 rt2x00pci_register_read(rt2x00dev, ARTCSR2, &reg); 971 rt2x00mmio_register_read(rt2x00dev, ARTCSR2, &reg);
972 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_24MBS, 29); 972 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_24MBS, 29);
973 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_36MBS, 25); 973 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_36MBS, 25);
974 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_48MBS, 25); 974 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_48MBS, 25);
975 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_54MBS, 25); 975 rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_54MBS, 25);
976 rt2x00pci_register_write(rt2x00dev, ARTCSR2, reg); 976 rt2x00mmio_register_write(rt2x00dev, ARTCSR2, reg);
977 977
978 rt2x00pci_register_read(rt2x00dev, RXCSR3, &reg); 978 rt2x00mmio_register_read(rt2x00dev, RXCSR3, &reg);
979 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* CCK Signal */ 979 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* CCK Signal */
980 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1); 980 rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
981 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi */ 981 rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi */
@@ -984,9 +984,9 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
984 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1); 984 rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1);
985 rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* RSSI */ 985 rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* RSSI */
986 rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1); 986 rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1);
987 rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); 987 rt2x00mmio_register_write(rt2x00dev, RXCSR3, reg);
988 988
989 rt2x00pci_register_read(rt2x00dev, PCICSR, &reg); 989 rt2x00mmio_register_read(rt2x00dev, PCICSR, &reg);
990 rt2x00_set_field32(&reg, PCICSR_BIG_ENDIAN, 0); 990 rt2x00_set_field32(&reg, PCICSR_BIG_ENDIAN, 0);
991 rt2x00_set_field32(&reg, PCICSR_RX_TRESHOLD, 0); 991 rt2x00_set_field32(&reg, PCICSR_RX_TRESHOLD, 0);
992 rt2x00_set_field32(&reg, PCICSR_TX_TRESHOLD, 3); 992 rt2x00_set_field32(&reg, PCICSR_TX_TRESHOLD, 3);
@@ -994,54 +994,54 @@ static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev)
994 rt2x00_set_field32(&reg, PCICSR_ENABLE_CLK, 1); 994 rt2x00_set_field32(&reg, PCICSR_ENABLE_CLK, 1);
995 rt2x00_set_field32(&reg, PCICSR_READ_MULTIPLE, 1); 995 rt2x00_set_field32(&reg, PCICSR_READ_MULTIPLE, 1);
996 rt2x00_set_field32(&reg, PCICSR_WRITE_INVALID, 1); 996 rt2x00_set_field32(&reg, PCICSR_WRITE_INVALID, 1);
997 rt2x00pci_register_write(rt2x00dev, PCICSR, reg); 997 rt2x00mmio_register_write(rt2x00dev, PCICSR, reg);
998 998
999 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); 999 rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100);
1000 1000
1001 rt2x00pci_register_write(rt2x00dev, GPIOCSR, 0x0000ff00); 1001 rt2x00mmio_register_write(rt2x00dev, GPIOCSR, 0x0000ff00);
1002 rt2x00pci_register_write(rt2x00dev, TESTCSR, 0x000000f0); 1002 rt2x00mmio_register_write(rt2x00dev, TESTCSR, 0x000000f0);
1003 1003
1004 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 1004 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
1005 return -EBUSY; 1005 return -EBUSY;
1006 1006
1007 rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00213223); 1007 rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00213223);
1008 rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); 1008 rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518);
1009 1009
1010 rt2x00pci_register_read(rt2x00dev, MACCSR2, &reg); 1010 rt2x00mmio_register_read(rt2x00dev, MACCSR2, &reg);
1011 rt2x00_set_field32(&reg, MACCSR2_DELAY, 64); 1011 rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
1012 rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); 1012 rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg);
1013 1013
1014 rt2x00pci_register_read(rt2x00dev, RALINKCSR, &reg); 1014 rt2x00mmio_register_read(rt2x00dev, RALINKCSR, &reg);
1015 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17); 1015 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
1016 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26); 1016 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26);
1017 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1); 1017 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1);
1018 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0); 1018 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
1019 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26); 1019 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26);
1020 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1); 1020 rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1);
1021 rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); 1021 rt2x00mmio_register_write(rt2x00dev, RALINKCSR, reg);
1022 1022
1023 rt2x00pci_register_write(rt2x00dev, BBPCSR1, 0x82188200); 1023 rt2x00mmio_register_write(rt2x00dev, BBPCSR1, 0x82188200);
1024 1024
1025 rt2x00pci_register_write(rt2x00dev, TXACKCSR0, 0x00000020); 1025 rt2x00mmio_register_write(rt2x00dev, TXACKCSR0, 0x00000020);
1026 1026
1027 rt2x00pci_register_read(rt2x00dev, CSR1, &reg); 1027 rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
1028 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1); 1028 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
1029 rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0); 1029 rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0);
1030 rt2x00_set_field32(&reg, CSR1_HOST_READY, 0); 1030 rt2x00_set_field32(&reg, CSR1_HOST_READY, 0);
1031 rt2x00pci_register_write(rt2x00dev, CSR1, reg); 1031 rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
1032 1032
1033 rt2x00pci_register_read(rt2x00dev, CSR1, &reg); 1033 rt2x00mmio_register_read(rt2x00dev, CSR1, &reg);
1034 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0); 1034 rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0);
1035 rt2x00_set_field32(&reg, CSR1_HOST_READY, 1); 1035 rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
1036 rt2x00pci_register_write(rt2x00dev, CSR1, reg); 1036 rt2x00mmio_register_write(rt2x00dev, CSR1, reg);
1037 1037
1038 /* 1038 /*
1039 * We must clear the FCS and FIFO error count. 1039 * We must clear the FCS and FIFO error count.
1040 * These registers are cleared on read, 1040 * These registers are cleared on read,
1041 * so we may pass a useless variable to store the value. 1041 * so we may pass a useless variable to store the value.
1042 */ 1042 */
1043 rt2x00pci_register_read(rt2x00dev, CNT0, &reg); 1043 rt2x00mmio_register_read(rt2x00dev, CNT0, &reg);
1044 rt2x00pci_register_read(rt2x00dev, CNT4, &reg); 1044 rt2x00mmio_register_read(rt2x00dev, CNT4, &reg);
1045 1045
1046 return 0; 1046 return 0;
1047} 1047}
@@ -1058,7 +1058,7 @@ static int rt2500pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1058 udelay(REGISTER_BUSY_DELAY); 1058 udelay(REGISTER_BUSY_DELAY);
1059 } 1059 }
1060 1060
1061 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 1061 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
1062 return -EACCES; 1062 return -EACCES;
1063} 1063}
1064 1064
@@ -1131,8 +1131,8 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1131 * should clear the register to assure a clean state. 1131 * should clear the register to assure a clean state.
1132 */ 1132 */
1133 if (state == STATE_RADIO_IRQ_ON) { 1133 if (state == STATE_RADIO_IRQ_ON) {
1134 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 1134 rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
1135 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 1135 rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
1136 } 1136 }
1137 1137
1138 /* 1138 /*
@@ -1141,13 +1141,13 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1141 */ 1141 */
1142 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1142 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1143 1143
1144 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1144 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1145 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 1145 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
1146 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 1146 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
1147 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask); 1147 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask);
1148 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 1148 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
1149 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 1149 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
1150 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1150 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1151 1151
1152 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1152 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1153 1153
@@ -1179,7 +1179,7 @@ static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1179 /* 1179 /*
1180 * Disable power 1180 * Disable power
1181 */ 1181 */
1182 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); 1182 rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0);
1183} 1183}
1184 1184
1185static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1185static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1193,12 +1193,12 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
1193 1193
1194 put_to_sleep = (state != STATE_AWAKE); 1194 put_to_sleep = (state != STATE_AWAKE);
1195 1195
1196 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 1196 rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg);
1197 rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1); 1197 rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1);
1198 rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state); 1198 rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state);
1199 rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state); 1199 rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state);
1200 rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); 1200 rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep);
1201 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1201 rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg);
1202 1202
1203 /* 1203 /*
1204 * Device is not guaranteed to be in the requested state yet. 1204 * Device is not guaranteed to be in the requested state yet.
@@ -1206,12 +1206,12 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
1206 * device has entered the correct state. 1206 * device has entered the correct state.
1207 */ 1207 */
1208 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1208 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1209 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 1209 rt2x00mmio_register_read(rt2x00dev, PWRCSR1, &reg2);
1210 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 1210 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
1211 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 1211 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
1212 if (bbp_state == state && rf_state == state) 1212 if (bbp_state == state && rf_state == state)
1213 return 0; 1213 return 0;
1214 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1214 rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg);
1215 msleep(10); 1215 msleep(10);
1216 } 1216 }
1217 1217
@@ -1246,8 +1246,8 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1246 } 1246 }
1247 1247
1248 if (unlikely(retval)) 1248 if (unlikely(retval))
1249 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 1249 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
1250 state, retval); 1250 state, retval);
1251 1251
1252 return retval; 1252 return retval;
1253} 1253}
@@ -1259,7 +1259,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1259 struct txentry_desc *txdesc) 1259 struct txentry_desc *txdesc)
1260{ 1260{
1261 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1261 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1262 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1262 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1263 __le32 *txd = entry_priv->desc; 1263 __le32 *txd = entry_priv->desc;
1264 u32 word; 1264 u32 word;
1265 1265
@@ -1335,12 +1335,12 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1335 * Disable beaconing while we are reloading the beacon data, 1335 * Disable beaconing while we are reloading the beacon data,
1336 * otherwise we might be sending out invalid data. 1336 * otherwise we might be sending out invalid data.
1337 */ 1337 */
1338 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1338 rt2x00mmio_register_read(rt2x00dev, CSR14, &reg);
1339 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1339 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1340 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1340 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
1341 1341
1342 if (rt2x00queue_map_txskb(entry)) { 1342 if (rt2x00queue_map_txskb(entry)) {
1343 ERROR(rt2x00dev, "Fail to map beacon, aborting\n"); 1343 rt2x00_err(rt2x00dev, "Fail to map beacon, aborting\n");
1344 goto out; 1344 goto out;
1345 } 1345 }
1346 1346
@@ -1358,7 +1358,7 @@ out:
1358 * Enable beaconing again. 1358 * Enable beaconing again.
1359 */ 1359 */
1360 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1360 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1361 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1361 rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
1362} 1362}
1363 1363
1364/* 1364/*
@@ -1367,7 +1367,7 @@ out:
1367static void rt2500pci_fill_rxdone(struct queue_entry *entry, 1367static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1368 struct rxdone_entry_desc *rxdesc) 1368 struct rxdone_entry_desc *rxdesc)
1369{ 1369{
1370 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1370 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1371 u32 word0; 1371 u32 word0;
1372 u32 word2; 1372 u32 word2;
1373 1373
@@ -1405,7 +1405,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1405 const enum data_queue_qid queue_idx) 1405 const enum data_queue_qid queue_idx)
1406{ 1406{
1407 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 1407 struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
1408 struct queue_entry_priv_pci *entry_priv; 1408 struct queue_entry_priv_mmio *entry_priv;
1409 struct queue_entry *entry; 1409 struct queue_entry *entry;
1410 struct txdone_entry_desc txdesc; 1410 struct txdone_entry_desc txdesc;
1411 u32 word; 1411 u32 word;
@@ -1451,9 +1451,9 @@ static inline void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1451 */ 1451 */
1452 spin_lock_irq(&rt2x00dev->irqmask_lock); 1452 spin_lock_irq(&rt2x00dev->irqmask_lock);
1453 1453
1454 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1454 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1455 rt2x00_set_field32(&reg, irq_field, 0); 1455 rt2x00_set_field32(&reg, irq_field, 0);
1456 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1456 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1457 1457
1458 spin_unlock_irq(&rt2x00dev->irqmask_lock); 1458 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1459} 1459}
@@ -1476,11 +1476,11 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
1476 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) { 1476 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
1477 spin_lock_irq(&rt2x00dev->irqmask_lock); 1477 spin_lock_irq(&rt2x00dev->irqmask_lock);
1478 1478
1479 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1479 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1480 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); 1480 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1481 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0); 1481 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1482 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); 1482 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1483 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1483 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1484 1484
1485 spin_unlock_irq(&rt2x00dev->irqmask_lock); 1485 spin_unlock_irq(&rt2x00dev->irqmask_lock);
1486 } 1486 }
@@ -1497,7 +1497,7 @@ static void rt2500pci_tbtt_tasklet(unsigned long data)
1497static void rt2500pci_rxdone_tasklet(unsigned long data) 1497static void rt2500pci_rxdone_tasklet(unsigned long data)
1498{ 1498{
1499 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1499 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1500 if (rt2x00pci_rxdone(rt2x00dev)) 1500 if (rt2x00mmio_rxdone(rt2x00dev))
1501 tasklet_schedule(&rt2x00dev->rxdone_tasklet); 1501 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1502 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1502 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1503 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); 1503 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
@@ -1512,8 +1512,8 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1512 * Get the interrupt sources & saved to local variable. 1512 * Get the interrupt sources & saved to local variable.
1513 * Write register value back to clear pending interrupts. 1513 * Write register value back to clear pending interrupts.
1514 */ 1514 */
1515 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 1515 rt2x00mmio_register_read(rt2x00dev, CSR7, &reg);
1516 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 1516 rt2x00mmio_register_write(rt2x00dev, CSR7, reg);
1517 1517
1518 if (!reg) 1518 if (!reg)
1519 return IRQ_NONE; 1519 return IRQ_NONE;
@@ -1550,9 +1550,9 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1550 */ 1550 */
1551 spin_lock(&rt2x00dev->irqmask_lock); 1551 spin_lock(&rt2x00dev->irqmask_lock);
1552 1552
1553 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1553 rt2x00mmio_register_read(rt2x00dev, CSR8, &reg);
1554 reg |= mask; 1554 reg |= mask;
1555 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1555 rt2x00mmio_register_write(rt2x00dev, CSR8, reg);
1556 1556
1557 spin_unlock(&rt2x00dev->irqmask_lock); 1557 spin_unlock(&rt2x00dev->irqmask_lock);
1558 1558
@@ -1569,7 +1569,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1569 u16 word; 1569 u16 word;
1570 u8 *mac; 1570 u8 *mac;
1571 1571
1572 rt2x00pci_register_read(rt2x00dev, CSR21, &reg); 1572 rt2x00mmio_register_read(rt2x00dev, CSR21, &reg);
1573 1573
1574 eeprom.data = rt2x00dev; 1574 eeprom.data = rt2x00dev;
1575 eeprom.register_read = rt2500pci_eepromregister_read; 1575 eeprom.register_read = rt2500pci_eepromregister_read;
@@ -1590,7 +1590,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1590 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1590 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1591 if (!is_valid_ether_addr(mac)) { 1591 if (!is_valid_ether_addr(mac)) {
1592 eth_random_addr(mac); 1592 eth_random_addr(mac);
1593 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1593 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
1594 } 1594 }
1595 1595
1596 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1596 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1606,7 +1606,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1606 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); 1606 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
1607 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); 1607 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522);
1608 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1608 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1609 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1609 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
1610 } 1610 }
1611 1611
1612 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); 1612 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
@@ -1615,7 +1615,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1615 rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); 1615 rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
1616 rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); 1616 rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0);
1617 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 1617 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
1618 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 1618 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
1619 } 1619 }
1620 1620
1621 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); 1621 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word);
@@ -1623,7 +1623,8 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1623 rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, 1623 rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
1624 DEFAULT_RSSI_OFFSET); 1624 DEFAULT_RSSI_OFFSET);
1625 rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); 1625 rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word);
1626 EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); 1626 rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n",
1627 word);
1627 } 1628 }
1628 1629
1629 return 0; 1630 return 0;
@@ -1644,7 +1645,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1644 * Identify RF chipset. 1645 * Identify RF chipset.
1645 */ 1646 */
1646 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1647 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1647 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1648 rt2x00mmio_register_read(rt2x00dev, CSR0, &reg);
1648 rt2x00_set_chip(rt2x00dev, RT2560, value, 1649 rt2x00_set_chip(rt2x00dev, RT2560, value,
1649 rt2x00_get_field32(reg, CSR0_REVISION)); 1650 rt2x00_get_field32(reg, CSR0_REVISION));
1650 1651
@@ -1654,7 +1655,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1654 !rt2x00_rf(rt2x00dev, RF2525) && 1655 !rt2x00_rf(rt2x00dev, RF2525) &&
1655 !rt2x00_rf(rt2x00dev, RF2525E) && 1656 !rt2x00_rf(rt2x00dev, RF2525E) &&
1656 !rt2x00_rf(rt2x00dev, RF5222)) { 1657 !rt2x00_rf(rt2x00dev, RF5222)) {
1657 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1658 rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
1658 return -ENODEV; 1659 return -ENODEV;
1659 } 1660 }
1660 1661
@@ -1950,9 +1951,9 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1950 * Enable rfkill polling by setting GPIO direction of the 1951 * Enable rfkill polling by setting GPIO direction of the
1951 * rfkill switch GPIO pin correctly. 1952 * rfkill switch GPIO pin correctly.
1952 */ 1953 */
1953 rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); 1954 rt2x00mmio_register_read(rt2x00dev, GPIOCSR, &reg);
1954 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1); 1955 rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
1955 rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); 1956 rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg);
1956 1957
1957 /* 1958 /*
1958 * Initialize hw specifications. 1959 * Initialize hw specifications.
@@ -1986,9 +1987,9 @@ static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw,
1986 u64 tsf; 1987 u64 tsf;
1987 u32 reg; 1988 u32 reg;
1988 1989
1989 rt2x00pci_register_read(rt2x00dev, CSR17, &reg); 1990 rt2x00mmio_register_read(rt2x00dev, CSR17, &reg);
1990 tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; 1991 tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32;
1991 rt2x00pci_register_read(rt2x00dev, CSR16, &reg); 1992 rt2x00mmio_register_read(rt2x00dev, CSR16, &reg);
1992 tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); 1993 tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER);
1993 1994
1994 return tsf; 1995 return tsf;
@@ -1999,7 +2000,7 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
1999 struct rt2x00_dev *rt2x00dev = hw->priv; 2000 struct rt2x00_dev *rt2x00dev = hw->priv;
2000 u32 reg; 2001 u32 reg;
2001 2002
2002 rt2x00pci_register_read(rt2x00dev, CSR15, &reg); 2003 rt2x00mmio_register_read(rt2x00dev, CSR15, &reg);
2003 return rt2x00_get_field32(reg, CSR15_BEACON_SENT); 2004 return rt2x00_get_field32(reg, CSR15_BEACON_SENT);
2004} 2005}
2005 2006
@@ -2032,8 +2033,8 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
2032 .tbtt_tasklet = rt2500pci_tbtt_tasklet, 2033 .tbtt_tasklet = rt2500pci_tbtt_tasklet,
2033 .rxdone_tasklet = rt2500pci_rxdone_tasklet, 2034 .rxdone_tasklet = rt2500pci_rxdone_tasklet,
2034 .probe_hw = rt2500pci_probe_hw, 2035 .probe_hw = rt2500pci_probe_hw,
2035 .initialize = rt2x00pci_initialize, 2036 .initialize = rt2x00mmio_initialize,
2036 .uninitialize = rt2x00pci_uninitialize, 2037 .uninitialize = rt2x00mmio_uninitialize,
2037 .get_entry_state = rt2500pci_get_entry_state, 2038 .get_entry_state = rt2500pci_get_entry_state,
2038 .clear_entry = rt2500pci_clear_entry, 2039 .clear_entry = rt2500pci_clear_entry,
2039 .set_device_state = rt2500pci_set_device_state, 2040 .set_device_state = rt2500pci_set_device_state,
@@ -2044,7 +2045,7 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
2044 .start_queue = rt2500pci_start_queue, 2045 .start_queue = rt2500pci_start_queue,
2045 .kick_queue = rt2500pci_kick_queue, 2046 .kick_queue = rt2500pci_kick_queue,
2046 .stop_queue = rt2500pci_stop_queue, 2047 .stop_queue = rt2500pci_stop_queue,
2047 .flush_queue = rt2x00pci_flush_queue, 2048 .flush_queue = rt2x00mmio_flush_queue,
2048 .write_tx_desc = rt2500pci_write_tx_desc, 2049 .write_tx_desc = rt2500pci_write_tx_desc,
2049 .write_beacon = rt2500pci_write_beacon, 2050 .write_beacon = rt2500pci_write_beacon,
2050 .fill_rxdone = rt2500pci_fill_rxdone, 2051 .fill_rxdone = rt2500pci_fill_rxdone,
@@ -2059,28 +2060,28 @@ static const struct data_queue_desc rt2500pci_queue_rx = {
2059 .entry_num = 32, 2060 .entry_num = 32,
2060 .data_size = DATA_FRAME_SIZE, 2061 .data_size = DATA_FRAME_SIZE,
2061 .desc_size = RXD_DESC_SIZE, 2062 .desc_size = RXD_DESC_SIZE,
2062 .priv_size = sizeof(struct queue_entry_priv_pci), 2063 .priv_size = sizeof(struct queue_entry_priv_mmio),
2063}; 2064};
2064 2065
2065static const struct data_queue_desc rt2500pci_queue_tx = { 2066static const struct data_queue_desc rt2500pci_queue_tx = {
2066 .entry_num = 32, 2067 .entry_num = 32,
2067 .data_size = DATA_FRAME_SIZE, 2068 .data_size = DATA_FRAME_SIZE,
2068 .desc_size = TXD_DESC_SIZE, 2069 .desc_size = TXD_DESC_SIZE,
2069 .priv_size = sizeof(struct queue_entry_priv_pci), 2070 .priv_size = sizeof(struct queue_entry_priv_mmio),
2070}; 2071};
2071 2072
2072static const struct data_queue_desc rt2500pci_queue_bcn = { 2073static const struct data_queue_desc rt2500pci_queue_bcn = {
2073 .entry_num = 1, 2074 .entry_num = 1,
2074 .data_size = MGMT_FRAME_SIZE, 2075 .data_size = MGMT_FRAME_SIZE,
2075 .desc_size = TXD_DESC_SIZE, 2076 .desc_size = TXD_DESC_SIZE,
2076 .priv_size = sizeof(struct queue_entry_priv_pci), 2077 .priv_size = sizeof(struct queue_entry_priv_mmio),
2077}; 2078};
2078 2079
2079static const struct data_queue_desc rt2500pci_queue_atim = { 2080static const struct data_queue_desc rt2500pci_queue_atim = {
2080 .entry_num = 8, 2081 .entry_num = 8,
2081 .data_size = DATA_FRAME_SIZE, 2082 .data_size = DATA_FRAME_SIZE,
2082 .desc_size = TXD_DESC_SIZE, 2083 .desc_size = TXD_DESC_SIZE,
2083 .priv_size = sizeof(struct queue_entry_priv_pci), 2084 .priv_size = sizeof(struct queue_entry_priv_mmio),
2084}; 2085};
2085 2086
2086static const struct rt2x00_ops rt2500pci_ops = { 2087static const struct rt2x00_ops rt2500pci_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6b2e1e431dd2..a7f7b365eff4 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -134,8 +134,8 @@ static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
134 udelay(REGISTER_BUSY_DELAY); 134 udelay(REGISTER_BUSY_DELAY);
135 } 135 }
136 136
137 ERROR(rt2x00dev, "Indirect register access failed: " 137 rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
138 "offset=0x%.08x, value=0x%.08x\n", offset, *reg); 138 offset, *reg);
139 *reg = ~0; 139 *reg = ~0;
140 140
141 return 0; 141 return 0;
@@ -916,7 +916,7 @@ static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
916 udelay(REGISTER_BUSY_DELAY); 916 udelay(REGISTER_BUSY_DELAY);
917 } 917 }
918 918
919 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 919 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
920 return -EACCES; 920 return -EACCES;
921} 921}
922 922
@@ -1069,8 +1069,8 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1069 } 1069 }
1070 1070
1071 if (unlikely(retval)) 1071 if (unlikely(retval))
1072 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 1072 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
1073 state, retval); 1073 state, retval);
1074 1074
1075 return retval; 1075 return retval;
1076} 1076}
@@ -1353,7 +1353,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1353 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1353 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1354 if (!is_valid_ether_addr(mac)) { 1354 if (!is_valid_ether_addr(mac)) {
1355 eth_random_addr(mac); 1355 eth_random_addr(mac);
1356 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1356 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
1357 } 1357 }
1358 1358
1359 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1359 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1369,7 +1369,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1369 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); 1369 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
1370 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); 1370 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522);
1371 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1371 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1372 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1372 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
1373 } 1373 }
1374 1374
1375 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); 1375 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
@@ -1378,7 +1378,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1378 rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); 1378 rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
1379 rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); 1379 rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0);
1380 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 1380 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
1381 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 1381 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
1382 } 1382 }
1383 1383
1384 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); 1384 rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word);
@@ -1386,14 +1386,15 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1386 rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, 1386 rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
1387 DEFAULT_RSSI_OFFSET); 1387 DEFAULT_RSSI_OFFSET);
1388 rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); 1388 rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word);
1389 EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); 1389 rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n",
1390 word);
1390 } 1391 }
1391 1392
1392 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word); 1393 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word);
1393 if (word == 0xffff) { 1394 if (word == 0xffff) {
1394 rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); 1395 rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45);
1395 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); 1396 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word);
1396 EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); 1397 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune: 0x%04x\n", word);
1397 } 1398 }
1398 1399
1399 /* 1400 /*
@@ -1408,7 +1409,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1408 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); 1409 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40);
1409 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); 1410 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1410 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); 1411 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1411 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); 1412 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
1412 } else { 1413 } else {
1413 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); 1414 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1414 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); 1415 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
@@ -1419,7 +1420,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1419 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); 1420 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48);
1420 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); 1421 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
1421 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); 1422 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
1422 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); 1423 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
1423 } 1424 }
1424 1425
1425 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); 1426 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
@@ -1427,7 +1428,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1427 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); 1428 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40);
1428 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); 1429 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80);
1429 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); 1430 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word);
1430 EEPROM(rt2x00dev, "BBPtune r24: 0x%04x\n", word); 1431 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word);
1431 } 1432 }
1432 1433
1433 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word); 1434 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word);
@@ -1435,7 +1436,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1435 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); 1436 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40);
1436 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); 1437 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50);
1437 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); 1438 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word);
1438 EEPROM(rt2x00dev, "BBPtune r25: 0x%04x\n", word); 1439 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word);
1439 } 1440 }
1440 1441
1441 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word); 1442 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word);
@@ -1443,7 +1444,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1443 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); 1444 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60);
1444 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); 1445 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d);
1445 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); 1446 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word);
1446 EEPROM(rt2x00dev, "BBPtune r61: 0x%04x\n", word); 1447 rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r61: 0x%04x\n", word);
1447 } 1448 }
1448 1449
1449 return 0; 1450 return 0;
@@ -1468,7 +1469,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1468 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1469 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1469 1470
1470 if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) { 1471 if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
1471 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1472 rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n");
1472 return -ENODEV; 1473 return -ENODEV;
1473 } 1474 }
1474 1475
@@ -1478,7 +1479,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1478 !rt2x00_rf(rt2x00dev, RF2525) && 1479 !rt2x00_rf(rt2x00dev, RF2525) &&
1479 !rt2x00_rf(rt2x00dev, RF2525E) && 1480 !rt2x00_rf(rt2x00dev, RF2525E) &&
1480 !rt2x00_rf(rt2x00dev, RF5222)) { 1481 !rt2x00_rf(rt2x00dev, RF5222)) {
1481 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1482 rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
1482 return -ENODEV; 1483 return -ENODEV;
1483 } 1484 }
1484 1485
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4db1088a847f..a7630d5ec892 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5592 2.4G/5G 2T2R
54 * RF5360 2.4G 1T1R 55 * RF5360 2.4G 1T1R
55 * RF5370 2.4G 1T1R 56 * RF5370 2.4G 1T1R
56 * RF5390 2.4G 1T1R 57 * RF5390 2.4G 1T1R
@@ -68,6 +69,7 @@
68#define RF3320 0x000b 69#define RF3320 0x000b
69#define RF3322 0x000c 70#define RF3322 0x000c
70#define RF3053 0x000d 71#define RF3053 0x000d
72#define RF5592 0x000f
71#define RF3290 0x3290 73#define RF3290 0x3290
72#define RF5360 0x5360 74#define RF5360 0x5360
73#define RF5370 0x5370 75#define RF5370 0x5370
@@ -88,11 +90,8 @@
88#define REV_RT3390E 0x0211 90#define REV_RT3390E 0x0211
89#define REV_RT5390F 0x0502 91#define REV_RT5390F 0x0502
90#define REV_RT5390R 0x1502 92#define REV_RT5390R 0x1502
93#define REV_RT5592C 0x0221
91 94
92/*
93 * Signal information.
94 * Default offset is required for RSSI <-> dBm conversion.
95 */
96#define DEFAULT_RSSI_OFFSET 120 95#define DEFAULT_RSSI_OFFSET 120
97 96
98/* 97/*
@@ -690,6 +689,12 @@
690#define GPIO_SWITCH_7 FIELD32(0x00000080) 689#define GPIO_SWITCH_7 FIELD32(0x00000080)
691 690
692/* 691/*
692 * FIXME: where the DEBUG_INDEX name come from?
693 */
694#define MAC_DEBUG_INDEX 0x05e8
695#define MAC_DEBUG_INDEX_XTAL FIELD32(0x80000000)
696
697/*
693 * MAC Control/Status Registers(CSR). 698 * MAC Control/Status Registers(CSR).
694 * Some values are set in TU, whereas 1 TU == 1024 us. 699 * Some values are set in TU, whereas 1 TU == 1024 us.
695 */ 700 */
@@ -1934,6 +1939,9 @@ struct mac_iveiv_entry {
1934#define BBP4_BANDWIDTH FIELD8(0x18) 1939#define BBP4_BANDWIDTH FIELD8(0x18)
1935#define BBP4_MAC_IF_CTRL FIELD8(0x40) 1940#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1936 1941
1942/* BBP27 */
1943#define BBP27_RX_CHAIN_SEL FIELD8(0x60)
1944
1937/* 1945/*
1938 * BBP 47: Bandwidth 1946 * BBP 47: Bandwidth
1939 */ 1947 */
@@ -1948,6 +1956,20 @@ struct mac_iveiv_entry {
1948#define BBP49_UPDATE_FLAG FIELD8(0x01) 1956#define BBP49_UPDATE_FLAG FIELD8(0x01)
1949 1957
1950/* 1958/*
1959 * BBP 105:
1960 * - bit0: detect SIG on primary channel only (on 40MHz bandwidth)
1961 * - bit1: FEQ (Feed Forward Compensation) for independend streams
1962 * - bit2: MLD (Maximum Likehood Detection) for 2 streams (reserved on single
1963 * stream)
1964 * - bit4: channel estimation updates based on remodulation of
1965 * L-SIG and HT-SIG symbols
1966 */
1967#define BBP105_DETECT_SIG_ON_PRIMARY FIELD8(0x01)
1968#define BBP105_FEQ FIELD8(0x02)
1969#define BBP105_MLD FIELD8(0x04)
1970#define BBP105_SIG_REMODULATION FIELD8(0x08)
1971
1972/*
1951 * BBP 109 1973 * BBP 109
1952 */ 1974 */
1953#define BBP109_TX0_POWER FIELD8(0x0f) 1975#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1967,6 +1989,11 @@ struct mac_iveiv_entry {
1967#define BBP152_RX_DEFAULT_ANT FIELD8(0x80) 1989#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
1968 1990
1969/* 1991/*
1992 * BBP 254: unknown
1993 */
1994#define BBP254_BIT7 FIELD8(0x80)
1995
1996/*
1970 * RFCSR registers 1997 * RFCSR registers
1971 * The wordsize of the RFCSR is 8 bits. 1998 * The wordsize of the RFCSR is 8 bits.
1972 */ 1999 */
@@ -2022,9 +2049,18 @@ struct mac_iveiv_entry {
2022#define RFCSR7_BITS67 FIELD8(0xc0) 2049#define RFCSR7_BITS67 FIELD8(0xc0)
2023 2050
2024/* 2051/*
2052 * RFCSR 9:
2053 */
2054#define RFCSR9_K FIELD8(0x0f)
2055#define RFCSR9_N FIELD8(0x10)
2056#define RFCSR9_UNKNOWN FIELD8(0x60)
2057#define RFCSR9_MOD FIELD8(0x80)
2058
2059/*
2025 * RFCSR 11: 2060 * RFCSR 11:
2026 */ 2061 */
2027#define RFCSR11_R FIELD8(0x03) 2062#define RFCSR11_R FIELD8(0x03)
2063#define RFCSR11_MOD FIELD8(0xc0)
2028 2064
2029/* 2065/*
2030 * RFCSR 12: 2066 * RFCSR 12:
@@ -2130,11 +2166,13 @@ struct mac_iveiv_entry {
2130 * RFCSR 49: 2166 * RFCSR 49:
2131 */ 2167 */
2132#define RFCSR49_TX FIELD8(0x3f) 2168#define RFCSR49_TX FIELD8(0x3f)
2169#define RFCSR49_EP FIELD8(0xc0)
2133 2170
2134/* 2171/*
2135 * RFCSR 50: 2172 * RFCSR 50:
2136 */ 2173 */
2137#define RFCSR50_TX FIELD8(0x3f) 2174#define RFCSR50_TX FIELD8(0x3f)
2175#define RFCSR50_EP FIELD8(0xc0)
2138 2176
2139/* 2177/*
2140 * RF registers 2178 * RF registers
@@ -2497,6 +2535,61 @@ struct mac_iveiv_entry {
2497#define EEPROM_BBP_REG_ID FIELD16(0xff00) 2535#define EEPROM_BBP_REG_ID FIELD16(0xff00)
2498 2536
2499/* 2537/*
2538 * EEPROM IQ Calibration, unlike other entries those are byte addresses.
2539 */
2540
2541#define EEPROM_IQ_GAIN_CAL_TX0_2G 0x130
2542#define EEPROM_IQ_PHASE_CAL_TX0_2G 0x131
2543#define EEPROM_IQ_GROUPDELAY_CAL_TX0_2G 0x132
2544#define EEPROM_IQ_GAIN_CAL_TX1_2G 0x133
2545#define EEPROM_IQ_PHASE_CAL_TX1_2G 0x134
2546#define EEPROM_IQ_GROUPDELAY_CAL_TX1_2G 0x135
2547#define EEPROM_IQ_GAIN_CAL_RX0_2G 0x136
2548#define EEPROM_IQ_PHASE_CAL_RX0_2G 0x137
2549#define EEPROM_IQ_GROUPDELAY_CAL_RX0_2G 0x138
2550#define EEPROM_IQ_GAIN_CAL_RX1_2G 0x139
2551#define EEPROM_IQ_PHASE_CAL_RX1_2G 0x13A
2552#define EEPROM_IQ_GROUPDELAY_CAL_RX1_2G 0x13B
2553#define EEPROM_RF_IQ_COMPENSATION_CONTROL 0x13C
2554#define EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL 0x13D
2555#define EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G 0x144
2556#define EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G 0x145
2557#define EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G 0X146
2558#define EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G 0x147
2559#define EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G 0x148
2560#define EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G 0x149
2561#define EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G 0x14A
2562#define EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G 0x14B
2563#define EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G 0X14C
2564#define EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G 0x14D
2565#define EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G 0x14E
2566#define EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G 0x14F
2567#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH36_TO_CH64_5G 0x150
2568#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH36_TO_CH64_5G 0x151
2569#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH100_TO_CH138_5G 0x152
2570#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH100_TO_CH138_5G 0x153
2571#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH140_TO_CH165_5G 0x154
2572#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH140_TO_CH165_5G 0x155
2573#define EEPROM_IQ_GAIN_CAL_RX0_CH36_TO_CH64_5G 0x156
2574#define EEPROM_IQ_PHASE_CAL_RX0_CH36_TO_CH64_5G 0x157
2575#define EEPROM_IQ_GAIN_CAL_RX0_CH100_TO_CH138_5G 0X158
2576#define EEPROM_IQ_PHASE_CAL_RX0_CH100_TO_CH138_5G 0x159
2577#define EEPROM_IQ_GAIN_CAL_RX0_CH140_TO_CH165_5G 0x15A
2578#define EEPROM_IQ_PHASE_CAL_RX0_CH140_TO_CH165_5G 0x15B
2579#define EEPROM_IQ_GAIN_CAL_RX1_CH36_TO_CH64_5G 0x15C
2580#define EEPROM_IQ_PHASE_CAL_RX1_CH36_TO_CH64_5G 0x15D
2581#define EEPROM_IQ_GAIN_CAL_RX1_CH100_TO_CH138_5G 0X15E
2582#define EEPROM_IQ_PHASE_CAL_RX1_CH100_TO_CH138_5G 0x15F
2583#define EEPROM_IQ_GAIN_CAL_RX1_CH140_TO_CH165_5G 0x160
2584#define EEPROM_IQ_PHASE_CAL_RX1_CH140_TO_CH165_5G 0x161
2585#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH36_TO_CH64_5G 0x162
2586#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH36_TO_CH64_5G 0x163
2587#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH100_TO_CH138_5G 0x164
2588#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH100_TO_CH138_5G 0x165
2589#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH140_TO_CH165_5G 0x166
2590#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH140_TO_CH165_5G 0x167
2591
2592/*
2500 * MCU mailbox commands. 2593 * MCU mailbox commands.
2501 * MCU_SLEEP - go to power-save mode. 2594 * MCU_SLEEP - go to power-save mode.
2502 * arg1: 1: save as much power as possible, 0: save less power. 2595 * arg1: 1: save as much power as possible, 0: save less power.
@@ -2535,6 +2628,8 @@ struct mac_iveiv_entry {
2535#define TXWI_DESC_SIZE (4 * sizeof(__le32)) 2628#define TXWI_DESC_SIZE (4 * sizeof(__le32))
2536#define RXWI_DESC_SIZE (4 * sizeof(__le32)) 2629#define RXWI_DESC_SIZE (4 * sizeof(__le32))
2537 2630
2631#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32))
2632#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32))
2538/* 2633/*
2539 * TX WI structure 2634 * TX WI structure
2540 */ 2635 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a658b4bc7da2..b52d70c75e1a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -80,7 +80,7 @@ static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
80 rt2x00_rf(rt2x00dev, RF3022)) 80 rt2x00_rf(rt2x00dev, RF3022))
81 return true; 81 return true;
82 82
83 WARNING(rt2x00dev, "Unknown RF chipset on rt305x\n"); 83 rt2x00_warn(rt2x00dev, "Unknown RF chipset on rt305x\n");
84 return false; 84 return false;
85} 85}
86 86
@@ -328,7 +328,7 @@ int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev)
328 msleep(1); 328 msleep(1);
329 } 329 }
330 330
331 ERROR(rt2x00dev, "Unstable hardware.\n"); 331 rt2x00_err(rt2x00dev, "Unstable hardware\n");
332 return -EBUSY; 332 return -EBUSY;
333} 333}
334EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready); 334EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready);
@@ -351,7 +351,7 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
351 msleep(10); 351 msleep(10);
352 } 352 }
353 353
354 ERROR(rt2x00dev, "WPDMA TX/RX busy [0x%08x].\n", reg); 354 rt2x00_err(rt2x00dev, "WPDMA TX/RX busy [0x%08x]\n", reg);
355 return -EACCES; 355 return -EACCES;
356} 356}
357EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); 357EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
@@ -512,7 +512,7 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
512 } 512 }
513 513
514 if (i == REGISTER_BUSY_COUNT) { 514 if (i == REGISTER_BUSY_COUNT) {
515 ERROR(rt2x00dev, "PBF system register not ready.\n"); 515 rt2x00_err(rt2x00dev, "PBF system register not ready\n");
516 return -EBUSY; 516 return -EBUSY;
517 } 517 }
518 518
@@ -527,8 +527,10 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
527 */ 527 */
528 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 528 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
529 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 529 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
530 if (rt2x00_is_usb(rt2x00dev)) 530 if (rt2x00_is_usb(rt2x00dev)) {
531 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); 531 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
532 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
533 }
532 msleep(1); 534 msleep(1);
533 535
534 return 0; 536 return 0;
@@ -540,6 +542,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
540{ 542{
541 __le32 *txwi = rt2800_drv_get_txwi(entry); 543 __le32 *txwi = rt2800_drv_get_txwi(entry);
542 u32 word; 544 u32 word;
545 int i;
543 546
544 /* 547 /*
545 * Initialize TX Info descriptor 548 * Initialize TX Info descriptor
@@ -582,14 +585,16 @@ void rt2800_write_tx_data(struct queue_entry *entry,
582 rt2x00_desc_write(txwi, 1, word); 585 rt2x00_desc_write(txwi, 1, word);
583 586
584 /* 587 /*
585 * Always write 0 to IV/EIV fields, hardware will insert the IV 588 * Always write 0 to IV/EIV fields (word 2 and 3), hardware will insert
586 * from the IVEIV register when TXD_W3_WIV is set to 0. 589 * the IV from the IVEIV register when TXD_W3_WIV is set to 0.
587 * When TXD_W3_WIV is set to 1 it will use the IV data 590 * When TXD_W3_WIV is set to 1 it will use the IV data
588 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which 591 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
589 * crypto entry in the registers should be used to encrypt the frame. 592 * crypto entry in the registers should be used to encrypt the frame.
593 *
594 * Nulify all remaining words as well, we don't know how to program them.
590 */ 595 */
591 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); 596 for (i = 2; i < entry->queue->winfo_size / sizeof(__le32); i++)
592 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); 597 _rt2x00_desc_write(txwi, i, 0);
593} 598}
594EXPORT_SYMBOL_GPL(rt2800_write_tx_data); 599EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
595 600
@@ -674,11 +679,10 @@ void rt2800_process_rxwi(struct queue_entry *entry,
674 * Convert descriptor AGC value to RSSI value. 679 * Convert descriptor AGC value to RSSI value.
675 */ 680 */
676 rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word); 681 rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);
677
678 /* 682 /*
679 * Remove RXWI descriptor from start of buffer. 683 * Remove RXWI descriptor from start of the buffer.
680 */ 684 */
681 skb_pull(entry->skb, RXWI_DESC_SIZE); 685 skb_pull(entry->skb, entry->queue->winfo_size);
682} 686}
683EXPORT_SYMBOL_GPL(rt2800_process_rxwi); 687EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
684 688
@@ -769,6 +773,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
769 unsigned int beacon_base; 773 unsigned int beacon_base;
770 unsigned int padding_len; 774 unsigned int padding_len;
771 u32 orig_reg, reg; 775 u32 orig_reg, reg;
776 const int txwi_desc_size = entry->queue->winfo_size;
772 777
773 /* 778 /*
774 * Disable beaconing while we are reloading the beacon data, 779 * Disable beaconing while we are reloading the beacon data,
@@ -782,14 +787,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
782 /* 787 /*
783 * Add space for the TXWI in front of the skb. 788 * Add space for the TXWI in front of the skb.
784 */ 789 */
785 memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE); 790 memset(skb_push(entry->skb, txwi_desc_size), 0, txwi_desc_size);
786 791
787 /* 792 /*
788 * Register descriptor details in skb frame descriptor. 793 * Register descriptor details in skb frame descriptor.
789 */ 794 */
790 skbdesc->flags |= SKBDESC_DESC_IN_SKB; 795 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
791 skbdesc->desc = entry->skb->data; 796 skbdesc->desc = entry->skb->data;
792 skbdesc->desc_len = TXWI_DESC_SIZE; 797 skbdesc->desc_len = txwi_desc_size;
793 798
794 /* 799 /*
795 * Add the TXWI for the beacon to the skb. 800 * Add the TXWI for the beacon to the skb.
@@ -806,7 +811,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
806 */ 811 */
807 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 812 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
808 if (padding_len && skb_pad(entry->skb, padding_len)) { 813 if (padding_len && skb_pad(entry->skb, padding_len)) {
809 ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); 814 rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n");
810 /* skb freed by skb_pad() on failure */ 815 /* skb freed by skb_pad() on failure */
811 entry->skb = NULL; 816 entry->skb = NULL;
812 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg); 817 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
@@ -835,13 +840,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
835 unsigned int beacon_base) 840 unsigned int beacon_base)
836{ 841{
837 int i; 842 int i;
843 const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size;
838 844
839 /* 845 /*
840 * For the Beacon base registers we only need to clear 846 * For the Beacon base registers we only need to clear
841 * the whole TXWI which (when set to 0) will invalidate 847 * the whole TXWI which (when set to 0) will invalidate
842 * the entire beacon. 848 * the entire beacon.
843 */ 849 */
844 for (i = 0; i < TXWI_DESC_SIZE; i += sizeof(__le32)) 850 for (i = 0; i < txwi_desc_size; i += sizeof(__le32))
845 rt2800_register_write(rt2x00dev, beacon_base + i, 0); 851 rt2800_register_write(rt2x00dev, beacon_base + i, 0);
846} 852}
847 853
@@ -1988,8 +1994,21 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1988} 1994}
1989 1995
1990#define POWER_BOUND 0x27 1996#define POWER_BOUND 0x27
1997#define POWER_BOUND_5G 0x2b
1991#define FREQ_OFFSET_BOUND 0x5f 1998#define FREQ_OFFSET_BOUND 0x5f
1992 1999
2000static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
2001{
2002 u8 rfcsr;
2003
2004 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2005 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2006 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2007 else
2008 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2009 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2010}
2011
1993static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, 2012static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
1994 struct ieee80211_conf *conf, 2013 struct ieee80211_conf *conf,
1995 struct rf_channel *rf, 2014 struct rf_channel *rf,
@@ -2010,12 +2029,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
2010 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 2029 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
2011 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 2030 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2012 2031
2013 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2032 rt2800_adjust_freq_offset(rt2x00dev);
2014 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2015 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2016 else
2017 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2018 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2019 2033
2020 if (rf->channel <= 14) { 2034 if (rf->channel <= 14) {
2021 if (rf->channel == 6) 2035 if (rf->channel == 6)
@@ -2056,13 +2070,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
2056 else 2070 else
2057 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2); 2071 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
2058 2072
2059 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2073 rt2800_adjust_freq_offset(rt2x00dev);
2060 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2061 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2062 else
2063 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2064
2065 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2066 2074
2067 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 2075 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2068 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 2076 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2127,12 +2135,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2127 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); 2135 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
2128 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 2136 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2129 2137
2130 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2138 rt2800_adjust_freq_offset(rt2x00dev);
2131 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2132 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2133 else
2134 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2135 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2136 2139
2137 if (rf->channel <= 14) { 2140 if (rf->channel <= 14) {
2138 int idx = rf->channel-1; 2141 int idx = rf->channel-1;
@@ -2184,6 +2187,382 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2184 } 2187 }
2185} 2188}
2186 2189
2190static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
2191 struct ieee80211_conf *conf,
2192 struct rf_channel *rf,
2193 struct channel_info *info)
2194{
2195 u8 rfcsr, ep_reg;
2196 u32 reg;
2197 int power_bound;
2198
2199 /* TODO */
2200 const bool is_11b = false;
2201 const bool is_type_ep = false;
2202
2203 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
2204 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL,
2205 (rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0);
2206 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
2207
2208 /* Order of values on rf_channel entry: N, K, mod, R */
2209 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff);
2210
2211 rt2800_rfcsr_read(rt2x00dev, 9, &rfcsr);
2212 rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf);
2213 rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8);
2214 rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2);
2215 rt2800_rfcsr_write(rt2x00dev, 9, rfcsr);
2216
2217 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
2218 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1);
2219 rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3);
2220 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
2221
2222 if (rf->channel <= 14) {
2223 rt2800_rfcsr_write(rt2x00dev, 10, 0x90);
2224 /* FIXME: RF11 owerwrite ? */
2225 rt2800_rfcsr_write(rt2x00dev, 11, 0x4A);
2226 rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
2227 rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
2228 rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
2229 rt2800_rfcsr_write(rt2x00dev, 24, 0x4A);
2230 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
2231 rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
2232 rt2800_rfcsr_write(rt2x00dev, 36, 0x80);
2233 rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
2234 rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
2235 rt2800_rfcsr_write(rt2x00dev, 39, 0x1B);
2236 rt2800_rfcsr_write(rt2x00dev, 40, 0x0D);
2237 rt2800_rfcsr_write(rt2x00dev, 41, 0x9B);
2238 rt2800_rfcsr_write(rt2x00dev, 42, 0xD5);
2239 rt2800_rfcsr_write(rt2x00dev, 43, 0x72);
2240 rt2800_rfcsr_write(rt2x00dev, 44, 0x0E);
2241 rt2800_rfcsr_write(rt2x00dev, 45, 0xA2);
2242 rt2800_rfcsr_write(rt2x00dev, 46, 0x6B);
2243 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
2244 rt2800_rfcsr_write(rt2x00dev, 51, 0x3E);
2245 rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
2246 rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
2247 rt2800_rfcsr_write(rt2x00dev, 56, 0xA1);
2248 rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
2249 rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
2250 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
2251 rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
2252 rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
2253
2254 /* TODO RF27 <- tssi */
2255
2256 rfcsr = rf->channel <= 10 ? 0x07 : 0x06;
2257 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
2258 rt2800_rfcsr_write(rt2x00dev, 59, rfcsr);
2259
2260 if (is_11b) {
2261 /* CCK */
2262 rt2800_rfcsr_write(rt2x00dev, 31, 0xF8);
2263 rt2800_rfcsr_write(rt2x00dev, 32, 0xC0);
2264 if (is_type_ep)
2265 rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
2266 else
2267 rt2800_rfcsr_write(rt2x00dev, 55, 0x47);
2268 } else {
2269 /* OFDM */
2270 if (is_type_ep)
2271 rt2800_rfcsr_write(rt2x00dev, 55, 0x03);
2272 else
2273 rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
2274 }
2275
2276 power_bound = POWER_BOUND;
2277 ep_reg = 0x2;
2278 } else {
2279 rt2800_rfcsr_write(rt2x00dev, 10, 0x97);
2280 /* FIMXE: RF11 overwrite */
2281 rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
2282 rt2800_rfcsr_write(rt2x00dev, 25, 0xBF);
2283 rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
2284 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
2285 rt2800_rfcsr_write(rt2x00dev, 37, 0x04);
2286 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
2287 rt2800_rfcsr_write(rt2x00dev, 40, 0x42);
2288 rt2800_rfcsr_write(rt2x00dev, 41, 0xBB);
2289 rt2800_rfcsr_write(rt2x00dev, 42, 0xD7);
2290 rt2800_rfcsr_write(rt2x00dev, 45, 0x41);
2291 rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
2292 rt2800_rfcsr_write(rt2x00dev, 57, 0x77);
2293 rt2800_rfcsr_write(rt2x00dev, 60, 0x05);
2294 rt2800_rfcsr_write(rt2x00dev, 61, 0x01);
2295
2296 /* TODO RF27 <- tssi */
2297
2298 if (rf->channel >= 36 && rf->channel <= 64) {
2299
2300 rt2800_rfcsr_write(rt2x00dev, 12, 0x2E);
2301 rt2800_rfcsr_write(rt2x00dev, 13, 0x22);
2302 rt2800_rfcsr_write(rt2x00dev, 22, 0x60);
2303 rt2800_rfcsr_write(rt2x00dev, 23, 0x7F);
2304 if (rf->channel <= 50)
2305 rt2800_rfcsr_write(rt2x00dev, 24, 0x09);
2306 else if (rf->channel >= 52)
2307 rt2800_rfcsr_write(rt2x00dev, 24, 0x07);
2308 rt2800_rfcsr_write(rt2x00dev, 39, 0x1C);
2309 rt2800_rfcsr_write(rt2x00dev, 43, 0x5B);
2310 rt2800_rfcsr_write(rt2x00dev, 44, 0X40);
2311 rt2800_rfcsr_write(rt2x00dev, 46, 0X00);
2312 rt2800_rfcsr_write(rt2x00dev, 51, 0xFE);
2313 rt2800_rfcsr_write(rt2x00dev, 52, 0x0C);
2314 rt2800_rfcsr_write(rt2x00dev, 54, 0xF8);
2315 if (rf->channel <= 50) {
2316 rt2800_rfcsr_write(rt2x00dev, 55, 0x06),
2317 rt2800_rfcsr_write(rt2x00dev, 56, 0xD3);
2318 } else if (rf->channel >= 52) {
2319 rt2800_rfcsr_write(rt2x00dev, 55, 0x04);
2320 rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
2321 }
2322
2323 rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
2324 rt2800_rfcsr_write(rt2x00dev, 59, 0x7F);
2325 rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
2326
2327 } else if (rf->channel >= 100 && rf->channel <= 165) {
2328
2329 rt2800_rfcsr_write(rt2x00dev, 12, 0x0E);
2330 rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
2331 rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
2332 if (rf->channel <= 153) {
2333 rt2800_rfcsr_write(rt2x00dev, 23, 0x3C);
2334 rt2800_rfcsr_write(rt2x00dev, 24, 0x06);
2335 } else if (rf->channel >= 155) {
2336 rt2800_rfcsr_write(rt2x00dev, 23, 0x38);
2337 rt2800_rfcsr_write(rt2x00dev, 24, 0x05);
2338 }
2339 if (rf->channel <= 138) {
2340 rt2800_rfcsr_write(rt2x00dev, 39, 0x1A);
2341 rt2800_rfcsr_write(rt2x00dev, 43, 0x3B);
2342 rt2800_rfcsr_write(rt2x00dev, 44, 0x20);
2343 rt2800_rfcsr_write(rt2x00dev, 46, 0x18);
2344 } else if (rf->channel >= 140) {
2345 rt2800_rfcsr_write(rt2x00dev, 39, 0x18);
2346 rt2800_rfcsr_write(rt2x00dev, 43, 0x1B);
2347 rt2800_rfcsr_write(rt2x00dev, 44, 0x10);
2348 rt2800_rfcsr_write(rt2x00dev, 46, 0X08);
2349 }
2350 if (rf->channel <= 124)
2351 rt2800_rfcsr_write(rt2x00dev, 51, 0xFC);
2352 else if (rf->channel >= 126)
2353 rt2800_rfcsr_write(rt2x00dev, 51, 0xEC);
2354 if (rf->channel <= 138)
2355 rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
2356 else if (rf->channel >= 140)
2357 rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
2358 rt2800_rfcsr_write(rt2x00dev, 54, 0xEB);
2359 if (rf->channel <= 138)
2360 rt2800_rfcsr_write(rt2x00dev, 55, 0x01);
2361 else if (rf->channel >= 140)
2362 rt2800_rfcsr_write(rt2x00dev, 55, 0x00);
2363 if (rf->channel <= 128)
2364 rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
2365 else if (rf->channel >= 130)
2366 rt2800_rfcsr_write(rt2x00dev, 56, 0xAB);
2367 if (rf->channel <= 116)
2368 rt2800_rfcsr_write(rt2x00dev, 58, 0x1D);
2369 else if (rf->channel >= 118)
2370 rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
2371 if (rf->channel <= 138)
2372 rt2800_rfcsr_write(rt2x00dev, 59, 0x3F);
2373 else if (rf->channel >= 140)
2374 rt2800_rfcsr_write(rt2x00dev, 59, 0x7C);
2375 if (rf->channel <= 116)
2376 rt2800_rfcsr_write(rt2x00dev, 62, 0x1D);
2377 else if (rf->channel >= 118)
2378 rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
2379 }
2380
2381 power_bound = POWER_BOUND_5G;
2382 ep_reg = 0x3;
2383 }
2384
2385 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
2386 if (info->default_power1 > power_bound)
2387 rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound);
2388 else
2389 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
2390 if (is_type_ep)
2391 rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg);
2392 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2393
2394 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2395 if (info->default_power1 > power_bound)
2396 rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
2397 else
2398 rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
2399 if (is_type_ep)
2400 rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg);
2401 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
2402
2403 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2404 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
2405 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
2406
2407 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD,
2408 rt2x00dev->default_ant.tx_chain_num >= 1);
2409 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
2410 rt2x00dev->default_ant.tx_chain_num == 2);
2411 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2412
2413 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD,
2414 rt2x00dev->default_ant.rx_chain_num >= 1);
2415 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
2416 rt2x00dev->default_ant.rx_chain_num == 2);
2417 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2418
2419 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2420 rt2800_rfcsr_write(rt2x00dev, 6, 0xe4);
2421
2422 if (conf_is_ht40(conf))
2423 rt2800_rfcsr_write(rt2x00dev, 30, 0x16);
2424 else
2425 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
2426
2427 if (!is_11b) {
2428 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
2429 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
2430 }
2431
2432 /* TODO proper frequency adjustment */
2433 rt2800_adjust_freq_offset(rt2x00dev);
2434
2435 /* TODO merge with others */
2436 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2437 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2438 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2439
2440 /* BBP settings */
2441 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2442 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
2443 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
2444
2445 rt2800_bbp_write(rt2x00dev, 79, (rf->channel <= 14) ? 0x1C : 0x18);
2446 rt2800_bbp_write(rt2x00dev, 80, (rf->channel <= 14) ? 0x0E : 0x08);
2447 rt2800_bbp_write(rt2x00dev, 81, (rf->channel <= 14) ? 0x3A : 0x38);
2448 rt2800_bbp_write(rt2x00dev, 82, (rf->channel <= 14) ? 0x62 : 0x92);
2449
2450 /* GLRT band configuration */
2451 rt2800_bbp_write(rt2x00dev, 195, 128);
2452 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0xE0 : 0xF0);
2453 rt2800_bbp_write(rt2x00dev, 195, 129);
2454 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x1F : 0x1E);
2455 rt2800_bbp_write(rt2x00dev, 195, 130);
2456 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x38 : 0x28);
2457 rt2800_bbp_write(rt2x00dev, 195, 131);
2458 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x32 : 0x20);
2459 rt2800_bbp_write(rt2x00dev, 195, 133);
2460 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x28 : 0x7F);
2461 rt2800_bbp_write(rt2x00dev, 195, 124);
2462 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
2463}
2464
2465static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
2466 const unsigned int word,
2467 const u8 value)
2468{
2469 u8 chain, reg;
2470
2471 for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) {
2472 rt2800_bbp_read(rt2x00dev, 27, &reg);
2473 rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain);
2474 rt2800_bbp_write(rt2x00dev, 27, reg);
2475
2476 rt2800_bbp_write(rt2x00dev, word, value);
2477 }
2478}
2479
2480static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
2481{
2482 u8 cal;
2483
2484 /* TX0 IQ Gain */
2485 rt2800_bbp_write(rt2x00dev, 158, 0x2c);
2486 if (channel <= 14)
2487 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_2G);
2488 else if (channel >= 36 && channel <= 64)
2489 cal = rt2x00_eeprom_byte(rt2x00dev,
2490 EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G);
2491 else if (channel >= 100 && channel <= 138)
2492 cal = rt2x00_eeprom_byte(rt2x00dev,
2493 EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G);
2494 else if (channel >= 140 && channel <= 165)
2495 cal = rt2x00_eeprom_byte(rt2x00dev,
2496 EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G);
2497 else
2498 cal = 0;
2499 rt2800_bbp_write(rt2x00dev, 159, cal);
2500
2501 /* TX0 IQ Phase */
2502 rt2800_bbp_write(rt2x00dev, 158, 0x2d);
2503 if (channel <= 14)
2504 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_2G);
2505 else if (channel >= 36 && channel <= 64)
2506 cal = rt2x00_eeprom_byte(rt2x00dev,
2507 EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G);
2508 else if (channel >= 100 && channel <= 138)
2509 cal = rt2x00_eeprom_byte(rt2x00dev,
2510 EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G);
2511 else if (channel >= 140 && channel <= 165)
2512 cal = rt2x00_eeprom_byte(rt2x00dev,
2513 EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G);
2514 else
2515 cal = 0;
2516 rt2800_bbp_write(rt2x00dev, 159, cal);
2517
2518 /* TX1 IQ Gain */
2519 rt2800_bbp_write(rt2x00dev, 158, 0x4a);
2520 if (channel <= 14)
2521 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_2G);
2522 else if (channel >= 36 && channel <= 64)
2523 cal = rt2x00_eeprom_byte(rt2x00dev,
2524 EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G);
2525 else if (channel >= 100 && channel <= 138)
2526 cal = rt2x00_eeprom_byte(rt2x00dev,
2527 EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G);
2528 else if (channel >= 140 && channel <= 165)
2529 cal = rt2x00_eeprom_byte(rt2x00dev,
2530 EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G);
2531 else
2532 cal = 0;
2533 rt2800_bbp_write(rt2x00dev, 159, cal);
2534
2535 /* TX1 IQ Phase */
2536 rt2800_bbp_write(rt2x00dev, 158, 0x4b);
2537 if (channel <= 14)
2538 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_2G);
2539 else if (channel >= 36 && channel <= 64)
2540 cal = rt2x00_eeprom_byte(rt2x00dev,
2541 EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G);
2542 else if (channel >= 100 && channel <= 138)
2543 cal = rt2x00_eeprom_byte(rt2x00dev,
2544 EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G);
2545 else if (channel >= 140 && channel <= 165)
2546 cal = rt2x00_eeprom_byte(rt2x00dev,
2547 EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G);
2548 else
2549 cal = 0;
2550 rt2800_bbp_write(rt2x00dev, 159, cal);
2551
2552 /* FIXME: possible RX0, RX1 callibration ? */
2553
2554 /* RF IQ compensation control */
2555 rt2800_bbp_write(rt2x00dev, 158, 0x04);
2556 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_COMPENSATION_CONTROL);
2557 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
2558
2559 /* RF IQ imbalance compensation control */
2560 rt2800_bbp_write(rt2x00dev, 158, 0x03);
2561 cal = rt2x00_eeprom_byte(rt2x00dev,
2562 EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL);
2563 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
2564}
2565
2187static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 2566static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2188 struct ieee80211_conf *conf, 2567 struct ieee80211_conf *conf,
2189 struct rf_channel *rf, 2568 struct rf_channel *rf,
@@ -2225,6 +2604,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2225 case RF5392: 2604 case RF5392:
2226 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 2605 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
2227 break; 2606 break;
2607 case RF5592:
2608 rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
2609 break;
2228 default: 2610 default:
2229 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 2611 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
2230 } 2612 }
@@ -2326,6 +2708,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2326 if (rt2x00_rt(rt2x00dev, RT3572)) 2708 if (rt2x00_rt(rt2x00dev, RT3572))
2327 rt2800_rfcsr_write(rt2x00dev, 8, 0x80); 2709 rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
2328 2710
2711 if (rt2x00_rt(rt2x00dev, RT5592)) {
2712 rt2800_bbp_write(rt2x00dev, 195, 141);
2713 rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
2714
2715 /* AGC init */
2716 reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
2717 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
2718
2719 rt2800_iq_calibrate(rt2x00dev, rf->channel);
2720 }
2721
2329 rt2800_bbp_read(rt2x00dev, 4, &bbp); 2722 rt2800_bbp_read(rt2x00dev, 4, &bbp);
2330 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf)); 2723 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
2331 rt2800_bbp_write(rt2x00dev, 4, bbp); 2724 rt2800_bbp_write(rt2x00dev, 4, bbp);
@@ -2763,7 +3156,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2763 3156
2764void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev) 3157void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
2765{ 3158{
2766 rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.channel, 3159 rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan,
2767 rt2x00dev->tx_power); 3160 rt2x00dev->tx_power);
2768} 3161}
2769EXPORT_SYMBOL_GPL(rt2800_gain_calibration); 3162EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
@@ -2898,11 +3291,11 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
2898 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) { 3291 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
2899 rt2800_config_channel(rt2x00dev, libconf->conf, 3292 rt2800_config_channel(rt2x00dev, libconf->conf,
2900 &libconf->rf, &libconf->channel); 3293 &libconf->rf, &libconf->channel);
2901 rt2800_config_txpower(rt2x00dev, libconf->conf->channel, 3294 rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
2902 libconf->conf->power_level); 3295 libconf->conf->power_level);
2903 } 3296 }
2904 if (flags & IEEE80211_CONF_CHANGE_POWER) 3297 if (flags & IEEE80211_CONF_CHANGE_POWER)
2905 rt2800_config_txpower(rt2x00dev, libconf->conf->channel, 3298 rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
2906 libconf->conf->power_level); 3299 libconf->conf->power_level);
2907 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 3300 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
2908 rt2800_config_retry_limit(rt2x00dev, libconf); 3301 rt2800_config_retry_limit(rt2x00dev, libconf);
@@ -2938,13 +3331,16 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2938 rt2x00_rt(rt2x00dev, RT3390) || 3331 rt2x00_rt(rt2x00dev, RT3390) ||
2939 rt2x00_rt(rt2x00dev, RT3572) || 3332 rt2x00_rt(rt2x00dev, RT3572) ||
2940 rt2x00_rt(rt2x00dev, RT5390) || 3333 rt2x00_rt(rt2x00dev, RT5390) ||
2941 rt2x00_rt(rt2x00dev, RT5392)) 3334 rt2x00_rt(rt2x00dev, RT5392) ||
3335 rt2x00_rt(rt2x00dev, RT5592))
2942 vgc = 0x1c + (2 * rt2x00dev->lna_gain); 3336 vgc = 0x1c + (2 * rt2x00dev->lna_gain);
2943 else 3337 else
2944 vgc = 0x2e + rt2x00dev->lna_gain; 3338 vgc = 0x2e + rt2x00dev->lna_gain;
2945 } else { /* 5GHZ band */ 3339 } else { /* 5GHZ band */
2946 if (rt2x00_rt(rt2x00dev, RT3572)) 3340 if (rt2x00_rt(rt2x00dev, RT3572))
2947 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3; 3341 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
3342 else if (rt2x00_rt(rt2x00dev, RT5592))
3343 vgc = 0x24 + (2 * rt2x00dev->lna_gain);
2948 else { 3344 else {
2949 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 3345 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
2950 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3; 3346 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
@@ -2960,7 +3356,11 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
2960 struct link_qual *qual, u8 vgc_level) 3356 struct link_qual *qual, u8 vgc_level)
2961{ 3357{
2962 if (qual->vgc_level != vgc_level) { 3358 if (qual->vgc_level != vgc_level) {
2963 rt2800_bbp_write(rt2x00dev, 66, vgc_level); 3359 if (rt2x00_rt(rt2x00dev, RT5592)) {
3360 rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
3361 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
3362 } else
3363 rt2800_bbp_write(rt2x00dev, 66, vgc_level);
2964 qual->vgc_level = vgc_level; 3364 qual->vgc_level = vgc_level;
2965 qual->vgc_level_reg = vgc_level; 3365 qual->vgc_level_reg = vgc_level;
2966 } 3366 }
@@ -2975,15 +3375,23 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
2975void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 3375void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
2976 const u32 count) 3376 const u32 count)
2977{ 3377{
3378 u8 vgc;
3379
2978 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) 3380 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
2979 return; 3381 return;
2980
2981 /* 3382 /*
2982 * When RSSI is better then -80 increase VGC level with 0x10 3383 * When RSSI is better then -80 increase VGC level with 0x10, except
3384 * for rt5592 chip.
2983 */ 3385 */
2984 rt2800_set_vgc(rt2x00dev, qual, 3386
2985 rt2800_get_default_vgc(rt2x00dev) + 3387 vgc = rt2800_get_default_vgc(rt2x00dev);
2986 ((qual->rssi > -80) * 0x10)); 3388
3389 if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
3390 vgc += 0x20;
3391 else if (qual->rssi > -80)
3392 vgc += 0x10;
3393
3394 rt2800_set_vgc(rt2x00dev, qual, vgc);
2987} 3395}
2988EXPORT_SYMBOL_GPL(rt2800_link_tuner); 3396EXPORT_SYMBOL_GPL(rt2800_link_tuner);
2989 3397
@@ -3122,7 +3530,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3122 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3530 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
3123 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3531 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3124 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3532 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
3125 rt2x00_rt(rt2x00dev, RT5392)) { 3533 rt2x00_rt(rt2x00dev, RT5392) ||
3534 rt2x00_rt(rt2x00dev, RT5592)) {
3126 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 3535 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
3127 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3536 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3128 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 3537 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3302,7 +3711,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3302 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0); 3711 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
3303 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg); 3712 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
3304 3713
3305 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002); 3714 reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
3715 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
3306 3716
3307 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); 3717 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
3308 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32); 3718 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
@@ -3459,7 +3869,7 @@ static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
3459 udelay(REGISTER_BUSY_DELAY); 3869 udelay(REGISTER_BUSY_DELAY);
3460 } 3870 }
3461 3871
3462 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n"); 3872 rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
3463 return -EACCES; 3873 return -EACCES;
3464} 3874}
3465 3875
@@ -3483,10 +3893,140 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
3483 udelay(REGISTER_BUSY_DELAY); 3893 udelay(REGISTER_BUSY_DELAY);
3484 } 3894 }
3485 3895
3486 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 3896 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
3487 return -EACCES; 3897 return -EACCES;
3488} 3898}
3489 3899
3900static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
3901{
3902 u8 value;
3903
3904 rt2800_bbp_read(rt2x00dev, 4, &value);
3905 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3906 rt2800_bbp_write(rt2x00dev, 4, value);
3907}
3908
3909static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
3910{
3911 rt2800_bbp_write(rt2x00dev, 142, 1);
3912 rt2800_bbp_write(rt2x00dev, 143, 57);
3913}
3914
3915static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
3916{
3917 const u8 glrt_table[] = {
3918 0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
3919 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
3920 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
3921 0X3C, 0x34, 0x2C, 0x2F, 0x3C, 0x35, 0x2E, 0x2A, 0x49, 0x41, /* 158 ~ 167 */
3922 0x36, 0x31, 0x30, 0x30, 0x0E, 0x0D, 0x28, 0x21, 0x1C, 0x16, /* 168 ~ 177 */
3923 0x50, 0x4A, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, /* 178 ~ 187 */
3924 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 ~ 197 */
3925 0x00, 0x00, 0x7D, 0x14, 0x32, 0x2C, 0x36, 0x4C, 0x43, 0x2C, /* 198 ~ 207 */
3926 0x2E, 0x36, 0x30, 0x6E, /* 208 ~ 211 */
3927 };
3928 int i;
3929
3930 for (i = 0; i < ARRAY_SIZE(glrt_table); i++) {
3931 rt2800_bbp_write(rt2x00dev, 195, 128 + i);
3932 rt2800_bbp_write(rt2x00dev, 196, glrt_table[i]);
3933 }
3934};
3935
3936static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
3937{
3938 rt2800_bbp_write(rt2x00dev, 65, 0x2C);
3939 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3940 rt2800_bbp_write(rt2x00dev, 68, 0x0B);
3941 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3942 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
3943 rt2800_bbp_write(rt2x00dev, 73, 0x10);
3944 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3945 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3946 rt2800_bbp_write(rt2x00dev, 83, 0x6A);
3947 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3948 rt2800_bbp_write(rt2x00dev, 86, 0x00);
3949 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3950 rt2800_bbp_write(rt2x00dev, 92, 0x00);
3951 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3952 rt2800_bbp_write(rt2x00dev, 105, 0x05);
3953 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3954}
3955
3956static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
3957{
3958 int ant, div_mode;
3959 u16 eeprom;
3960 u8 value;
3961
3962 rt2800_init_bbp_early(rt2x00dev);
3963
3964 rt2800_bbp_read(rt2x00dev, 105, &value);
3965 rt2x00_set_field8(&value, BBP105_MLD,
3966 rt2x00dev->default_ant.rx_chain_num == 2);
3967 rt2800_bbp_write(rt2x00dev, 105, value);
3968
3969 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3970
3971 rt2800_bbp_write(rt2x00dev, 20, 0x06);
3972 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3973 rt2800_bbp_write(rt2x00dev, 65, 0x2C);
3974 rt2800_bbp_write(rt2x00dev, 68, 0xDD);
3975 rt2800_bbp_write(rt2x00dev, 69, 0x1A);
3976 rt2800_bbp_write(rt2x00dev, 70, 0x05);
3977 rt2800_bbp_write(rt2x00dev, 73, 0x13);
3978 rt2800_bbp_write(rt2x00dev, 74, 0x0F);
3979 rt2800_bbp_write(rt2x00dev, 75, 0x4F);
3980 rt2800_bbp_write(rt2x00dev, 76, 0x28);
3981 rt2800_bbp_write(rt2x00dev, 77, 0x59);
3982 rt2800_bbp_write(rt2x00dev, 84, 0x9A);
3983 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3984 rt2800_bbp_write(rt2x00dev, 88, 0x90);
3985 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3986 rt2800_bbp_write(rt2x00dev, 92, 0x02);
3987 rt2800_bbp_write(rt2x00dev, 95, 0x9a);
3988 rt2800_bbp_write(rt2x00dev, 98, 0x12);
3989 rt2800_bbp_write(rt2x00dev, 103, 0xC0);
3990 rt2800_bbp_write(rt2x00dev, 104, 0x92);
3991 /* FIXME BBP105 owerwrite */
3992 rt2800_bbp_write(rt2x00dev, 105, 0x3C);
3993 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3994 rt2800_bbp_write(rt2x00dev, 128, 0x12);
3995 rt2800_bbp_write(rt2x00dev, 134, 0xD0);
3996 rt2800_bbp_write(rt2x00dev, 135, 0xF6);
3997 rt2800_bbp_write(rt2x00dev, 137, 0x0F);
3998
3999 /* Initialize GLRT (Generalized Likehood Radio Test) */
4000 rt2800_init_bbp_5592_glrt(rt2x00dev);
4001
4002 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4003
4004 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4005 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
4006 ant = (div_mode == 3) ? 1 : 0;
4007 rt2800_bbp_read(rt2x00dev, 152, &value);
4008 if (ant == 0) {
4009 /* Main antenna */
4010 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4011 } else {
4012 /* Auxiliary antenna */
4013 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4014 }
4015 rt2800_bbp_write(rt2x00dev, 152, value);
4016
4017 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
4018 rt2800_bbp_read(rt2x00dev, 254, &value);
4019 rt2x00_set_field8(&value, BBP254_BIT7, 1);
4020 rt2800_bbp_write(rt2x00dev, 254, value);
4021 }
4022
4023 rt2800_init_freq_calibration(rt2x00dev);
4024
4025 rt2800_bbp_write(rt2x00dev, 84, 0x19);
4026 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
4027 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4028}
4029
3490static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) 4030static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3491{ 4031{
3492 unsigned int i; 4032 unsigned int i;
@@ -3498,6 +4038,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3498 rt2800_wait_bbp_ready(rt2x00dev))) 4038 rt2800_wait_bbp_ready(rt2x00dev)))
3499 return -EACCES; 4039 return -EACCES;
3500 4040
4041 if (rt2x00_rt(rt2x00dev, RT5592)) {
4042 rt2800_init_bbp_5592(rt2x00dev);
4043 return 0;
4044 }
4045
3501 if (rt2x00_rt(rt2x00dev, RT3352)) { 4046 if (rt2x00_rt(rt2x00dev, RT3352)) {
3502 rt2800_bbp_write(rt2x00dev, 3, 0x00); 4047 rt2800_bbp_write(rt2x00dev, 3, 0x00);
3503 rt2800_bbp_write(rt2x00dev, 4, 0x50); 4048 rt2800_bbp_write(rt2x00dev, 4, 0x50);
@@ -3505,11 +4050,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3505 4050
3506 if (rt2x00_rt(rt2x00dev, RT3290) || 4051 if (rt2x00_rt(rt2x00dev, RT3290) ||
3507 rt2x00_rt(rt2x00dev, RT5390) || 4052 rt2x00_rt(rt2x00dev, RT5390) ||
3508 rt2x00_rt(rt2x00dev, RT5392)) { 4053 rt2x00_rt(rt2x00dev, RT5392))
3509 rt2800_bbp_read(rt2x00dev, 4, &value); 4054 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3510 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3511 rt2800_bbp_write(rt2x00dev, 4, value);
3512 }
3513 4055
3514 if (rt2800_is_305x_soc(rt2x00dev) || 4056 if (rt2800_is_305x_soc(rt2x00dev) ||
3515 rt2x00_rt(rt2x00dev, RT3290) || 4057 rt2x00_rt(rt2x00dev, RT3290) ||
@@ -3783,9 +4325,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3783 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); 4325 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
3784 rt2800_bbp_write(rt2x00dev, 152, value); 4326 rt2800_bbp_write(rt2x00dev, 152, value);
3785 4327
3786 /* Init frequency calibration */ 4328 rt2800_init_freq_calibration(rt2x00dev);
3787 rt2800_bbp_write(rt2x00dev, 142, 1);
3788 rt2800_bbp_write(rt2x00dev, 143, 57);
3789 } 4329 }
3790 4330
3791 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 4331 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -3801,8 +4341,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3801 return 0; 4341 return 0;
3802} 4342}
3803 4343
3804static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, 4344static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev)
3805 bool bw40, u8 rfcsr24, u8 filter_target) 4345{
4346 u32 reg;
4347
4348 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
4349 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
4350 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
4351}
4352
4353static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, bool bw40,
4354 u8 filter_target)
3806{ 4355{
3807 unsigned int i; 4356 unsigned int i;
3808 u8 bbp; 4357 u8 bbp;
@@ -3810,6 +4359,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
3810 u8 passband; 4359 u8 passband;
3811 u8 stopband; 4360 u8 stopband;
3812 u8 overtuned = 0; 4361 u8 overtuned = 0;
4362 u8 rfcsr24 = (bw40) ? 0x27 : 0x07;
3813 4363
3814 rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); 4364 rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
3815 4365
@@ -3865,8 +4415,169 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
3865 return rfcsr24; 4415 return rfcsr24;
3866} 4416}
3867 4417
4418static void rt2800_rf_init_calibration(struct rt2x00_dev *rt2x00dev,
4419 const unsigned int rf_reg)
4420{
4421 u8 rfcsr;
4422
4423 rt2800_rfcsr_read(rt2x00dev, rf_reg, &rfcsr);
4424 rt2x00_set_field8(&rfcsr, FIELD8(0x80), 1);
4425 rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr);
4426 msleep(1);
4427 rt2x00_set_field8(&rfcsr, FIELD8(0x80), 0);
4428 rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr);
4429}
4430
4431static void rt2800_rx_filter_calibration(struct rt2x00_dev *rt2x00dev)
4432{
4433 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
4434 u8 filter_tgt_bw20;
4435 u8 filter_tgt_bw40;
4436 u8 rfcsr, bbp;
4437
4438 /*
4439 * TODO: sync filter_tgt values with vendor driver
4440 */
4441 if (rt2x00_rt(rt2x00dev, RT3070)) {
4442 filter_tgt_bw20 = 0x16;
4443 filter_tgt_bw40 = 0x19;
4444 } else {
4445 filter_tgt_bw20 = 0x13;
4446 filter_tgt_bw40 = 0x15;
4447 }
4448
4449 drv_data->calibration_bw20 =
4450 rt2800_init_rx_filter(rt2x00dev, false, filter_tgt_bw20);
4451 drv_data->calibration_bw40 =
4452 rt2800_init_rx_filter(rt2x00dev, true, filter_tgt_bw40);
4453
4454 /*
4455 * Save BBP 25 & 26 values for later use in channel switching (for 3052)
4456 */
4457 rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
4458 rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
4459
4460 /*
4461 * Set back to initial state
4462 */
4463 rt2800_bbp_write(rt2x00dev, 24, 0);
4464
4465 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
4466 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
4467 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
4468
4469 /*
4470 * Set BBP back to BW20
4471 */
4472 rt2800_bbp_read(rt2x00dev, 4, &bbp);
4473 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
4474 rt2800_bbp_write(rt2x00dev, 4, bbp);
4475}
4476
4477static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
4478{
4479 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
4480 u8 min_gain, rfcsr, bbp;
4481 u16 eeprom;
4482
4483 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
4484
4485 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
4486 if (rt2x00_rt(rt2x00dev, RT3070) ||
4487 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4488 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
4489 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
4490 if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
4491 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
4492 }
4493
4494 min_gain = rt2x00_rt(rt2x00dev, RT3070) ? 1 : 2;
4495 if (drv_data->txmixer_gain_24g >= min_gain) {
4496 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
4497 drv_data->txmixer_gain_24g);
4498 }
4499
4500 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
4501
4502 if (rt2x00_rt(rt2x00dev, RT3090)) {
4503 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4504 rt2800_bbp_read(rt2x00dev, 138, &bbp);
4505 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4506 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4507 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
4508 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
4509 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
4510 rt2800_bbp_write(rt2x00dev, 138, bbp);
4511 }
4512
4513 if (rt2x00_rt(rt2x00dev, RT3070)) {
4514 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
4515 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
4516 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
4517 else
4518 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
4519 rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
4520 rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
4521 rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
4522 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
4523 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4524 rt2x00_rt(rt2x00dev, RT3090) ||
4525 rt2x00_rt(rt2x00dev, RT3390)) {
4526 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
4527 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
4528 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
4529 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
4530 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
4531 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
4532 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
4533
4534 rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
4535 rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
4536 rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
4537
4538 rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
4539 rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
4540 rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
4541
4542 rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
4543 rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
4544 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
4545 }
4546}
4547
4548static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
4549{
4550 u8 reg;
4551 u16 eeprom;
4552
4553 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4554 rt2800_bbp_read(rt2x00dev, 138, &reg);
4555 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4556 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4557 rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
4558 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
4559 rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1);
4560 rt2800_bbp_write(rt2x00dev, 138, reg);
4561
4562 rt2800_rfcsr_read(rt2x00dev, 38, &reg);
4563 rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0);
4564 rt2800_rfcsr_write(rt2x00dev, 38, reg);
4565
4566 rt2800_rfcsr_read(rt2x00dev, 39, &reg);
4567 rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0);
4568 rt2800_rfcsr_write(rt2x00dev, 39, reg);
4569
4570 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4571
4572 rt2800_rfcsr_read(rt2x00dev, 30, &reg);
4573 rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2);
4574 rt2800_rfcsr_write(rt2x00dev, 30, reg);
4575}
4576
3868static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev) 4577static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev)
3869{ 4578{
4579 rt2800_rf_init_calibration(rt2x00dev, 30);
4580
3870 rt2800_rfcsr_write(rt2x00dev, 0, 0x50); 4581 rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
3871 rt2800_rfcsr_write(rt2x00dev, 1, 0x01); 4582 rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
3872 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); 4583 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -3903,6 +4614,13 @@ static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev)
3903 4614
3904static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev) 4615static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
3905{ 4616{
4617 u8 rfcsr;
4618 u16 eeprom;
4619 u32 reg;
4620
4621 /* XXX vendor driver do this only for 3070 */
4622 rt2800_rf_init_calibration(rt2x00dev, 30);
4623
3906 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 4624 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
3907 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 4625 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
3908 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 4626 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
@@ -3922,10 +4640,54 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
3922 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 4640 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
3923 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 4641 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
3924 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 4642 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
4643
4644 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
4645 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4646 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4647 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4648 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4649 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4650 rt2x00_rt(rt2x00dev, RT3090)) {
4651 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
4652
4653 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
4654 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
4655 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
4656
4657 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4658 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4659 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4660 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
4661 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4662 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
4663 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4664 else
4665 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
4666 }
4667 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4668
4669 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
4670 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
4671 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
4672 }
4673
4674 rt2800_rx_filter_calibration(rt2x00dev);
4675
4676 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
4677 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4678 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E))
4679 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4680
4681 rt2800_led_open_drain_enable(rt2x00dev);
4682 rt2800_normal_mode_setup_3xxx(rt2x00dev);
3925} 4683}
3926 4684
3927static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) 4685static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
3928{ 4686{
4687 u8 rfcsr;
4688
4689 rt2800_rf_init_calibration(rt2x00dev, 2);
4690
3929 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 4691 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
3930 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 4692 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
3931 rt2800_rfcsr_write(rt2x00dev, 3, 0x08); 4693 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
@@ -3972,10 +4734,19 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
3972 rt2800_rfcsr_write(rt2x00dev, 59, 0x09); 4734 rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
3973 rt2800_rfcsr_write(rt2x00dev, 60, 0x45); 4735 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
3974 rt2800_rfcsr_write(rt2x00dev, 61, 0xc1); 4736 rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
4737
4738 rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
4739 rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
4740 rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
4741
4742 rt2800_led_open_drain_enable(rt2x00dev);
4743 rt2800_normal_mode_setup_3xxx(rt2x00dev);
3975} 4744}
3976 4745
3977static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) 4746static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
3978{ 4747{
4748 rt2800_rf_init_calibration(rt2x00dev, 30);
4749
3979 rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); 4750 rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
3980 rt2800_rfcsr_write(rt2x00dev, 1, 0x23); 4751 rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
3981 rt2800_rfcsr_write(rt2x00dev, 2, 0x50); 4752 rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
@@ -4039,10 +4810,18 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
4039 rt2800_rfcsr_write(rt2x00dev, 61, 0x00); 4810 rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
4040 rt2800_rfcsr_write(rt2x00dev, 62, 0x00); 4811 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
4041 rt2800_rfcsr_write(rt2x00dev, 63, 0x00); 4812 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
4813
4814 rt2800_rx_filter_calibration(rt2x00dev);
4815 rt2800_led_open_drain_enable(rt2x00dev);
4816 rt2800_normal_mode_setup_3xxx(rt2x00dev);
4042} 4817}
4043 4818
4044static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev) 4819static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev)
4045{ 4820{
4821 u32 reg;
4822
4823 rt2800_rf_init_calibration(rt2x00dev, 30);
4824
4046 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); 4825 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
4047 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); 4826 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
4048 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); 4827 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
@@ -4075,10 +4854,27 @@ static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev)
4075 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f); 4854 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
4076 rt2800_rfcsr_write(rt2x00dev, 30, 0x20); 4855 rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
4077 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f); 4856 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
4857
4858 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
4859 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
4860 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
4861
4862 rt2800_rx_filter_calibration(rt2x00dev);
4863
4864 if (rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
4865 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4866
4867 rt2800_led_open_drain_enable(rt2x00dev);
4868 rt2800_normal_mode_setup_3xxx(rt2x00dev);
4078} 4869}
4079 4870
4080static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev) 4871static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
4081{ 4872{
4873 u8 rfcsr;
4874 u32 reg;
4875
4876 rt2800_rf_init_calibration(rt2x00dev, 30);
4877
4082 rt2800_rfcsr_write(rt2x00dev, 0, 0x70); 4878 rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
4083 rt2800_rfcsr_write(rt2x00dev, 1, 0x81); 4879 rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
4084 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); 4880 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
@@ -4110,10 +4906,30 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
4110 rt2800_rfcsr_write(rt2x00dev, 29, 0x9b); 4906 rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
4111 rt2800_rfcsr_write(rt2x00dev, 30, 0x09); 4907 rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
4112 rt2800_rfcsr_write(rt2x00dev, 31, 0x10); 4908 rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
4909
4910 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
4911 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
4912 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
4913
4914 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4915 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4916 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4917 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4918 msleep(1);
4919 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4920 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
4921 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4922 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4923
4924 rt2800_rx_filter_calibration(rt2x00dev);
4925 rt2800_led_open_drain_enable(rt2x00dev);
4926 rt2800_normal_mode_setup_3xxx(rt2x00dev);
4113} 4927}
4114 4928
4115static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) 4929static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
4116{ 4930{
4931 rt2800_rf_init_calibration(rt2x00dev, 2);
4932
4117 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 4933 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
4118 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 4934 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
4119 rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 4935 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
@@ -4194,10 +5010,16 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
4194 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); 5010 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
4195 rt2800_rfcsr_write(rt2x00dev, 62, 0x00); 5011 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
4196 rt2800_rfcsr_write(rt2x00dev, 63, 0x00); 5012 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
5013
5014 rt2800_normal_mode_setup_5xxx(rt2x00dev);
5015
5016 rt2800_led_open_drain_enable(rt2x00dev);
4197} 5017}
4198 5018
4199static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev) 5019static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
4200{ 5020{
5021 rt2800_rf_init_calibration(rt2x00dev, 2);
5022
4201 rt2800_rfcsr_write(rt2x00dev, 1, 0x17); 5023 rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
4202 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 5024 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
4203 rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 5025 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
@@ -4257,53 +5079,61 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
4257 rt2800_rfcsr_write(rt2x00dev, 61, 0x91); 5079 rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
4258 rt2800_rfcsr_write(rt2x00dev, 62, 0x39); 5080 rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
4259 rt2800_rfcsr_write(rt2x00dev, 63, 0x07); 5081 rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
5082
5083 rt2800_normal_mode_setup_5xxx(rt2x00dev);
5084
5085 rt2800_led_open_drain_enable(rt2x00dev);
4260} 5086}
4261 5087
4262static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) 5088static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
4263{ 5089{
4264 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; 5090 rt2800_rf_init_calibration(rt2x00dev, 30);
4265 u8 rfcsr;
4266 u8 bbp;
4267 u32 reg;
4268 u16 eeprom;
4269 5091
4270 if (!rt2x00_rt(rt2x00dev, RT3070) && 5092 rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
4271 !rt2x00_rt(rt2x00dev, RT3071) && 5093 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
4272 !rt2x00_rt(rt2x00dev, RT3090) && 5094 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
4273 !rt2x00_rt(rt2x00dev, RT3290) && 5095 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
4274 !rt2x00_rt(rt2x00dev, RT3352) && 5096 rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
4275 !rt2x00_rt(rt2x00dev, RT3390) && 5097 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
4276 !rt2x00_rt(rt2x00dev, RT3572) && 5098 rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
4277 !rt2x00_rt(rt2x00dev, RT5390) && 5099 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
4278 !rt2x00_rt(rt2x00dev, RT5392) && 5100 rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
4279 !rt2800_is_305x_soc(rt2x00dev)) 5101 rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
4280 return 0; 5102 rt2800_rfcsr_write(rt2x00dev, 19, 0x4D);
5103 rt2800_rfcsr_write(rt2x00dev, 20, 0x10);
5104 rt2800_rfcsr_write(rt2x00dev, 21, 0x8D);
5105 rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
5106 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
5107 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
5108 rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
5109 rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
5110 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
5111 rt2800_rfcsr_write(rt2x00dev, 47, 0x0C);
5112 rt2800_rfcsr_write(rt2x00dev, 53, 0x22);
5113 rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
4281 5114
4282 /* 5115 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
4283 * Init RF calibration. 5116 msleep(1);
4284 */
4285 5117
4286 if (rt2x00_rt(rt2x00dev, RT3290) || 5118 rt2800_adjust_freq_offset(rt2x00dev);
4287 rt2x00_rt(rt2x00dev, RT5390) || 5119
4288 rt2x00_rt(rt2x00dev, RT5392)) { 5120 /* Enable DC filter */
4289 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); 5121 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
4290 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); 5122 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4291 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 5123
4292 msleep(1); 5124 rt2800_normal_mode_setup_5xxx(rt2x00dev);
4293 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0); 5125
4294 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 5126 if (rt2x00_rt_rev_lt(rt2x00dev, RT5592, REV_RT5592C))
4295 } else { 5127 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4296 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
4297 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
4298 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
4299 msleep(1);
4300 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
4301 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
4302 }
4303 5128
5129 rt2800_led_open_drain_enable(rt2x00dev);
5130}
5131
5132static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
5133{
4304 if (rt2800_is_305x_soc(rt2x00dev)) { 5134 if (rt2800_is_305x_soc(rt2x00dev)) {
4305 rt2800_init_rfcsr_305x_soc(rt2x00dev); 5135 rt2800_init_rfcsr_305x_soc(rt2x00dev);
4306 return 0; 5136 return;
4307 } 5137 }
4308 5138
4309 switch (rt2x00dev->chip.rt) { 5139 switch (rt2x00dev->chip.rt) {
@@ -4330,198 +5160,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4330 case RT5392: 5160 case RT5392:
4331 rt2800_init_rfcsr_5392(rt2x00dev); 5161 rt2800_init_rfcsr_5392(rt2x00dev);
4332 break; 5162 break;
5163 case RT5592:
5164 rt2800_init_rfcsr_5592(rt2x00dev);
5165 break;
4333 } 5166 }
4334
4335 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
4336 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4337 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4338 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4339 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4340 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4341 rt2x00_rt(rt2x00dev, RT3090)) {
4342 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
4343
4344 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
4345 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
4346 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
4347
4348 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4349 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4350 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4351 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
4352 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
4353 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
4354 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4355 else
4356 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
4357 }
4358 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4359
4360 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
4361 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
4362 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
4363 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
4364 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
4365 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
4366 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
4367 } else if (rt2x00_rt(rt2x00dev, RT3572)) {
4368 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
4369 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
4370 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
4371
4372 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4373 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
4374 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4375 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4376 msleep(1);
4377 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
4378 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
4379 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
4380 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
4381 }
4382
4383 /*
4384 * Set RX Filter calibration for 20MHz and 40MHz
4385 */
4386 if (rt2x00_rt(rt2x00dev, RT3070)) {
4387 drv_data->calibration_bw20 =
4388 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
4389 drv_data->calibration_bw40 =
4390 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
4391 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
4392 rt2x00_rt(rt2x00dev, RT3090) ||
4393 rt2x00_rt(rt2x00dev, RT3352) ||
4394 rt2x00_rt(rt2x00dev, RT3390) ||
4395 rt2x00_rt(rt2x00dev, RT3572)) {
4396 drv_data->calibration_bw20 =
4397 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
4398 drv_data->calibration_bw40 =
4399 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
4400 }
4401
4402 /*
4403 * Save BBP 25 & 26 values for later use in channel switching
4404 */
4405 rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
4406 rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
4407
4408 if (!rt2x00_rt(rt2x00dev, RT5390) &&
4409 !rt2x00_rt(rt2x00dev, RT5392)) {
4410 /*
4411 * Set back to initial state
4412 */
4413 rt2800_bbp_write(rt2x00dev, 24, 0);
4414
4415 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
4416 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
4417 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
4418
4419 /*
4420 * Set BBP back to BW20
4421 */
4422 rt2800_bbp_read(rt2x00dev, 4, &bbp);
4423 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
4424 rt2800_bbp_write(rt2x00dev, 4, bbp);
4425 }
4426
4427 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
4428 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4429 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
4430 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
4431 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4432
4433 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
4434 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
4435 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
4436
4437 if (!rt2x00_rt(rt2x00dev, RT5390) &&
4438 !rt2x00_rt(rt2x00dev, RT5392)) {
4439 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
4440 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
4441 if (rt2x00_rt(rt2x00dev, RT3070) ||
4442 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4443 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
4444 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
4445 if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG,
4446 &rt2x00dev->cap_flags))
4447 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
4448 }
4449 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
4450 drv_data->txmixer_gain_24g);
4451 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
4452 }
4453
4454 if (rt2x00_rt(rt2x00dev, RT3090)) {
4455 rt2800_bbp_read(rt2x00dev, 138, &bbp);
4456
4457 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4458 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4459 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4460 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
4461 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
4462 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
4463
4464 rt2800_bbp_write(rt2x00dev, 138, bbp);
4465 }
4466
4467 if (rt2x00_rt(rt2x00dev, RT3071) ||
4468 rt2x00_rt(rt2x00dev, RT3090) ||
4469 rt2x00_rt(rt2x00dev, RT3390)) {
4470 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
4471 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
4472 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
4473 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
4474 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
4475 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
4476 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
4477
4478 rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
4479 rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
4480 rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
4481
4482 rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
4483 rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
4484 rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
4485
4486 rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
4487 rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
4488 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
4489 }
4490
4491 if (rt2x00_rt(rt2x00dev, RT3070)) {
4492 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
4493 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
4494 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
4495 else
4496 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
4497 rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
4498 rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
4499 rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
4500 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
4501 }
4502
4503 if (rt2x00_rt(rt2x00dev, RT3290)) {
4504 rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
4505 rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
4506 rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
4507 }
4508
4509 if (rt2x00_rt(rt2x00dev, RT5390) ||
4510 rt2x00_rt(rt2x00dev, RT5392)) {
4511 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
4512 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
4513 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
4514
4515 rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
4516 rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
4517 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
4518
4519 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
4520 rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
4521 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
4522 }
4523
4524 return 0;
4525} 5167}
4526 5168
4527int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev) 5169int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -4533,15 +5175,24 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
4533 * Initialize all registers. 5175 * Initialize all registers.
4534 */ 5176 */
4535 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 5177 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
4536 rt2800_init_registers(rt2x00dev) || 5178 rt2800_init_registers(rt2x00dev)))
4537 rt2800_init_bbp(rt2x00dev) ||
4538 rt2800_init_rfcsr(rt2x00dev)))
4539 return -EIO; 5179 return -EIO;
4540 5180
4541 /* 5181 /*
4542 * Send signal to firmware during boot time. 5182 * Send signal to firmware during boot time.
4543 */ 5183 */
4544 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 5184 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
5185 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
5186 if (rt2x00_is_usb(rt2x00dev)) {
5187 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
5188 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
5189 }
5190 msleep(1);
5191
5192 if (unlikely(rt2800_init_bbp(rt2x00dev)))
5193 return -EIO;
5194
5195 rt2800_init_rfcsr(rt2x00dev);
4545 5196
4546 if (rt2x00_is_usb(rt2x00dev) && 5197 if (rt2x00_is_usb(rt2x00dev) &&
4547 (rt2x00_rt(rt2x00dev, RT3070) || 5198 (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -4702,7 +5353,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4702 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 5353 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
4703 if (!is_valid_ether_addr(mac)) { 5354 if (!is_valid_ether_addr(mac)) {
4704 eth_random_addr(mac); 5355 eth_random_addr(mac);
4705 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 5356 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
4706 } 5357 }
4707 5358
4708 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word); 5359 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
@@ -4711,7 +5362,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4711 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1); 5362 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
4712 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820); 5363 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
4713 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); 5364 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
4714 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 5365 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
4715 } else if (rt2x00_rt(rt2x00dev, RT2860) || 5366 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
4716 rt2x00_rt(rt2x00dev, RT2872)) { 5367 rt2x00_rt(rt2x00dev, RT2872)) {
4717 /* 5368 /*
@@ -4740,14 +5391,14 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4740 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0); 5391 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
4741 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0); 5392 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
4742 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word); 5393 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
4743 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 5394 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
4744 } 5395 }
4745 5396
4746 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); 5397 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
4747 if ((word & 0x00ff) == 0x00ff) { 5398 if ((word & 0x00ff) == 0x00ff) {
4748 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); 5399 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
4749 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 5400 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
4750 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); 5401 rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
4751 } 5402 }
4752 if ((word & 0xff00) == 0xff00) { 5403 if ((word & 0xff00) == 0xff00) {
4753 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, 5404 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
@@ -4757,7 +5408,7 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4757 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555); 5408 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
4758 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221); 5409 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
4759 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8); 5410 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
4760 EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word); 5411 rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
4761 } 5412 }
4762 5413
4763 /* 5414 /*
@@ -4821,9 +5472,9 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4821 5472
4822static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) 5473static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4823{ 5474{
4824 u32 reg;
4825 u16 value; 5475 u16 value;
4826 u16 eeprom; 5476 u16 eeprom;
5477 u16 rf;
4827 5478
4828 /* 5479 /*
4829 * Read EEPROM word for configuration. 5480 * Read EEPROM word for configuration.
@@ -4835,41 +5486,14 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4835 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field 5486 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
4836 * RT53xx: defined in "EEPROM_CHIP_ID" field 5487 * RT53xx: defined in "EEPROM_CHIP_ID" field
4837 */ 5488 */
4838 if (rt2x00_rt(rt2x00dev, RT3290)) 5489 if (rt2x00_rt(rt2x00dev, RT3290) ||
4839 rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg); 5490 rt2x00_rt(rt2x00dev, RT5390) ||
4840 else 5491 rt2x00_rt(rt2x00dev, RT5392))
4841 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 5492 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
4842
4843 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT3290 ||
4844 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
4845 rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
4846 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
4847 else 5493 else
4848 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 5494 rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
4849 5495
4850 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 5496 switch (rf) {
4851 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
4852
4853 switch (rt2x00dev->chip.rt) {
4854 case RT2860:
4855 case RT2872:
4856 case RT2883:
4857 case RT3070:
4858 case RT3071:
4859 case RT3090:
4860 case RT3290:
4861 case RT3352:
4862 case RT3390:
4863 case RT3572:
4864 case RT5390:
4865 case RT5392:
4866 break;
4867 default:
4868 ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
4869 return -ENODEV;
4870 }
4871
4872 switch (rt2x00dev->chip.rf) {
4873 case RF2820: 5497 case RF2820:
4874 case RF2850: 5498 case RF2850:
4875 case RF2720: 5499 case RF2720:
@@ -4887,13 +5511,16 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4887 case RF5372: 5511 case RF5372:
4888 case RF5390: 5512 case RF5390:
4889 case RF5392: 5513 case RF5392:
5514 case RF5592:
4890 break; 5515 break;
4891 default: 5516 default:
4892 ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n", 5517 rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n",
4893 rt2x00dev->chip.rf); 5518 rf);
4894 return -ENODEV; 5519 return -ENODEV;
4895 } 5520 }
4896 5521
5522 rt2x00_set_rf(rt2x00dev, rf);
5523
4897 /* 5524 /*
4898 * Identify default antenna configuration. 5525 * Identify default antenna configuration.
4899 */ 5526 */
@@ -5122,6 +5749,138 @@ static const struct rf_channel rf_vals_3x[] = {
5122 {173, 0x61, 0, 9}, 5749 {173, 0x61, 0, 9},
5123}; 5750};
5124 5751
5752static const struct rf_channel rf_vals_5592_xtal20[] = {
5753 /* Channel, N, K, mod, R */
5754 {1, 482, 4, 10, 3},
5755 {2, 483, 4, 10, 3},
5756 {3, 484, 4, 10, 3},
5757 {4, 485, 4, 10, 3},
5758 {5, 486, 4, 10, 3},
5759 {6, 487, 4, 10, 3},
5760 {7, 488, 4, 10, 3},
5761 {8, 489, 4, 10, 3},
5762 {9, 490, 4, 10, 3},
5763 {10, 491, 4, 10, 3},
5764 {11, 492, 4, 10, 3},
5765 {12, 493, 4, 10, 3},
5766 {13, 494, 4, 10, 3},
5767 {14, 496, 8, 10, 3},
5768 {36, 172, 8, 12, 1},
5769 {38, 173, 0, 12, 1},
5770 {40, 173, 4, 12, 1},
5771 {42, 173, 8, 12, 1},
5772 {44, 174, 0, 12, 1},
5773 {46, 174, 4, 12, 1},
5774 {48, 174, 8, 12, 1},
5775 {50, 175, 0, 12, 1},
5776 {52, 175, 4, 12, 1},
5777 {54, 175, 8, 12, 1},
5778 {56, 176, 0, 12, 1},
5779 {58, 176, 4, 12, 1},
5780 {60, 176, 8, 12, 1},
5781 {62, 177, 0, 12, 1},
5782 {64, 177, 4, 12, 1},
5783 {100, 183, 4, 12, 1},
5784 {102, 183, 8, 12, 1},
5785 {104, 184, 0, 12, 1},
5786 {106, 184, 4, 12, 1},
5787 {108, 184, 8, 12, 1},
5788 {110, 185, 0, 12, 1},
5789 {112, 185, 4, 12, 1},
5790 {114, 185, 8, 12, 1},
5791 {116, 186, 0, 12, 1},
5792 {118, 186, 4, 12, 1},
5793 {120, 186, 8, 12, 1},
5794 {122, 187, 0, 12, 1},
5795 {124, 187, 4, 12, 1},
5796 {126, 187, 8, 12, 1},
5797 {128, 188, 0, 12, 1},
5798 {130, 188, 4, 12, 1},
5799 {132, 188, 8, 12, 1},
5800 {134, 189, 0, 12, 1},
5801 {136, 189, 4, 12, 1},
5802 {138, 189, 8, 12, 1},
5803 {140, 190, 0, 12, 1},
5804 {149, 191, 6, 12, 1},
5805 {151, 191, 10, 12, 1},
5806 {153, 192, 2, 12, 1},
5807 {155, 192, 6, 12, 1},
5808 {157, 192, 10, 12, 1},
5809 {159, 193, 2, 12, 1},
5810 {161, 193, 6, 12, 1},
5811 {165, 194, 2, 12, 1},
5812 {184, 164, 0, 12, 1},
5813 {188, 164, 4, 12, 1},
5814 {192, 165, 8, 12, 1},
5815 {196, 166, 0, 12, 1},
5816};
5817
5818static const struct rf_channel rf_vals_5592_xtal40[] = {
5819 /* Channel, N, K, mod, R */
5820 {1, 241, 2, 10, 3},
5821 {2, 241, 7, 10, 3},
5822 {3, 242, 2, 10, 3},
5823 {4, 242, 7, 10, 3},
5824 {5, 243, 2, 10, 3},
5825 {6, 243, 7, 10, 3},
5826 {7, 244, 2, 10, 3},
5827 {8, 244, 7, 10, 3},
5828 {9, 245, 2, 10, 3},
5829 {10, 245, 7, 10, 3},
5830 {11, 246, 2, 10, 3},
5831 {12, 246, 7, 10, 3},
5832 {13, 247, 2, 10, 3},
5833 {14, 248, 4, 10, 3},
5834 {36, 86, 4, 12, 1},
5835 {38, 86, 6, 12, 1},
5836 {40, 86, 8, 12, 1},
5837 {42, 86, 10, 12, 1},
5838 {44, 87, 0, 12, 1},
5839 {46, 87, 2, 12, 1},
5840 {48, 87, 4, 12, 1},
5841 {50, 87, 6, 12, 1},
5842 {52, 87, 8, 12, 1},
5843 {54, 87, 10, 12, 1},
5844 {56, 88, 0, 12, 1},
5845 {58, 88, 2, 12, 1},
5846 {60, 88, 4, 12, 1},
5847 {62, 88, 6, 12, 1},
5848 {64, 88, 8, 12, 1},
5849 {100, 91, 8, 12, 1},
5850 {102, 91, 10, 12, 1},
5851 {104, 92, 0, 12, 1},
5852 {106, 92, 2, 12, 1},
5853 {108, 92, 4, 12, 1},
5854 {110, 92, 6, 12, 1},
5855 {112, 92, 8, 12, 1},
5856 {114, 92, 10, 12, 1},
5857 {116, 93, 0, 12, 1},
5858 {118, 93, 2, 12, 1},
5859 {120, 93, 4, 12, 1},
5860 {122, 93, 6, 12, 1},
5861 {124, 93, 8, 12, 1},
5862 {126, 93, 10, 12, 1},
5863 {128, 94, 0, 12, 1},
5864 {130, 94, 2, 12, 1},
5865 {132, 94, 4, 12, 1},
5866 {134, 94, 6, 12, 1},
5867 {136, 94, 8, 12, 1},
5868 {138, 94, 10, 12, 1},
5869 {140, 95, 0, 12, 1},
5870 {149, 95, 9, 12, 1},
5871 {151, 95, 11, 12, 1},
5872 {153, 96, 1, 12, 1},
5873 {155, 96, 3, 12, 1},
5874 {157, 96, 5, 12, 1},
5875 {159, 96, 7, 12, 1},
5876 {161, 96, 9, 12, 1},
5877 {165, 97, 1, 12, 1},
5878 {184, 82, 0, 12, 1},
5879 {188, 82, 4, 12, 1},
5880 {192, 82, 8, 12, 1},
5881 {196, 83, 0, 12, 1},
5882};
5883
5125static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 5884static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5126{ 5885{
5127 struct hw_mode_spec *spec = &rt2x00dev->spec; 5886 struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -5130,6 +5889,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5130 char *default_power2; 5889 char *default_power2;
5131 unsigned int i; 5890 unsigned int i;
5132 u16 eeprom; 5891 u16 eeprom;
5892 u32 reg;
5133 5893
5134 /* 5894 /*
5135 * Disable powersaving as default on PCI devices. 5895 * Disable powersaving as default on PCI devices.
@@ -5211,8 +5971,22 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5211 spec->supported_bands |= SUPPORT_BAND_5GHZ; 5971 spec->supported_bands |= SUPPORT_BAND_5GHZ;
5212 spec->num_channels = ARRAY_SIZE(rf_vals_3x); 5972 spec->num_channels = ARRAY_SIZE(rf_vals_3x);
5213 spec->channels = rf_vals_3x; 5973 spec->channels = rf_vals_3x;
5974 } else if (rt2x00_rf(rt2x00dev, RF5592)) {
5975 spec->supported_bands |= SUPPORT_BAND_5GHZ;
5976
5977 rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
5978 if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
5979 spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
5980 spec->channels = rf_vals_5592_xtal40;
5981 } else {
5982 spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
5983 spec->channels = rf_vals_5592_xtal20;
5984 }
5214 } 5985 }
5215 5986
5987 if (WARN_ON_ONCE(!spec->channels))
5988 return -ENODEV;
5989
5216 /* 5990 /*
5217 * Initialize HT information. 5991 * Initialize HT information.
5218 */ 5992 */
@@ -5300,11 +6074,55 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5300 return 0; 6074 return 0;
5301} 6075}
5302 6076
6077static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
6078{
6079 u32 reg;
6080 u32 rt;
6081 u32 rev;
6082
6083 if (rt2x00_rt(rt2x00dev, RT3290))
6084 rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
6085 else
6086 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
6087
6088 rt = rt2x00_get_field32(reg, MAC_CSR0_CHIPSET);
6089 rev = rt2x00_get_field32(reg, MAC_CSR0_REVISION);
6090
6091 switch (rt) {
6092 case RT2860:
6093 case RT2872:
6094 case RT2883:
6095 case RT3070:
6096 case RT3071:
6097 case RT3090:
6098 case RT3290:
6099 case RT3352:
6100 case RT3390:
6101 case RT3572:
6102 case RT5390:
6103 case RT5392:
6104 case RT5592:
6105 break;
6106 default:
6107 rt2x00_err(rt2x00dev, "Invalid RT chipset 0x%04x, rev %04x detected\n",
6108 rt, rev);
6109 return -ENODEV;
6110 }
6111
6112 rt2x00_set_rt(rt2x00dev, rt, rev);
6113
6114 return 0;
6115}
6116
5303int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev) 6117int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
5304{ 6118{
5305 int retval; 6119 int retval;
5306 u32 reg; 6120 u32 reg;
5307 6121
6122 retval = rt2800_probe_rt(rt2x00dev);
6123 if (retval)
6124 return retval;
6125
5308 /* 6126 /*
5309 * Allocate eeprom data. 6127 * Allocate eeprom data.
5310 */ 6128 */
@@ -5546,7 +6364,8 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5546 case IEEE80211_AMPDU_TX_OPERATIONAL: 6364 case IEEE80211_AMPDU_TX_OPERATIONAL:
5547 break; 6365 break;
5548 default: 6366 default:
5549 WARNING((struct rt2x00_dev *)hw->priv, "Unknown AMPDU action\n"); 6367 rt2x00_warn((struct rt2x00_dev *)hw->priv,
6368 "Unknown AMPDU action\n");
5550 } 6369 }
5551 6370
5552 return ret; 6371 return ret;
@@ -5563,7 +6382,7 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
5563 if (idx != 0) 6382 if (idx != 0)
5564 return -ENOENT; 6383 return -ENOENT;
5565 6384
5566 survey->channel = conf->channel; 6385 survey->channel = conf->chandef.chan;
5567 6386
5568 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle); 6387 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle);
5569 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy); 6388 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ba5a05625aaa..6f4a861af336 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -72,7 +72,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
72 return; 72 return;
73 73
74 for (i = 0; i < 200; i++) { 74 for (i = 0; i < 200; i++) {
75 rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg); 75 rt2x00mmio_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
76 76
77 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) || 77 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
78 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) || 78 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
@@ -84,10 +84,10 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
84 } 84 }
85 85
86 if (i == 200) 86 if (i == 200)
87 ERROR(rt2x00dev, "MCU request failed, no response from hardware\n"); 87 rt2x00_err(rt2x00dev, "MCU request failed, no response from hardware\n");
88 88
89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 89 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
90 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 90 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
91} 91}
92 92
93#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 93#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
@@ -116,7 +116,7 @@ static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
116 struct rt2x00_dev *rt2x00dev = eeprom->data; 116 struct rt2x00_dev *rt2x00dev = eeprom->data;
117 u32 reg; 117 u32 reg;
118 118
119 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); 119 rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
120 120
121 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); 121 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
122 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); 122 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
@@ -138,7 +138,7 @@ static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
138 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, 138 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
139 !!eeprom->reg_chip_select); 139 !!eeprom->reg_chip_select);
140 140
141 rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); 141 rt2x00mmio_register_write(rt2x00dev, E2PROM_CSR, reg);
142} 142}
143 143
144static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) 144static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
@@ -146,7 +146,7 @@ static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
146 struct eeprom_93cx6 eeprom; 146 struct eeprom_93cx6 eeprom;
147 u32 reg; 147 u32 reg;
148 148
149 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); 149 rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
150 150
151 eeprom.data = rt2x00dev; 151 eeprom.data = rt2x00dev;
152 eeprom.register_read = rt2800pci_eepromregister_read; 152 eeprom.register_read = rt2800pci_eepromregister_read;
@@ -210,20 +210,20 @@ static void rt2800pci_start_queue(struct data_queue *queue)
210 210
211 switch (queue->qid) { 211 switch (queue->qid) {
212 case QID_RX: 212 case QID_RX:
213 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 213 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
214 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); 214 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
215 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 215 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
216 break; 216 break;
217 case QID_BEACON: 217 case QID_BEACON:
218 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 218 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
219 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 219 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
220 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 220 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
221 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 221 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
222 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); 222 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
223 223
224 rt2x00pci_register_read(rt2x00dev, INT_TIMER_EN, &reg); 224 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
225 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1); 225 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
226 rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg); 226 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
227 break; 227 break;
228 default: 228 default:
229 break; 229 break;
@@ -241,13 +241,13 @@ static void rt2800pci_kick_queue(struct data_queue *queue)
241 case QID_AC_BE: 241 case QID_AC_BE:
242 case QID_AC_BK: 242 case QID_AC_BK:
243 entry = rt2x00queue_get_entry(queue, Q_INDEX); 243 entry = rt2x00queue_get_entry(queue, Q_INDEX);
244 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), 244 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
245 entry->entry_idx); 245 entry->entry_idx);
246 break; 246 break;
247 case QID_MGMT: 247 case QID_MGMT:
248 entry = rt2x00queue_get_entry(queue, Q_INDEX); 248 entry = rt2x00queue_get_entry(queue, Q_INDEX);
249 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(5), 249 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
250 entry->entry_idx); 250 entry->entry_idx);
251 break; 251 break;
252 default: 252 default:
253 break; 253 break;
@@ -261,20 +261,20 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
261 261
262 switch (queue->qid) { 262 switch (queue->qid) {
263 case QID_RX: 263 case QID_RX:
264 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 264 rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
265 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); 265 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
266 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 266 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
267 break; 267 break;
268 case QID_BEACON: 268 case QID_BEACON:
269 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 269 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
270 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); 270 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
271 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 271 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
272 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 272 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
273 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); 273 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
274 274
275 rt2x00pci_register_read(rt2x00dev, INT_TIMER_EN, &reg); 275 rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
276 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0); 276 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
277 rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg); 277 rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
278 278
279 /* 279 /*
280 * Wait for current invocation to finish. The tasklet 280 * Wait for current invocation to finish. The tasklet
@@ -314,19 +314,19 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
314 */ 314 */
315 reg = 0; 315 reg = 0;
316 rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1); 316 rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
317 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, reg); 317 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, reg);
318 318
319 /* 319 /*
320 * Write firmware to device. 320 * Write firmware to device.
321 */ 321 */
322 rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 322 rt2x00mmio_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
323 data, len); 323 data, len);
324 324
325 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); 325 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
326 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); 326 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);
327 327
328 rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 328 rt2x00mmio_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
329 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 329 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
330 330
331 return 0; 331 return 0;
332} 332}
@@ -336,7 +336,7 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
336 */ 336 */
337static bool rt2800pci_get_entry_state(struct queue_entry *entry) 337static bool rt2800pci_get_entry_state(struct queue_entry *entry)
338{ 338{
339 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 339 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
340 u32 word; 340 u32 word;
341 341
342 if (entry->queue->qid == QID_RX) { 342 if (entry->queue->qid == QID_RX) {
@@ -352,7 +352,7 @@ static bool rt2800pci_get_entry_state(struct queue_entry *entry)
352 352
353static void rt2800pci_clear_entry(struct queue_entry *entry) 353static void rt2800pci_clear_entry(struct queue_entry *entry)
354{ 354{
355 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 355 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
356 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 356 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
357 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 357 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
358 u32 word; 358 u32 word;
@@ -370,8 +370,8 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
370 * Set RX IDX in register to inform hardware that we have 370 * Set RX IDX in register to inform hardware that we have
371 * handled this entry and it is available for reuse again. 371 * handled this entry and it is available for reuse again.
372 */ 372 */
373 rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, 373 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
374 entry->entry_idx); 374 entry->entry_idx);
375 } else { 375 } else {
376 rt2x00_desc_read(entry_priv->desc, 1, &word); 376 rt2x00_desc_read(entry_priv->desc, 1, &word);
377 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1); 377 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
@@ -381,60 +381,65 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
381 381
382static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) 382static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
383{ 383{
384 struct queue_entry_priv_pci *entry_priv; 384 struct queue_entry_priv_mmio *entry_priv;
385 385
386 /* 386 /*
387 * Initialize registers. 387 * Initialize registers.
388 */ 388 */
389 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; 389 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
390 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma); 390 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
391 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT0, 391 entry_priv->desc_dma);
392 rt2x00dev->tx[0].limit); 392 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
393 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX0, 0); 393 rt2x00dev->tx[0].limit);
394 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX0, 0); 394 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
395 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
395 396
396 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; 397 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
397 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma); 398 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
398 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT1, 399 entry_priv->desc_dma);
399 rt2x00dev->tx[1].limit); 400 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
400 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX1, 0); 401 rt2x00dev->tx[1].limit);
401 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX1, 0); 402 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
403 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
402 404
403 entry_priv = rt2x00dev->tx[2].entries[0].priv_data; 405 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
404 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma); 406 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
405 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT2, 407 entry_priv->desc_dma);
406 rt2x00dev->tx[2].limit); 408 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
407 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX2, 0); 409 rt2x00dev->tx[2].limit);
408 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX2, 0); 410 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
411 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
409 412
410 entry_priv = rt2x00dev->tx[3].entries[0].priv_data; 413 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
411 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma); 414 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
412 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT3, 415 entry_priv->desc_dma);
413 rt2x00dev->tx[3].limit); 416 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
414 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0); 417 rt2x00dev->tx[3].limit);
415 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0); 418 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
416 419 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
417 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0); 420
418 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0); 421 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
419 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0); 422 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
420 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0); 423 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
421 424 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
422 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0); 425
423 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0); 426 rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
424 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0); 427 rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
425 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0); 428 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
429 rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
426 430
427 entry_priv = rt2x00dev->rx->entries[0].priv_data; 431 entry_priv = rt2x00dev->rx->entries[0].priv_data;
428 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); 432 rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
429 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, 433 entry_priv->desc_dma);
430 rt2x00dev->rx[0].limit); 434 rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
431 rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, 435 rt2x00dev->rx[0].limit);
432 rt2x00dev->rx[0].limit - 1); 436 rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
433 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0); 437 rt2x00dev->rx[0].limit - 1);
438 rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
434 439
435 rt2800_disable_wpdma(rt2x00dev); 440 rt2800_disable_wpdma(rt2x00dev);
436 441
437 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0); 442 rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
438 443
439 return 0; 444 return 0;
440} 445}
@@ -453,8 +458,8 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
453 * should clear the register to assure a clean state. 458 * should clear the register to assure a clean state.
454 */ 459 */
455 if (state == STATE_RADIO_IRQ_ON) { 460 if (state == STATE_RADIO_IRQ_ON) {
456 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 461 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
457 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 462 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
458 } 463 }
459 464
460 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 465 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
@@ -466,7 +471,7 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
466 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1); 471 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
467 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1); 472 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
468 } 473 }
469 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 474 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
470 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 475 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
471 476
472 if (state == STATE_RADIO_IRQ_OFF) { 477 if (state == STATE_RADIO_IRQ_OFF) {
@@ -488,7 +493,7 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
488 /* 493 /*
489 * Reset DMA indexes 494 * Reset DMA indexes
490 */ 495 */
491 rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg); 496 rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
492 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1); 497 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
493 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1); 498 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
494 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1); 499 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
@@ -496,29 +501,29 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
496 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1); 501 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
497 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1); 502 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
498 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1); 503 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
499 rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg); 504 rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
500 505
501 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 506 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
502 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 507 rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
503 508
504 if (rt2x00_is_pcie(rt2x00dev) && 509 if (rt2x00_is_pcie(rt2x00dev) &&
505 (rt2x00_rt(rt2x00dev, RT3572) || 510 (rt2x00_rt(rt2x00dev, RT3572) ||
506 rt2x00_rt(rt2x00dev, RT5390) || 511 rt2x00_rt(rt2x00dev, RT5390) ||
507 rt2x00_rt(rt2x00dev, RT5392))) { 512 rt2x00_rt(rt2x00dev, RT5392))) {
508 rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg); 513 rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
509 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 514 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
510 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 515 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
511 rt2x00pci_register_write(rt2x00dev, AUX_CTRL, reg); 516 rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
512 } 517 }
513 518
514 rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 519 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
515 520
516 reg = 0; 521 reg = 0;
517 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 522 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
518 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 523 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
519 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 524 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
520 525
521 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); 526 rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
522 527
523 return 0; 528 return 0;
524} 529}
@@ -538,8 +543,8 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
538 return retval; 543 return retval;
539 544
540 /* After resume MCU_BOOT_SIGNAL will trash these. */ 545 /* After resume MCU_BOOT_SIGNAL will trash these. */
541 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 546 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
542 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 547 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
543 548
544 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_RADIO_OFF, 0xff, 0x02); 549 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_RADIO_OFF, 0xff, 0x02);
545 rt2800pci_mcu_status(rt2x00dev, TOKEN_RADIO_OFF); 550 rt2800pci_mcu_status(rt2x00dev, TOKEN_RADIO_OFF);
@@ -554,8 +559,8 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
554{ 559{
555 if (rt2x00_is_soc(rt2x00dev)) { 560 if (rt2x00_is_soc(rt2x00dev)) {
556 rt2800_disable_radio(rt2x00dev); 561 rt2800_disable_radio(rt2x00dev);
557 rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0); 562 rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
558 rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, 0); 563 rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
559 } 564 }
560} 565}
561 566
@@ -567,10 +572,10 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
567 0, 0x02); 572 0, 0x02);
568 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP); 573 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP);
569 } else if (state == STATE_SLEEP) { 574 } else if (state == STATE_SLEEP) {
570 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 575 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS,
571 0xffffffff); 576 0xffffffff);
572 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, 577 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID,
573 0xffffffff); 578 0xffffffff);
574 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_SLEEP, 579 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_SLEEP,
575 0xff, 0x01); 580 0xff, 0x01);
576 } 581 }
@@ -611,8 +616,8 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
611 } 616 }
612 617
613 if (unlikely(retval)) 618 if (unlikely(retval))
614 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 619 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
615 state, retval); 620 state, retval);
616 621
617 return retval; 622 return retval;
618} 623}
@@ -629,7 +634,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
629 struct txentry_desc *txdesc) 634 struct txentry_desc *txdesc)
630{ 635{
631 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 636 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
632 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 637 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
633 __le32 *txd = entry_priv->desc; 638 __le32 *txd = entry_priv->desc;
634 u32 word; 639 u32 word;
635 640
@@ -683,7 +688,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
683static void rt2800pci_fill_rxdone(struct queue_entry *entry, 688static void rt2800pci_fill_rxdone(struct queue_entry *entry,
684 struct rxdone_entry_desc *rxdesc) 689 struct rxdone_entry_desc *rxdesc)
685{ 690{
686 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 691 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
687 __le32 *rxd = entry_priv->desc; 692 __le32 *rxd = entry_priv->desc;
688 u32 word; 693 u32 word;
689 694
@@ -743,10 +748,90 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
743 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 748 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
744} 749}
745 750
751static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
752{
753 __le32 *txwi;
754 u32 word;
755 int wcid, tx_wcid;
756
757 wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
758
759 txwi = rt2800_drv_get_txwi(entry);
760 rt2x00_desc_read(txwi, 1, &word);
761 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
762
763 return (tx_wcid == wcid);
764}
765
766static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
767{
768 u32 status = *(u32 *)data;
769
770 /*
771 * rt2800pci hardware might reorder frames when exchanging traffic
772 * with multiple BA enabled STAs.
773 *
774 * For example, a tx queue
775 * [ STA1 | STA2 | STA1 | STA2 ]
776 * can result in tx status reports
777 * [ STA1 | STA1 | STA2 | STA2 ]
778 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
779 *
780 * To mitigate this effect, associate the tx status to the first frame
781 * in the tx queue with a matching wcid.
782 */
783 if (rt2800pci_txdone_entry_check(entry, status) &&
784 !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
785 /*
786 * Got a matching frame, associate the tx status with
787 * the frame
788 */
789 entry->status = status;
790 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
791 return true;
792 }
793
794 /* Check the next frame */
795 return false;
796}
797
798static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
799{
800 u32 status = *(u32 *)data;
801
802 /*
803 * Find the first frame without tx status and assign this status to it
804 * regardless if it matches or not.
805 */
806 if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
807 /*
808 * Got a matching frame, associate the tx status with
809 * the frame
810 */
811 entry->status = status;
812 set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
813 return true;
814 }
815
816 /* Check the next frame */
817 return false;
818}
819static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
820 void *data)
821{
822 if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
823 rt2800_txdone_entry(entry, entry->status,
824 rt2800pci_get_txwi(entry));
825 return false;
826 }
827
828 /* No more frames to release */
829 return true;
830}
831
746static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) 832static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
747{ 833{
748 struct data_queue *queue; 834 struct data_queue *queue;
749 struct queue_entry *entry;
750 u32 status; 835 u32 status;
751 u8 qid; 836 u8 qid;
752 int max_tx_done = 16; 837 int max_tx_done = 16;
@@ -758,8 +843,8 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
758 * Unknown queue, this shouldn't happen. Just drop 843 * Unknown queue, this shouldn't happen. Just drop
759 * this tx status. 844 * this tx status.
760 */ 845 */
761 WARNING(rt2x00dev, "Got TX status report with " 846 rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
762 "unexpected pid %u, dropping\n", qid); 847 qid);
763 break; 848 break;
764 } 849 }
765 850
@@ -769,8 +854,8 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
769 * The queue is NULL, this shouldn't happen. Stop 854 * The queue is NULL, this shouldn't happen. Stop
770 * processing here and drop the tx status 855 * processing here and drop the tx status
771 */ 856 */
772 WARNING(rt2x00dev, "Got TX status for an unavailable " 857 rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
773 "queue %u, dropping\n", qid); 858 qid);
774 break; 859 break;
775 } 860 }
776 861
@@ -779,13 +864,37 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
779 * The queue is empty. Stop processing here 864 * The queue is empty. Stop processing here
780 * and drop the tx status. 865 * and drop the tx status.
781 */ 866 */
782 WARNING(rt2x00dev, "Got TX status for an empty " 867 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
783 "queue %u, dropping\n", qid); 868 qid);
784 break; 869 break;
785 } 870 }
786 871
787 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 872 /*
788 rt2800_txdone_entry(entry, status, rt2800pci_get_txwi(entry)); 873 * Let's associate this tx status with the first
874 * matching frame.
875 */
876 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
877 Q_INDEX, &status,
878 rt2800pci_txdone_find_entry)) {
879 /*
880 * We cannot match the tx status to any frame, so just
881 * use the first one.
882 */
883 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
884 Q_INDEX, &status,
885 rt2800pci_txdone_match_first)) {
886 rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
887 qid);
888 break;
889 }
890 }
891
892 /*
893 * Release all frames with a valid tx status.
894 */
895 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
896 Q_INDEX, NULL,
897 rt2800pci_txdone_release_entries);
789 898
790 if (--max_tx_done == 0) 899 if (--max_tx_done == 0)
791 break; 900 break;
@@ -804,9 +913,9 @@ static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
804 * access needs locking. 913 * access needs locking.
805 */ 914 */
806 spin_lock_irq(&rt2x00dev->irqmask_lock); 915 spin_lock_irq(&rt2x00dev->irqmask_lock);
807 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 916 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
808 rt2x00_set_field32(&reg, irq_field, 1); 917 rt2x00_set_field32(&reg, irq_field, 1);
809 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 918 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
810 spin_unlock_irq(&rt2x00dev->irqmask_lock); 919 spin_unlock_irq(&rt2x00dev->irqmask_lock);
811} 920}
812 921
@@ -847,15 +956,15 @@ static void rt2800pci_tbtt_tasklet(unsigned long data)
847 * interval every 64 beacons by 64us to mitigate this effect. 956 * interval every 64 beacons by 64us to mitigate this effect.
848 */ 957 */
849 if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) { 958 if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
850 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 959 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
851 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 960 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
852 (rt2x00dev->beacon_int * 16) - 1); 961 (rt2x00dev->beacon_int * 16) - 1);
853 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); 962 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
854 } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) { 963 } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
855 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 964 rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
856 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 965 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
857 (rt2x00dev->beacon_int * 16)); 966 (rt2x00dev->beacon_int * 16));
858 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg); 967 rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
859 } 968 }
860 drv_data->tbtt_tick++; 969 drv_data->tbtt_tick++;
861 drv_data->tbtt_tick %= BCN_TBTT_OFFSET; 970 drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
@@ -868,7 +977,7 @@ static void rt2800pci_tbtt_tasklet(unsigned long data)
868static void rt2800pci_rxdone_tasklet(unsigned long data) 977static void rt2800pci_rxdone_tasklet(unsigned long data)
869{ 978{
870 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 979 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
871 if (rt2x00pci_rxdone(rt2x00dev)) 980 if (rt2x00mmio_rxdone(rt2x00dev))
872 tasklet_schedule(&rt2x00dev->rxdone_tasklet); 981 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
873 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 982 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
874 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE); 983 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
@@ -906,14 +1015,13 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
906 * need to lock the kfifo. 1015 * need to lock the kfifo.
907 */ 1016 */
908 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { 1017 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
909 rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &status); 1018 rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
910 1019
911 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) 1020 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
912 break; 1021 break;
913 1022
914 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) { 1023 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
915 WARNING(rt2x00dev, "TX status FIFO overrun," 1024 rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
916 "drop tx status report.\n");
917 break; 1025 break;
918 } 1026 }
919 } 1027 }
@@ -928,8 +1036,8 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
928 u32 reg, mask; 1036 u32 reg, mask;
929 1037
930 /* Read status and ACK all interrupts */ 1038 /* Read status and ACK all interrupts */
931 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 1039 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
932 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 1040 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
933 1041
934 if (!reg) 1042 if (!reg)
935 return IRQ_NONE; 1043 return IRQ_NONE;
@@ -969,9 +1077,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
969 * the tasklet will reenable the appropriate interrupts. 1077 * the tasklet will reenable the appropriate interrupts.
970 */ 1078 */
971 spin_lock(&rt2x00dev->irqmask_lock); 1079 spin_lock(&rt2x00dev->irqmask_lock);
972 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1080 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
973 reg &= mask; 1081 reg &= mask;
974 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 1082 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
975 spin_unlock(&rt2x00dev->irqmask_lock); 1083 spin_unlock(&rt2x00dev->irqmask_lock);
976 1084
977 return IRQ_HANDLED; 1085 return IRQ_HANDLED;
@@ -1022,13 +1130,13 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
1022}; 1130};
1023 1131
1024static const struct rt2800_ops rt2800pci_rt2800_ops = { 1132static const struct rt2800_ops rt2800pci_rt2800_ops = {
1025 .register_read = rt2x00pci_register_read, 1133 .register_read = rt2x00mmio_register_read,
1026 .register_read_lock = rt2x00pci_register_read, /* same for PCI */ 1134 .register_read_lock = rt2x00mmio_register_read, /* same for PCI */
1027 .register_write = rt2x00pci_register_write, 1135 .register_write = rt2x00mmio_register_write,
1028 .register_write_lock = rt2x00pci_register_write, /* same for PCI */ 1136 .register_write_lock = rt2x00mmio_register_write, /* same for PCI */
1029 .register_multiread = rt2x00pci_register_multiread, 1137 .register_multiread = rt2x00mmio_register_multiread,
1030 .register_multiwrite = rt2x00pci_register_multiwrite, 1138 .register_multiwrite = rt2x00mmio_register_multiwrite,
1031 .regbusy_read = rt2x00pci_regbusy_read, 1139 .regbusy_read = rt2x00mmio_regbusy_read,
1032 .read_eeprom = rt2800pci_read_eeprom, 1140 .read_eeprom = rt2800pci_read_eeprom,
1033 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled, 1141 .hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
1034 .drv_write_firmware = rt2800pci_write_firmware, 1142 .drv_write_firmware = rt2800pci_write_firmware,
@@ -1047,8 +1155,8 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1047 .get_firmware_name = rt2800pci_get_firmware_name, 1155 .get_firmware_name = rt2800pci_get_firmware_name,
1048 .check_firmware = rt2800_check_firmware, 1156 .check_firmware = rt2800_check_firmware,
1049 .load_firmware = rt2800_load_firmware, 1157 .load_firmware = rt2800_load_firmware,
1050 .initialize = rt2x00pci_initialize, 1158 .initialize = rt2x00mmio_initialize,
1051 .uninitialize = rt2x00pci_uninitialize, 1159 .uninitialize = rt2x00mmio_uninitialize,
1052 .get_entry_state = rt2800pci_get_entry_state, 1160 .get_entry_state = rt2800pci_get_entry_state,
1053 .clear_entry = rt2800pci_clear_entry, 1161 .clear_entry = rt2800pci_clear_entry,
1054 .set_device_state = rt2800pci_set_device_state, 1162 .set_device_state = rt2800pci_set_device_state,
@@ -1061,7 +1169,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1061 .start_queue = rt2800pci_start_queue, 1169 .start_queue = rt2800pci_start_queue,
1062 .kick_queue = rt2800pci_kick_queue, 1170 .kick_queue = rt2800pci_kick_queue,
1063 .stop_queue = rt2800pci_stop_queue, 1171 .stop_queue = rt2800pci_stop_queue,
1064 .flush_queue = rt2x00pci_flush_queue, 1172 .flush_queue = rt2x00mmio_flush_queue,
1065 .write_tx_desc = rt2800pci_write_tx_desc, 1173 .write_tx_desc = rt2800pci_write_tx_desc,
1066 .write_tx_data = rt2800_write_tx_data, 1174 .write_tx_data = rt2800_write_tx_data,
1067 .write_beacon = rt2800_write_beacon, 1175 .write_beacon = rt2800_write_beacon,
@@ -1082,21 +1190,24 @@ static const struct data_queue_desc rt2800pci_queue_rx = {
1082 .entry_num = 128, 1190 .entry_num = 128,
1083 .data_size = AGGREGATION_SIZE, 1191 .data_size = AGGREGATION_SIZE,
1084 .desc_size = RXD_DESC_SIZE, 1192 .desc_size = RXD_DESC_SIZE,
1085 .priv_size = sizeof(struct queue_entry_priv_pci), 1193 .winfo_size = RXWI_DESC_SIZE,
1194 .priv_size = sizeof(struct queue_entry_priv_mmio),
1086}; 1195};
1087 1196
1088static const struct data_queue_desc rt2800pci_queue_tx = { 1197static const struct data_queue_desc rt2800pci_queue_tx = {
1089 .entry_num = 64, 1198 .entry_num = 64,
1090 .data_size = AGGREGATION_SIZE, 1199 .data_size = AGGREGATION_SIZE,
1091 .desc_size = TXD_DESC_SIZE, 1200 .desc_size = TXD_DESC_SIZE,
1092 .priv_size = sizeof(struct queue_entry_priv_pci), 1201 .winfo_size = TXWI_DESC_SIZE,
1202 .priv_size = sizeof(struct queue_entry_priv_mmio),
1093}; 1203};
1094 1204
1095static const struct data_queue_desc rt2800pci_queue_bcn = { 1205static const struct data_queue_desc rt2800pci_queue_bcn = {
1096 .entry_num = 8, 1206 .entry_num = 8,
1097 .data_size = 0, /* No DMA required for beacons */ 1207 .data_size = 0, /* No DMA required for beacons */
1098 .desc_size = TXWI_DESC_SIZE, 1208 .desc_size = TXD_DESC_SIZE,
1099 .priv_size = sizeof(struct queue_entry_priv_pci), 1209 .winfo_size = TXWI_DESC_SIZE,
1210 .priv_size = sizeof(struct queue_entry_priv_mmio),
1100}; 1211};
1101 1212
1102static const struct rt2x00_ops rt2800pci_ops = { 1213static const struct rt2x00_ops rt2800pci_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 098613ed93fb..ac854d75bd6c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -128,9 +128,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
128 128
129 tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100)); 129 tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
130 if (unlikely(tout)) 130 if (unlikely(tout))
131 WARNING(entry->queue->rt2x00dev, 131 rt2x00_warn(entry->queue->rt2x00dev,
132 "TX status timeout for entry %d in queue %d\n", 132 "TX status timeout for entry %d in queue %d\n",
133 entry->entry_idx, entry->queue->qid); 133 entry->entry_idx, entry->queue->qid);
134 return tout; 134 return tout;
135 135
136} 136}
@@ -154,7 +154,8 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
154 bool valid; 154 bool valid;
155 155
156 if (urb_status) { 156 if (urb_status) {
157 WARNING(rt2x00dev, "TX status read failed %d\n", urb_status); 157 rt2x00_warn(rt2x00dev, "TX status read failed %d\n",
158 urb_status);
158 159
159 goto stop_reading; 160 goto stop_reading;
160 } 161 }
@@ -162,7 +163,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
162 valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID); 163 valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID);
163 if (valid) { 164 if (valid) {
164 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status)) 165 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status))
165 WARNING(rt2x00dev, "TX status FIFO overrun\n"); 166 rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n");
166 167
167 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 168 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
168 169
@@ -269,7 +270,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
269 0, USB_MODE_FIRMWARE, 270 0, USB_MODE_FIRMWARE,
270 REGISTER_TIMEOUT_FIRMWARE); 271 REGISTER_TIMEOUT_FIRMWARE);
271 if (status < 0) { 272 if (status < 0) {
272 ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); 273 rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n");
273 return status; 274 return status;
274 } 275 }
275 276
@@ -392,8 +393,8 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
392 } 393 }
393 394
394 if (unlikely(retval)) 395 if (unlikely(retval))
395 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 396 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
396 state, retval); 397 state, retval);
397 398
398 return retval; 399 return retval;
399} 400}
@@ -408,8 +409,7 @@ static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
408 409
409 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); 410 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
410 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) { 411 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
411 WARNING(rt2x00dev, "TX HW queue 0 timed out," 412 rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n");
412 " invoke forced kick\n");
413 413
414 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012); 414 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012);
415 415
@@ -424,8 +424,7 @@ static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
424 424
425 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg); 425 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
426 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) { 426 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
427 WARNING(rt2x00dev, "TX HW queue 1 timed out," 427 rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n");
428 " invoke forced kick\n");
429 428
430 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a); 429 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
431 430
@@ -485,7 +484,7 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
485 */ 484 */
486 skbdesc->flags |= SKBDESC_DESC_IN_SKB; 485 skbdesc->flags |= SKBDESC_DESC_IN_SKB;
487 skbdesc->desc = txi; 486 skbdesc->desc = txi;
488 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; 487 skbdesc->desc_len = TXINFO_DESC_SIZE + entry->queue->winfo_size;
489} 488}
490 489
491/* 490/*
@@ -540,9 +539,9 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
540 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID); 539 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
541 540
542 if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) { 541 if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
543 DEBUG(entry->queue->rt2x00dev, 542 rt2x00_dbg(entry->queue->rt2x00dev,
544 "TX status report missed for queue %d entry %d\n", 543 "TX status report missed for queue %d entry %d\n",
545 entry->queue->qid, entry->entry_idx); 544 entry->queue->qid, entry->entry_idx);
546 return TXDONE_UNKNOWN; 545 return TXDONE_UNKNOWN;
547 } 546 }
548 547
@@ -566,8 +565,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
566 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 565 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
567 566
568 if (unlikely(rt2x00queue_empty(queue))) { 567 if (unlikely(rt2x00queue_empty(queue))) {
569 WARNING(rt2x00dev, "Got TX status for an empty " 568 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
570 "queue %u, dropping\n", qid); 569 qid);
571 break; 570 break;
572 } 571 }
573 572
@@ -575,8 +574,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
575 574
576 if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || 575 if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
577 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) { 576 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
578 WARNING(rt2x00dev, "Data pending for entry %u " 577 rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
579 "in queue %u\n", entry->entry_idx, qid); 578 entry->entry_idx, qid);
580 break; 579 break;
581 } 580 }
582 581
@@ -677,8 +676,8 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
677 */ 676 */
678 if (unlikely(rx_pkt_len == 0 || 677 if (unlikely(rx_pkt_len == 0 ||
679 rx_pkt_len > entry->queue->data_size)) { 678 rx_pkt_len > entry->queue->data_size)) {
680 ERROR(entry->queue->rt2x00dev, 679 rt2x00_err(entry->queue->rt2x00dev,
681 "Bad frame size %d, forcing to 0\n", rx_pkt_len); 680 "Bad frame size %d, forcing to 0\n", rx_pkt_len);
682 return; 681 return;
683 } 682 }
684 683
@@ -853,21 +852,24 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
853static const struct data_queue_desc rt2800usb_queue_rx = { 852static const struct data_queue_desc rt2800usb_queue_rx = {
854 .entry_num = 128, 853 .entry_num = 128,
855 .data_size = AGGREGATION_SIZE, 854 .data_size = AGGREGATION_SIZE,
856 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE, 855 .desc_size = RXINFO_DESC_SIZE,
856 .winfo_size = RXWI_DESC_SIZE,
857 .priv_size = sizeof(struct queue_entry_priv_usb), 857 .priv_size = sizeof(struct queue_entry_priv_usb),
858}; 858};
859 859
860static const struct data_queue_desc rt2800usb_queue_tx = { 860static const struct data_queue_desc rt2800usb_queue_tx = {
861 .entry_num = 16, 861 .entry_num = 16,
862 .data_size = AGGREGATION_SIZE, 862 .data_size = AGGREGATION_SIZE,
863 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 863 .desc_size = TXINFO_DESC_SIZE,
864 .winfo_size = TXWI_DESC_SIZE,
864 .priv_size = sizeof(struct queue_entry_priv_usb), 865 .priv_size = sizeof(struct queue_entry_priv_usb),
865}; 866};
866 867
867static const struct data_queue_desc rt2800usb_queue_bcn = { 868static const struct data_queue_desc rt2800usb_queue_bcn = {
868 .entry_num = 8, 869 .entry_num = 8,
869 .data_size = MGMT_FRAME_SIZE, 870 .data_size = MGMT_FRAME_SIZE,
870 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 871 .desc_size = TXINFO_DESC_SIZE,
872 .winfo_size = TXWI_DESC_SIZE,
871 .priv_size = sizeof(struct queue_entry_priv_usb), 873 .priv_size = sizeof(struct queue_entry_priv_usb),
872}; 874};
873 875
@@ -890,6 +892,50 @@ static const struct rt2x00_ops rt2800usb_ops = {
890#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 892#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
891}; 893};
892 894
895static const struct data_queue_desc rt2800usb_queue_rx_5592 = {
896 .entry_num = 128,
897 .data_size = AGGREGATION_SIZE,
898 .desc_size = RXINFO_DESC_SIZE,
899 .winfo_size = RXWI_DESC_SIZE_5592,
900 .priv_size = sizeof(struct queue_entry_priv_usb),
901};
902
903static const struct data_queue_desc rt2800usb_queue_tx_5592 = {
904 .entry_num = 16,
905 .data_size = AGGREGATION_SIZE,
906 .desc_size = TXINFO_DESC_SIZE,
907 .winfo_size = TXWI_DESC_SIZE_5592,
908 .priv_size = sizeof(struct queue_entry_priv_usb),
909};
910
911static const struct data_queue_desc rt2800usb_queue_bcn_5592 = {
912 .entry_num = 8,
913 .data_size = MGMT_FRAME_SIZE,
914 .desc_size = TXINFO_DESC_SIZE,
915 .winfo_size = TXWI_DESC_SIZE_5592,
916 .priv_size = sizeof(struct queue_entry_priv_usb),
917};
918
919
920static const struct rt2x00_ops rt2800usb_ops_5592 = {
921 .name = KBUILD_MODNAME,
922 .drv_data_size = sizeof(struct rt2800_drv_data),
923 .max_ap_intf = 8,
924 .eeprom_size = EEPROM_SIZE,
925 .rf_size = RF_SIZE,
926 .tx_queues = NUM_TX_QUEUES,
927 .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
928 .rx = &rt2800usb_queue_rx_5592,
929 .tx = &rt2800usb_queue_tx_5592,
930 .bcn = &rt2800usb_queue_bcn_5592,
931 .lib = &rt2800usb_rt2x00_ops,
932 .drv = &rt2800usb_rt2800_ops,
933 .hw = &rt2800usb_mac80211_ops,
934#ifdef CONFIG_RT2X00_LIB_DEBUGFS
935 .debugfs = &rt2800_rt2x00debug,
936#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
937};
938
893/* 939/*
894 * rt2800usb module information. 940 * rt2800usb module information.
895 */ 941 */
@@ -1200,6 +1246,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
1200 { USB_DEVICE(0x148f, 0x5370) }, 1246 { USB_DEVICE(0x148f, 0x5370) },
1201 { USB_DEVICE(0x148f, 0x5372) }, 1247 { USB_DEVICE(0x148f, 0x5372) },
1202#endif 1248#endif
1249#ifdef CONFIG_RT2800USB_RT55XX
1250 /* Arcadyan */
1251 { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 },
1252 /* AVM GmbH */
1253 { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 },
1254 /* D-Link DWA-160-B2 */
1255 { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 },
1256 /* Proware */
1257 { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 },
1258 /* Ralink */
1259 { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 },
1260#endif
1203#ifdef CONFIG_RT2800USB_UNKNOWN 1261#ifdef CONFIG_RT2800USB_UNKNOWN
1204 /* 1262 /*
1205 * Unclear what kind of devices these are (they aren't supported by the 1263 * Unclear what kind of devices these are (they aren't supported by the
@@ -1303,6 +1361,9 @@ MODULE_LICENSE("GPL");
1303static int rt2800usb_probe(struct usb_interface *usb_intf, 1361static int rt2800usb_probe(struct usb_interface *usb_intf,
1304 const struct usb_device_id *id) 1362 const struct usb_device_id *id)
1305{ 1363{
1364 if (id->driver_info == 5592)
1365 return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
1366
1306 return rt2x00usb_probe(usb_intf, &rt2800usb_ops); 1367 return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
1307} 1368}
1308 1369
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 086abb403a4f..7510723a8c37 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -54,47 +54,36 @@
54#define DRV_VERSION "2.3.0" 54#define DRV_VERSION "2.3.0"
55#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 55#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
56 56
57/* 57/* Debug definitions.
58 * Debug definitions.
59 * Debug output has to be enabled during compile time. 58 * Debug output has to be enabled during compile time.
60 */ 59 */
61#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \
62 printk(__kernlvl "%s -> %s: %s - " __msg, \
63 wiphy_name((__dev)->hw->wiphy), __func__, __lvl, ##__args)
64
65#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \
66 printk(__kernlvl "%s -> %s: %s - " __msg, \
67 KBUILD_MODNAME, __func__, __lvl, ##__args)
68
69#ifdef CONFIG_RT2X00_DEBUG 60#ifdef CONFIG_RT2X00_DEBUG
70#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ 61#define DEBUG
71 DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args)
72#else
73#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
74 do { } while (0)
75#endif /* CONFIG_RT2X00_DEBUG */ 62#endif /* CONFIG_RT2X00_DEBUG */
76 63
77/* 64/* Utility printing macros
78 * Various debug levels. 65 * rt2x00_probe_err is for messages when rt2x00_dev is uninitialized
79 * The debug levels PANIC and ERROR both indicate serious problems,
80 * for this reason they should never be ignored.
81 * The special ERROR_PROBE message is for messages that are generated
82 * when the rt2x00_dev is not yet initialized.
83 */ 66 */
84#define PANIC(__dev, __msg, __args...) \ 67#define rt2x00_probe_err(fmt, ...) \
85 DEBUG_PRINTK_MSG(__dev, KERN_CRIT, "Panic", __msg, ##__args) 68 printk(KERN_ERR KBUILD_MODNAME ": %s: Error - " fmt, \
86#define ERROR(__dev, __msg, __args...) \ 69 __func__, ##__VA_ARGS__)
87 DEBUG_PRINTK_MSG(__dev, KERN_ERR, "Error", __msg, ##__args) 70#define rt2x00_err(dev, fmt, ...) \
88#define ERROR_PROBE(__msg, __args...) \ 71 wiphy_err((dev)->hw->wiphy, "%s: Error - " fmt, \
89 DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args) 72 __func__, ##__VA_ARGS__)
90#define WARNING(__dev, __msg, __args...) \ 73#define rt2x00_warn(dev, fmt, ...) \
91 DEBUG_PRINTK_MSG(__dev, KERN_WARNING, "Warning", __msg, ##__args) 74 wiphy_warn((dev)->hw->wiphy, "%s: Warning - " fmt, \
92#define INFO(__dev, __msg, __args...) \ 75 __func__, ##__VA_ARGS__)
93 DEBUG_PRINTK_MSG(__dev, KERN_INFO, "Info", __msg, ##__args) 76#define rt2x00_info(dev, fmt, ...) \
94#define DEBUG(__dev, __msg, __args...) \ 77 wiphy_info((dev)->hw->wiphy, "%s: Info - " fmt, \
95 DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args) 78 __func__, ##__VA_ARGS__)
96#define EEPROM(__dev, __msg, __args...) \ 79
97 DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) 80/* Various debug levels */
81#define rt2x00_dbg(dev, fmt, ...) \
82 wiphy_dbg((dev)->hw->wiphy, "%s: Debug - " fmt, \
83 __func__, ##__VA_ARGS__)
84#define rt2x00_eeprom_dbg(dev, fmt, ...) \
85 wiphy_dbg((dev)->hw->wiphy, "%s: EEPROM recovery - " fmt, \
86 __func__, ##__VA_ARGS__)
98 87
99/* 88/*
100 * Duration calculations 89 * Duration calculations
@@ -193,6 +182,7 @@ struct rt2x00_chip {
193#define RT3883 0x3883 /* WSOC */ 182#define RT3883 0x3883 /* WSOC */
194#define RT5390 0x5390 /* 2.4GHz */ 183#define RT5390 0x5390 /* 2.4GHz */
195#define RT5392 0x5392 /* 2.4GHz */ 184#define RT5392 0x5392 /* 2.4GHz */
185#define RT5592 0x5592
196 186
197 u16 rf; 187 u16 rf;
198 u16 rev; 188 u16 rev;
@@ -1064,8 +1054,7 @@ static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
1064} 1054}
1065 1055
1066/* 1056/*
1067 * Generic EEPROM access. 1057 * Generic EEPROM access. The EEPROM is being accessed by word or byte index.
1068 * The EEPROM is being accessed by word index.
1069 */ 1058 */
1070static inline void *rt2x00_eeprom_addr(struct rt2x00_dev *rt2x00dev, 1059static inline void *rt2x00_eeprom_addr(struct rt2x00_dev *rt2x00dev,
1071 const unsigned int word) 1060 const unsigned int word)
@@ -1085,6 +1074,12 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
1085 rt2x00dev->eeprom[word] = cpu_to_le16(data); 1074 rt2x00dev->eeprom[word] = cpu_to_le16(data);
1086} 1075}
1087 1076
1077static inline u8 rt2x00_eeprom_byte(struct rt2x00_dev *rt2x00dev,
1078 const unsigned int byte)
1079{
1080 return *(((u8 *)rt2x00dev->eeprom) + byte);
1081}
1082
1088/* 1083/*
1089 * Chipset handlers 1084 * Chipset handlers
1090 */ 1085 */
@@ -1095,9 +1090,27 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
1095 rt2x00dev->chip.rf = rf; 1090 rt2x00dev->chip.rf = rf;
1096 rt2x00dev->chip.rev = rev; 1091 rt2x00dev->chip.rev = rev;
1097 1092
1098 INFO(rt2x00dev, 1093 rt2x00_info(rt2x00dev, "Chipset detected - rt: %04x, rf: %04x, rev: %04x\n",
1099 "Chipset detected - rt: %04x, rf: %04x, rev: %04x.\n", 1094 rt2x00dev->chip.rt, rt2x00dev->chip.rf,
1100 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 1095 rt2x00dev->chip.rev);
1096}
1097
1098static inline void rt2x00_set_rt(struct rt2x00_dev *rt2x00dev,
1099 const u16 rt, const u16 rev)
1100{
1101 rt2x00dev->chip.rt = rt;
1102 rt2x00dev->chip.rev = rev;
1103
1104 rt2x00_info(rt2x00dev, "RT chipset %04x, rev %04x detected\n",
1105 rt2x00dev->chip.rt, rt2x00dev->chip.rev);
1106}
1107
1108static inline void rt2x00_set_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
1109{
1110 rt2x00dev->chip.rf = rf;
1111
1112 rt2x00_info(rt2x00dev, "RF chipset %04x detected\n",
1113 rt2x00dev->chip.rf);
1101} 1114}
1102 1115
1103static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt) 1116static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
@@ -1360,7 +1373,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
1360 struct ieee80211_vif *vif, u16 queue, 1373 struct ieee80211_vif *vif, u16 queue,
1361 const struct ieee80211_tx_queue_params *params); 1374 const struct ieee80211_tx_queue_params *params);
1362void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); 1375void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
1363void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop); 1376void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
1364int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); 1377int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
1365int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); 1378int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
1366void rt2x00mac_get_ringparam(struct ieee80211_hw *hw, 1379void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 49a63e973934..8cb43f8f3efc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -184,7 +184,7 @@ static u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
184 /* 184 /*
185 * Initialize center channel to current channel. 185 * Initialize center channel to current channel.
186 */ 186 */
187 center_channel = spec->channels[conf->channel->hw_value].channel; 187 center_channel = spec->channels[conf->chandef.chan->hw_value].channel;
188 188
189 /* 189 /*
190 * Adjust center channel to HT40+ and HT40- operation. 190 * Adjust center channel to HT40+ and HT40- operation.
@@ -199,7 +199,7 @@ static u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
199 return i; 199 return i;
200 200
201 WARN_ON(1); 201 WARN_ON(1);
202 return conf->channel->hw_value; 202 return conf->chandef.chan->hw_value;
203} 203}
204 204
205void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 205void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
@@ -227,7 +227,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
227 hw_value = rt2x00ht_center_channel(rt2x00dev, conf); 227 hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
228 } else { 228 } else {
229 clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 229 clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
230 hw_value = conf->channel->hw_value; 230 hw_value = conf->chandef.chan->hw_value;
231 } 231 }
232 232
233 memcpy(&libconf.rf, 233 memcpy(&libconf.rf,
@@ -279,8 +279,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
279 else 279 else
280 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); 280 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
281 281
282 rt2x00dev->curr_band = conf->channel->band; 282 rt2x00dev->curr_band = conf->chandef.chan->band;
283 rt2x00dev->curr_freq = conf->channel->center_freq; 283 rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
284 rt2x00dev->tx_power = conf->power_level; 284 rt2x00dev->tx_power = conf->power_level;
285 rt2x00dev->short_retry = conf->short_frame_max_tx_count; 285 rt2x00dev->short_retry = conf->short_frame_max_tx_count;
286 rt2x00dev->long_retry = conf->long_frame_max_tx_count; 286 rt2x00dev->long_retry = conf->long_frame_max_tx_count;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 3bb8cafbac59..fe7a7f63a9ed 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -174,7 +174,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
174 do_gettimeofday(&timestamp); 174 do_gettimeofday(&timestamp);
175 175
176 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) { 176 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
177 DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n"); 177 rt2x00_dbg(rt2x00dev, "txrx dump queue length exceeded\n");
178 return; 178 return;
179 } 179 }
180 180
@@ -185,7 +185,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
185 skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len, 185 skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len,
186 GFP_ATOMIC); 186 GFP_ATOMIC);
187 if (!skbcopy) { 187 if (!skbcopy) {
188 DEBUG(rt2x00dev, "Failed to copy skb for dump.\n"); 188 rt2x00_dbg(rt2x00dev, "Failed to copy skb for dump\n");
189 return; 189 return;
190 } 190 }
191 191
@@ -657,7 +657,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
657 657
658 intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); 658 intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL);
659 if (!intf) { 659 if (!intf) {
660 ERROR(rt2x00dev, "Failed to allocate debug handler.\n"); 660 rt2x00_err(rt2x00dev, "Failed to allocate debug handler\n");
661 return; 661 return;
662 } 662 }
663 663
@@ -760,7 +760,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
760 760
761exit: 761exit:
762 rt2x00debug_deregister(rt2x00dev); 762 rt2x00debug_deregister(rt2x00dev);
763 ERROR(rt2x00dev, "Failed to register debug handler.\n"); 763 rt2x00_err(rt2x00dev, "Failed to register debug handler\n");
764} 764}
765 765
766void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) 766void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 189744db65e0..90dc14336980 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -171,7 +171,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
171 return; 171 return;
172 172
173 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 173 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
174 ERROR(rt2x00dev, "Device failed to wakeup.\n"); 174 rt2x00_err(rt2x00dev, "Device failed to wakeup\n");
175 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); 175 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
176} 176}
177 177
@@ -673,9 +673,8 @@ static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
673 break; 673 break;
674 } 674 }
675 675
676 WARNING(rt2x00dev, "Frame received with unrecognized signal, " 676 rt2x00_warn(rt2x00dev, "Frame received with unrecognized signal, mode=0x%.4x, signal=0x%.4x, type=%d\n",
677 "mode=0x%.4x, signal=0x%.4x, type=%d.\n", 677 rxdesc->rate_mode, signal, type);
678 rxdesc->rate_mode, signal, type);
679 return 0; 678 return 0;
680} 679}
681 680
@@ -720,8 +719,8 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
720 */ 719 */
721 if (unlikely(rxdesc.size == 0 || 720 if (unlikely(rxdesc.size == 0 ||
722 rxdesc.size > entry->queue->data_size)) { 721 rxdesc.size > entry->queue->data_size)) {
723 ERROR(rt2x00dev, "Wrong frame size %d max %d.\n", 722 rt2x00_err(rt2x00dev, "Wrong frame size %d max %d\n",
724 rxdesc.size, entry->queue->data_size); 723 rxdesc.size, entry->queue->data_size);
725 dev_kfree_skb(entry->skb); 724 dev_kfree_skb(entry->skb);
726 goto renew_skb; 725 goto renew_skb;
727 } 726 }
@@ -1006,7 +1005,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
1006 1005
1007 exit_free_channels: 1006 exit_free_channels:
1008 kfree(channels); 1007 kfree(channels);
1009 ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n"); 1008 rt2x00_err(rt2x00dev, "Allocation ieee80211 modes failed\n");
1010 return -ENOMEM; 1009 return -ENOMEM;
1011} 1010}
1012 1011
@@ -1337,7 +1336,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1337 */ 1336 */
1338 retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); 1337 retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
1339 if (retval) { 1338 if (retval) {
1340 ERROR(rt2x00dev, "Failed to allocate device.\n"); 1339 rt2x00_err(rt2x00dev, "Failed to allocate device\n");
1341 goto exit; 1340 goto exit;
1342 } 1341 }
1343 1342
@@ -1353,7 +1352,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1353 */ 1352 */
1354 retval = rt2x00lib_probe_hw(rt2x00dev); 1353 retval = rt2x00lib_probe_hw(rt2x00dev);
1355 if (retval) { 1354 if (retval) {
1356 ERROR(rt2x00dev, "Failed to initialize hw.\n"); 1355 rt2x00_err(rt2x00dev, "Failed to initialize hw\n");
1357 goto exit; 1356 goto exit;
1358 } 1357 }
1359 1358
@@ -1451,7 +1450,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
1451#ifdef CONFIG_PM 1450#ifdef CONFIG_PM
1452int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) 1451int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1453{ 1452{
1454 DEBUG(rt2x00dev, "Going to sleep.\n"); 1453 rt2x00_dbg(rt2x00dev, "Going to sleep\n");
1455 1454
1456 /* 1455 /*
1457 * Prevent mac80211 from accessing driver while suspended. 1456 * Prevent mac80211 from accessing driver while suspended.
@@ -1482,8 +1481,7 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1482 * device is as good as disabled. 1481 * device is as good as disabled.
1483 */ 1482 */
1484 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP)) 1483 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP))
1485 WARNING(rt2x00dev, "Device failed to enter sleep state, " 1484 rt2x00_warn(rt2x00dev, "Device failed to enter sleep state, continue suspending\n");
1486 "continue suspending.\n");
1487 1485
1488 return 0; 1486 return 0;
1489} 1487}
@@ -1491,7 +1489,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
1491 1489
1492int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) 1490int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1493{ 1491{
1494 DEBUG(rt2x00dev, "Waking up.\n"); 1492 rt2x00_dbg(rt2x00dev, "Waking up\n");
1495 1493
1496 /* 1494 /*
1497 * Restore/enable extra components. 1495 * Restore/enable extra components.
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f316aad30612..1b4254b4272d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -42,28 +42,28 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
42 */ 42 */
43 fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev); 43 fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev);
44 if (!fw_name) { 44 if (!fw_name) {
45 ERROR(rt2x00dev, 45 rt2x00_err(rt2x00dev,
46 "Invalid firmware filename.\n" 46 "Invalid firmware filename\n"
47 "Please file bug report to %s.\n", DRV_PROJECT); 47 "Please file bug report to %s\n", DRV_PROJECT);
48 return -EINVAL; 48 return -EINVAL;
49 } 49 }
50 50
51 INFO(rt2x00dev, "Loading firmware file '%s'.\n", fw_name); 51 rt2x00_info(rt2x00dev, "Loading firmware file '%s'\n", fw_name);
52 52
53 retval = request_firmware(&fw, fw_name, device); 53 retval = request_firmware(&fw, fw_name, device);
54 if (retval) { 54 if (retval) {
55 ERROR(rt2x00dev, "Failed to request Firmware.\n"); 55 rt2x00_err(rt2x00dev, "Failed to request Firmware\n");
56 return retval; 56 return retval;
57 } 57 }
58 58
59 if (!fw || !fw->size || !fw->data) { 59 if (!fw || !fw->size || !fw->data) {
60 ERROR(rt2x00dev, "Failed to read Firmware.\n"); 60 rt2x00_err(rt2x00dev, "Failed to read Firmware\n");
61 release_firmware(fw); 61 release_firmware(fw);
62 return -ENOENT; 62 return -ENOENT;
63 } 63 }
64 64
65 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", 65 rt2x00_info(rt2x00dev, "Firmware detected - version: %d.%d\n",
66 fw->data[fw->size - 4], fw->data[fw->size - 3]); 66 fw->data[fw->size - 4], fw->data[fw->size - 3]);
67 snprintf(rt2x00dev->hw->wiphy->fw_version, 67 snprintf(rt2x00dev->hw->wiphy->fw_version,
68 sizeof(rt2x00dev->hw->wiphy->fw_version), "%d.%d", 68 sizeof(rt2x00dev->hw->wiphy->fw_version), "%d.%d",
69 fw->data[fw->size - 4], fw->data[fw->size - 3]); 69 fw->data[fw->size - 4], fw->data[fw->size - 3]);
@@ -73,15 +73,14 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
73 case FW_OK: 73 case FW_OK:
74 break; 74 break;
75 case FW_BAD_CRC: 75 case FW_BAD_CRC:
76 ERROR(rt2x00dev, "Firmware checksum error.\n"); 76 rt2x00_err(rt2x00dev, "Firmware checksum error\n");
77 goto exit; 77 goto exit;
78 case FW_BAD_LENGTH: 78 case FW_BAD_LENGTH:
79 ERROR(rt2x00dev, 79 rt2x00_err(rt2x00dev, "Invalid firmware file length (len=%zu)\n",
80 "Invalid firmware file length (len=%zu)\n", fw->size); 80 fw->size);
81 goto exit; 81 goto exit;
82 case FW_BAD_VERSION: 82 case FW_BAD_VERSION:
83 ERROR(rt2x00dev, 83 rt2x00_err(rt2x00dev, "Current firmware does not support detected chipset\n");
84 "Current firmware does not support detected chipset.\n");
85 goto exit; 84 goto exit;
86 } 85 }
87 86
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 8679d781a264..997a6c89e66e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -113,7 +113,7 @@ static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
113 113
114 retval = led_classdev_register(device, &led->led_dev); 114 retval = led_classdev_register(device, &led->led_dev);
115 if (retval) { 115 if (retval) {
116 ERROR(rt2x00dev, "Failed to register led handler.\n"); 116 rt2x00_err(rt2x00dev, "Failed to register led handler\n");
117 return retval; 117 return retval;
118 } 118 }
119 119
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 20c6eccce5aa..f883802f3505 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -46,7 +46,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
46 46
47 skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom); 47 skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
48 if (unlikely(!skb)) { 48 if (unlikely(!skb)) {
49 WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); 49 rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
50 return -ENOMEM; 50 return -ENOMEM;
51 } 51 }
52 52
@@ -93,7 +93,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
93 retval = rt2x00queue_write_tx_frame(queue, skb, true); 93 retval = rt2x00queue_write_tx_frame(queue, skb, true);
94 if (retval) { 94 if (retval) {
95 dev_kfree_skb_any(skb); 95 dev_kfree_skb_any(skb);
96 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 96 rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
97 } 97 }
98 98
99 return retval; 99 return retval;
@@ -126,9 +126,9 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
126 126
127 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 127 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
128 if (unlikely(!queue)) { 128 if (unlikely(!queue)) {
129 ERROR(rt2x00dev, 129 rt2x00_err(rt2x00dev,
130 "Attempt to send packet over invalid queue %d.\n" 130 "Attempt to send packet over invalid queue %d\n"
131 "Please file bug report to %s.\n", qid, DRV_PROJECT); 131 "Please file bug report to %s\n", qid, DRV_PROJECT);
132 goto exit_free_skb; 132 goto exit_free_skb;
133 } 133 }
134 134
@@ -731,9 +731,10 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
731 queue->aifs = params->aifs; 731 queue->aifs = params->aifs;
732 queue->txop = params->txop; 732 queue->txop = params->txop;
733 733
734 DEBUG(rt2x00dev, 734 rt2x00_dbg(rt2x00dev,
735 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n", 735 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d\n",
736 queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop); 736 queue_idx, queue->cw_min, queue->cw_max, queue->aifs,
737 queue->txop);
737 738
738 return 0; 739 return 0;
739} 740}
@@ -748,7 +749,7 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
748} 749}
749EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); 750EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
750 751
751void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop) 752void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
752{ 753{
753 struct rt2x00_dev *rt2x00dev = hw->priv; 754 struct rt2x00_dev *rt2x00dev = hw->priv;
754 struct data_queue *queue; 755 struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
index d84a680ba0c9..64b06c6abe58 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -34,10 +34,10 @@
34/* 34/*
35 * Register access. 35 * Register access.
36 */ 36 */
37int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, 37int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
38 const unsigned int offset, 38 const unsigned int offset,
39 const struct rt2x00_field32 field, 39 const struct rt2x00_field32 field,
40 u32 *reg) 40 u32 *reg)
41{ 41{
42 unsigned int i; 42 unsigned int i;
43 43
@@ -45,7 +45,7 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
45 return 0; 45 return 0;
46 46
47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
48 rt2x00pci_register_read(rt2x00dev, offset, reg); 48 rt2x00mmio_register_read(rt2x00dev, offset, reg);
49 if (!rt2x00_get_field32(*reg, field)) 49 if (!rt2x00_get_field32(*reg, field))
50 return 1; 50 return 1;
51 udelay(REGISTER_BUSY_DELAY); 51 udelay(REGISTER_BUSY_DELAY);
@@ -57,13 +57,13 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
57 57
58 return 0; 58 return 0;
59} 59}
60EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); 60EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);
61 61
62bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) 62bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
63{ 63{
64 struct data_queue *queue = rt2x00dev->rx; 64 struct data_queue *queue = rt2x00dev->rx;
65 struct queue_entry *entry; 65 struct queue_entry *entry;
66 struct queue_entry_priv_pci *entry_priv; 66 struct queue_entry_priv_mmio *entry_priv;
67 struct skb_frame_desc *skbdesc; 67 struct skb_frame_desc *skbdesc;
68 int max_rx = 16; 68 int max_rx = 16;
69 69
@@ -96,24 +96,24 @@ bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
96 96
97 return !max_rx; 97 return !max_rx;
98} 98}
99EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); 99EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);
100 100
101void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) 101void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
102{ 102{
103 unsigned int i; 103 unsigned int i;
104 104
105 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) 105 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
106 msleep(10); 106 msleep(10);
107} 107}
108EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); 108EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);
109 109
110/* 110/*
111 * Device initialization handlers. 111 * Device initialization handlers.
112 */ 112 */
113static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, 113static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
114 struct data_queue *queue) 114 struct data_queue *queue)
115{ 115{
116 struct queue_entry_priv_pci *entry_priv; 116 struct queue_entry_priv_mmio *entry_priv;
117 void *addr; 117 void *addr;
118 dma_addr_t dma; 118 dma_addr_t dma;
119 unsigned int i; 119 unsigned int i;
@@ -141,10 +141,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
141 return 0; 141 return 0;
142} 142}
143 143
144static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, 144static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
145 struct data_queue *queue) 145 struct data_queue *queue)
146{ 146{
147 struct queue_entry_priv_pci *entry_priv = 147 struct queue_entry_priv_mmio *entry_priv =
148 queue->entries[0].priv_data; 148 queue->entries[0].priv_data;
149 149
150 if (entry_priv->desc) 150 if (entry_priv->desc)
@@ -154,7 +154,7 @@ static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
154 entry_priv->desc = NULL; 154 entry_priv->desc = NULL;
155} 155}
156 156
157int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) 157int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
158{ 158{
159 struct data_queue *queue; 159 struct data_queue *queue;
160 int status; 160 int status;
@@ -163,7 +163,7 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
163 * Allocate DMA 163 * Allocate DMA
164 */ 164 */
165 queue_for_each(rt2x00dev, queue) { 165 queue_for_each(rt2x00dev, queue) {
166 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); 166 status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
167 if (status) 167 if (status)
168 goto exit; 168 goto exit;
169 } 169 }
@@ -175,8 +175,8 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
175 rt2x00dev->ops->lib->irq_handler, 175 rt2x00dev->ops->lib->irq_handler,
176 IRQF_SHARED, rt2x00dev->name, rt2x00dev); 176 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
177 if (status) { 177 if (status) {
178 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 178 rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
179 rt2x00dev->irq, status); 179 rt2x00dev->irq, status);
180 goto exit; 180 goto exit;
181 } 181 }
182 182
@@ -184,13 +184,13 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
184 184
185exit: 185exit:
186 queue_for_each(rt2x00dev, queue) 186 queue_for_each(rt2x00dev, queue)
187 rt2x00pci_free_queue_dma(rt2x00dev, queue); 187 rt2x00mmio_free_queue_dma(rt2x00dev, queue);
188 188
189 return status; 189 return status;
190} 190}
191EXPORT_SYMBOL_GPL(rt2x00pci_initialize); 191EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);
192 192
193void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) 193void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
194{ 194{
195 struct data_queue *queue; 195 struct data_queue *queue;
196 196
@@ -203,9 +203,9 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
203 * Free DMA 203 * Free DMA
204 */ 204 */
205 queue_for_each(rt2x00dev, queue) 205 queue_for_each(rt2x00dev, queue)
206 rt2x00pci_free_queue_dma(rt2x00dev, queue); 206 rt2x00mmio_free_queue_dma(rt2x00dev, queue);
207} 207}
208EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); 208EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);
209 209
210/* 210/*
211 * rt2x00mmio module information. 211 * rt2x00mmio module information.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
index 4ecaf60175bf..cda3dbcf7ead 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -31,37 +31,37 @@
31/* 31/*
32 * Register access. 32 * Register access.
33 */ 33 */
34static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, 34static inline void rt2x00mmio_register_read(struct rt2x00_dev *rt2x00dev,
35 const unsigned int offset, 35 const unsigned int offset,
36 u32 *value) 36 u32 *value)
37{ 37{
38 *value = readl(rt2x00dev->csr.base + offset); 38 *value = readl(rt2x00dev->csr.base + offset);
39} 39}
40 40
41static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, 41static inline void rt2x00mmio_register_multiread(struct rt2x00_dev *rt2x00dev,
42 const unsigned int offset, 42 const unsigned int offset,
43 void *value, const u32 length) 43 void *value, const u32 length)
44{ 44{
45 memcpy_fromio(value, rt2x00dev->csr.base + offset, length); 45 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
46} 46}
47 47
48static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, 48static inline void rt2x00mmio_register_write(struct rt2x00_dev *rt2x00dev,
49 const unsigned int offset, 49 const unsigned int offset,
50 u32 value) 50 u32 value)
51{ 51{
52 writel(value, rt2x00dev->csr.base + offset); 52 writel(value, rt2x00dev->csr.base + offset);
53} 53}
54 54
55static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, 55static inline void rt2x00mmio_register_multiwrite(struct rt2x00_dev *rt2x00dev,
56 const unsigned int offset, 56 const unsigned int offset,
57 const void *value, 57 const void *value,
58 const u32 length) 58 const u32 length)
59{ 59{
60 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); 60 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
61} 61}
62 62
63/** 63/**
64 * rt2x00pci_regbusy_read - Read from register with busy check 64 * rt2x00mmio_regbusy_read - Read from register with busy check
65 * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 65 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
66 * @offset: Register offset 66 * @offset: Register offset
67 * @field: Field to check if register is busy 67 * @field: Field to check if register is busy
@@ -73,47 +73,47 @@ static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
73 * is not read after a certain timeout, this function will return 73 * is not read after a certain timeout, this function will return
74 * FALSE. 74 * FALSE.
75 */ 75 */
76int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, 76int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
77 const unsigned int offset, 77 const unsigned int offset,
78 const struct rt2x00_field32 field, 78 const struct rt2x00_field32 field,
79 u32 *reg); 79 u32 *reg);
80 80
81/** 81/**
82 * struct queue_entry_priv_pci: Per entry PCI specific information 82 * struct queue_entry_priv_mmio: Per entry PCI specific information
83 * 83 *
84 * @desc: Pointer to device descriptor 84 * @desc: Pointer to device descriptor
85 * @desc_dma: DMA pointer to &desc. 85 * @desc_dma: DMA pointer to &desc.
86 * @data: Pointer to device's entry memory. 86 * @data: Pointer to device's entry memory.
87 * @data_dma: DMA pointer to &data. 87 * @data_dma: DMA pointer to &data.
88 */ 88 */
89struct queue_entry_priv_pci { 89struct queue_entry_priv_mmio {
90 __le32 *desc; 90 __le32 *desc;
91 dma_addr_t desc_dma; 91 dma_addr_t desc_dma;
92}; 92};
93 93
94/** 94/**
95 * rt2x00pci_rxdone - Handle RX done events 95 * rt2x00mmio_rxdone - Handle RX done events
96 * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 96 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
97 * 97 *
98 * Returns true if there are still rx frames pending and false if all 98 * Returns true if there are still rx frames pending and false if all
99 * pending rx frames were processed. 99 * pending rx frames were processed.
100 */ 100 */
101bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); 101bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev);
102 102
103/** 103/**
104 * rt2x00pci_flush_queue - Flush data queue 104 * rt2x00mmio_flush_queue - Flush data queue
105 * @queue: Data queue to stop 105 * @queue: Data queue to stop
106 * @drop: True to drop all pending frames. 106 * @drop: True to drop all pending frames.
107 * 107 *
108 * This will wait for a maximum of 100ms, waiting for the queues 108 * This will wait for a maximum of 100ms, waiting for the queues
109 * to become empty. 109 * to become empty.
110 */ 110 */
111void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); 111void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop);
112 112
113/* 113/*
114 * Device initialization handlers. 114 * Device initialization handlers.
115 */ 115 */
116int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); 116int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev);
117void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); 117void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev);
118 118
119#endif /* RT2X00MMIO_H */ 119#endif /* RT2X00MMIO_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index e87865e33113..dc49e525ae5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -68,7 +68,7 @@ static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
68 return 0; 68 return 0;
69 69
70exit: 70exit:
71 ERROR_PROBE("Failed to allocate registers.\n"); 71 rt2x00_probe_err("Failed to allocate registers\n");
72 72
73 rt2x00pci_free_reg(rt2x00dev); 73 rt2x00pci_free_reg(rt2x00dev);
74 74
@@ -84,30 +84,30 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
84 84
85 retval = pci_enable_device(pci_dev); 85 retval = pci_enable_device(pci_dev);
86 if (retval) { 86 if (retval) {
87 ERROR_PROBE("Enable device failed.\n"); 87 rt2x00_probe_err("Enable device failed\n");
88 return retval; 88 return retval;
89 } 89 }
90 90
91 retval = pci_request_regions(pci_dev, pci_name(pci_dev)); 91 retval = pci_request_regions(pci_dev, pci_name(pci_dev));
92 if (retval) { 92 if (retval) {
93 ERROR_PROBE("PCI request regions failed.\n"); 93 rt2x00_probe_err("PCI request regions failed\n");
94 goto exit_disable_device; 94 goto exit_disable_device;
95 } 95 }
96 96
97 pci_set_master(pci_dev); 97 pci_set_master(pci_dev);
98 98
99 if (pci_set_mwi(pci_dev)) 99 if (pci_set_mwi(pci_dev))
100 ERROR_PROBE("MWI not available.\n"); 100 rt2x00_probe_err("MWI not available\n");
101 101
102 if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { 102 if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
103 ERROR_PROBE("PCI DMA not supported.\n"); 103 rt2x00_probe_err("PCI DMA not supported\n");
104 retval = -EIO; 104 retval = -EIO;
105 goto exit_release_regions; 105 goto exit_release_regions;
106 } 106 }
107 107
108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
109 if (!hw) { 109 if (!hw) {
110 ERROR_PROBE("Failed to allocate hardware.\n"); 110 rt2x00_probe_err("Failed to allocate hardware\n");
111 retval = -ENOMEM; 111 retval = -ENOMEM;
112 goto exit_release_regions; 112 goto exit_release_regions;
113 } 113 }
@@ -207,7 +207,7 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
207 207
208 if (pci_set_power_state(pci_dev, PCI_D0) || 208 if (pci_set_power_state(pci_dev, PCI_D0) ||
209 pci_enable_device(pci_dev)) { 209 pci_enable_device(pci_dev)) {
210 ERROR(rt2x00dev, "Failed to resume device.\n"); 210 rt2x00_err(rt2x00dev, "Failed to resume device\n");
211 return -EIO; 211 return -EIO;
212 } 212 }
213 213
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4d91795dc6a2..2c12311467a9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -35,7 +35,8 @@
35 35
36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) 36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
37{ 37{
38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 38 struct data_queue *queue = entry->queue;
39 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
39 struct sk_buff *skb; 40 struct sk_buff *skb;
40 struct skb_frame_desc *skbdesc; 41 struct skb_frame_desc *skbdesc;
41 unsigned int frame_size; 42 unsigned int frame_size;
@@ -46,7 +47,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
46 * The frame size includes descriptor size, because the 47 * The frame size includes descriptor size, because the
47 * hardware directly receive the frame into the skbuffer. 48 * hardware directly receive the frame into the skbuffer.
48 */ 49 */
49 frame_size = entry->queue->data_size + entry->queue->desc_size; 50 frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
50 51
51 /* 52 /*
52 * The payload should be aligned to a 4-byte boundary, 53 * The payload should be aligned to a 4-byte boundary,
@@ -531,10 +532,10 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
531 */ 532 */
532 if (unlikely(rt2x00dev->ops->lib->get_entry_state && 533 if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
533 rt2x00dev->ops->lib->get_entry_state(entry))) { 534 rt2x00dev->ops->lib->get_entry_state(entry))) {
534 ERROR(rt2x00dev, 535 rt2x00_err(rt2x00dev,
535 "Corrupt queue %d, accessing entry which is not ours.\n" 536 "Corrupt queue %d, accessing entry which is not ours\n"
536 "Please file bug report to %s.\n", 537 "Please file bug report to %s\n",
537 entry->queue->qid, DRV_PROJECT); 538 entry->queue->qid, DRV_PROJECT);
538 return -EINVAL; 539 return -EINVAL;
539 } 540 }
540 541
@@ -698,8 +699,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
698 spin_lock(&queue->tx_lock); 699 spin_lock(&queue->tx_lock);
699 700
700 if (unlikely(rt2x00queue_full(queue))) { 701 if (unlikely(rt2x00queue_full(queue))) {
701 ERROR(queue->rt2x00dev, 702 rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
702 "Dropping frame due to full tx queue %d.\n", queue->qid); 703 queue->qid);
703 ret = -ENOBUFS; 704 ret = -ENOBUFS;
704 goto out; 705 goto out;
705 } 706 }
@@ -708,10 +709,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
708 709
709 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, 710 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
710 &entry->flags))) { 711 &entry->flags))) {
711 ERROR(queue->rt2x00dev, 712 rt2x00_err(queue->rt2x00dev,
712 "Arrived at non-free entry in the non-full queue %d.\n" 713 "Arrived at non-free entry in the non-full queue %d\n"
713 "Please file bug report to %s.\n", 714 "Please file bug report to %s\n",
714 queue->qid, DRV_PROJECT); 715 queue->qid, DRV_PROJECT);
715 ret = -EINVAL; 716 ret = -EINVAL;
716 goto out; 717 goto out;
717 } 718 }
@@ -832,7 +833,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
832bool rt2x00queue_for_each_entry(struct data_queue *queue, 833bool rt2x00queue_for_each_entry(struct data_queue *queue,
833 enum queue_index start, 834 enum queue_index start,
834 enum queue_index end, 835 enum queue_index end,
835 bool (*fn)(struct queue_entry *entry)) 836 void *data,
837 bool (*fn)(struct queue_entry *entry,
838 void *data))
836{ 839{
837 unsigned long irqflags; 840 unsigned long irqflags;
838 unsigned int index_start; 841 unsigned int index_start;
@@ -840,9 +843,9 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
840 unsigned int i; 843 unsigned int i;
841 844
842 if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { 845 if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
843 ERROR(queue->rt2x00dev, 846 rt2x00_err(queue->rt2x00dev,
844 "Entry requested from invalid index range (%d - %d)\n", 847 "Entry requested from invalid index range (%d - %d)\n",
845 start, end); 848 start, end);
846 return true; 849 return true;
847 } 850 }
848 851
@@ -863,17 +866,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
863 */ 866 */
864 if (index_start < index_end) { 867 if (index_start < index_end) {
865 for (i = index_start; i < index_end; i++) { 868 for (i = index_start; i < index_end; i++) {
866 if (fn(&queue->entries[i])) 869 if (fn(&queue->entries[i], data))
867 return true; 870 return true;
868 } 871 }
869 } else { 872 } else {
870 for (i = index_start; i < queue->limit; i++) { 873 for (i = index_start; i < queue->limit; i++) {
871 if (fn(&queue->entries[i])) 874 if (fn(&queue->entries[i], data))
872 return true; 875 return true;
873 } 876 }
874 877
875 for (i = 0; i < index_end; i++) { 878 for (i = 0; i < index_end; i++) {
876 if (fn(&queue->entries[i])) 879 if (fn(&queue->entries[i], data))
877 return true; 880 return true;
878 } 881 }
879 } 882 }
@@ -889,8 +892,8 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
889 unsigned long irqflags; 892 unsigned long irqflags;
890 893
891 if (unlikely(index >= Q_INDEX_MAX)) { 894 if (unlikely(index >= Q_INDEX_MAX)) {
892 ERROR(queue->rt2x00dev, 895 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
893 "Entry requested from invalid index type (%d)\n", index); 896 index);
894 return NULL; 897 return NULL;
895 } 898 }
896 899
@@ -910,8 +913,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
910 unsigned long irqflags; 913 unsigned long irqflags;
911 914
912 if (unlikely(index >= Q_INDEX_MAX)) { 915 if (unlikely(index >= Q_INDEX_MAX)) {
913 ERROR(queue->rt2x00dev, 916 rt2x00_err(queue->rt2x00dev,
914 "Index change on invalid index type (%d)\n", index); 917 "Index change on invalid index type (%d)\n", index);
915 return; 918 return;
916 } 919 }
917 920
@@ -1071,7 +1074,8 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
1071 * The queue flush has failed... 1074 * The queue flush has failed...
1072 */ 1075 */
1073 if (unlikely(!rt2x00queue_empty(queue))) 1076 if (unlikely(!rt2x00queue_empty(queue)))
1074 WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid); 1077 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1078 queue->qid);
1075 1079
1076 /* 1080 /*
1077 * Restore the queue to the previous status 1081 * Restore the queue to the previous status
@@ -1170,6 +1174,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
1170 queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10); 1174 queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
1171 queue->data_size = qdesc->data_size; 1175 queue->data_size = qdesc->data_size;
1172 queue->desc_size = qdesc->desc_size; 1176 queue->desc_size = qdesc->desc_size;
1177 queue->winfo_size = qdesc->winfo_size;
1173 1178
1174 /* 1179 /*
1175 * Allocate all queue entries. 1180 * Allocate all queue entries.
@@ -1260,7 +1265,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1260 return 0; 1265 return 0;
1261 1266
1262exit: 1267exit:
1263 ERROR(rt2x00dev, "Queue entries allocation failed.\n"); 1268 rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1264 1269
1265 rt2x00queue_uninitialize(rt2x00dev); 1270 rt2x00queue_uninitialize(rt2x00dev);
1266 1271
@@ -1312,7 +1317,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1312 1317
1313 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); 1318 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1314 if (!queue) { 1319 if (!queue) {
1315 ERROR(rt2x00dev, "Queue allocation failed.\n"); 1320 rt2x00_err(rt2x00dev, "Queue allocation failed\n");
1316 return -ENOMEM; 1321 return -ENOMEM;
1317 } 1322 }
1318 1323
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 9b8c10a86dee..4a7b34e9261b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -359,6 +359,7 @@ enum queue_entry_flags {
359 ENTRY_DATA_PENDING, 359 ENTRY_DATA_PENDING,
360 ENTRY_DATA_IO_FAILED, 360 ENTRY_DATA_IO_FAILED,
361 ENTRY_DATA_STATUS_PENDING, 361 ENTRY_DATA_STATUS_PENDING,
362 ENTRY_DATA_STATUS_SET,
362}; 363};
363 364
364/** 365/**
@@ -372,6 +373,7 @@ enum queue_entry_flags {
372 * @entry_idx: The entry index number. 373 * @entry_idx: The entry index number.
373 * @priv_data: Private data belonging to this queue entry. The pointer 374 * @priv_data: Private data belonging to this queue entry. The pointer
374 * points to data specific to a particular driver and queue type. 375 * points to data specific to a particular driver and queue type.
376 * @status: Device specific status
375 */ 377 */
376struct queue_entry { 378struct queue_entry {
377 unsigned long flags; 379 unsigned long flags;
@@ -383,6 +385,8 @@ struct queue_entry {
383 385
384 unsigned int entry_idx; 386 unsigned int entry_idx;
385 387
388 u32 status;
389
386 void *priv_data; 390 void *priv_data;
387}; 391};
388 392
@@ -475,7 +479,8 @@ struct data_queue {
475 unsigned short cw_max; 479 unsigned short cw_max;
476 480
477 unsigned short data_size; 481 unsigned short data_size;
478 unsigned short desc_size; 482 unsigned char desc_size;
483 unsigned char winfo_size;
479 484
480 unsigned short usb_endpoint; 485 unsigned short usb_endpoint;
481 unsigned short usb_maxpacket; 486 unsigned short usb_maxpacket;
@@ -495,7 +500,8 @@ struct data_queue {
495struct data_queue_desc { 500struct data_queue_desc {
496 unsigned short entry_num; 501 unsigned short entry_num;
497 unsigned short data_size; 502 unsigned short data_size;
498 unsigned short desc_size; 503 unsigned char desc_size;
504 unsigned char winfo_size;
499 unsigned short priv_size; 505 unsigned short priv_size;
500}; 506};
501 507
@@ -584,6 +590,7 @@ struct data_queue_desc {
584 * @queue: Pointer to @data_queue 590 * @queue: Pointer to @data_queue
585 * @start: &enum queue_index Pointer to start index 591 * @start: &enum queue_index Pointer to start index
586 * @end: &enum queue_index Pointer to end index 592 * @end: &enum queue_index Pointer to end index
593 * @data: Data to pass to the callback function
587 * @fn: The function to call for each &struct queue_entry 594 * @fn: The function to call for each &struct queue_entry
588 * 595 *
589 * This will walk through all entries in the queue, in chronological 596 * This will walk through all entries in the queue, in chronological
@@ -596,7 +603,9 @@ struct data_queue_desc {
596bool rt2x00queue_for_each_entry(struct data_queue *queue, 603bool rt2x00queue_for_each_entry(struct data_queue *queue,
597 enum queue_index start, 604 enum queue_index start,
598 enum queue_index end, 605 enum queue_index end,
599 bool (*fn)(struct queue_entry *entry)); 606 void *data,
607 bool (*fn)(struct queue_entry *entry,
608 void *data));
600 609
601/** 610/**
602 * rt2x00queue_empty - Check if the queue is empty. 611 * rt2x00queue_empty - Check if the queue is empty.
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 2aa5c38022f3..9271a5fce0a8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -68,7 +68,7 @@ static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
68 return 0; 68 return 0;
69 69
70exit: 70exit:
71 ERROR_PROBE("Failed to allocate registers.\n"); 71 rt2x00_probe_err("Failed to allocate registers\n");
72 rt2x00soc_free_reg(rt2x00dev); 72 rt2x00soc_free_reg(rt2x00dev);
73 73
74 return -ENOMEM; 74 return -ENOMEM;
@@ -82,7 +82,7 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
82 82
83 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 83 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
84 if (!hw) { 84 if (!hw) {
85 ERROR_PROBE("Failed to allocate hardware.\n"); 85 rt2x00_probe_err("Failed to allocate hardware\n");
86 return -ENOMEM; 86 return -ENOMEM;
87 } 87 }
88 88
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 40ea80725a96..88289873c0cf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -70,9 +70,9 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
70 } 70 }
71 } 71 }
72 72
73 ERROR(rt2x00dev, 73 rt2x00_err(rt2x00dev,
74 "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n", 74 "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
75 request, offset, status); 75 request, offset, status);
76 76
77 return status; 77 return status;
78} 78}
@@ -91,7 +91,7 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
91 * Check for Cache availability. 91 * Check for Cache availability.
92 */ 92 */
93 if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) { 93 if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
94 ERROR(rt2x00dev, "CSR cache not available.\n"); 94 rt2x00_err(rt2x00dev, "CSR cache not available\n");
95 return -ENOMEM; 95 return -ENOMEM;
96 } 96 }
97 97
@@ -157,8 +157,8 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
157 udelay(REGISTER_BUSY_DELAY); 157 udelay(REGISTER_BUSY_DELAY);
158 } 158 }
159 159
160 ERROR(rt2x00dev, "Indirect register access failed: " 160 rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
161 "offset=0x%.08x, value=0x%.08x\n", offset, *reg); 161 offset, *reg);
162 *reg = ~0; 162 *reg = ~0;
163 163
164 return 0; 164 return 0;
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
285 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 285 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
286} 286}
287 287
288static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry) 288static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
289{ 289{
290 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 290 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
291 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 291 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -307,7 +307,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
307 status = skb_padto(entry->skb, length); 307 status = skb_padto(entry->skb, length);
308 if (unlikely(status)) { 308 if (unlikely(status)) {
309 /* TODO: report something more appropriate than IO_FAILED. */ 309 /* TODO: report something more appropriate than IO_FAILED. */
310 WARNING(rt2x00dev, "TX SKB padding error, out of memory\n"); 310 rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
311 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 311 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
312 rt2x00lib_dmadone(entry); 312 rt2x00lib_dmadone(entry);
313 313
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
390 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); 390 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
391} 391}
392 392
393static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry) 393static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
394{ 394{
395 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 395 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
396 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 396 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,12 +427,18 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
427 case QID_AC_BE: 427 case QID_AC_BE:
428 case QID_AC_BK: 428 case QID_AC_BK:
429 if (!rt2x00queue_empty(queue)) 429 if (!rt2x00queue_empty(queue))
430 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, 430 rt2x00queue_for_each_entry(queue,
431 Q_INDEX_DONE,
432 Q_INDEX,
433 NULL,
431 rt2x00usb_kick_tx_entry); 434 rt2x00usb_kick_tx_entry);
432 break; 435 break;
433 case QID_RX: 436 case QID_RX:
434 if (!rt2x00queue_full(queue)) 437 if (!rt2x00queue_full(queue))
435 rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE, 438 rt2x00queue_for_each_entry(queue,
439 Q_INDEX,
440 Q_INDEX_DONE,
441 NULL,
436 rt2x00usb_kick_rx_entry); 442 rt2x00usb_kick_rx_entry);
437 break; 443 break;
438 default: 444 default:
@@ -441,7 +447,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
441} 447}
442EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue); 448EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
443 449
444static bool rt2x00usb_flush_entry(struct queue_entry *entry) 450static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
445{ 451{
446 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 452 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
447 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 453 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -468,7 +474,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
468 unsigned int i; 474 unsigned int i;
469 475
470 if (drop) 476 if (drop)
471 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, 477 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
472 rt2x00usb_flush_entry); 478 rt2x00usb_flush_entry);
473 479
474 /* 480 /*
@@ -514,8 +520,8 @@ EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
514 520
515static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue) 521static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
516{ 522{
517 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out," 523 rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
518 " invoke forced forced reset\n", queue->qid); 524 queue->qid);
519 525
520 rt2x00queue_flush_queue(queue, true); 526 rt2x00queue_flush_queue(queue, true);
521} 527}
@@ -559,7 +565,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
559 entry->flags = 0; 565 entry->flags = 0;
560 566
561 if (entry->queue->qid == QID_RX) 567 if (entry->queue->qid == QID_RX)
562 rt2x00usb_kick_rx_entry(entry); 568 rt2x00usb_kick_rx_entry(entry, NULL);
563} 569}
564EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 570EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
565 571
@@ -616,7 +622,7 @@ static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
616 * At least 1 endpoint for RX and 1 endpoint for TX must be available. 622 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
617 */ 623 */
618 if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) { 624 if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
619 ERROR(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n"); 625 rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
620 return -EPIPE; 626 return -EPIPE;
621 } 627 }
622 628
@@ -769,7 +775,7 @@ static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
769 return 0; 775 return 0;
770 776
771exit: 777exit:
772 ERROR_PROBE("Failed to allocate registers.\n"); 778 rt2x00_probe_err("Failed to allocate registers\n");
773 779
774 rt2x00usb_free_reg(rt2x00dev); 780 rt2x00usb_free_reg(rt2x00dev);
775 781
@@ -789,7 +795,7 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
789 795
790 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 796 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
791 if (!hw) { 797 if (!hw) {
792 ERROR_PROBE("Failed to allocate hardware.\n"); 798 rt2x00_probe_err("Failed to allocate hardware\n");
793 retval = -ENOMEM; 799 retval = -ENOMEM;
794 goto exit_put_device; 800 goto exit_put_device;
795 } 801 }
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 9e3c8ff53e3f..0dc8180e251b 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -58,12 +58,12 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
58 * and we will print an error. 58 * and we will print an error.
59 */ 59 */
60#define WAIT_FOR_BBP(__dev, __reg) \ 60#define WAIT_FOR_BBP(__dev, __reg) \
61 rt2x00pci_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) 61 rt2x00mmio_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg))
62#define WAIT_FOR_RF(__dev, __reg) \ 62#define WAIT_FOR_RF(__dev, __reg) \
63 rt2x00pci_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) 63 rt2x00mmio_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg))
64#define WAIT_FOR_MCU(__dev, __reg) \ 64#define WAIT_FOR_MCU(__dev, __reg) \
65 rt2x00pci_regbusy_read((__dev), H2M_MAILBOX_CSR, \ 65 rt2x00mmio_regbusy_read((__dev), H2M_MAILBOX_CSR, \
66 H2M_MAILBOX_CSR_OWNER, (__reg)) 66 H2M_MAILBOX_CSR_OWNER, (__reg))
67 67
68static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev, 68static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev,
69 const unsigned int word, const u8 value) 69 const unsigned int word, const u8 value)
@@ -83,7 +83,7 @@ static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev,
83 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); 83 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
84 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); 84 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
85 85
86 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); 86 rt2x00mmio_register_write(rt2x00dev, PHY_CSR3, reg);
87 } 87 }
88 88
89 mutex_unlock(&rt2x00dev->csr_mutex); 89 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -110,7 +110,7 @@ static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
110 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); 110 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
111 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); 111 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1);
112 112
113 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); 113 rt2x00mmio_register_write(rt2x00dev, PHY_CSR3, reg);
114 114
115 WAIT_FOR_BBP(rt2x00dev, &reg); 115 WAIT_FOR_BBP(rt2x00dev, &reg);
116 } 116 }
@@ -138,7 +138,7 @@ static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev,
138 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); 138 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
139 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); 139 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
140 140
141 rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg); 141 rt2x00mmio_register_write(rt2x00dev, PHY_CSR4, reg);
142 rt2x00_rf_write(rt2x00dev, word, value); 142 rt2x00_rf_write(rt2x00dev, word, value);
143 } 143 }
144 144
@@ -162,12 +162,12 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
162 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token); 162 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
163 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0); 163 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
164 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1); 164 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
165 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg); 165 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
166 166
167 rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, &reg); 167 rt2x00mmio_register_read(rt2x00dev, HOST_CMD_CSR, &reg);
168 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command); 168 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
169 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1); 169 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
170 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); 170 rt2x00mmio_register_write(rt2x00dev, HOST_CMD_CSR, reg);
171 } 171 }
172 172
173 mutex_unlock(&rt2x00dev->csr_mutex); 173 mutex_unlock(&rt2x00dev->csr_mutex);
@@ -179,7 +179,7 @@ static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
179 struct rt2x00_dev *rt2x00dev = eeprom->data; 179 struct rt2x00_dev *rt2x00dev = eeprom->data;
180 u32 reg; 180 u32 reg;
181 181
182 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); 182 rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
183 183
184 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); 184 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
185 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); 185 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
@@ -201,15 +201,15 @@ static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
201 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, 201 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
202 !!eeprom->reg_chip_select); 202 !!eeprom->reg_chip_select);
203 203
204 rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); 204 rt2x00mmio_register_write(rt2x00dev, E2PROM_CSR, reg);
205} 205}
206 206
207#ifdef CONFIG_RT2X00_LIB_DEBUGFS 207#ifdef CONFIG_RT2X00_LIB_DEBUGFS
208static const struct rt2x00debug rt61pci_rt2x00debug = { 208static const struct rt2x00debug rt61pci_rt2x00debug = {
209 .owner = THIS_MODULE, 209 .owner = THIS_MODULE,
210 .csr = { 210 .csr = {
211 .read = rt2x00pci_register_read, 211 .read = rt2x00mmio_register_read,
212 .write = rt2x00pci_register_write, 212 .write = rt2x00mmio_register_write,
213 .flags = RT2X00DEBUGFS_OFFSET, 213 .flags = RT2X00DEBUGFS_OFFSET,
214 .word_base = CSR_REG_BASE, 214 .word_base = CSR_REG_BASE,
215 .word_size = sizeof(u32), 215 .word_size = sizeof(u32),
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
243{ 243{
244 u32 reg; 244 u32 reg;
245 245
246 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 246 rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
247 return rt2x00_get_field32(reg, MAC_CSR13_VAL5); 247 return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
248} 248}
249 249
@@ -294,10 +294,10 @@ static int rt61pci_blink_set(struct led_classdev *led_cdev,
294 container_of(led_cdev, struct rt2x00_led, led_dev); 294 container_of(led_cdev, struct rt2x00_led, led_dev);
295 u32 reg; 295 u32 reg;
296 296
297 rt2x00pci_register_read(led->rt2x00dev, MAC_CSR14, &reg); 297 rt2x00mmio_register_read(led->rt2x00dev, MAC_CSR14, &reg);
298 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); 298 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
299 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); 299 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
300 rt2x00pci_register_write(led->rt2x00dev, MAC_CSR14, reg); 300 rt2x00mmio_register_write(led->rt2x00dev, MAC_CSR14, reg);
301 301
302 return 0; 302 return 0;
303} 303}
@@ -339,7 +339,7 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
339 */ 339 */
340 mask = (0xf << crypto->bssidx); 340 mask = (0xf << crypto->bssidx);
341 341
342 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg); 342 rt2x00mmio_register_read(rt2x00dev, SEC_CSR0, &reg);
343 reg &= mask; 343 reg &= mask;
344 344
345 if (reg && reg == mask) 345 if (reg && reg == mask)
@@ -358,8 +358,8 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
358 sizeof(key_entry.rx_mic)); 358 sizeof(key_entry.rx_mic));
359 359
360 reg = SHARED_KEY_ENTRY(key->hw_key_idx); 360 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
361 rt2x00pci_register_multiwrite(rt2x00dev, reg, 361 rt2x00mmio_register_multiwrite(rt2x00dev, reg,
362 &key_entry, sizeof(key_entry)); 362 &key_entry, sizeof(key_entry));
363 363
364 /* 364 /*
365 * The cipher types are stored over 2 registers. 365 * The cipher types are stored over 2 registers.
@@ -372,16 +372,16 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
372 field.bit_offset = (3 * key->hw_key_idx); 372 field.bit_offset = (3 * key->hw_key_idx);
373 field.bit_mask = 0x7 << field.bit_offset; 373 field.bit_mask = 0x7 << field.bit_offset;
374 374
375 rt2x00pci_register_read(rt2x00dev, SEC_CSR1, &reg); 375 rt2x00mmio_register_read(rt2x00dev, SEC_CSR1, &reg);
376 rt2x00_set_field32(&reg, field, crypto->cipher); 376 rt2x00_set_field32(&reg, field, crypto->cipher);
377 rt2x00pci_register_write(rt2x00dev, SEC_CSR1, reg); 377 rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg);
378 } else { 378 } else {
379 field.bit_offset = (3 * (key->hw_key_idx - 8)); 379 field.bit_offset = (3 * (key->hw_key_idx - 8));
380 field.bit_mask = 0x7 << field.bit_offset; 380 field.bit_mask = 0x7 << field.bit_offset;
381 381
382 rt2x00pci_register_read(rt2x00dev, SEC_CSR5, &reg); 382 rt2x00mmio_register_read(rt2x00dev, SEC_CSR5, &reg);
383 rt2x00_set_field32(&reg, field, crypto->cipher); 383 rt2x00_set_field32(&reg, field, crypto->cipher);
384 rt2x00pci_register_write(rt2x00dev, SEC_CSR5, reg); 384 rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg);
385 } 385 }
386 386
387 /* 387 /*
@@ -404,12 +404,12 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
404 */ 404 */
405 mask = 1 << key->hw_key_idx; 405 mask = 1 << key->hw_key_idx;
406 406
407 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg); 407 rt2x00mmio_register_read(rt2x00dev, SEC_CSR0, &reg);
408 if (crypto->cmd == SET_KEY) 408 if (crypto->cmd == SET_KEY)
409 reg |= mask; 409 reg |= mask;
410 else if (crypto->cmd == DISABLE_KEY) 410 else if (crypto->cmd == DISABLE_KEY)
411 reg &= ~mask; 411 reg &= ~mask;
412 rt2x00pci_register_write(rt2x00dev, SEC_CSR0, reg); 412 rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, reg);
413 413
414 return 0; 414 return 0;
415} 415}
@@ -433,10 +433,10 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
433 * When both registers are full, we drop the key. 433 * When both registers are full, we drop the key.
434 * Otherwise, we use the first invalid entry. 434 * Otherwise, we use the first invalid entry.
435 */ 435 */
436 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg); 436 rt2x00mmio_register_read(rt2x00dev, SEC_CSR2, &reg);
437 if (reg && reg == ~0) { 437 if (reg && reg == ~0) {
438 key->hw_key_idx = 32; 438 key->hw_key_idx = 32;
439 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg); 439 rt2x00mmio_register_read(rt2x00dev, SEC_CSR3, &reg);
440 if (reg && reg == ~0) 440 if (reg && reg == ~0)
441 return -ENOSPC; 441 return -ENOSPC;
442 } 442 }
@@ -458,21 +458,21 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
458 addr_entry.cipher = crypto->cipher; 458 addr_entry.cipher = crypto->cipher;
459 459
460 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); 460 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
461 rt2x00pci_register_multiwrite(rt2x00dev, reg, 461 rt2x00mmio_register_multiwrite(rt2x00dev, reg,
462 &key_entry, sizeof(key_entry)); 462 &key_entry, sizeof(key_entry));
463 463
464 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); 464 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
465 rt2x00pci_register_multiwrite(rt2x00dev, reg, 465 rt2x00mmio_register_multiwrite(rt2x00dev, reg,
466 &addr_entry, sizeof(addr_entry)); 466 &addr_entry, sizeof(addr_entry));
467 467
468 /* 468 /*
469 * Enable pairwise lookup table for given BSS idx. 469 * Enable pairwise lookup table for given BSS idx.
470 * Without this, received frames will not be decrypted 470 * Without this, received frames will not be decrypted
471 * by the hardware. 471 * by the hardware.
472 */ 472 */
473 rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg); 473 rt2x00mmio_register_read(rt2x00dev, SEC_CSR4, &reg);
474 reg |= (1 << crypto->bssidx); 474 reg |= (1 << crypto->bssidx);
475 rt2x00pci_register_write(rt2x00dev, SEC_CSR4, reg); 475 rt2x00mmio_register_write(rt2x00dev, SEC_CSR4, reg);
476 476
477 /* 477 /*
478 * The driver does not support the IV/EIV generation 478 * The driver does not support the IV/EIV generation
@@ -495,21 +495,21 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
495 if (key->hw_key_idx < 32) { 495 if (key->hw_key_idx < 32) {
496 mask = 1 << key->hw_key_idx; 496 mask = 1 << key->hw_key_idx;
497 497
498 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg); 498 rt2x00mmio_register_read(rt2x00dev, SEC_CSR2, &reg);
499 if (crypto->cmd == SET_KEY) 499 if (crypto->cmd == SET_KEY)
500 reg |= mask; 500 reg |= mask;
501 else if (crypto->cmd == DISABLE_KEY) 501 else if (crypto->cmd == DISABLE_KEY)
502 reg &= ~mask; 502 reg &= ~mask;
503 rt2x00pci_register_write(rt2x00dev, SEC_CSR2, reg); 503 rt2x00mmio_register_write(rt2x00dev, SEC_CSR2, reg);
504 } else { 504 } else {
505 mask = 1 << (key->hw_key_idx - 32); 505 mask = 1 << (key->hw_key_idx - 32);
506 506
507 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg); 507 rt2x00mmio_register_read(rt2x00dev, SEC_CSR3, &reg);
508 if (crypto->cmd == SET_KEY) 508 if (crypto->cmd == SET_KEY)
509 reg |= mask; 509 reg |= mask;
510 else if (crypto->cmd == DISABLE_KEY) 510 else if (crypto->cmd == DISABLE_KEY)
511 reg &= ~mask; 511 reg &= ~mask;
512 rt2x00pci_register_write(rt2x00dev, SEC_CSR3, reg); 512 rt2x00mmio_register_write(rt2x00dev, SEC_CSR3, reg);
513 } 513 }
514 514
515 return 0; 515 return 0;
@@ -526,7 +526,7 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
526 * and broadcast frames will always be accepted since 526 * and broadcast frames will always be accepted since
527 * there is no filter for it at this time. 527 * there is no filter for it at this time.
528 */ 528 */
529 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 529 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
530 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, 530 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
531 !(filter_flags & FIF_FCSFAIL)); 531 !(filter_flags & FIF_FCSFAIL));
532 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, 532 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
@@ -544,7 +544,7 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
544 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); 544 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
545 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 545 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS,
546 !(filter_flags & FIF_CONTROL)); 546 !(filter_flags & FIF_CONTROL));
547 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 547 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
548} 548}
549 549
550static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev, 550static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
@@ -558,9 +558,9 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
558 /* 558 /*
559 * Enable synchronisation. 559 * Enable synchronisation.
560 */ 560 */
561 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 561 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
562 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 562 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
563 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 563 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
564 } 564 }
565 565
566 if (flags & CONFIG_UPDATE_MAC) { 566 if (flags & CONFIG_UPDATE_MAC) {
@@ -568,8 +568,8 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
568 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); 568 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff);
569 conf->mac[1] = cpu_to_le32(reg); 569 conf->mac[1] = cpu_to_le32(reg);
570 570
571 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2, 571 rt2x00mmio_register_multiwrite(rt2x00dev, MAC_CSR2,
572 conf->mac, sizeof(conf->mac)); 572 conf->mac, sizeof(conf->mac));
573 } 573 }
574 574
575 if (flags & CONFIG_UPDATE_BSSID) { 575 if (flags & CONFIG_UPDATE_BSSID) {
@@ -577,8 +577,9 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
577 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3); 577 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3);
578 conf->bssid[1] = cpu_to_le32(reg); 578 conf->bssid[1] = cpu_to_le32(reg);
579 579
580 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4, 580 rt2x00mmio_register_multiwrite(rt2x00dev, MAC_CSR4,
581 conf->bssid, sizeof(conf->bssid)); 581 conf->bssid,
582 sizeof(conf->bssid));
582 } 583 }
583} 584}
584 585
@@ -588,40 +589,40 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
588{ 589{
589 u32 reg; 590 u32 reg;
590 591
591 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 592 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
592 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); 593 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
593 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 594 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
594 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 595 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
595 596
596 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 597 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
597 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 598 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4, &reg);
598 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 599 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
599 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 600 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
600 !!erp->short_preamble); 601 !!erp->short_preamble);
601 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 602 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR4, reg);
602 } 603 }
603 604
604 if (changed & BSS_CHANGED_BASIC_RATES) 605 if (changed & BSS_CHANGED_BASIC_RATES)
605 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, 606 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR5,
606 erp->basic_rates); 607 erp->basic_rates);
607 608
608 if (changed & BSS_CHANGED_BEACON_INT) { 609 if (changed & BSS_CHANGED_BEACON_INT) {
609 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 610 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
610 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 611 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
611 erp->beacon_int * 16); 612 erp->beacon_int * 16);
612 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 613 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
613 } 614 }
614 615
615 if (changed & BSS_CHANGED_ERP_SLOT) { 616 if (changed & BSS_CHANGED_ERP_SLOT) {
616 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg); 617 rt2x00mmio_register_read(rt2x00dev, MAC_CSR9, &reg);
617 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); 618 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
618 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); 619 rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg);
619 620
620 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg); 621 rt2x00mmio_register_read(rt2x00dev, MAC_CSR8, &reg);
621 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); 622 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
622 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 623 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
623 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); 624 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
624 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); 625 rt2x00mmio_register_write(rt2x00dev, MAC_CSR8, reg);
625 } 626 }
626} 627}
627 628
@@ -714,7 +715,7 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
714{ 715{
715 u32 reg; 716 u32 reg;
716 717
717 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 718 rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
718 719
719 rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0); 720 rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
720 rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1); 721 rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
@@ -722,7 +723,7 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
722 rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0); 723 rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
723 rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2); 724 rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
724 725
725 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 726 rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, reg);
726} 727}
727 728
728static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev, 729static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev,
@@ -821,14 +822,14 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
821 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 822 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
822 rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); 823 rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]);
823 824
824 rt2x00pci_register_read(rt2x00dev, PHY_CSR0, &reg); 825 rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
825 826
826 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 827 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
827 rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); 828 rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
828 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 829 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
829 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); 830 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
830 831
831 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); 832 rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
832 833
833 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) 834 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
834 rt61pci_config_antenna_5x(rt2x00dev, ant); 835 rt61pci_config_antenna_5x(rt2x00dev, ant);
@@ -848,7 +849,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
848 u16 eeprom; 849 u16 eeprom;
849 short lna_gain = 0; 850 short lna_gain = 0;
850 851
851 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { 852 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
852 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) 853 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
853 lna_gain += 14; 854 lna_gain += 14;
854 855
@@ -928,7 +929,7 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
928{ 929{
929 u32 reg; 930 u32 reg;
930 931
931 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 932 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4, &reg);
932 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); 933 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
933 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); 934 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
934 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); 935 rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
@@ -936,7 +937,7 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
936 libconf->conf->long_frame_max_tx_count); 937 libconf->conf->long_frame_max_tx_count);
937 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, 938 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
938 libconf->conf->short_frame_max_tx_count); 939 libconf->conf->short_frame_max_tx_count);
939 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 940 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR4, reg);
940} 941}
941 942
942static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev, 943static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
@@ -948,7 +949,7 @@ static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
948 u32 reg; 949 u32 reg;
949 950
950 if (state == STATE_SLEEP) { 951 if (state == STATE_SLEEP) {
951 rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg); 952 rt2x00mmio_register_read(rt2x00dev, MAC_CSR11, &reg);
952 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 953 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
953 rt2x00dev->beacon_int - 10); 954 rt2x00dev->beacon_int - 10);
954 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 955 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
@@ -957,27 +958,29 @@ static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
957 958
958 /* We must first disable autowake before it can be enabled */ 959 /* We must first disable autowake before it can be enabled */
959 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); 960 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
960 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); 961 rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg);
961 962
962 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1); 963 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1);
963 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); 964 rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg);
964 965
965 rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000005); 966 rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR,
966 rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x0000001c); 967 0x00000005);
967 rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000060); 968 rt2x00mmio_register_write(rt2x00dev, IO_CNTL_CSR, 0x0000001c);
969 rt2x00mmio_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000060);
968 970
969 rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0); 971 rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0);
970 } else { 972 } else {
971 rt2x00pci_register_read(rt2x00dev, MAC_CSR11, &reg); 973 rt2x00mmio_register_read(rt2x00dev, MAC_CSR11, &reg);
972 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); 974 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
973 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); 975 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
974 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); 976 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
975 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); 977 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
976 rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); 978 rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg);
977 979
978 rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); 980 rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR,
979 rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x00000018); 981 0x00000007);
980 rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000020); 982 rt2x00mmio_register_write(rt2x00dev, IO_CNTL_CSR, 0x00000018);
983 rt2x00mmio_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000020);
981 984
982 rt61pci_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); 985 rt61pci_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
983 } 986 }
@@ -1013,13 +1016,13 @@ static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev,
1013 /* 1016 /*
1014 * Update FCS error count from register. 1017 * Update FCS error count from register.
1015 */ 1018 */
1016 rt2x00pci_register_read(rt2x00dev, STA_CSR0, &reg); 1019 rt2x00mmio_register_read(rt2x00dev, STA_CSR0, &reg);
1017 qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); 1020 qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR);
1018 1021
1019 /* 1022 /*
1020 * Update False CCA count from register. 1023 * Update False CCA count from register.
1021 */ 1024 */
1022 rt2x00pci_register_read(rt2x00dev, STA_CSR1, &reg); 1025 rt2x00mmio_register_read(rt2x00dev, STA_CSR1, &reg);
1023 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); 1026 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
1024} 1027}
1025 1028
@@ -1138,16 +1141,16 @@ static void rt61pci_start_queue(struct data_queue *queue)
1138 1141
1139 switch (queue->qid) { 1142 switch (queue->qid) {
1140 case QID_RX: 1143 case QID_RX:
1141 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 1144 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
1142 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); 1145 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
1143 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1146 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
1144 break; 1147 break;
1145 case QID_BEACON: 1148 case QID_BEACON:
1146 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1149 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
1147 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 1150 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1148 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); 1151 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1149 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1152 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1150 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1153 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
1151 break; 1154 break;
1152 default: 1155 default:
1153 break; 1156 break;
@@ -1161,24 +1164,24 @@ static void rt61pci_kick_queue(struct data_queue *queue)
1161 1164
1162 switch (queue->qid) { 1165 switch (queue->qid) {
1163 case QID_AC_VO: 1166 case QID_AC_VO:
1164 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1167 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1165 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1); 1168 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1);
1166 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1169 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1167 break; 1170 break;
1168 case QID_AC_VI: 1171 case QID_AC_VI:
1169 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1172 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1170 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1); 1173 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1);
1171 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1174 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1172 break; 1175 break;
1173 case QID_AC_BE: 1176 case QID_AC_BE:
1174 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1177 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1175 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1); 1178 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1);
1176 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1179 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1177 break; 1180 break;
1178 case QID_AC_BK: 1181 case QID_AC_BK:
1179 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1182 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1180 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1); 1183 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1);
1181 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1184 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1182 break; 1185 break;
1183 default: 1186 default:
1184 break; 1187 break;
@@ -1192,36 +1195,36 @@ static void rt61pci_stop_queue(struct data_queue *queue)
1192 1195
1193 switch (queue->qid) { 1196 switch (queue->qid) {
1194 case QID_AC_VO: 1197 case QID_AC_VO:
1195 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1198 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1196 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1); 1199 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1);
1197 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1200 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1198 break; 1201 break;
1199 case QID_AC_VI: 1202 case QID_AC_VI:
1200 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1203 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1201 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1); 1204 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
1202 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1205 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1203 break; 1206 break;
1204 case QID_AC_BE: 1207 case QID_AC_BE:
1205 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1208 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1206 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1); 1209 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
1207 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1210 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1208 break; 1211 break;
1209 case QID_AC_BK: 1212 case QID_AC_BK:
1210 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1213 rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1211 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1); 1214 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
1212 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1215 rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1213 break; 1216 break;
1214 case QID_RX: 1217 case QID_RX:
1215 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 1218 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
1216 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1); 1219 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
1217 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1220 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
1218 break; 1221 break;
1219 case QID_BEACON: 1222 case QID_BEACON:
1220 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1223 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
1221 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); 1224 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1222 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1225 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1223 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1226 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1224 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1227 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
1225 1228
1226 /* 1229 /*
1227 * Wait for possibly running tbtt tasklets. 1230 * Wait for possibly running tbtt tasklets.
@@ -1299,14 +1302,14 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1299 * Wait for stable hardware. 1302 * Wait for stable hardware.
1300 */ 1303 */
1301 for (i = 0; i < 100; i++) { 1304 for (i = 0; i < 100; i++) {
1302 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg); 1305 rt2x00mmio_register_read(rt2x00dev, MAC_CSR0, &reg);
1303 if (reg) 1306 if (reg)
1304 break; 1307 break;
1305 msleep(1); 1308 msleep(1);
1306 } 1309 }
1307 1310
1308 if (!reg) { 1311 if (!reg) {
1309 ERROR(rt2x00dev, "Unstable hardware.\n"); 1312 rt2x00_err(rt2x00dev, "Unstable hardware\n");
1310 return -EBUSY; 1313 return -EBUSY;
1311 } 1314 }
1312 1315
@@ -1315,10 +1318,10 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1315 */ 1318 */
1316 reg = 0; 1319 reg = 0;
1317 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1); 1320 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1);
1318 rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); 1321 rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg);
1319 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); 1322 rt2x00mmio_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff);
1320 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 1323 rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1321 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, 0); 1324 rt2x00mmio_register_write(rt2x00dev, HOST_CMD_CSR, 0);
1322 1325
1323 /* 1326 /*
1324 * Write firmware to device. 1327 * Write firmware to device.
@@ -1326,26 +1329,26 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1326 reg = 0; 1329 reg = 0;
1327 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1); 1330 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1);
1328 rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 1); 1331 rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 1);
1329 rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); 1332 rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg);
1330 1333
1331 rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 1334 rt2x00mmio_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
1332 data, len); 1335 data, len);
1333 1336
1334 rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 0); 1337 rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 0);
1335 rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); 1338 rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg);
1336 1339
1337 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 0); 1340 rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 0);
1338 rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); 1341 rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg);
1339 1342
1340 for (i = 0; i < 100; i++) { 1343 for (i = 0; i < 100; i++) {
1341 rt2x00pci_register_read(rt2x00dev, MCU_CNTL_CSR, &reg); 1344 rt2x00mmio_register_read(rt2x00dev, MCU_CNTL_CSR, &reg);
1342 if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY)) 1345 if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY))
1343 break; 1346 break;
1344 msleep(1); 1347 msleep(1);
1345 } 1348 }
1346 1349
1347 if (i == 100) { 1350 if (i == 100) {
1348 ERROR(rt2x00dev, "MCU Control register not ready.\n"); 1351 rt2x00_err(rt2x00dev, "MCU Control register not ready\n");
1349 return -EBUSY; 1352 return -EBUSY;
1350 } 1353 }
1351 1354
@@ -1360,16 +1363,16 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1360 reg = 0; 1363 reg = 0;
1361 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); 1364 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
1362 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); 1365 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
1363 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1366 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1364 1367
1365 rt2x00pci_register_read(rt2x00dev, MAC_CSR1, &reg); 1368 rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
1366 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); 1369 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
1367 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); 1370 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
1368 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1371 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1369 1372
1370 rt2x00pci_register_read(rt2x00dev, MAC_CSR1, &reg); 1373 rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
1371 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); 1374 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
1372 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1375 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1373 1376
1374 return 0; 1377 return 0;
1375} 1378}
@@ -1379,7 +1382,7 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev,
1379 */ 1382 */
1380static bool rt61pci_get_entry_state(struct queue_entry *entry) 1383static bool rt61pci_get_entry_state(struct queue_entry *entry)
1381{ 1384{
1382 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1385 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1383 u32 word; 1386 u32 word;
1384 1387
1385 if (entry->queue->qid == QID_RX) { 1388 if (entry->queue->qid == QID_RX) {
@@ -1396,7 +1399,7 @@ static bool rt61pci_get_entry_state(struct queue_entry *entry)
1396 1399
1397static void rt61pci_clear_entry(struct queue_entry *entry) 1400static void rt61pci_clear_entry(struct queue_entry *entry)
1398{ 1401{
1399 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1402 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1400 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1403 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1401 u32 word; 1404 u32 word;
1402 1405
@@ -1419,13 +1422,13 @@ static void rt61pci_clear_entry(struct queue_entry *entry)
1419 1422
1420static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) 1423static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1421{ 1424{
1422 struct queue_entry_priv_pci *entry_priv; 1425 struct queue_entry_priv_mmio *entry_priv;
1423 u32 reg; 1426 u32 reg;
1424 1427
1425 /* 1428 /*
1426 * Initialize registers. 1429 * Initialize registers.
1427 */ 1430 */
1428 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, &reg); 1431 rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR0, &reg);
1429 rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE, 1432 rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE,
1430 rt2x00dev->tx[0].limit); 1433 rt2x00dev->tx[0].limit);
1431 rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE, 1434 rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE,
@@ -1434,67 +1437,67 @@ static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1434 rt2x00dev->tx[2].limit); 1437 rt2x00dev->tx[2].limit);
1435 rt2x00_set_field32(&reg, TX_RING_CSR0_AC3_RING_SIZE, 1438 rt2x00_set_field32(&reg, TX_RING_CSR0_AC3_RING_SIZE,
1436 rt2x00dev->tx[3].limit); 1439 rt2x00dev->tx[3].limit);
1437 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg); 1440 rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR0, reg);
1438 1441
1439 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, &reg); 1442 rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR1, &reg);
1440 rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE, 1443 rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE,
1441 rt2x00dev->tx[0].desc_size / 4); 1444 rt2x00dev->tx[0].desc_size / 4);
1442 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); 1445 rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR1, reg);
1443 1446
1444 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; 1447 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
1445 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg); 1448 rt2x00mmio_register_read(rt2x00dev, AC0_BASE_CSR, &reg);
1446 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER, 1449 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER,
1447 entry_priv->desc_dma); 1450 entry_priv->desc_dma);
1448 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); 1451 rt2x00mmio_register_write(rt2x00dev, AC0_BASE_CSR, reg);
1449 1452
1450 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; 1453 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
1451 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg); 1454 rt2x00mmio_register_read(rt2x00dev, AC1_BASE_CSR, &reg);
1452 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER, 1455 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER,
1453 entry_priv->desc_dma); 1456 entry_priv->desc_dma);
1454 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); 1457 rt2x00mmio_register_write(rt2x00dev, AC1_BASE_CSR, reg);
1455 1458
1456 entry_priv = rt2x00dev->tx[2].entries[0].priv_data; 1459 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
1457 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg); 1460 rt2x00mmio_register_read(rt2x00dev, AC2_BASE_CSR, &reg);
1458 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER, 1461 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER,
1459 entry_priv->desc_dma); 1462 entry_priv->desc_dma);
1460 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); 1463 rt2x00mmio_register_write(rt2x00dev, AC2_BASE_CSR, reg);
1461 1464
1462 entry_priv = rt2x00dev->tx[3].entries[0].priv_data; 1465 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
1463 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg); 1466 rt2x00mmio_register_read(rt2x00dev, AC3_BASE_CSR, &reg);
1464 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER, 1467 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER,
1465 entry_priv->desc_dma); 1468 entry_priv->desc_dma);
1466 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); 1469 rt2x00mmio_register_write(rt2x00dev, AC3_BASE_CSR, reg);
1467 1470
1468 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg); 1471 rt2x00mmio_register_read(rt2x00dev, RX_RING_CSR, &reg);
1469 rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit); 1472 rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit);
1470 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE, 1473 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE,
1471 rt2x00dev->rx->desc_size / 4); 1474 rt2x00dev->rx->desc_size / 4);
1472 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); 1475 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4);
1473 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); 1476 rt2x00mmio_register_write(rt2x00dev, RX_RING_CSR, reg);
1474 1477
1475 entry_priv = rt2x00dev->rx->entries[0].priv_data; 1478 entry_priv = rt2x00dev->rx->entries[0].priv_data;
1476 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg); 1479 rt2x00mmio_register_read(rt2x00dev, RX_BASE_CSR, &reg);
1477 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER, 1480 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER,
1478 entry_priv->desc_dma); 1481 entry_priv->desc_dma);
1479 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); 1482 rt2x00mmio_register_write(rt2x00dev, RX_BASE_CSR, reg);
1480 1483
1481 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg); 1484 rt2x00mmio_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg);
1482 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC0, 2); 1485 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC0, 2);
1483 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2); 1486 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2);
1484 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2); 1487 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2);
1485 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2); 1488 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2);
1486 rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); 1489 rt2x00mmio_register_write(rt2x00dev, TX_DMA_DST_CSR, reg);
1487 1490
1488 rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, &reg); 1491 rt2x00mmio_register_read(rt2x00dev, LOAD_TX_RING_CSR, &reg);
1489 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1); 1492 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1);
1490 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); 1493 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1);
1491 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); 1494 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1);
1492 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); 1495 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1);
1493 rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); 1496 rt2x00mmio_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg);
1494 1497
1495 rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, &reg); 1498 rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR, &reg);
1496 rt2x00_set_field32(&reg, RX_CNTL_CSR_LOAD_RXD, 1); 1499 rt2x00_set_field32(&reg, RX_CNTL_CSR_LOAD_RXD, 1);
1497 rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); 1500 rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg);
1498 1501
1499 return 0; 1502 return 0;
1500} 1503}
@@ -1503,13 +1506,13 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1503{ 1506{
1504 u32 reg; 1507 u32 reg;
1505 1508
1506 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 1509 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0, &reg);
1507 rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1); 1510 rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1);
1508 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); 1511 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
1509 rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0); 1512 rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0);
1510 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1513 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg);
1511 1514
1512 rt2x00pci_register_read(rt2x00dev, TXRX_CSR1, &reg); 1515 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR1, &reg);
1513 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ 1516 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */
1514 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1); 1517 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1);
1515 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ 1518 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */
@@ -1518,12 +1521,12 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1518 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1); 1521 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1);
1519 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ 1522 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */
1520 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1); 1523 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1);
1521 rt2x00pci_register_write(rt2x00dev, TXRX_CSR1, reg); 1524 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR1, reg);
1522 1525
1523 /* 1526 /*
1524 * CCK TXD BBP registers 1527 * CCK TXD BBP registers
1525 */ 1528 */
1526 rt2x00pci_register_read(rt2x00dev, TXRX_CSR2, &reg); 1529 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR2, &reg);
1527 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13); 1530 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13);
1528 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1); 1531 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1);
1529 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12); 1532 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12);
@@ -1532,76 +1535,76 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1532 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1); 1535 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1);
1533 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10); 1536 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10);
1534 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1); 1537 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1);
1535 rt2x00pci_register_write(rt2x00dev, TXRX_CSR2, reg); 1538 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR2, reg);
1536 1539
1537 /* 1540 /*
1538 * OFDM TXD BBP registers 1541 * OFDM TXD BBP registers
1539 */ 1542 */
1540 rt2x00pci_register_read(rt2x00dev, TXRX_CSR3, &reg); 1543 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR3, &reg);
1541 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7); 1544 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7);
1542 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1); 1545 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1);
1543 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6); 1546 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6);
1544 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1); 1547 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1);
1545 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5); 1548 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5);
1546 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1); 1549 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1);
1547 rt2x00pci_register_write(rt2x00dev, TXRX_CSR3, reg); 1550 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR3, reg);
1548 1551
1549 rt2x00pci_register_read(rt2x00dev, TXRX_CSR7, &reg); 1552 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR7, &reg);
1550 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59); 1553 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59);
1551 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53); 1554 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53);
1552 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49); 1555 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49);
1553 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46); 1556 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46);
1554 rt2x00pci_register_write(rt2x00dev, TXRX_CSR7, reg); 1557 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR7, reg);
1555 1558
1556 rt2x00pci_register_read(rt2x00dev, TXRX_CSR8, &reg); 1559 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR8, &reg);
1557 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44); 1560 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44);
1558 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42); 1561 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42);
1559 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42); 1562 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42);
1560 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); 1563 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
1561 rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg); 1564 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR8, reg);
1562 1565
1563 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1566 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
1564 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); 1567 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
1565 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); 1568 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1566 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); 1569 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
1567 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1570 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1568 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1571 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1569 rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); 1572 rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
1570 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1573 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
1571 1574
1572 rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); 1575 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
1573 1576
1574 rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); 1577 rt2x00mmio_register_write(rt2x00dev, MAC_CSR6, 0x00000fff);
1575 1578
1576 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg); 1579 rt2x00mmio_register_read(rt2x00dev, MAC_CSR9, &reg);
1577 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); 1580 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
1578 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); 1581 rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg);
1579 1582
1580 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x0000071c); 1583 rt2x00mmio_register_write(rt2x00dev, MAC_CSR10, 0x0000071c);
1581 1584
1582 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 1585 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
1583 return -EBUSY; 1586 return -EBUSY;
1584 1587
1585 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, 0x0000e000); 1588 rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, 0x0000e000);
1586 1589
1587 /* 1590 /*
1588 * Invalidate all Shared Keys (SEC_CSR0), 1591 * Invalidate all Shared Keys (SEC_CSR0),
1589 * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) 1592 * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5)
1590 */ 1593 */
1591 rt2x00pci_register_write(rt2x00dev, SEC_CSR0, 0x00000000); 1594 rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, 0x00000000);
1592 rt2x00pci_register_write(rt2x00dev, SEC_CSR1, 0x00000000); 1595 rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, 0x00000000);
1593 rt2x00pci_register_write(rt2x00dev, SEC_CSR5, 0x00000000); 1596 rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
1594 1597
1595 rt2x00pci_register_write(rt2x00dev, PHY_CSR1, 0x000023b0); 1598 rt2x00mmio_register_write(rt2x00dev, PHY_CSR1, 0x000023b0);
1596 rt2x00pci_register_write(rt2x00dev, PHY_CSR5, 0x060a100c); 1599 rt2x00mmio_register_write(rt2x00dev, PHY_CSR5, 0x060a100c);
1597 rt2x00pci_register_write(rt2x00dev, PHY_CSR6, 0x00080606); 1600 rt2x00mmio_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
1598 rt2x00pci_register_write(rt2x00dev, PHY_CSR7, 0x00000a08); 1601 rt2x00mmio_register_write(rt2x00dev, PHY_CSR7, 0x00000a08);
1599 1602
1600 rt2x00pci_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404); 1603 rt2x00mmio_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404);
1601 1604
1602 rt2x00pci_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200); 1605 rt2x00mmio_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200);
1603 1606
1604 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); 1607 rt2x00mmio_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff);
1605 1608
1606 /* 1609 /*
1607 * Clear all beacons 1610 * Clear all beacons
@@ -1609,36 +1612,36 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1609 * the first byte since that byte contains the VALID and OWNER 1612 * the first byte since that byte contains the VALID and OWNER
1610 * bits which (when set to 0) will invalidate the entire beacon. 1613 * bits which (when set to 0) will invalidate the entire beacon.
1611 */ 1614 */
1612 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0); 1615 rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1613 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0); 1616 rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1614 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0); 1617 rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1615 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0); 1618 rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1616 1619
1617 /* 1620 /*
1618 * We must clear the error counters. 1621 * We must clear the error counters.
1619 * These registers are cleared on read, 1622 * These registers are cleared on read,
1620 * so we may pass a useless variable to store the value. 1623 * so we may pass a useless variable to store the value.
1621 */ 1624 */
1622 rt2x00pci_register_read(rt2x00dev, STA_CSR0, &reg); 1625 rt2x00mmio_register_read(rt2x00dev, STA_CSR0, &reg);
1623 rt2x00pci_register_read(rt2x00dev, STA_CSR1, &reg); 1626 rt2x00mmio_register_read(rt2x00dev, STA_CSR1, &reg);
1624 rt2x00pci_register_read(rt2x00dev, STA_CSR2, &reg); 1627 rt2x00mmio_register_read(rt2x00dev, STA_CSR2, &reg);
1625 1628
1626 /* 1629 /*
1627 * Reset MAC and BBP registers. 1630 * Reset MAC and BBP registers.
1628 */ 1631 */
1629 rt2x00pci_register_read(rt2x00dev, MAC_CSR1, &reg); 1632 rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
1630 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); 1633 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
1631 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); 1634 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
1632 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1635 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1633 1636
1634 rt2x00pci_register_read(rt2x00dev, MAC_CSR1, &reg); 1637 rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
1635 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); 1638 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
1636 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); 1639 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
1637 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1640 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1638 1641
1639 rt2x00pci_register_read(rt2x00dev, MAC_CSR1, &reg); 1642 rt2x00mmio_register_read(rt2x00dev, MAC_CSR1, &reg);
1640 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); 1643 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
1641 rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); 1644 rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg);
1642 1645
1643 return 0; 1646 return 0;
1644} 1647}
@@ -1655,7 +1658,7 @@ static int rt61pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1655 udelay(REGISTER_BUSY_DELAY); 1658 udelay(REGISTER_BUSY_DELAY);
1656 } 1659 }
1657 1660
1658 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 1661 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
1659 return -EACCES; 1662 return -EACCES;
1660} 1663}
1661 1664
@@ -1722,11 +1725,11 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1722 * should clear the register to assure a clean state. 1725 * should clear the register to assure a clean state.
1723 */ 1726 */
1724 if (state == STATE_RADIO_IRQ_ON) { 1727 if (state == STATE_RADIO_IRQ_ON) {
1725 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 1728 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
1726 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 1729 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
1727 1730
1728 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg); 1731 rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
1729 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); 1732 rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
1730 } 1733 }
1731 1734
1732 /* 1735 /*
@@ -1735,15 +1738,15 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1735 */ 1738 */
1736 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1739 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1737 1740
1738 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1741 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1739 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); 1742 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
1740 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); 1743 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
1741 rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask); 1744 rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask);
1742 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask); 1745 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask);
1743 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); 1746 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff);
1744 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 1747 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
1745 1748
1746 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg); 1749 rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
1747 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_0, mask); 1750 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_0, mask);
1748 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_1, mask); 1751 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_1, mask);
1749 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_2, mask); 1752 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_2, mask);
@@ -1753,7 +1756,7 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1753 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask); 1756 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask);
1754 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); 1757 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
1755 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask); 1758 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
1756 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 1759 rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
1757 1760
1758 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1761 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1759 1762
@@ -1783,9 +1786,9 @@ static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1783 /* 1786 /*
1784 * Enable RX. 1787 * Enable RX.
1785 */ 1788 */
1786 rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, &reg); 1789 rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR, &reg);
1787 rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1); 1790 rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1);
1788 rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); 1791 rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg);
1789 1792
1790 return 0; 1793 return 0;
1791} 1794}
@@ -1795,7 +1798,7 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1795 /* 1798 /*
1796 * Disable power 1799 * Disable power
1797 */ 1800 */
1798 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818); 1801 rt2x00mmio_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1799} 1802}
1800 1803
1801static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1804static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
@@ -1806,10 +1809,10 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1806 1809
1807 put_to_sleep = (state != STATE_AWAKE); 1810 put_to_sleep = (state != STATE_AWAKE);
1808 1811
1809 rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg); 1812 rt2x00mmio_register_read(rt2x00dev, MAC_CSR12, &reg);
1810 rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); 1813 rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep);
1811 rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); 1814 rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep);
1812 rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); 1815 rt2x00mmio_register_write(rt2x00dev, MAC_CSR12, reg);
1813 1816
1814 /* 1817 /*
1815 * Device is not guaranteed to be in the requested state yet. 1818 * Device is not guaranteed to be in the requested state yet.
@@ -1817,11 +1820,11 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1817 * device has entered the correct state. 1820 * device has entered the correct state.
1818 */ 1821 */
1819 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1822 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1820 rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2); 1823 rt2x00mmio_register_read(rt2x00dev, MAC_CSR12, &reg2);
1821 state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); 1824 state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
1822 if (state == !put_to_sleep) 1825 if (state == !put_to_sleep)
1823 return 0; 1826 return 0;
1824 rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); 1827 rt2x00mmio_register_write(rt2x00dev, MAC_CSR12, reg);
1825 msleep(10); 1828 msleep(10);
1826 } 1829 }
1827 1830
@@ -1856,8 +1859,8 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1856 } 1859 }
1857 1860
1858 if (unlikely(retval)) 1861 if (unlikely(retval))
1859 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 1862 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
1860 state, retval); 1863 state, retval);
1861 1864
1862 return retval; 1865 return retval;
1863} 1866}
@@ -1869,7 +1872,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1869 struct txentry_desc *txdesc) 1872 struct txentry_desc *txdesc)
1870{ 1873{
1871 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1874 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1872 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1875 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1873 __le32 *txd = entry_priv->desc; 1876 __le32 *txd = entry_priv->desc;
1874 u32 word; 1877 u32 word;
1875 1878
@@ -1967,7 +1970,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1967 struct txentry_desc *txdesc) 1970 struct txentry_desc *txdesc)
1968{ 1971{
1969 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1972 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1970 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1973 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
1971 unsigned int beacon_base; 1974 unsigned int beacon_base;
1972 unsigned int padding_len; 1975 unsigned int padding_len;
1973 u32 orig_reg, reg; 1976 u32 orig_reg, reg;
@@ -1976,10 +1979,10 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1976 * Disable beaconing while we are reloading the beacon data, 1979 * Disable beaconing while we are reloading the beacon data,
1977 * otherwise we might be sending out invalid data. 1980 * otherwise we might be sending out invalid data.
1978 */ 1981 */
1979 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1982 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
1980 orig_reg = reg; 1983 orig_reg = reg;
1981 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1984 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1982 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1985 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
1983 1986
1984 /* 1987 /*
1985 * Write the TX descriptor for the beacon. 1988 * Write the TX descriptor for the beacon.
@@ -1996,19 +1999,19 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1996 */ 1999 */
1997 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 2000 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1998 if (padding_len && skb_pad(entry->skb, padding_len)) { 2001 if (padding_len && skb_pad(entry->skb, padding_len)) {
1999 ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); 2002 rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n");
2000 /* skb freed by skb_pad() on failure */ 2003 /* skb freed by skb_pad() on failure */
2001 entry->skb = NULL; 2004 entry->skb = NULL;
2002 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg); 2005 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
2003 return; 2006 return;
2004 } 2007 }
2005 2008
2006 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 2009 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
2007 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2010 rt2x00mmio_register_multiwrite(rt2x00dev, beacon_base,
2008 entry_priv->desc, TXINFO_SIZE); 2011 entry_priv->desc, TXINFO_SIZE);
2009 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE, 2012 rt2x00mmio_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
2010 entry->skb->data, 2013 entry->skb->data,
2011 entry->skb->len + padding_len); 2014 entry->skb->len + padding_len);
2012 2015
2013 /* 2016 /*
2014 * Enable beaconing again. 2017 * Enable beaconing again.
@@ -2016,10 +2019,10 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2016 * For Wi-Fi faily generated beacons between participating 2019 * For Wi-Fi faily generated beacons between participating
2017 * stations. Set TBTT phase adaptive adjustment step to 8us. 2020 * stations. Set TBTT phase adaptive adjustment step to 8us.
2018 */ 2021 */
2019 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 2022 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
2020 2023
2021 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 2024 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2022 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2025 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
2023 2026
2024 /* 2027 /*
2025 * Clean up beacon skb. 2028 * Clean up beacon skb.
@@ -2037,21 +2040,21 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
2037 * Disable beaconing while we are reloading the beacon data, 2040 * Disable beaconing while we are reloading the beacon data,
2038 * otherwise we might be sending out invalid data. 2041 * otherwise we might be sending out invalid data.
2039 */ 2042 */
2040 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 2043 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
2041 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 2044 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2042 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2045 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
2043 2046
2044 /* 2047 /*
2045 * Clear beacon. 2048 * Clear beacon.
2046 */ 2049 */
2047 rt2x00pci_register_write(rt2x00dev, 2050 rt2x00mmio_register_write(rt2x00dev,
2048 HW_BEACON_OFFSET(entry->entry_idx), 0); 2051 HW_BEACON_OFFSET(entry->entry_idx), 0);
2049 2052
2050 /* 2053 /*
2051 * Enable beaconing again. 2054 * Enable beaconing again.
2052 */ 2055 */
2053 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 2056 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2054 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2057 rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
2055} 2058}
2056 2059
2057/* 2060/*
@@ -2089,7 +2092,7 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
2089 struct rxdone_entry_desc *rxdesc) 2092 struct rxdone_entry_desc *rxdesc)
2090{ 2093{
2091 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 2094 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
2092 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 2095 struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
2093 u32 word0; 2096 u32 word0;
2094 u32 word1; 2097 u32 word1;
2095 2098
@@ -2155,7 +2158,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2155 struct data_queue *queue; 2158 struct data_queue *queue;
2156 struct queue_entry *entry; 2159 struct queue_entry *entry;
2157 struct queue_entry *entry_done; 2160 struct queue_entry *entry_done;
2158 struct queue_entry_priv_pci *entry_priv; 2161 struct queue_entry_priv_mmio *entry_priv;
2159 struct txdone_entry_desc txdesc; 2162 struct txdone_entry_desc txdesc;
2160 u32 word; 2163 u32 word;
2161 u32 reg; 2164 u32 reg;
@@ -2173,7 +2176,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2173 * tx ring size for now. 2176 * tx ring size for now.
2174 */ 2177 */
2175 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { 2178 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
2176 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg); 2179 rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg);
2177 if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) 2180 if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
2178 break; 2181 break;
2179 2182
@@ -2207,9 +2210,8 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2207 /* Catch up. 2210 /* Catch up.
2208 * Just report any entries we missed as failed. 2211 * Just report any entries we missed as failed.
2209 */ 2212 */
2210 WARNING(rt2x00dev, 2213 rt2x00_warn(rt2x00dev, "TX status report missed for entry %d\n",
2211 "TX status report missed for entry %d\n", 2214 entry_done->entry_idx);
2212 entry_done->entry_idx);
2213 2215
2214 rt2x00lib_txdone_noinfo(entry_done, TXDONE_UNKNOWN); 2216 rt2x00lib_txdone_noinfo(entry_done, TXDONE_UNKNOWN);
2215 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 2217 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
@@ -2260,9 +2262,9 @@ static inline void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
2260 */ 2262 */
2261 spin_lock_irq(&rt2x00dev->irqmask_lock); 2263 spin_lock_irq(&rt2x00dev->irqmask_lock);
2262 2264
2263 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 2265 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2264 rt2x00_set_field32(&reg, irq_field, 0); 2266 rt2x00_set_field32(&reg, irq_field, 0);
2265 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 2267 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
2266 2268
2267 spin_unlock_irq(&rt2x00dev->irqmask_lock); 2269 spin_unlock_irq(&rt2x00dev->irqmask_lock);
2268} 2270}
@@ -2278,9 +2280,9 @@ static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
2278 */ 2280 */
2279 spin_lock_irq(&rt2x00dev->irqmask_lock); 2281 spin_lock_irq(&rt2x00dev->irqmask_lock);
2280 2282
2281 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg); 2283 rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2282 rt2x00_set_field32(&reg, irq_field, 0); 2284 rt2x00_set_field32(&reg, irq_field, 0);
2283 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 2285 rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2284 2286
2285 spin_unlock_irq(&rt2x00dev->irqmask_lock); 2287 spin_unlock_irq(&rt2x00dev->irqmask_lock);
2286} 2288}
@@ -2304,7 +2306,7 @@ static void rt61pci_tbtt_tasklet(unsigned long data)
2304static void rt61pci_rxdone_tasklet(unsigned long data) 2306static void rt61pci_rxdone_tasklet(unsigned long data)
2305{ 2307{
2306 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 2308 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2307 if (rt2x00pci_rxdone(rt2x00dev)) 2309 if (rt2x00mmio_rxdone(rt2x00dev))
2308 tasklet_schedule(&rt2x00dev->rxdone_tasklet); 2310 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
2309 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 2311 else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2310 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE); 2312 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
@@ -2314,8 +2316,8 @@ static void rt61pci_autowake_tasklet(unsigned long data)
2314{ 2316{
2315 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 2317 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2316 rt61pci_wakeup(rt2x00dev); 2318 rt61pci_wakeup(rt2x00dev);
2317 rt2x00pci_register_write(rt2x00dev, 2319 rt2x00mmio_register_write(rt2x00dev,
2318 M2H_CMD_DONE_CSR, 0xffffffff); 2320 M2H_CMD_DONE_CSR, 0xffffffff);
2319 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 2321 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2320 rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP); 2322 rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
2321} 2323}
@@ -2330,11 +2332,11 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2330 * Get the interrupt sources & saved to local variable. 2332 * Get the interrupt sources & saved to local variable.
2331 * Write register value back to clear pending interrupts. 2333 * Write register value back to clear pending interrupts.
2332 */ 2334 */
2333 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg_mcu); 2335 rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg_mcu);
2334 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); 2336 rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu);
2335 2337
2336 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 2338 rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
2337 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 2339 rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
2338 2340
2339 if (!reg && !reg_mcu) 2341 if (!reg && !reg_mcu)
2340 return IRQ_NONE; 2342 return IRQ_NONE;
@@ -2371,13 +2373,13 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2371 */ 2373 */
2372 spin_lock(&rt2x00dev->irqmask_lock); 2374 spin_lock(&rt2x00dev->irqmask_lock);
2373 2375
2374 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 2376 rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2375 reg |= mask; 2377 reg |= mask;
2376 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 2378 rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
2377 2379
2378 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg); 2380 rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2379 reg |= mask_mcu; 2381 reg |= mask_mcu;
2380 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 2382 rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2381 2383
2382 spin_unlock(&rt2x00dev->irqmask_lock); 2384 spin_unlock(&rt2x00dev->irqmask_lock);
2383 2385
@@ -2395,7 +2397,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2395 u8 *mac; 2397 u8 *mac;
2396 s8 value; 2398 s8 value;
2397 2399
2398 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg); 2400 rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR, &reg);
2399 2401
2400 eeprom.data = rt2x00dev; 2402 eeprom.data = rt2x00dev;
2401 eeprom.register_read = rt61pci_eepromregister_read; 2403 eeprom.register_read = rt61pci_eepromregister_read;
@@ -2416,7 +2418,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2416 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 2418 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
2417 if (!is_valid_ether_addr(mac)) { 2419 if (!is_valid_ether_addr(mac)) {
2418 eth_random_addr(mac); 2420 eth_random_addr(mac);
2419 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 2421 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
2420 } 2422 }
2421 2423
2422 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 2424 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -2431,7 +2433,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2431 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); 2433 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
2432 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225); 2434 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225);
2433 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 2435 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
2434 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 2436 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
2435 } 2437 }
2436 2438
2437 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); 2439 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
@@ -2444,7 +2446,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2444 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); 2446 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
2445 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); 2447 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
2446 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 2448 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
2447 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 2449 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
2448 } 2450 }
2449 2451
2450 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); 2452 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word);
@@ -2452,7 +2454,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2452 rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, 2454 rt2x00_set_field16(&word, EEPROM_LED_LED_MODE,
2453 LED_MODE_DEFAULT); 2455 LED_MODE_DEFAULT);
2454 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); 2456 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word);
2455 EEPROM(rt2x00dev, "Led: 0x%04x\n", word); 2457 rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word);
2456 } 2458 }
2457 2459
2458 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); 2460 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
@@ -2460,7 +2462,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2460 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); 2462 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
2461 rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); 2463 rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0);
2462 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 2464 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
2463 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); 2465 rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
2464 } 2466 }
2465 2467
2466 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); 2468 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word);
@@ -2468,7 +2470,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2468 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); 2470 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
2469 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); 2471 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
2470 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); 2472 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
2471 EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); 2473 rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word);
2472 } else { 2474 } else {
2473 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); 2475 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1);
2474 if (value < -10 || value > 10) 2476 if (value < -10 || value > 10)
@@ -2484,7 +2486,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2484 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); 2486 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
2485 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); 2487 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
2486 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); 2488 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
2487 EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); 2489 rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
2488 } else { 2490 } else {
2489 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); 2491 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
2490 if (value < -10 || value > 10) 2492 if (value < -10 || value > 10)
@@ -2513,7 +2515,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2513 * Identify RF chipset. 2515 * Identify RF chipset.
2514 */ 2516 */
2515 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 2517 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
2516 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg); 2518 rt2x00mmio_register_read(rt2x00dev, MAC_CSR0, &reg);
2517 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 2519 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
2518 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); 2520 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
2519 2521
@@ -2521,7 +2523,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2521 !rt2x00_rf(rt2x00dev, RF5325) && 2523 !rt2x00_rf(rt2x00dev, RF5325) &&
2522 !rt2x00_rf(rt2x00dev, RF2527) && 2524 !rt2x00_rf(rt2x00dev, RF2527) &&
2523 !rt2x00_rf(rt2x00dev, RF2529)) { 2525 !rt2x00_rf(rt2x00dev, RF2529)) {
2524 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 2526 rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
2525 return -ENODEV; 2527 return -ENODEV;
2526 } 2528 }
2527 2529
@@ -2838,7 +2840,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2838 /* 2840 /*
2839 * Disable power saving. 2841 * Disable power saving.
2840 */ 2842 */
2841 rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); 2843 rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007);
2842 2844
2843 /* 2845 /*
2844 * Allocate eeprom data. 2846 * Allocate eeprom data.
@@ -2855,9 +2857,9 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2855 * Enable rfkill polling by setting GPIO direction of the 2857 * Enable rfkill polling by setting GPIO direction of the
2856 * rfkill switch GPIO pin correctly. 2858 * rfkill switch GPIO pin correctly.
2857 */ 2859 */
2858 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 2860 rt2x00mmio_register_read(rt2x00dev, MAC_CSR13, &reg);
2859 rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1); 2861 rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
2860 rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); 2862 rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, reg);
2861 2863
2862 /* 2864 /*
2863 * Initialize hw specifications. 2865 * Initialize hw specifications.
@@ -2927,25 +2929,25 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw,
2927 field.bit_offset = (queue_idx & 1) * 16; 2929 field.bit_offset = (queue_idx & 1) * 16;
2928 field.bit_mask = 0xffff << field.bit_offset; 2930 field.bit_mask = 0xffff << field.bit_offset;
2929 2931
2930 rt2x00pci_register_read(rt2x00dev, offset, &reg); 2932 rt2x00mmio_register_read(rt2x00dev, offset, &reg);
2931 rt2x00_set_field32(&reg, field, queue->txop); 2933 rt2x00_set_field32(&reg, field, queue->txop);
2932 rt2x00pci_register_write(rt2x00dev, offset, reg); 2934 rt2x00mmio_register_write(rt2x00dev, offset, reg);
2933 2935
2934 /* Update WMM registers */ 2936 /* Update WMM registers */
2935 field.bit_offset = queue_idx * 4; 2937 field.bit_offset = queue_idx * 4;
2936 field.bit_mask = 0xf << field.bit_offset; 2938 field.bit_mask = 0xf << field.bit_offset;
2937 2939
2938 rt2x00pci_register_read(rt2x00dev, AIFSN_CSR, &reg); 2940 rt2x00mmio_register_read(rt2x00dev, AIFSN_CSR, &reg);
2939 rt2x00_set_field32(&reg, field, queue->aifs); 2941 rt2x00_set_field32(&reg, field, queue->aifs);
2940 rt2x00pci_register_write(rt2x00dev, AIFSN_CSR, reg); 2942 rt2x00mmio_register_write(rt2x00dev, AIFSN_CSR, reg);
2941 2943
2942 rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, &reg); 2944 rt2x00mmio_register_read(rt2x00dev, CWMIN_CSR, &reg);
2943 rt2x00_set_field32(&reg, field, queue->cw_min); 2945 rt2x00_set_field32(&reg, field, queue->cw_min);
2944 rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg); 2946 rt2x00mmio_register_write(rt2x00dev, CWMIN_CSR, reg);
2945 2947
2946 rt2x00pci_register_read(rt2x00dev, CWMAX_CSR, &reg); 2948 rt2x00mmio_register_read(rt2x00dev, CWMAX_CSR, &reg);
2947 rt2x00_set_field32(&reg, field, queue->cw_max); 2949 rt2x00_set_field32(&reg, field, queue->cw_max);
2948 rt2x00pci_register_write(rt2x00dev, CWMAX_CSR, reg); 2950 rt2x00mmio_register_write(rt2x00dev, CWMAX_CSR, reg);
2949 2951
2950 return 0; 2952 return 0;
2951} 2953}
@@ -2956,9 +2958,9 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2956 u64 tsf; 2958 u64 tsf;
2957 u32 reg; 2959 u32 reg;
2958 2960
2959 rt2x00pci_register_read(rt2x00dev, TXRX_CSR13, &reg); 2961 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR13, &reg);
2960 tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; 2962 tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32;
2961 rt2x00pci_register_read(rt2x00dev, TXRX_CSR12, &reg); 2963 rt2x00mmio_register_read(rt2x00dev, TXRX_CSR12, &reg);
2962 tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); 2964 tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER);
2963 2965
2964 return tsf; 2966 return tsf;
@@ -2997,8 +2999,8 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2997 .get_firmware_name = rt61pci_get_firmware_name, 2999 .get_firmware_name = rt61pci_get_firmware_name,
2998 .check_firmware = rt61pci_check_firmware, 3000 .check_firmware = rt61pci_check_firmware,
2999 .load_firmware = rt61pci_load_firmware, 3001 .load_firmware = rt61pci_load_firmware,
3000 .initialize = rt2x00pci_initialize, 3002 .initialize = rt2x00mmio_initialize,
3001 .uninitialize = rt2x00pci_uninitialize, 3003 .uninitialize = rt2x00mmio_uninitialize,
3002 .get_entry_state = rt61pci_get_entry_state, 3004 .get_entry_state = rt61pci_get_entry_state,
3003 .clear_entry = rt61pci_clear_entry, 3005 .clear_entry = rt61pci_clear_entry,
3004 .set_device_state = rt61pci_set_device_state, 3006 .set_device_state = rt61pci_set_device_state,
@@ -3009,7 +3011,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
3009 .start_queue = rt61pci_start_queue, 3011 .start_queue = rt61pci_start_queue,
3010 .kick_queue = rt61pci_kick_queue, 3012 .kick_queue = rt61pci_kick_queue,
3011 .stop_queue = rt61pci_stop_queue, 3013 .stop_queue = rt61pci_stop_queue,
3012 .flush_queue = rt2x00pci_flush_queue, 3014 .flush_queue = rt2x00mmio_flush_queue,
3013 .write_tx_desc = rt61pci_write_tx_desc, 3015 .write_tx_desc = rt61pci_write_tx_desc,
3014 .write_beacon = rt61pci_write_beacon, 3016 .write_beacon = rt61pci_write_beacon,
3015 .clear_beacon = rt61pci_clear_beacon, 3017 .clear_beacon = rt61pci_clear_beacon,
@@ -3027,21 +3029,21 @@ static const struct data_queue_desc rt61pci_queue_rx = {
3027 .entry_num = 32, 3029 .entry_num = 32,
3028 .data_size = DATA_FRAME_SIZE, 3030 .data_size = DATA_FRAME_SIZE,
3029 .desc_size = RXD_DESC_SIZE, 3031 .desc_size = RXD_DESC_SIZE,
3030 .priv_size = sizeof(struct queue_entry_priv_pci), 3032 .priv_size = sizeof(struct queue_entry_priv_mmio),
3031}; 3033};
3032 3034
3033static const struct data_queue_desc rt61pci_queue_tx = { 3035static const struct data_queue_desc rt61pci_queue_tx = {
3034 .entry_num = 32, 3036 .entry_num = 32,
3035 .data_size = DATA_FRAME_SIZE, 3037 .data_size = DATA_FRAME_SIZE,
3036 .desc_size = TXD_DESC_SIZE, 3038 .desc_size = TXD_DESC_SIZE,
3037 .priv_size = sizeof(struct queue_entry_priv_pci), 3039 .priv_size = sizeof(struct queue_entry_priv_mmio),
3038}; 3040};
3039 3041
3040static const struct data_queue_desc rt61pci_queue_bcn = { 3042static const struct data_queue_desc rt61pci_queue_bcn = {
3041 .entry_num = 4, 3043 .entry_num = 4,
3042 .data_size = 0, /* No DMA required for beacons */ 3044 .data_size = 0, /* No DMA required for beacons */
3043 .desc_size = TXINFO_SIZE, 3045 .desc_size = TXINFO_SIZE,
3044 .priv_size = sizeof(struct queue_entry_priv_pci), 3046 .priv_size = sizeof(struct queue_entry_priv_mmio),
3045}; 3047};
3046 3048
3047static const struct rt2x00_ops rt61pci_ops = { 3049static const struct rt2x00_ops rt61pci_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 24eec66e9fd2..377e09bb0b81 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -739,7 +739,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
739 u16 eeprom; 739 u16 eeprom;
740 short lna_gain = 0; 740 short lna_gain = 0;
741 741
742 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { 742 if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
743 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) 743 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
744 lna_gain += 14; 744 lna_gain += 14;
745 745
@@ -1122,7 +1122,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1122 } 1122 }
1123 1123
1124 if (!reg) { 1124 if (!reg) {
1125 ERROR(rt2x00dev, "Unstable hardware.\n"); 1125 rt2x00_err(rt2x00dev, "Unstable hardware\n");
1126 return -EBUSY; 1126 return -EBUSY;
1127 } 1127 }
1128 1128
@@ -1139,7 +1139,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1139 0, USB_MODE_FIRMWARE, 1139 0, USB_MODE_FIRMWARE,
1140 REGISTER_TIMEOUT_FIRMWARE); 1140 REGISTER_TIMEOUT_FIRMWARE);
1141 if (status < 0) { 1141 if (status < 0) {
1142 ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); 1142 rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n");
1143 return status; 1143 return status;
1144 } 1144 }
1145 1145
@@ -1305,7 +1305,7 @@ static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1305 udelay(REGISTER_BUSY_DELAY); 1305 udelay(REGISTER_BUSY_DELAY);
1306 } 1306 }
1307 1307
1308 ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); 1308 rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
1309 return -EACCES; 1309 return -EACCES;
1310} 1310}
1311 1311
@@ -1443,8 +1443,8 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1443 } 1443 }
1444 1444
1445 if (unlikely(retval)) 1445 if (unlikely(retval))
1446 ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", 1446 rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
1447 state, retval); 1447 state, retval);
1448 1448
1449 return retval; 1449 return retval;
1450} 1450}
@@ -1567,7 +1567,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1567 */ 1567 */
1568 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 1568 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1569 if (padding_len && skb_pad(entry->skb, padding_len)) { 1569 if (padding_len && skb_pad(entry->skb, padding_len)) {
1570 ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); 1570 rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n");
1571 /* skb freed by skb_pad() on failure */ 1571 /* skb freed by skb_pad() on failure */
1572 entry->skb = NULL; 1572 entry->skb = NULL;
1573 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); 1573 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
@@ -1771,7 +1771,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1771 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1771 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1772 if (!is_valid_ether_addr(mac)) { 1772 if (!is_valid_ether_addr(mac)) {
1773 eth_random_addr(mac); 1773 eth_random_addr(mac);
1774 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 1774 rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
1775 } 1775 }
1776 1776
1777 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1777 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1786,14 +1786,14 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1786 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); 1786 rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
1787 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); 1787 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226);
1788 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1788 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1789 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1789 rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
1790 } 1790 }
1791 1791
1792 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); 1792 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
1793 if (word == 0xffff) { 1793 if (word == 0xffff) {
1794 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); 1794 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0);
1795 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 1795 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
1796 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 1796 rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
1797 } 1797 }
1798 1798
1799 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); 1799 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word);
@@ -1809,7 +1809,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1809 rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, 1809 rt2x00_set_field16(&word, EEPROM_LED_LED_MODE,
1810 LED_MODE_DEFAULT); 1810 LED_MODE_DEFAULT);
1811 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); 1811 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word);
1812 EEPROM(rt2x00dev, "Led: 0x%04x\n", word); 1812 rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word);
1813 } 1813 }
1814 1814
1815 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); 1815 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
@@ -1817,7 +1817,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1817 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); 1817 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
1818 rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); 1818 rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0);
1819 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 1819 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
1820 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); 1820 rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
1821 } 1821 }
1822 1822
1823 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); 1823 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word);
@@ -1825,7 +1825,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1825 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); 1825 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
1826 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); 1826 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
1827 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); 1827 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
1828 EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); 1828 rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word);
1829 } else { 1829 } else {
1830 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); 1830 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1);
1831 if (value < -10 || value > 10) 1831 if (value < -10 || value > 10)
@@ -1841,7 +1841,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1841 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); 1841 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
1842 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); 1842 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
1843 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); 1843 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
1844 EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); 1844 rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
1845 } else { 1845 } else {
1846 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); 1846 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
1847 if (value < -10 || value > 10) 1847 if (value < -10 || value > 10)
@@ -1875,7 +1875,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1875 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); 1875 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
1876 1876
1877 if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) { 1877 if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
1878 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1878 rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n");
1879 return -ENODEV; 1879 return -ENODEV;
1880 } 1880 }
1881 1881
@@ -1883,7 +1883,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1883 !rt2x00_rf(rt2x00dev, RF2528) && 1883 !rt2x00_rf(rt2x00dev, RF2528) &&
1884 !rt2x00_rf(rt2x00dev, RF5225) && 1884 !rt2x00_rf(rt2x00dev, RF5225) &&
1885 !rt2x00_rf(rt2x00dev, RF2527)) { 1885 !rt2x00_rf(rt2x00dev, RF2527)) {
1886 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1886 rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
1887 return -ENODEV; 1887 return -ENODEV;
1888 } 1888 }
1889 1889
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 1b3c2843221d..91a04e2b8ece 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -147,8 +147,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
147 signal = priv->rf->calc_rssi(agc, sq); 147 signal = priv->rf->calc_rssi(agc, sq);
148 } 148 }
149 rx_status.signal = signal; 149 rx_status.signal = signal;
150 rx_status.freq = dev->conf.channel->center_freq; 150 rx_status.freq = dev->conf.chandef.chan->center_freq;
151 rx_status.band = dev->conf.channel->band; 151 rx_status.band = dev->conf.chandef.chan->band;
152 rx_status.mactime = le64_to_cpu(entry->tsft); 152 rx_status.mactime = le64_to_cpu(entry->tsft);
153 rx_status.flag |= RX_FLAG_MACTIME_START; 153 rx_status.flag |= RX_FLAG_MACTIME_START;
154 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 154 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 5ee7589dd546..077ff92cc139 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -82,7 +82,8 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
82 struct ieee80211_conf *conf) 82 struct ieee80211_conf *conf)
83{ 83{
84 struct rtl8180_priv *priv = dev->priv; 84 struct rtl8180_priv *priv = dev->priv;
85 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 85 int channel =
86 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
86 u32 txpw = priv->channels[channel - 1].hw_value & 0xFF; 87 u32 txpw = priv->channels[channel - 1].hw_value & 0xFF;
87 u32 chan = channel - 1; 88 u32 chan = channel - 1;
88 89
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 667b3363d437..4715000c94dd 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -95,7 +95,7 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
95{ 95{
96 struct rtl8180_priv *priv = dev->priv; 96 struct rtl8180_priv *priv = dev->priv;
97 int channel = conf ? 97 int channel = conf ?
98 ieee80211_frequency_to_channel(conf->channel->center_freq) : 1; 98 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq) : 1;
99 unsigned int chan_idx = channel - 1; 99 unsigned int chan_idx = channel - 1;
100 u32 txpw = priv->channels[chan_idx].hw_value & 0xFF; 100 u32 txpw = priv->channels[chan_idx].hw_value & 0xFF;
101 u32 chan = max2820_chan[chan_idx]; 101 u32 chan = max2820_chan[chan_idx];
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index 7c4574ba9d75..cc2a5412c1f0 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -719,7 +719,8 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
719 struct ieee80211_conf *conf) 719 struct ieee80211_conf *conf)
720{ 720{
721 struct rtl8180_priv *priv = dev->priv; 721 struct rtl8180_priv *priv = dev->priv;
722 int chan = ieee80211_frequency_to_channel(conf->channel->center_freq); 722 int chan =
723 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
723 724
724 if (priv->rf->init == rtl8225_rf_init) 725 if (priv->rf->init == rtl8225_rf_init)
725 rtl8225_rf_set_tx_power(dev, chan); 726 rtl8225_rf_set_tx_power(dev, chan);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index 44771a6286af..b3ec40f6bd23 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -105,7 +105,8 @@ static void sa2400_rf_set_channel(struct ieee80211_hw *dev,
105 struct ieee80211_conf *conf) 105 struct ieee80211_conf *conf)
106{ 106{
107 struct rtl8180_priv *priv = dev->priv; 107 struct rtl8180_priv *priv = dev->priv;
108 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 108 int channel =
109 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
109 u32 txpw = priv->channels[channel - 1].hw_value & 0xFF; 110 u32 txpw = priv->channels[channel - 1].hw_value & 0xFF;
110 u32 chan = sa2400_chan[channel - 1]; 111 u32 chan = sa2400_chan[channel - 1];
111 112
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4574bd213705..f49220e234b0 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -379,8 +379,8 @@ static void rtl8187_rx_cb(struct urb *urb)
379 rate = (flags >> 20) & 0xF; 379 rate = (flags >> 20) & 0xF;
380 skb_trim(skb, flags & 0x0FFF); 380 skb_trim(skb, flags & 0x0FFF);
381 rx_status.rate_idx = rate; 381 rx_status.rate_idx = rate;
382 rx_status.freq = dev->conf.channel->center_freq; 382 rx_status.freq = dev->conf.chandef.chan->center_freq;
383 rx_status.band = dev->conf.channel->band; 383 rx_status.band = dev->conf.chandef.chan->band;
384 rx_status.flag |= RX_FLAG_MACTIME_START; 384 rx_status.flag |= RX_FLAG_MACTIME_START;
385 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 385 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
386 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 386 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index 908903f721f5..f0bf35fedbaf 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -905,7 +905,8 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
905 struct ieee80211_conf *conf) 905 struct ieee80211_conf *conf)
906{ 906{
907 struct rtl8187_priv *priv = dev->priv; 907 struct rtl8187_priv *priv = dev->priv;
908 int chan = ieee80211_frequency_to_channel(conf->channel->center_freq); 908 int chan =
909 ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
909 910
910 if (priv->rf->init == rtl8225_rf_init) 911 if (priv->rf->init == rtl8225_rf_init)
911 rtl8225_rf_set_tx_power(dev, chan); 912 rtl8225_rf_set_tx_power(dev, chan);
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index b6aa0c40658f..7253de3d8c66 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -55,6 +55,15 @@ config RTL8723AE
55 55
56 If you choose to build it as a module, it will be called rtl8723ae 56 If you choose to build it as a module, it will be called rtl8723ae
57 57
58config RTL8188EE
59 tristate "Realtek RTL8188EE Wireless Network Adapter"
60 depends on RTLWIFI && PCI
61 ---help---
62 This is the driver for Realtek RTL8188EE 802.11n PCIe
63 wireless network adapters.
64
65 If you choose to build it as a module, it will be called rtl8188ee
66
58config RTL8192CU 67config RTL8192CU
59 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 68 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
60 depends on RTLWIFI && USB 69 depends on RTLWIFI && USB
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 3b1cbac741e3..ff02b874f8d8 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/
26obj-$(CONFIG_RTL8192SE) += rtl8192se/ 26obj-$(CONFIG_RTL8192SE) += rtl8192se/
27obj-$(CONFIG_RTL8192DE) += rtl8192de/ 27obj-$(CONFIG_RTL8192DE) += rtl8192de/
28obj-$(CONFIG_RTL8723AE) += rtl8723ae/ 28obj-$(CONFIG_RTL8723AE) += rtl8723ae/
29obj-$(CONFIG_RTL8188EE) += rtl8188ee/
29 30
30ccflags-y += -D__CHECK_ENDIAN__ 31ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 99c5cea3fe21..af59dd5718e1 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -54,7 +54,8 @@
54 *5) frame process functions 54 *5) frame process functions
55 *6) IOT functions 55 *6) IOT functions
56 *7) sysfs functions 56 *7) sysfs functions
57 *8) ... 57 *8) vif functions
58 *9) ...
58 */ 59 */
59 60
60/********************************************************* 61/*********************************************************
@@ -198,34 +199,46 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
198 199
199 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 200 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
200 201
201 /* 202 /*hw->wiphy->bands[IEEE80211_BAND_2GHZ]
202 *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
203 *base on ant_num 203 *base on ant_num
204 *rx_mask: RX mask 204 *rx_mask: RX mask
205 *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7 205 *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
206 *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15 206 *if rx_ant = 2 rx_mask[1]= 0xff;==>MCS8-MCS15
207 *if rx_ant >=3 rx_mask[2]=0xff; 207 *if rx_ant >= 3 rx_mask[2]= 0xff;
208 *if BW_40 rx_mask[4]=0x01; 208 *if BW_40 rx_mask[4]= 0x01;
209 *highest supported RX rate 209 *highest supported RX rate
210 */ 210 */
211 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_2T2R) { 211 if (rtlpriv->dm.supp_phymode_switch) {
212 212
213 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T2R or 2T2R\n"); 213 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
214 "Support phy mode switch\n");
214 215
215 ht_cap->mcs.rx_mask[0] = 0xFF; 216 ht_cap->mcs.rx_mask[0] = 0xFF;
216 ht_cap->mcs.rx_mask[1] = 0xFF; 217 ht_cap->mcs.rx_mask[1] = 0xFF;
217 ht_cap->mcs.rx_mask[4] = 0x01; 218 ht_cap->mcs.rx_mask[4] = 0x01;
218 219
219 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15); 220 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
220 } else if (get_rf_type(rtlphy) == RF_1T1R) { 221 } else {
221 222 if (get_rf_type(rtlphy) == RF_1T2R ||
222 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n"); 223 get_rf_type(rtlphy) == RF_2T2R) {
223 224 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
224 ht_cap->mcs.rx_mask[0] = 0xFF; 225 "1T2R or 2T2R\n");
225 ht_cap->mcs.rx_mask[1] = 0x00; 226 ht_cap->mcs.rx_mask[0] = 0xFF;
226 ht_cap->mcs.rx_mask[4] = 0x01; 227 ht_cap->mcs.rx_mask[1] = 0xFF;
227 228 ht_cap->mcs.rx_mask[4] = 0x01;
228 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7); 229
230 ht_cap->mcs.rx_highest =
231 cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
232 } else if (get_rf_type(rtlphy) == RF_1T1R) {
233 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
234
235 ht_cap->mcs.rx_mask[0] = 0xFF;
236 ht_cap->mcs.rx_mask[1] = 0x00;
237 ht_cap->mcs.rx_mask[4] = 0x01;
238
239 ht_cap->mcs.rx_highest =
240 cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
241 }
229 } 242 }
230} 243}
231 244
@@ -311,6 +324,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
311 IEEE80211_HW_AMPDU_AGGREGATION | 324 IEEE80211_HW_AMPDU_AGGREGATION |
312 IEEE80211_HW_CONNECTION_MONITOR | 325 IEEE80211_HW_CONNECTION_MONITOR |
313 /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */ 326 /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
327 IEEE80211_HW_CONNECTION_MONITOR |
328 IEEE80211_HW_MFP_CAPABLE |
314 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0; 329 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
315 330
316 /* swlps or hwlps has been set in diff chip in init_sw_vars */ 331 /* swlps or hwlps has been set in diff chip in init_sw_vars */
@@ -323,8 +338,12 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
323 hw->wiphy->interface_modes = 338 hw->wiphy->interface_modes =
324 BIT(NL80211_IFTYPE_AP) | 339 BIT(NL80211_IFTYPE_AP) |
325 BIT(NL80211_IFTYPE_STATION) | 340 BIT(NL80211_IFTYPE_STATION) |
326 BIT(NL80211_IFTYPE_ADHOC); 341 BIT(NL80211_IFTYPE_ADHOC) |
342 BIT(NL80211_IFTYPE_MESH_POINT) |
343 BIT(NL80211_IFTYPE_P2P_CLIENT) |
344 BIT(NL80211_IFTYPE_P2P_GO);
327 345
346 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
328 hw->wiphy->rts_threshold = 2347; 347 hw->wiphy->rts_threshold = 2347;
329 348
330 hw->queues = AC_MAX; 349 hw->queues = AC_MAX;
@@ -354,9 +373,10 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
354 struct rtl_priv *rtlpriv = rtl_priv(hw); 373 struct rtl_priv *rtlpriv = rtl_priv(hw);
355 374
356 /* <1> timer */ 375 /* <1> timer */
357 init_timer(&rtlpriv->works.watchdog_timer);
358 setup_timer(&rtlpriv->works.watchdog_timer, 376 setup_timer(&rtlpriv->works.watchdog_timer,
359 rtl_watch_dog_timer_callback, (unsigned long)hw); 377 rtl_watch_dog_timer_callback, (unsigned long)hw);
378 setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
379 rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
360 380
361 /* <2> work queue */ 381 /* <2> work queue */
362 rtlpriv->works.hw = hw; 382 rtlpriv->works.hw = hw;
@@ -369,6 +389,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
369 (void *)rtl_swlps_wq_callback); 389 (void *)rtl_swlps_wq_callback);
370 INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq, 390 INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
371 (void *)rtl_swlps_rfon_wq_callback); 391 (void *)rtl_swlps_rfon_wq_callback);
392 INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
393 (void *)rtl_fwevt_wq_callback);
372 394
373} 395}
374 396
@@ -382,6 +404,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
382 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); 404 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
383 cancel_delayed_work(&rtlpriv->works.ps_work); 405 cancel_delayed_work(&rtlpriv->works.ps_work);
384 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); 406 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
407 cancel_delayed_work(&rtlpriv->works.fwevt_wq);
385} 408}
386 409
387void rtl_init_rfkill(struct ieee80211_hw *hw) 410void rtl_init_rfkill(struct ieee80211_hw *hw)
@@ -436,12 +459,6 @@ int rtl_init_core(struct ieee80211_hw *hw)
436 if (rtl_regd_init(hw, rtl_reg_notifier)) { 459 if (rtl_regd_init(hw, rtl_reg_notifier)) {
437 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n"); 460 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n");
438 return 1; 461 return 1;
439 } else {
440 /* CRDA regd hint must after init CRDA */
441 if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
442 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
443 "regulatory_hint fail\n");
444 }
445 } 462 }
446 463
447 /* <4> locks */ 464 /* <4> locks */
@@ -449,15 +466,25 @@ int rtl_init_core(struct ieee80211_hw *hw)
449 mutex_init(&rtlpriv->locks.ps_mutex); 466 mutex_init(&rtlpriv->locks.ps_mutex);
450 spin_lock_init(&rtlpriv->locks.ips_lock); 467 spin_lock_init(&rtlpriv->locks.ips_lock);
451 spin_lock_init(&rtlpriv->locks.irq_th_lock); 468 spin_lock_init(&rtlpriv->locks.irq_th_lock);
469 spin_lock_init(&rtlpriv->locks.irq_pci_lock);
470 spin_lock_init(&rtlpriv->locks.tx_lock);
452 spin_lock_init(&rtlpriv->locks.h2c_lock); 471 spin_lock_init(&rtlpriv->locks.h2c_lock);
453 spin_lock_init(&rtlpriv->locks.rf_ps_lock); 472 spin_lock_init(&rtlpriv->locks.rf_ps_lock);
454 spin_lock_init(&rtlpriv->locks.rf_lock); 473 spin_lock_init(&rtlpriv->locks.rf_lock);
455 spin_lock_init(&rtlpriv->locks.waitq_lock); 474 spin_lock_init(&rtlpriv->locks.waitq_lock);
475 spin_lock_init(&rtlpriv->locks.entry_list_lock);
476 spin_lock_init(&rtlpriv->locks.fw_ps_lock);
456 spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); 477 spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
478 spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
479 spin_lock_init(&rtlpriv->locks.fw_ps_lock);
480 spin_lock_init(&rtlpriv->locks.lps_lock);
481
482 /* <5> init list */
483 INIT_LIST_HEAD(&rtlpriv->entry_list);
457 484
458 rtlmac->link_state = MAC80211_NOLINK; 485 rtlmac->link_state = MAC80211_NOLINK;
459 486
460 /* <5> init deferred work */ 487 /* <6> init deferred work */
461 _rtl_init_deferred_work(hw); 488 _rtl_init_deferred_work(hw);
462 489
463 return 0; 490 return 0;
@@ -523,7 +550,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
523 if (mac->opmode == NL80211_IFTYPE_STATION) 550 if (mac->opmode == NL80211_IFTYPE_STATION)
524 bw_40 = mac->bw_40; 551 bw_40 = mac->bw_40;
525 else if (mac->opmode == NL80211_IFTYPE_AP || 552 else if (mac->opmode == NL80211_IFTYPE_AP ||
526 mac->opmode == NL80211_IFTYPE_ADHOC) 553 mac->opmode == NL80211_IFTYPE_ADHOC ||
554 mac->opmode == NL80211_IFTYPE_MESH_POINT)
527 bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; 555 bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
528 556
529 if (bw_40 && sgi_40) 557 if (bw_40 && sgi_40)
@@ -578,23 +606,26 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
578 if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) { 606 if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
579 if (mac->opmode == NL80211_IFTYPE_STATION) { 607 if (mac->opmode == NL80211_IFTYPE_STATION) {
580 tcb_desc->ratr_index = 0; 608 tcb_desc->ratr_index = 0;
581 } else if (mac->opmode == NL80211_IFTYPE_ADHOC) { 609 } else if (mac->opmode == NL80211_IFTYPE_ADHOC ||
610 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
582 if (tcb_desc->multicast || tcb_desc->broadcast) { 611 if (tcb_desc->multicast || tcb_desc->broadcast) {
583 tcb_desc->hw_rate = 612 tcb_desc->hw_rate =
584 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; 613 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
585 tcb_desc->use_driver_rate = 1; 614 tcb_desc->use_driver_rate = 1;
615 tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
586 } else { 616 } else {
587 /* TODO */ 617 tcb_desc->ratr_index = ratr_index;
588 } 618 }
589 tcb_desc->ratr_index = ratr_index;
590 } else if (mac->opmode == NL80211_IFTYPE_AP) { 619 } else if (mac->opmode == NL80211_IFTYPE_AP) {
591 tcb_desc->ratr_index = ratr_index; 620 tcb_desc->ratr_index = ratr_index;
592 } 621 }
593 } 622 }
594 623
595 if (rtlpriv->dm.useramask) { 624 if (rtlpriv->dm.useramask) {
596 /* TODO we will differentiate adhoc and station futrue */ 625 tcb_desc->ratr_index = ratr_index;
597 if (mac->opmode == NL80211_IFTYPE_STATION) { 626 /* TODO we will differentiate adhoc and station future */
627 if (mac->opmode == NL80211_IFTYPE_STATION ||
628 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
598 tcb_desc->mac_id = 0; 629 tcb_desc->mac_id = 0;
599 630
600 if (mac->mode == WIRELESS_MODE_N_24G) 631 if (mac->mode == WIRELESS_MODE_N_24G)
@@ -608,7 +639,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
608 else if (mac->mode & WIRELESS_MODE_A) 639 else if (mac->mode & WIRELESS_MODE_A)
609 tcb_desc->ratr_index = RATR_INX_WIRELESS_G; 640 tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
610 } else if (mac->opmode == NL80211_IFTYPE_AP || 641 } else if (mac->opmode == NL80211_IFTYPE_AP ||
611 mac->opmode == NL80211_IFTYPE_ADHOC) { 642 mac->opmode == NL80211_IFTYPE_ADHOC) {
612 if (NULL != sta) { 643 if (NULL != sta) {
613 if (sta->aid > 0) 644 if (sta->aid > 0)
614 tcb_desc->mac_id = sta->aid + 1; 645 tcb_desc->mac_id = sta->aid + 1;
@@ -619,7 +650,6 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
619 } 650 }
620 } 651 }
621 } 652 }
622
623} 653}
624 654
625static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, 655static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
@@ -633,7 +663,8 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
633 if (!sta) 663 if (!sta)
634 return; 664 return;
635 if (mac->opmode == NL80211_IFTYPE_AP || 665 if (mac->opmode == NL80211_IFTYPE_AP ||
636 mac->opmode == NL80211_IFTYPE_ADHOC) { 666 mac->opmode == NL80211_IFTYPE_ADHOC ||
667 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
637 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 668 if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
638 return; 669 return;
639 } else if (mac->opmode == NL80211_IFTYPE_STATION) { 670 } else if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -691,7 +722,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
691 int rate_idx; 722 int rate_idx;
692 723
693 if (false == isht) { 724 if (false == isht) {
694 if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) { 725 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
695 switch (desc_rate) { 726 switch (desc_rate) {
696 case DESC92_RATE1M: 727 case DESC92_RATE1M:
697 rate_idx = 0; 728 rate_idx = 0;
@@ -834,8 +865,8 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
834 if (rtlpriv->dm.supp_phymode_switch && 865 if (rtlpriv->dm.supp_phymode_switch &&
835 mac->link_state < MAC80211_LINKED && 866 mac->link_state < MAC80211_LINKED &&
836 (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { 867 (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
837 if (rtlpriv->cfg->ops->check_switch_to_dmdp) 868 if (rtlpriv->cfg->ops->chk_switch_dmdp)
838 rtlpriv->cfg->ops->check_switch_to_dmdp(hw); 869 rtlpriv->cfg->ops->chk_switch_dmdp(hw);
839 } 870 }
840 if (ieee80211_is_auth(fc)) { 871 if (ieee80211_is_auth(fc)) {
841 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); 872 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
@@ -924,6 +955,56 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
924} 955}
925EXPORT_SYMBOL(rtl_get_tcb_desc); 956EXPORT_SYMBOL(rtl_get_tcb_desc);
926 957
958static bool addbareq_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
959{
960 struct rtl_priv *rtlpriv = rtl_priv(hw);
961 struct ieee80211_sta *sta = NULL;
962 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
963 struct rtl_sta_info *sta_entry = NULL;
964 struct ieee80211_mgmt *mgmt = (void *)skb->data;
965 u16 capab = 0, tid = 0;
966 struct rtl_tid_data *tid_data;
967 struct sk_buff *skb_delba = NULL;
968 struct ieee80211_rx_status rx_status = { 0 };
969
970 rcu_read_lock();
971 sta = rtl_find_sta(hw, hdr->addr3);
972 if (sta == NULL) {
973 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_EMERG,
974 "sta is NULL\n");
975 rcu_read_unlock();
976 return true;
977 }
978
979 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
980 if (!sta_entry) {
981 rcu_read_unlock();
982 return true;
983 }
984 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
985 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
986 tid_data = &sta_entry->tids[tid];
987 if (tid_data->agg.rx_agg_state == RTL_RX_AGG_START) {
988 skb_delba = rtl_make_del_ba(hw, hdr->addr2, hdr->addr3, tid);
989 if (skb_delba) {
990 rx_status.freq = hw->conf.chandef.chan->center_freq;
991 rx_status.band = hw->conf.chandef.chan->band;
992 rx_status.flag |= RX_FLAG_DECRYPTED;
993 rx_status.flag |= RX_FLAG_MACTIME_END;
994 rx_status.rate_idx = 0;
995 rx_status.signal = 50 + 10;
996 memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status,
997 sizeof(rx_status));
998 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG,
999 "fake del\n", skb_delba->data,
1000 skb_delba->len);
1001 ieee80211_rx_irqsafe(hw, skb_delba);
1002 }
1003 }
1004 rcu_read_unlock();
1005 return false;
1006}
1007
927bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) 1008bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
928{ 1009{
929 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1010 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -948,6 +1029,11 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
948 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, 1029 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
949 "%s ACT_ADDBAREQ From :%pM\n", 1030 "%s ACT_ADDBAREQ From :%pM\n",
950 is_tx ? "Tx" : "Rx", hdr->addr2); 1031 is_tx ? "Tx" : "Rx", hdr->addr2);
1032 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
1033 skb->data, skb->len);
1034 if (!is_tx)
1035 if (addbareq_rx(hw, skb))
1036 return true;
951 break; 1037 break;
952 case ACT_ADDBARSP: 1038 case ACT_ADDBARSP:
953 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, 1039 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
@@ -1003,8 +1089,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1003 is_tx ? "Tx" : "Rx"); 1089 is_tx ? "Tx" : "Rx");
1004 1090
1005 if (is_tx) { 1091 if (is_tx) {
1092 rtlpriv->enter_ps = false;
1006 schedule_work(&rtlpriv-> 1093 schedule_work(&rtlpriv->
1007 works.lps_leave_work); 1094 works.lps_change_work);
1008 ppsc->last_delaylps_stamp_jiffies = 1095 ppsc->last_delaylps_stamp_jiffies =
1009 jiffies; 1096 jiffies;
1010 } 1097 }
@@ -1014,7 +1101,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1014 } 1101 }
1015 } else if (ETH_P_ARP == ether_type) { 1102 } else if (ETH_P_ARP == ether_type) {
1016 if (is_tx) { 1103 if (is_tx) {
1017 schedule_work(&rtlpriv->works.lps_leave_work); 1104 rtlpriv->enter_ps = false;
1105 schedule_work(&rtlpriv->works.lps_change_work);
1018 ppsc->last_delaylps_stamp_jiffies = jiffies; 1106 ppsc->last_delaylps_stamp_jiffies = jiffies;
1019 } 1107 }
1020 1108
@@ -1024,7 +1112,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1024 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); 1112 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
1025 1113
1026 if (is_tx) { 1114 if (is_tx) {
1027 schedule_work(&rtlpriv->works.lps_leave_work); 1115 rtlpriv->enter_ps = false;
1116 schedule_work(&rtlpriv->works.lps_change_work);
1028 ppsc->last_delaylps_stamp_jiffies = jiffies; 1117 ppsc->last_delaylps_stamp_jiffies = jiffies;
1029 } 1118 }
1030 1119
@@ -1101,6 +1190,58 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw,
1101 return 0; 1190 return 0;
1102} 1191}
1103 1192
1193int rtl_rx_agg_start(struct ieee80211_hw *hw,
1194 struct ieee80211_sta *sta, u16 tid)
1195{
1196 struct rtl_priv *rtlpriv = rtl_priv(hw);
1197 struct rtl_tid_data *tid_data;
1198 struct rtl_sta_info *sta_entry = NULL;
1199
1200 if (sta == NULL)
1201 return -EINVAL;
1202
1203 if (unlikely(tid >= MAX_TID_COUNT))
1204 return -EINVAL;
1205
1206 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1207 if (!sta_entry)
1208 return -ENXIO;
1209 tid_data = &sta_entry->tids[tid];
1210
1211 RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
1212 "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
1213 tid_data->seq_number);
1214
1215 tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
1216 return 0;
1217}
1218
1219int rtl_rx_agg_stop(struct ieee80211_hw *hw,
1220 struct ieee80211_sta *sta, u16 tid)
1221{
1222 struct rtl_priv *rtlpriv = rtl_priv(hw);
1223 struct rtl_sta_info *sta_entry = NULL;
1224
1225 if (sta == NULL)
1226 return -EINVAL;
1227
1228 if (!sta->addr) {
1229 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n");
1230 return -EINVAL;
1231 }
1232
1233 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
1234 "on ra = %pM tid = %d\n", sta->addr, tid);
1235
1236 if (unlikely(tid >= MAX_TID_COUNT))
1237 return -EINVAL;
1238
1239 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1240 sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP;
1241
1242 return 0;
1243}
1244
1104int rtl_tx_agg_oper(struct ieee80211_hw *hw, 1245int rtl_tx_agg_oper(struct ieee80211_hw *hw,
1105 struct ieee80211_sta *sta, u16 tid) 1246 struct ieee80211_sta *sta, u16 tid)
1106{ 1247{
@@ -1132,6 +1273,34 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
1132 * wq & timer callback functions 1273 * wq & timer callback functions
1133 * 1274 *
1134 *********************************************************/ 1275 *********************************************************/
1276/* this function is used for roaming */
1277void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
1278{
1279 struct rtl_priv *rtlpriv = rtl_priv(hw);
1280 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1281
1282 if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
1283 return;
1284
1285 if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
1286 return;
1287
1288 /* check if this really is a beacon */
1289 if (!ieee80211_is_beacon(hdr->frame_control) &&
1290 !ieee80211_is_probe_resp(hdr->frame_control))
1291 return;
1292
1293 /* min. beacon length + FCS_LEN */
1294 if (skb->len <= 40 + FCS_LEN)
1295 return;
1296
1297 /* and only beacons from the associated BSSID, please */
1298 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
1299 return;
1300
1301 rtlpriv->link_info.bcn_rx_inperiod++;
1302}
1303
1135void rtl_watchdog_wq_callback(void *data) 1304void rtl_watchdog_wq_callback(void *data)
1136{ 1305{
1137 struct rtl_works *rtlworks = container_of_dwork_rtl(data, 1306 struct rtl_works *rtlworks = container_of_dwork_rtl(data,
@@ -1142,6 +1311,8 @@ void rtl_watchdog_wq_callback(void *data)
1142 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1311 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1143 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1312 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1144 bool busytraffic = false; 1313 bool busytraffic = false;
1314 bool tx_busy_traffic = false;
1315 bool rx_busy_traffic = false;
1145 bool higher_busytraffic = false; 1316 bool higher_busytraffic = false;
1146 bool higher_busyrxtraffic = false; 1317 bool higher_busyrxtraffic = false;
1147 u8 idx, tid; 1318 u8 idx, tid;
@@ -1151,7 +1322,6 @@ void rtl_watchdog_wq_callback(void *data)
1151 u32 aver_tx_cnt_inperiod = 0; 1322 u32 aver_tx_cnt_inperiod = 0;
1152 u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0}; 1323 u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
1153 u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0}; 1324 u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
1154 bool enter_ps = false;
1155 1325
1156 if (is_hal_stop(rtlhal)) 1326 if (is_hal_stop(rtlhal))
1157 return; 1327 return;
@@ -1191,8 +1361,13 @@ void rtl_watchdog_wq_callback(void *data)
1191 aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4; 1361 aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
1192 1362
1193 /* (2) check traffic busy */ 1363 /* (2) check traffic busy */
1194 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) 1364 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
1195 busytraffic = true; 1365 busytraffic = true;
1366 if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
1367 rx_busy_traffic = true;
1368 else
1369 tx_busy_traffic = false;
1370 }
1196 1371
1197 /* Higher Tx/Rx data. */ 1372 /* Higher Tx/Rx data. */
1198 if (aver_rx_cnt_inperiod > 4000 || 1373 if (aver_rx_cnt_inperiod > 4000 ||
@@ -1228,15 +1403,12 @@ void rtl_watchdog_wq_callback(void *data)
1228 if (((rtlpriv->link_info.num_rx_inperiod + 1403 if (((rtlpriv->link_info.num_rx_inperiod +
1229 rtlpriv->link_info.num_tx_inperiod) > 8) || 1404 rtlpriv->link_info.num_tx_inperiod) > 8) ||
1230 (rtlpriv->link_info.num_rx_inperiod > 2)) 1405 (rtlpriv->link_info.num_rx_inperiod > 2))
1231 enter_ps = false; 1406 rtlpriv->enter_ps = true;
1232 else 1407 else
1233 enter_ps = true; 1408 rtlpriv->enter_ps = false;
1234 1409
1235 /* LeisurePS only work in infra mode. */ 1410 /* LeisurePS only work in infra mode. */
1236 if (enter_ps) 1411 schedule_work(&rtlpriv->works.lps_change_work);
1237 rtl_lps_enter(hw);
1238 else
1239 rtl_lps_leave(hw);
1240 } 1412 }
1241 1413
1242 rtlpriv->link_info.num_rx_inperiod = 0; 1414 rtlpriv->link_info.num_rx_inperiod = 0;
@@ -1246,10 +1418,37 @@ void rtl_watchdog_wq_callback(void *data)
1246 1418
1247 rtlpriv->link_info.busytraffic = busytraffic; 1419 rtlpriv->link_info.busytraffic = busytraffic;
1248 rtlpriv->link_info.higher_busytraffic = higher_busytraffic; 1420 rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
1421 rtlpriv->link_info.rx_busy_traffic = rx_busy_traffic;
1422 rtlpriv->link_info.tx_busy_traffic = tx_busy_traffic;
1249 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic; 1423 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
1250 1424
1251 /* <3> DM */ 1425 /* <3> DM */
1252 rtlpriv->cfg->ops->dm_watchdog(hw); 1426 rtlpriv->cfg->ops->dm_watchdog(hw);
1427
1428 /* <4> roaming */
1429 if (mac->link_state == MAC80211_LINKED &&
1430 mac->opmode == NL80211_IFTYPE_STATION) {
1431 if ((rtlpriv->link_info.bcn_rx_inperiod +
1432 rtlpriv->link_info.num_rx_inperiod) == 0) {
1433 rtlpriv->link_info.roam_times++;
1434 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
1435 "AP off for %d s\n",
1436 (rtlpriv->link_info.roam_times * 2));
1437
1438 /* if we can't recv beacon for 6s, we should
1439 * reconnect this AP
1440 */
1441 if (rtlpriv->link_info.roam_times >= 3) {
1442 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1443 "AP off, try to reconnect now\n");
1444 rtlpriv->link_info.roam_times = 0;
1445 ieee80211_connection_loss(rtlpriv->mac80211.vif);
1446 }
1447 } else {
1448 rtlpriv->link_info.roam_times = 0;
1449 }
1450 }
1451 rtlpriv->link_info.bcn_rx_inperiod = 0;
1253} 1452}
1254 1453
1255void rtl_watch_dog_timer_callback(unsigned long data) 1454void rtl_watch_dog_timer_callback(unsigned long data)
@@ -1264,6 +1463,28 @@ void rtl_watch_dog_timer_callback(unsigned long data)
1264 jiffies + MSECS(RTL_WATCH_DOG_TIME)); 1463 jiffies + MSECS(RTL_WATCH_DOG_TIME));
1265} 1464}
1266 1465
1466void rtl_fwevt_wq_callback(void *data)
1467{
1468 struct rtl_works *rtlworks =
1469 container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
1470 struct ieee80211_hw *hw = rtlworks->hw;
1471 struct rtl_priv *rtlpriv = rtl_priv(hw);
1472
1473 rtlpriv->cfg->ops->c2h_command_handle(hw);
1474}
1475
1476void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
1477{
1478 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
1479 struct rtl_priv *rtlpriv = rtl_priv(hw);
1480 struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
1481
1482 if (buddy_priv == NULL)
1483 return;
1484
1485 rtlpriv->cfg->ops->dualmac_easy_concurrent(hw);
1486}
1487
1267/********************************************************* 1488/*********************************************************
1268 * 1489 *
1269 * frame process functions 1490 * frame process functions
@@ -1334,14 +1555,16 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
1334} 1555}
1335 1556
1336int rtl_send_smps_action(struct ieee80211_hw *hw, 1557int rtl_send_smps_action(struct ieee80211_hw *hw,
1337 struct ieee80211_sta *sta, u8 *da, u8 *bssid, 1558 struct ieee80211_sta *sta,
1338 enum ieee80211_smps_mode smps) 1559 enum ieee80211_smps_mode smps)
1339{ 1560{
1340 struct rtl_priv *rtlpriv = rtl_priv(hw); 1561 struct rtl_priv *rtlpriv = rtl_priv(hw);
1341 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1562 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1342 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 1563 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1343 struct sk_buff *skb = rtl_make_smps_action(hw, smps, da, bssid); 1564 struct sk_buff *skb = NULL;
1344 struct rtl_tcb_desc tcb_desc; 1565 struct rtl_tcb_desc tcb_desc;
1566 u8 bssid[ETH_ALEN] = {0};
1567
1345 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 1568 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1346 1569
1347 if (rtlpriv->mac80211.act_scanning) 1570 if (rtlpriv->mac80211.act_scanning)
@@ -1356,21 +1579,67 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
1356 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) 1579 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
1357 goto err_free; 1580 goto err_free;
1358 1581
1582 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP)
1583 memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN);
1584 else
1585 memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN);
1586
1587 skb = rtl_make_smps_action(hw, smps, sta->addr, bssid);
1359 /* this is a type = mgmt * stype = action frame */ 1588 /* this is a type = mgmt * stype = action frame */
1360 if (skb) { 1589 if (skb) {
1361 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1590 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1362 struct rtl_sta_info *sta_entry = 1591 struct rtl_sta_info *sta_entry =
1363 (struct rtl_sta_info *) sta->drv_priv; 1592 (struct rtl_sta_info *) sta->drv_priv;
1364 sta_entry->mimo_ps = smps; 1593 sta_entry->mimo_ps = smps;
1365 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
1366 1594
1367 info->control.rates[0].idx = 0; 1595 info->control.rates[0].idx = 0;
1368 info->band = hw->conf.channel->band; 1596 info->band = hw->conf.chandef.chan->band;
1369 rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc); 1597 rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
1370 } 1598 }
1599 return 1;
1600
1371err_free: 1601err_free:
1372 return 0; 1602 return 0;
1373} 1603}
1604EXPORT_SYMBOL(rtl_send_smps_action);
1605
1606/* There seem to be issues in mac80211 regarding when del ba frames can be
1607 * received. As a work around, we make a fake del_ba if we receive a ba_req;
1608 * however, rx_agg was opened to let mac80211 release some ba related
1609 * resources. This del_ba is for tx only.
1610 */
1611struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
1612 u8 *sa, u8 *bssid, u16 tid)
1613{
1614 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1615 struct sk_buff *skb;
1616 struct ieee80211_mgmt *action_frame;
1617 u16 params;
1618
1619 /* 27 = header + category + action + smps mode */
1620 skb = dev_alloc_skb(34 + hw->extra_tx_headroom);
1621 if (!skb)
1622 return NULL;
1623
1624 skb_reserve(skb, hw->extra_tx_headroom);
1625 action_frame = (void *)skb_put(skb, 34);
1626 memset(action_frame, 0, 34);
1627 memcpy(action_frame->sa, sa, ETH_ALEN);
1628 memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN);
1629 memcpy(action_frame->bssid, bssid, ETH_ALEN);
1630 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1631 IEEE80211_STYPE_ACTION);
1632 action_frame->u.action.category = WLAN_CATEGORY_BACK;
1633 action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
1634 params = (u16)(1 << 11); /* bit 11 initiator */
1635 params |= (u16)(tid << 12); /* bit 15:12 TID number */
1636
1637 action_frame->u.action.u.delba.params = cpu_to_le16(params);
1638 action_frame->u.action.u.delba.reason_code =
1639 cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
1640
1641 return skb;
1642}
1374 1643
1375/********************************************************* 1644/*********************************************************
1376 * 1645 *
@@ -1587,11 +1856,17 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
1587MODULE_LICENSE("GPL"); 1856MODULE_LICENSE("GPL");
1588MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); 1857MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
1589 1858
1859struct rtl_global_var global_var = {};
1860
1590static int __init rtl_core_module_init(void) 1861static int __init rtl_core_module_init(void)
1591{ 1862{
1592 if (rtl_rate_control_register()) 1863 if (rtl_rate_control_register())
1593 pr_err("Unable to register rtl_rc, use default RC !!\n"); 1864 pr_err("Unable to register rtl_rc, use default RC !!\n");
1594 1865
1866 /* init some global vars */
1867 INIT_LIST_HEAD(&global_var.glb_priv_list);
1868 spin_lock_init(&global_var.glb_list_lock);
1869
1595 return 0; 1870 return 0;
1596} 1871}
1597 1872
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 5a8c80e259f7..8576bc34b032 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -113,6 +113,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw);
113void rtl_init_rfkill(struct ieee80211_hw *hw); 113void rtl_init_rfkill(struct ieee80211_hw *hw);
114void rtl_deinit_rfkill(struct ieee80211_hw *hw); 114void rtl_deinit_rfkill(struct ieee80211_hw *hw);
115 115
116void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
116void rtl_watch_dog_timer_callback(unsigned long data); 117void rtl_watch_dog_timer_callback(unsigned long data);
117void rtl_deinit_deferred_work(struct ieee80211_hw *hw); 118void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
118 119
@@ -126,7 +127,12 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
126 u16 tid); 127 u16 tid);
127int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 128int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
128 u16 tid); 129 u16 tid);
130int rtl_rx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
131 u16 tid);
132int rtl_rx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
133 u16 tid);
129void rtl_watchdog_wq_callback(void *data); 134void rtl_watchdog_wq_callback(void *data);
135void rtl_fwevt_wq_callback(void *data);
130 136
131void rtl_get_tcb_desc(struct ieee80211_hw *hw, 137void rtl_get_tcb_desc(struct ieee80211_hw *hw,
132 struct ieee80211_tx_info *info, 138 struct ieee80211_tx_info *info,
@@ -134,14 +140,18 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
134 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc); 140 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
135 141
136int rtl_send_smps_action(struct ieee80211_hw *hw, 142int rtl_send_smps_action(struct ieee80211_hw *hw,
137 struct ieee80211_sta *sta, u8 *da, u8 *bssid, 143 struct ieee80211_sta *sta,
138 enum ieee80211_smps_mode smps); 144 enum ieee80211_smps_mode smps);
139u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); 145u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
140void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); 146void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
141u8 rtl_tid_to_ac(u8 tid); 147u8 rtl_tid_to_ac(u8 tid);
142extern struct attribute_group rtl_attribute_group; 148extern struct attribute_group rtl_attribute_group;
149void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
150extern struct rtl_global_var global_var;
143int rtlwifi_rate_mapping(struct ieee80211_hw *hw, 151int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
144 bool isht, u8 desc_rate, bool first_ampdu); 152 bool isht, u8 desc_rate, bool first_ampdu);
145bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); 153bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
154struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
155 u8 *sa, u8 *bssid, u16 tid);
146 156
147#endif 157#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d3ce9fbef00e..ee84844be008 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -104,9 +104,12 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
104 if (is_hal_stop(rtlhal)) 104 if (is_hal_stop(rtlhal))
105 return; 105 return;
106 106
107 /* here is must, because adhoc do stop and start,
108 * but stop with RFOFF may cause something wrong,
109 * like adhoc TP
110 */
107 if (unlikely(ppsc->rfpwr_state == ERFOFF)) { 111 if (unlikely(ppsc->rfpwr_state == ERFOFF)) {
108 rtl_ips_nic_on(hw); 112 rtl_ips_nic_on(hw);
109 mdelay(1);
110 } 113 }
111 114
112 mutex_lock(&rtlpriv->locks.conf_mutex); 115 mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -167,7 +170,11 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
167 rtl_ips_nic_on(hw); 170 rtl_ips_nic_on(hw);
168 171
169 mutex_lock(&rtlpriv->locks.conf_mutex); 172 mutex_lock(&rtlpriv->locks.conf_mutex);
170 switch (vif->type) { 173
174 switch (ieee80211_vif_type_p2p(vif)) {
175 case NL80211_IFTYPE_P2P_CLIENT:
176 mac->p2p = P2P_ROLE_CLIENT;
177 /*fall through*/
171 case NL80211_IFTYPE_STATION: 178 case NL80211_IFTYPE_STATION:
172 if (mac->beacon_enabled == 1) { 179 if (mac->beacon_enabled == 1) {
173 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 180 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
@@ -192,6 +199,9 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
192 (u8 *) (&mac->basic_rates)); 199 (u8 *) (&mac->basic_rates));
193 200
194 break; 201 break;
202 case NL80211_IFTYPE_P2P_GO:
203 mac->p2p = P2P_ROLE_GO;
204 /*fall through*/
195 case NL80211_IFTYPE_AP: 205 case NL80211_IFTYPE_AP:
196 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 206 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
197 "NL80211_IFTYPE_AP\n"); 207 "NL80211_IFTYPE_AP\n");
@@ -205,6 +215,19 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
205 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, 215 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
206 (u8 *) (&mac->basic_rates)); 216 (u8 *) (&mac->basic_rates));
207 break; 217 break;
218 case NL80211_IFTYPE_MESH_POINT:
219 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
220 "NL80211_IFTYPE_MESH_POINT\n");
221
222 mac->link_state = MAC80211_LINKED;
223 rtlpriv->cfg->ops->set_bcn_reg(hw);
224 if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
225 mac->basic_rates = 0xfff;
226 else
227 mac->basic_rates = 0xff0;
228 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
229 (u8 *)(&mac->basic_rates));
230 break;
208 default: 231 default:
209 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 232 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
210 "operation mode %d is not supported!\n", vif->type); 233 "operation mode %d is not supported!\n", vif->type);
@@ -212,6 +235,13 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
212 goto out; 235 goto out;
213 } 236 }
214 237
238 if (mac->p2p) {
239 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
240 "p2p role %x\n", vif->type);
241 mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
242 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
243 (u8 *)(&mac->basic_rates));
244 }
215 mac->vif = vif; 245 mac->vif = vif;
216 mac->opmode = vif->type; 246 mac->opmode = vif->type;
217 rtlpriv->cfg->ops->set_network_type(hw, vif->type); 247 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
@@ -232,9 +262,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
232 mutex_lock(&rtlpriv->locks.conf_mutex); 262 mutex_lock(&rtlpriv->locks.conf_mutex);
233 263
234 /* Free beacon resources */ 264 /* Free beacon resources */
235 if ((mac->opmode == NL80211_IFTYPE_AP) || 265 if ((vif->type == NL80211_IFTYPE_AP) ||
236 (mac->opmode == NL80211_IFTYPE_ADHOC) || 266 (vif->type == NL80211_IFTYPE_ADHOC) ||
237 (mac->opmode == NL80211_IFTYPE_MESH_POINT)) { 267 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
238 if (mac->beacon_enabled == 1) { 268 if (mac->beacon_enabled == 1) {
239 mac->beacon_enabled = 0; 269 mac->beacon_enabled = 0;
240 rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, 270 rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -247,6 +277,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
247 *Note: We assume NL80211_IFTYPE_UNSPECIFIED as 277 *Note: We assume NL80211_IFTYPE_UNSPECIFIED as
248 *NO LINK for our hardware. 278 *NO LINK for our hardware.
249 */ 279 */
280 mac->p2p = 0;
250 mac->vif = NULL; 281 mac->vif = NULL;
251 mac->link_state = MAC80211_NOLINK; 282 mac->link_state = MAC80211_NOLINK;
252 memset(mac->bssid, 0, 6); 283 memset(mac->bssid, 0, 6);
@@ -256,6 +287,22 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
256 mutex_unlock(&rtlpriv->locks.conf_mutex); 287 mutex_unlock(&rtlpriv->locks.conf_mutex);
257} 288}
258 289
290static int rtl_op_change_interface(struct ieee80211_hw *hw,
291 struct ieee80211_vif *vif,
292 enum nl80211_iftype new_type, bool p2p)
293{
294 struct rtl_priv *rtlpriv = rtl_priv(hw);
295 int ret;
296 rtl_op_remove_interface(hw, vif);
297
298 vif->type = new_type;
299 vif->p2p = p2p;
300 ret = rtl_op_add_interface(hw, vif);
301 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
302 "p2p %x\n", p2p);
303 return ret;
304}
305
259static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) 306static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
260{ 307{
261 struct rtl_priv *rtlpriv = rtl_priv(hw); 308 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -264,6 +311,9 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
264 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 311 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
265 struct ieee80211_conf *conf = &hw->conf; 312 struct ieee80211_conf *conf = &hw->conf;
266 313
314 if (mac->skip_scan)
315 return 1;
316
267 mutex_lock(&rtlpriv->locks.conf_mutex); 317 mutex_lock(&rtlpriv->locks.conf_mutex);
268 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/ 318 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/
269 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 319 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
@@ -320,9 +370,19 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
320 } 370 }
321 371
322 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 372 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
323 struct ieee80211_channel *channel = hw->conf.channel; 373 struct ieee80211_channel *channel = hw->conf.chandef.chan;
324 u8 wide_chan = (u8) channel->hw_value; 374 u8 wide_chan = (u8) channel->hw_value;
325 375
376 if (mac->act_scanning)
377 mac->n_channels++;
378
379 if (rtlpriv->dm.supp_phymode_switch &&
380 mac->link_state < MAC80211_LINKED &&
381 !mac->act_scanning) {
382 if (rtlpriv->cfg->ops->chk_switch_dmdp)
383 rtlpriv->cfg->ops->chk_switch_dmdp(hw);
384 }
385
326 /* 386 /*
327 *because we should back channel to 387 *because we should back channel to
328 *current_network.chan in in scanning, 388 *current_network.chan in in scanning,
@@ -332,7 +392,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
332 *info for cisco1253 bw20, so we modify 392 *info for cisco1253 bw20, so we modify
333 *it here based on UPPER & LOWER 393 *it here based on UPPER & LOWER
334 */ 394 */
335 switch (hw->conf.channel_type) { 395 switch (cfg80211_get_chandef_type(&hw->conf.chandef)) {
336 case NL80211_CHAN_HT20: 396 case NL80211_CHAN_HT20:
337 case NL80211_CHAN_NO_HT: 397 case NL80211_CHAN_NO_HT:
338 /* SC */ 398 /* SC */
@@ -373,13 +433,13 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
373 if (wide_chan <= 0) 433 if (wide_chan <= 0)
374 wide_chan = 1; 434 wide_chan = 1;
375 435
376 /* In scanning, before we go offchannel we may send a ps=1 null 436 /* In scanning, before we go offchannel we may send a ps = 1
377 * to AP, and then we may send a ps = 0 null to AP quickly, but 437 * null to AP, and then we may send a ps = 0 null to AP quickly,
378 * first null may have caused AP to put lots of packet to hw tx 438 * but first null may have caused AP to put lots of packet to
379 * buffer. These packets must be tx'd before we go off channel 439 * hw tx buffer. These packets must be tx'd before we go off
380 * so we must delay more time to let AP flush these packets 440 * channel so we must delay more time to let AP flush these
381 * before going offchannel, or dis-association or delete BA will 441 * packets before going offchannel, or dis-association or
382 * happen by AP 442 * delete BA will be caused by AP
383 */ 443 */
384 if (rtlpriv->mac80211.offchan_delay) { 444 if (rtlpriv->mac80211.offchan_delay) {
385 rtlpriv->mac80211.offchan_delay = false; 445 rtlpriv->mac80211.offchan_delay = false;
@@ -390,7 +450,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
390 rtlpriv->cfg->ops->switch_channel(hw); 450 rtlpriv->cfg->ops->switch_channel(hw);
391 rtlpriv->cfg->ops->set_channel_access(hw); 451 rtlpriv->cfg->ops->set_channel_access(hw);
392 rtlpriv->cfg->ops->set_bw_mode(hw, 452 rtlpriv->cfg->ops->set_bw_mode(hw,
393 hw->conf.channel_type); 453 cfg80211_get_chandef_type(&hw->conf.chandef));
394 } 454 }
395 455
396 mutex_unlock(&rtlpriv->locks.conf_mutex); 456 mutex_unlock(&rtlpriv->locks.conf_mutex);
@@ -441,7 +501,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
441 * and nolink check bssid is set in set network_type */ 501 * and nolink check bssid is set in set network_type */
442 if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) && 502 if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
443 (mac->link_state >= MAC80211_LINKED)) { 503 (mac->link_state >= MAC80211_LINKED)) {
444 if (mac->opmode != NL80211_IFTYPE_AP) { 504 if (mac->opmode != NL80211_IFTYPE_AP &&
505 mac->opmode != NL80211_IFTYPE_MESH_POINT) {
445 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) { 506 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
446 rtlpriv->cfg->ops->set_chk_bssid(hw, false); 507 rtlpriv->cfg->ops->set_chk_bssid(hw, false);
447 } else { 508 } else {
@@ -481,32 +542,43 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
481{ 542{
482 struct rtl_priv *rtlpriv = rtl_priv(hw); 543 struct rtl_priv *rtlpriv = rtl_priv(hw);
483 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 544 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
545 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
484 struct rtl_sta_info *sta_entry; 546 struct rtl_sta_info *sta_entry;
485 547
486 if (sta) { 548 if (sta) {
487 sta_entry = (struct rtl_sta_info *) sta->drv_priv; 549 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
550 spin_lock_bh(&rtlpriv->locks.entry_list_lock);
551 list_add_tail(&sta_entry->list, &rtlpriv->entry_list);
552 spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
488 if (rtlhal->current_bandtype == BAND_ON_2_4G) { 553 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
489 sta_entry->wireless_mode = WIRELESS_MODE_G; 554 sta_entry->wireless_mode = WIRELESS_MODE_G;
490 if (sta->supp_rates[0] <= 0xf) 555 if (sta->supp_rates[0] <= 0xf)
491 sta_entry->wireless_mode = WIRELESS_MODE_B; 556 sta_entry->wireless_mode = WIRELESS_MODE_B;
492 if (sta->ht_cap.ht_supported) 557 if (sta->ht_cap.ht_supported == true)
493 sta_entry->wireless_mode = WIRELESS_MODE_N_24G; 558 sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
559
560 if (vif->type == NL80211_IFTYPE_ADHOC)
561 sta_entry->wireless_mode = WIRELESS_MODE_G;
494 } else if (rtlhal->current_bandtype == BAND_ON_5G) { 562 } else if (rtlhal->current_bandtype == BAND_ON_5G) {
495 sta_entry->wireless_mode = WIRELESS_MODE_A; 563 sta_entry->wireless_mode = WIRELESS_MODE_A;
496 if (sta->ht_cap.ht_supported) 564 if (sta->ht_cap.ht_supported == true)
497 sta_entry->wireless_mode = WIRELESS_MODE_N_24G; 565 sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
498 }
499 566
500 /* I found some times mac80211 give wrong supp_rates for adhoc*/ 567 if (vif->type == NL80211_IFTYPE_ADHOC)
501 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) 568 sta_entry->wireless_mode = WIRELESS_MODE_A;
502 sta_entry->wireless_mode = WIRELESS_MODE_G; 569 }
570 /*disable cck rate for p2p*/
571 if (mac->p2p)
572 sta->supp_rates[0] &= 0xfffffff0;
503 573
574 memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
504 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 575 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
505 "Add sta addr is %pM\n", sta->addr); 576 "Add sta addr is %pM\n", sta->addr);
506 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 577 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
507 } 578 }
508 return 0; 579 return 0;
509} 580}
581
510static int rtl_op_sta_remove(struct ieee80211_hw *hw, 582static int rtl_op_sta_remove(struct ieee80211_hw *hw,
511 struct ieee80211_vif *vif, 583 struct ieee80211_vif *vif,
512 struct ieee80211_sta *sta) 584 struct ieee80211_sta *sta)
@@ -519,9 +591,14 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
519 sta_entry = (struct rtl_sta_info *) sta->drv_priv; 591 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
520 sta_entry->wireless_mode = 0; 592 sta_entry->wireless_mode = 0;
521 sta_entry->ratr_index = 0; 593 sta_entry->ratr_index = 0;
594
595 spin_lock_bh(&rtlpriv->locks.entry_list_lock);
596 list_del(&sta_entry->list);
597 spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
522 } 598 }
523 return 0; 599 return 0;
524} 600}
601
525static int _rtl_get_hal_qnum(u16 queue) 602static int _rtl_get_hal_qnum(u16 queue)
526{ 603{
527 int qnum; 604 int qnum;
@@ -547,8 +624,8 @@ static int _rtl_get_hal_qnum(u16 queue)
547} 624}
548 625
549/* 626/*
550 *for mac80211 VO=0, VI=1, BE=2, BK=3 627 *for mac80211 VO = 0, VI = 1, BE = 2, BK = 3
551 *for rtl819x BE=0, BK=1, VI=2, VO=3 628 *for rtl819x BE = 0, BK = 1, VI = 2, VO = 3
552 */ 629 */
553static int rtl_op_conf_tx(struct ieee80211_hw *hw, 630static int rtl_op_conf_tx(struct ieee80211_hw *hw,
554 struct ieee80211_vif *vif, u16 queue, 631 struct ieee80211_vif *vif, u16 queue,
@@ -630,6 +707,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
630 /*TODO: reference to enum ieee80211_bss_change */ 707 /*TODO: reference to enum ieee80211_bss_change */
631 if (changed & BSS_CHANGED_ASSOC) { 708 if (changed & BSS_CHANGED_ASSOC) {
632 if (bss_conf->assoc) { 709 if (bss_conf->assoc) {
710 struct ieee80211_sta *sta = NULL;
633 /* we should reset all sec info & cam 711 /* we should reset all sec info & cam
634 * before set cam after linked, we should not 712 * before set cam after linked, we should not
635 * reset in disassoc, that will cause tkip->wep 713 * reset in disassoc, that will cause tkip->wep
@@ -647,23 +725,39 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
647 725
648 if (rtlpriv->cfg->ops->linked_set_reg) 726 if (rtlpriv->cfg->ops->linked_set_reg)
649 rtlpriv->cfg->ops->linked_set_reg(hw); 727 rtlpriv->cfg->ops->linked_set_reg(hw);
650 if (mac->opmode == NL80211_IFTYPE_STATION && sta) 728 rcu_read_lock();
729 sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
730
731 if (vif->type == NL80211_IFTYPE_STATION && sta)
651 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); 732 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
733 RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
734 "send PS STATIC frame\n");
735 if (rtlpriv->dm.supp_phymode_switch) {
736 if (sta->ht_cap.ht_supported)
737 rtl_send_smps_action(hw, sta,
738 IEEE80211_SMPS_STATIC);
739 }
740 rcu_read_unlock();
741
652 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 742 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
653 "BSS_CHANGED_ASSOC\n"); 743 "BSS_CHANGED_ASSOC\n");
654 } else { 744 } else {
655 if (mac->link_state == MAC80211_LINKED) 745 if (mac->link_state == MAC80211_LINKED) {
656 rtl_lps_leave(hw); 746 rtlpriv->enter_ps = false;
747 schedule_work(&rtlpriv->works.lps_change_work);
748 }
657 749
750 if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
751 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
658 mac->link_state = MAC80211_NOLINK; 752 mac->link_state = MAC80211_NOLINK;
659 memset(mac->bssid, 0, 6); 753 memset(mac->bssid, 0, 6);
660
661 /* reset sec info */
662 rtl_cam_reset_sec_info(hw);
663
664 rtl_cam_reset_all_entry(hw);
665 mac->vendor = PEER_UNKNOWN; 754 mac->vendor = PEER_UNKNOWN;
666 755
756 if (rtlpriv->dm.supp_phymode_switch) {
757 if (rtlpriv->cfg->ops->chk_switch_dmdp)
758 rtlpriv->cfg->ops->chk_switch_dmdp(hw);
759 }
760
667 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 761 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
668 "BSS_CHANGED_UN_ASSOC\n"); 762 "BSS_CHANGED_UN_ASSOC\n");
669 } 763 }
@@ -778,7 +872,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
778 } 872 }
779 873
780 if (changed & BSS_CHANGED_BASIC_RATES) { 874 if (changed & BSS_CHANGED_BASIC_RATES) {
781 /* for 5G must << RATE_6M_INDEX=4, 875 /* for 5G must << RATE_6M_INDEX = 4,
782 * because 5G have no cck rate*/ 876 * because 5G have no cck rate*/
783 if (rtlhal->current_bandtype == BAND_ON_5G) 877 if (rtlhal->current_bandtype == BAND_ON_5G)
784 basic_rates = sta->supp_rates[1] << 4; 878 basic_rates = sta->supp_rates[1] << 4;
@@ -815,6 +909,9 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
815 ppsc->report_linked = false; 909 ppsc->report_linked = false;
816 } 910 }
817 } 911 }
912 if (rtlpriv->cfg->ops->bt_wifi_media_status_notify)
913 rtlpriv->cfg->ops->bt_wifi_media_status_notify(hw,
914 ppsc->report_linked);
818 } 915 }
819 916
820out: 917out:
@@ -885,7 +982,6 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
885 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 982 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
886 "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid); 983 "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
887 return rtl_tx_agg_stop(hw, sta, tid); 984 return rtl_tx_agg_stop(hw, sta, tid);
888 break;
889 case IEEE80211_AMPDU_TX_OPERATIONAL: 985 case IEEE80211_AMPDU_TX_OPERATIONAL:
890 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 986 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
891 "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid); 987 "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
@@ -894,11 +990,11 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
894 case IEEE80211_AMPDU_RX_START: 990 case IEEE80211_AMPDU_RX_START:
895 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 991 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
896 "IEEE80211_AMPDU_RX_START:TID:%d\n", tid); 992 "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
897 break; 993 return rtl_rx_agg_start(hw, sta, tid);
898 case IEEE80211_AMPDU_RX_STOP: 994 case IEEE80211_AMPDU_RX_STOP:
899 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 995 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
900 "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid); 996 "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
901 break; 997 return rtl_rx_agg_stop(hw, sta, tid);
902 default: 998 default:
903 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 999 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
904 "IEEE80211_AMPDU_ERR!!!!:\n"); 1000 "IEEE80211_AMPDU_ERR!!!!:\n");
@@ -912,12 +1008,20 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
912 struct rtl_priv *rtlpriv = rtl_priv(hw); 1008 struct rtl_priv *rtlpriv = rtl_priv(hw);
913 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1009 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
914 1010
915 mac->act_scanning = true;
916
917 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n"); 1011 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
1012 mac->act_scanning = true;
1013 if (rtlpriv->link_info.higher_busytraffic) {
1014 mac->skip_scan = true;
1015 return;
1016 }
918 1017
1018 if (rtlpriv->dm.supp_phymode_switch) {
1019 if (rtlpriv->cfg->ops->chk_switch_dmdp)
1020 rtlpriv->cfg->ops->chk_switch_dmdp(hw);
1021 }
919 if (mac->link_state == MAC80211_LINKED) { 1022 if (mac->link_state == MAC80211_LINKED) {
920 rtl_lps_leave(hw); 1023 rtlpriv->enter_ps = false;
1024 schedule_work(&rtlpriv->works.lps_change_work);
921 mac->link_state = MAC80211_LINKED_SCANNING; 1025 mac->link_state = MAC80211_LINKED_SCANNING;
922 } else { 1026 } else {
923 rtl_ips_nic_on(hw); 1027 rtl_ips_nic_on(hw);
@@ -937,6 +1041,16 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
937 1041
938 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n"); 1042 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
939 mac->act_scanning = false; 1043 mac->act_scanning = false;
1044 mac->skip_scan = false;
1045 if (rtlpriv->link_info.higher_busytraffic)
1046 return;
1047
1048 /*p2p will use 1/6/11 to scan */
1049 if (mac->n_channels == 3)
1050 mac->p2p_in_use = true;
1051 else
1052 mac->p2p_in_use = false;
1053 mac->n_channels = 0;
940 /* Dual mac */ 1054 /* Dual mac */
941 rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false; 1055 rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
942 1056
@@ -970,6 +1084,11 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
970 "not open hw encryption\n"); 1084 "not open hw encryption\n");
971 return -ENOSPC; /*User disabled HW-crypto */ 1085 return -ENOSPC; /*User disabled HW-crypto */
972 } 1086 }
1087 /* To support IBSS, use sw-crypto for GTK */
1088 if (((vif->type == NL80211_IFTYPE_ADHOC) ||
1089 (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
1090 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1091 return -ENOSPC;
973 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 1092 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
974 "%s hardware based encryption for keyidx: %d, mac: %pM\n", 1093 "%s hardware based encryption for keyidx: %d, mac: %pM\n",
975 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, 1094 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
@@ -996,6 +1115,14 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
996 key_type = AESCCMP_ENCRYPTION; 1115 key_type = AESCCMP_ENCRYPTION;
997 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n"); 1116 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
998 break; 1117 break;
1118 case WLAN_CIPHER_SUITE_AES_CMAC:
1119 /*HW doesn't support CMAC encryption, use software CMAC */
1120 key_type = AESCMAC_ENCRYPTION;
1121 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
1122 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1123 "HW don't support CMAC encryption, use software CMAC\n");
1124 err = -EOPNOTSUPP;
1125 goto out_unlock;
999 default: 1126 default:
1000 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "alg_err:%x!!!!\n", 1127 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "alg_err:%x!!!!\n",
1001 key->cipher); 1128 key->cipher);
@@ -1017,13 +1144,14 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1017 * 1) wep only: is just for wep enc, in this condition 1144 * 1) wep only: is just for wep enc, in this condition
1018 * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION 1145 * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
1019 * will be true & enable_hw_sec will be set when wep 1146 * will be true & enable_hw_sec will be set when wep
1020 * ke setting. 1147 * key setting.
1021 * 2) wep(group) + AES(pairwise): some AP like cisco 1148 * 2) wep(group) + AES(pairwise): some AP like cisco
1022 * may use it, in this condition enable_hw_sec will not 1149 * may use it, in this condition enable_hw_sec will not
1023 * be set when wep key setting */ 1150 * be set when wep key setting */
1024 /* we must reset sec_info after lingked before set key, 1151 /* we must reset sec_info after lingked before set key,
1025 * or some flag will be wrong*/ 1152 * or some flag will be wrong*/
1026 if (mac->opmode == NL80211_IFTYPE_AP) { 1153 if (vif->type == NL80211_IFTYPE_AP ||
1154 vif->type == NL80211_IFTYPE_MESH_POINT) {
1027 if (!group_key || key_type == WEP40_ENCRYPTION || 1155 if (!group_key || key_type == WEP40_ENCRYPTION ||
1028 key_type == WEP104_ENCRYPTION) { 1156 key_type == WEP104_ENCRYPTION) {
1029 if (group_key) 1157 if (group_key)
@@ -1098,12 +1226,16 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1098 key->hw_key_idx = key_idx; 1226 key->hw_key_idx = key_idx;
1099 if (key_type == TKIP_ENCRYPTION) 1227 if (key_type == TKIP_ENCRYPTION)
1100 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1228 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1229 /*use software CCMP encryption for management frames (MFP) */
1230 if (key_type == AESCCMP_ENCRYPTION)
1231 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1101 break; 1232 break;
1102 case DISABLE_KEY: 1233 case DISABLE_KEY:
1103 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 1234 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1104 "disable key delete one entry\n"); 1235 "disable key delete one entry\n");
1105 /*set local buf about wep key. */ 1236 /*set local buf about wep key. */
1106 if (mac->opmode == NL80211_IFTYPE_AP) { 1237 if (vif->type == NL80211_IFTYPE_AP ||
1238 vif->type == NL80211_IFTYPE_MESH_POINT) {
1107 if (sta) 1239 if (sta)
1108 rtl_cam_del_entry(hw, sta->addr); 1240 rtl_cam_del_entry(hw, sta->addr);
1109 } 1241 }
@@ -1163,10 +1295,10 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
1163} 1295}
1164 1296
1165/* this function is called by mac80211 to flush tx buffer 1297/* this function is called by mac80211 to flush tx buffer
1166 * before switch channle or power save, or tx buffer packet 1298 * before switch channel or power save, or tx buffer packet
1167 * maybe send after offchannel or rf sleep, this may cause 1299 * maybe send after offchannel or rf sleep, this may cause
1168 * dis-association by AP */ 1300 * dis-association by AP */
1169static void rtl_op_flush(struct ieee80211_hw *hw, bool drop) 1301static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1170{ 1302{
1171 struct rtl_priv *rtlpriv = rtl_priv(hw); 1303 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 1304
@@ -1180,6 +1312,7 @@ const struct ieee80211_ops rtl_ops = {
1180 .tx = rtl_op_tx, 1312 .tx = rtl_op_tx,
1181 .add_interface = rtl_op_add_interface, 1313 .add_interface = rtl_op_add_interface,
1182 .remove_interface = rtl_op_remove_interface, 1314 .remove_interface = rtl_op_remove_interface,
1315 .change_interface = rtl_op_change_interface,
1183 .config = rtl_op_config, 1316 .config = rtl_op_config,
1184 .configure_filter = rtl_op_configure_filter, 1317 .configure_filter = rtl_op_configure_filter,
1185 .sta_add = rtl_op_sta_add, 1318 .sta_add = rtl_op_sta_add,
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index bdda9b2fffe1..7d52d3d7769f 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -41,7 +41,10 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
41 COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC | 41 COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC |
42 COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS | 42 COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS |
43 COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD | 43 COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD |
44 COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN; 44 COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN |
45 COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 |
46 COMP_REGD | COMP_CHAN | COMP_BT_COEXIST;
47
45 48
46 for (i = 0; i < DBGP_TYPE_MAX; i++) 49 for (i = 0; i < DBGP_TYPE_MAX; i++)
47 rtlpriv->dbg.dbgp_type[i] = 0; 50 rtlpriv->dbg.dbgp_type[i] = 0;
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index fd3269f47685..6d669364e3d9 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -115,11 +115,11 @@
115/* Define EEPROM and EFUSE check module bit*/ 115/* Define EEPROM and EFUSE check module bit*/
116#define EEPROM_W BIT(0) 116#define EEPROM_W BIT(0)
117#define EFUSE_PG BIT(1) 117#define EFUSE_PG BIT(1)
118#define EFUSE_READ_ALL BIT(2) 118#define EFUSE_READ_ALL BIT(2)
119 119
120/* Define init check for module bit*/ 120/* Define init check for module bit*/
121#define INIT_EEPROM BIT(0) 121#define INIT_EEPROM BIT(0)
122#define INIT_TxPower BIT(1) 122#define INIT_TXPOWER BIT(1)
123#define INIT_IQK BIT(2) 123#define INIT_IQK BIT(2)
124#define INIT_RF BIT(3) 124#define INIT_RF BIT(3)
125 125
@@ -135,6 +135,15 @@
135#define PHY_TXPWR BIT(8) 135#define PHY_TXPWR BIT(8)
136#define PHY_PWRDIFF BIT(9) 136#define PHY_PWRDIFF BIT(9)
137 137
138/* Define Dynamic Mechanism check module bit --> FDM */
139#define WA_IOT BIT(0)
140#define DM_PWDB BIT(1)
141#define DM_MONITOR BIT(2)
142#define DM_DIG BIT(3)
143#define DM_EDCA_TURBO BIT(4)
144
145#define DM_PWDB BIT(1)
146
138enum dbgp_flag_e { 147enum dbgp_flag_e {
139 FQOS = 0, 148 FQOS = 0,
140 FTX = 1, 149 FTX = 1,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 8e2f9afb125a..9e3894178e77 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -35,8 +35,6 @@ static const u8 MAX_PGPKT_SIZE = 9;
35static const u8 PGPKT_DATA_SIZE = 8; 35static const u8 PGPKT_DATA_SIZE = 8;
36static const int EFUSE_MAX_SIZE = 512; 36static const int EFUSE_MAX_SIZE = 512;
37 37
38static const u8 EFUSE_OOB_PROTECT_BYTES = 15;
39
40static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = { 38static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
41 {0, 0, 0, 2}, 39 {0, 0, 0, 2},
42 {0, 1, 0, 2}, 40 {0, 1, 0, 2},
@@ -240,6 +238,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
240 u8 rtemp8[1]; 238 u8 rtemp8[1];
241 u16 efuse_addr = 0; 239 u16 efuse_addr = 0;
242 u8 offset, wren; 240 u8 offset, wren;
241 u8 u1temp = 0;
243 u16 i; 242 u16 i;
244 u16 j; 243 u16 j;
245 const u16 efuse_max_section = 244 const u16 efuse_max_section =
@@ -285,10 +284,31 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
285 } 284 }
286 285
287 while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_len)) { 286 while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_len)) {
288 offset = ((*rtemp8 >> 4) & 0x0f); 287 /* Check PG header for section num. */
288 if ((*rtemp8 & 0x1F) == 0x0F) {/* extended header */
289 u1temp = ((*rtemp8 & 0xE0) >> 5);
290 read_efuse_byte(hw, efuse_addr, rtemp8);
289 291
290 if (offset < efuse_max_section) { 292 if ((*rtemp8 & 0x0F) == 0x0F) {
293 efuse_addr++;
294 read_efuse_byte(hw, efuse_addr, rtemp8);
295
296 if (*rtemp8 != 0xFF &&
297 (efuse_addr < efuse_len)) {
298 efuse_addr++;
299 }
300 continue;
301 } else {
302 offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
303 wren = (*rtemp8 & 0x0F);
304 efuse_addr++;
305 }
306 } else {
307 offset = ((*rtemp8 >> 4) & 0x0f);
291 wren = (*rtemp8 & 0x0f); 308 wren = (*rtemp8 & 0x0f);
309 }
310
311 if (offset < efuse_max_section) {
292 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, 312 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
293 "offset-%d Worden=%x\n", offset, wren); 313 "offset-%d Worden=%x\n", offset, wren);
294 314
@@ -391,7 +411,8 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
391 efuse_used = rtlefuse->efuse_usedbytes; 411 efuse_used = rtlefuse->efuse_usedbytes;
392 412
393 if ((totalbytes + efuse_used) >= 413 if ((totalbytes + efuse_used) >=
394 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) 414 (EFUSE_MAX_SIZE -
415 rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
395 result = false; 416 result = false;
396 417
397 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 418 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
@@ -932,8 +953,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
932 u8 badworden = 0x0F; 953 u8 badworden = 0x0F;
933 static int repeat_times; 954 static int repeat_times;
934 955
935 if (efuse_get_current_size(hw) >= 956 if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE -
936 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) { 957 rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
937 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, 958 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
938 "efuse_pg_packet_write error\n"); 959 "efuse_pg_packet_write error\n");
939 return false; 960 return false;
@@ -949,8 +970,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
949 970
950 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n"); 971 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n");
951 972
952 while (continual && (efuse_addr < 973 while (continual && (efuse_addr < (EFUSE_MAX_SIZE -
953 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) { 974 rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) {
954 975
955 if (write_state == PG_STATE_HEADER) { 976 if (write_state == PG_STATE_HEADER) {
956 badworden = 0x0F; 977 badworden = 0x0F;
@@ -1003,7 +1024,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
1003 } 1024 }
1004 } 1025 }
1005 1026
1006 if (efuse_addr >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) { 1027 if (efuse_addr >= (EFUSE_MAX_SIZE -
1028 rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
1007 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 1029 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
1008 "efuse_addr(%#x) Out of size!!\n", efuse_addr); 1030 "efuse_addr(%#x) Out of size!!\n", efuse_addr);
1009 } 1031 }
@@ -1102,8 +1124,11 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
1102 u8 tempval; 1124 u8 tempval;
1103 u16 tmpV16; 1125 u16 tmpV16;
1104 1126
1105 if (pwrstate && (rtlhal->hw_type != 1127 if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) {
1106 HARDWARE_TYPE_RTL8192SE)) { 1128 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
1129 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS],
1130 0x69);
1131
1107 tmpV16 = rtl_read_word(rtlpriv, 1132 tmpV16 = rtl_read_word(rtlpriv,
1108 rtlpriv->cfg->maps[SYS_ISO_CTRL]); 1133 rtlpriv->cfg->maps[SYS_ISO_CTRL]);
1109 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { 1134 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
@@ -1153,6 +1178,10 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
1153 } 1178 }
1154 1179
1155 } else { 1180 } else {
1181 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE)
1182 rtl_write_byte(rtlpriv,
1183 rtlpriv->cfg->maps[EFUSE_ACCESS], 0);
1184
1156 if (write) { 1185 if (write) {
1157 tempval = rtl_read_byte(rtlpriv, 1186 tempval = rtl_read_byte(rtlpriv,
1158 rtlpriv->cfg->maps[EFUSE_TEST] + 1187 rtlpriv->cfg->maps[EFUSE_TEST] +
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 2bdea9a8699e..395a326acfb4 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -32,7 +32,6 @@
32 32
33#define EFUSE_IC_ID_OFFSET 506 33#define EFUSE_IC_ID_OFFSET 506
34 34
35#define EFUSE_REAL_CONTENT_LEN 512
36#define EFUSE_MAP_LEN 128 35#define EFUSE_MAP_LEN 128
37#define EFUSE_MAX_WORD_UNIT 4 36#define EFUSE_MAX_WORD_UNIT 4
38 37
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 4261e8ecc4c3..999ffc12578b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -59,7 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
59 59
60 if (unlikely(ieee80211_is_beacon(fc))) 60 if (unlikely(ieee80211_is_beacon(fc)))
61 return BEACON_QUEUE; 61 return BEACON_QUEUE;
62 if (ieee80211_is_mgmt(fc)) 62 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
63 return MGNT_QUEUE; 63 return MGNT_QUEUE;
64 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) 64 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
65 if (ieee80211_is_nullfunc(fc)) 65 if (ieee80211_is_nullfunc(fc))
@@ -271,9 +271,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
271 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 271 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
272 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 272 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
274 u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
275 u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
276 u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
277 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; 274 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
278 u8 num4bytes = pcipriv->ndis_adapter.num4bytes; 275 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
279 u16 aspmlevel; 276 u16 aspmlevel;
@@ -302,8 +299,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
302 u_pcibridge_aspmsetting); 299 u_pcibridge_aspmsetting);
303 300
304 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 301 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
305 "PlatformEnableASPM():PciBridge busnumber[%x], DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n", 302 "PlatformEnableASPM(): Write reg[%x] = %x\n",
306 pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
307 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10), 303 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
308 u_pcibridge_aspmsetting); 304 u_pcibridge_aspmsetting);
309 305
@@ -349,6 +345,49 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
349 return status; 345 return status;
350} 346}
351 347
348static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
349 struct rtl_priv **buddy_priv)
350{
351 struct rtl_priv *rtlpriv = rtl_priv(hw);
352 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
353 bool find_buddy_priv = false;
354 struct rtl_priv *tpriv = NULL;
355 struct rtl_pci_priv *tpcipriv = NULL;
356
357 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
358 list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
359 list) {
360 if (tpriv) {
361 tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
362 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
363 "pcipriv->ndis_adapter.funcnumber %x\n",
364 pcipriv->ndis_adapter.funcnumber);
365 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
366 "tpcipriv->ndis_adapter.funcnumber %x\n",
367 tpcipriv->ndis_adapter.funcnumber);
368
369 if ((pcipriv->ndis_adapter.busnumber ==
370 tpcipriv->ndis_adapter.busnumber) &&
371 (pcipriv->ndis_adapter.devnumber ==
372 tpcipriv->ndis_adapter.devnumber) &&
373 (pcipriv->ndis_adapter.funcnumber !=
374 tpcipriv->ndis_adapter.funcnumber)) {
375 find_buddy_priv = true;
376 break;
377 }
378 }
379 }
380 }
381
382 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
383 "find_buddy_priv %d\n", find_buddy_priv);
384
385 if (find_buddy_priv)
386 *buddy_priv = tpriv;
387
388 return find_buddy_priv;
389}
390
352static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw) 391static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
353{ 392{
354 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 393 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
@@ -420,17 +459,14 @@ static void _rtl_pci_io_handler_init(struct device *dev,
420 459
421} 460}
422 461
423static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
424{
425}
426
427static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw, 462static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
428 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid) 463 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
429{ 464{
430 struct rtl_priv *rtlpriv = rtl_priv(hw); 465 struct rtl_priv *rtlpriv = rtl_priv(hw);
431 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
432 u8 additionlen = FCS_LEN; 467 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
433 struct sk_buff *next_skb; 468 struct sk_buff *next_skb;
469 u8 additionlen = FCS_LEN;
434 470
435 /* here open is 4, wep/tkip is 8, aes is 12*/ 471 /* here open is 4, wep/tkip is 8, aes is 12*/
436 if (info->control.hw_key) 472 if (info->control.hw_key)
@@ -455,7 +491,7 @@ static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
455 next_skb)) 491 next_skb))
456 break; 492 break;
457 493
458 if (tcb_desc->empkt_num >= 5) 494 if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
459 break; 495 break;
460 } 496 }
461 spin_unlock_bh(&rtlpriv->locks.waitq_lock); 497 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
@@ -471,11 +507,17 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
471 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 507 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
472 struct sk_buff *skb = NULL; 508 struct sk_buff *skb = NULL;
473 struct ieee80211_tx_info *info = NULL; 509 struct ieee80211_tx_info *info = NULL;
510 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
474 int tid; 511 int tid;
475 512
476 if (!rtlpriv->rtlhal.earlymode_enable) 513 if (!rtlpriv->rtlhal.earlymode_enable)
477 return; 514 return;
478 515
516 if (rtlpriv->dm.supp_phymode_switch &&
517 (rtlpriv->easy_concurrent_ctl.switch_in_process ||
518 (rtlpriv->buddy_priv &&
519 rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
520 return;
479 /* we juse use em for BE/BK/VI/VO */ 521 /* we juse use em for BE/BK/VI/VO */
480 for (tid = 7; tid >= 0; tid--) { 522 for (tid = 7; tid >= 0; tid--) {
481 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)]; 523 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
@@ -487,7 +529,8 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
487 529
488 spin_lock_bh(&rtlpriv->locks.waitq_lock); 530 spin_lock_bh(&rtlpriv->locks.waitq_lock);
489 if (!skb_queue_empty(&mac->skb_waitq[tid]) && 531 if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
490 (ring->entries - skb_queue_len(&ring->queue) > 5)) { 532 (ring->entries - skb_queue_len(&ring->queue) >
533 rtlhal->max_earlymode_num)) {
491 skb = skb_dequeue(&mac->skb_waitq[tid]); 534 skb = skb_dequeue(&mac->skb_waitq[tid]);
492 } else { 535 } else {
493 spin_unlock_bh(&rtlpriv->locks.waitq_lock); 536 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
@@ -525,9 +568,8 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
525 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true, 568 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
526 HW_DESC_OWN); 569 HW_DESC_OWN);
527 570
528 /* 571 /*beacon packet will only use the first
529 *beacon packet will only use the first 572 *descriptor by defaut, and the own may not
530 *descriptor defautly,and the own may not
531 *be cleared by the hardware 573 *be cleared by the hardware
532 */ 574 */
533 if (own) 575 if (own)
@@ -558,8 +600,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
558 } 600 }
559 601
560 /* for sw LPS, just after NULL skb send out, we can 602 /* for sw LPS, just after NULL skb send out, we can
561 * sure AP kown we are sleeped, our we should not let 603 * sure AP knows we are sleeping, we should not let
562 * rf to sleep*/ 604 * rf sleep
605 */
563 fc = rtl_get_fc(skb); 606 fc = rtl_get_fc(skb);
564 if (ieee80211_is_nullfunc(fc)) { 607 if (ieee80211_is_nullfunc(fc)) {
565 if (ieee80211_has_pm(fc)) { 608 if (ieee80211_has_pm(fc)) {
@@ -569,6 +612,15 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
569 rtlpriv->psc.state_inap = false; 612 rtlpriv->psc.state_inap = false;
570 } 613 }
571 } 614 }
615 if (ieee80211_is_action(fc)) {
616 struct ieee80211_mgmt *action_frame =
617 (struct ieee80211_mgmt *)skb->data;
618 if (action_frame->u.action.u.ht_smps.action ==
619 WLAN_HT_ACTION_SMPS) {
620 dev_kfree_skb(skb);
621 goto tx_status_ok;
622 }
623 }
572 624
573 /* update tid tx pkt num */ 625 /* update tid tx pkt num */
574 tid = rtl_get_tid(skb); 626 tid = rtl_get_tid(skb);
@@ -602,7 +654,8 @@ tx_status_ok:
602 if (((rtlpriv->link_info.num_rx_inperiod + 654 if (((rtlpriv->link_info.num_rx_inperiod +
603 rtlpriv->link_info.num_tx_inperiod) > 8) || 655 rtlpriv->link_info.num_tx_inperiod) > 8) ||
604 (rtlpriv->link_info.num_rx_inperiod > 2)) { 656 (rtlpriv->link_info.num_rx_inperiod > 2)) {
605 schedule_work(&rtlpriv->works.lps_leave_work); 657 rtlpriv->enter_ps = false;
658 schedule_work(&rtlpriv->works.lps_change_work);
606 } 659 }
607} 660}
608 661
@@ -637,6 +690,10 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
637 rtlpriv->link_info.num_rx_inperiod++; 690 rtlpriv->link_info.num_rx_inperiod++;
638 } 691 }
639 692
693 /* static bcn for roaming */
694 rtl_beacon_statistic(hw, skb);
695 rtl_p2p_info(hw, (void *)skb->data, skb->len);
696
640 /* for sw lps */ 697 /* for sw lps */
641 rtl_swlps_beacon(hw, (void *)skb->data, skb->len); 698 rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
642 rtl_recognize_peer(hw, (void *)skb->data, skb->len); 699 rtl_recognize_peer(hw, (void *)skb->data, skb->len);
@@ -727,9 +784,10 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
727 _rtl_receive_one(hw, skb, rx_status); 784 _rtl_receive_one(hw, skb, rx_status);
728 785
729 if (((rtlpriv->link_info.num_rx_inperiod + 786 if (((rtlpriv->link_info.num_rx_inperiod +
730 rtlpriv->link_info.num_tx_inperiod) > 8) || 787 rtlpriv->link_info.num_tx_inperiod) > 8) ||
731 (rtlpriv->link_info.num_rx_inperiod > 2)) { 788 (rtlpriv->link_info.num_rx_inperiod > 2)) {
732 schedule_work(&rtlpriv->works.lps_leave_work); 789 rtlpriv->enter_ps = false;
790 schedule_work(&rtlpriv->works.lps_change_work);
733 } 791 }
734 792
735 dev_kfree_skb_any(skb); 793 dev_kfree_skb_any(skb);
@@ -803,7 +861,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
803 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n"); 861 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
804 } 862 }
805 863
806 if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) { 864 if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
807 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, 865 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
808 "prepare beacon for interrupt!\n"); 866 "prepare beacon for interrupt!\n");
809 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet); 867 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
@@ -884,6 +942,16 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
884 _rtl_pci_rx_interrupt(hw); 942 _rtl_pci_rx_interrupt(hw);
885 } 943 }
886 944
945 /*fw related*/
946 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
947 if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
948 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
949 "firmware interrupt!\n");
950 queue_delayed_work(rtlpriv->works.rtl_wq,
951 &rtlpriv->works.fwevt_wq, 0);
952 }
953 }
954
887 if (rtlpriv->rtlhal.earlymode_enable) 955 if (rtlpriv->rtlhal.earlymode_enable)
888 tasklet_schedule(&rtlpriv->works.irq_tasklet); 956 tasklet_schedule(&rtlpriv->works.irq_tasklet);
889 957
@@ -939,13 +1007,17 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
939 return; 1007 return;
940} 1008}
941 1009
942static void rtl_lps_leave_work_callback(struct work_struct *work) 1010static void rtl_lps_change_work_callback(struct work_struct *work)
943{ 1011{
944 struct rtl_works *rtlworks = 1012 struct rtl_works *rtlworks =
945 container_of(work, struct rtl_works, lps_leave_work); 1013 container_of(work, struct rtl_works, lps_change_work);
946 struct ieee80211_hw *hw = rtlworks->hw; 1014 struct ieee80211_hw *hw = rtlworks->hw;
1015 struct rtl_priv *rtlpriv = rtl_priv(hw);
947 1016
948 rtl_lps_leave(hw); 1017 if (rtlpriv->enter_ps)
1018 rtl_lps_enter(hw);
1019 else
1020 rtl_lps_leave(hw);
949} 1021}
950 1022
951static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) 1023static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
@@ -1009,7 +1081,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1009 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, 1081 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1010 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, 1082 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1011 (unsigned long)hw); 1083 (unsigned long)hw);
1012 INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback); 1084 INIT_WORK(&rtlpriv->works.lps_change_work,
1085 rtl_lps_change_work_callback);
1013} 1086}
1014 1087
1015static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, 1088static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1458,10 +1531,14 @@ static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
1458 struct rtl_priv *rtlpriv = rtl_priv(hw); 1531 struct rtl_priv *rtlpriv = rtl_priv(hw);
1459 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 1532 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1460 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1533 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1534 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1461 u16 i = 0; 1535 u16 i = 0;
1462 int queue_id; 1536 int queue_id;
1463 struct rtl8192_tx_ring *ring; 1537 struct rtl8192_tx_ring *ring;
1464 1538
1539 if (mac->skip_scan)
1540 return;
1541
1465 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) { 1542 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1466 u32 queue_len; 1543 u32 queue_len;
1467 ring = &pcipriv->dev.tx_ring[queue_id]; 1544 ring = &pcipriv->dev.tx_ring[queue_id];
@@ -1491,7 +1568,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
1491 1568
1492 synchronize_irq(rtlpci->pdev->irq); 1569 synchronize_irq(rtlpci->pdev->irq);
1493 tasklet_kill(&rtlpriv->works.irq_tasklet); 1570 tasklet_kill(&rtlpriv->works.irq_tasklet);
1494 cancel_work_sync(&rtlpriv->works.lps_leave_work); 1571 cancel_work_sync(&rtlpriv->works.lps_change_work);
1495 1572
1496 flush_workqueue(rtlpriv->works.rtl_wq); 1573 flush_workqueue(rtlpriv->works.rtl_wq);
1497 destroy_workqueue(rtlpriv->works.rtl_wq); 1574 destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1566,7 +1643,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1566 set_hal_stop(rtlhal); 1643 set_hal_stop(rtlhal);
1567 1644
1568 rtlpriv->cfg->ops->disable_interrupt(hw); 1645 rtlpriv->cfg->ops->disable_interrupt(hw);
1569 cancel_work_sync(&rtlpriv->works.lps_leave_work); 1646 cancel_work_sync(&rtlpriv->works.lps_change_work);
1570 1647
1571 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); 1648 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1572 while (ppsc->rfchange_inprogress) { 1649 while (ppsc->rfchange_inprogress) {
@@ -1673,6 +1750,10 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1673 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1750 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1674 "8192D PCI-E is found - vid/did=%x/%x\n", 1751 "8192D PCI-E is found - vid/did=%x/%x\n",
1675 venderid, deviceid); 1752 venderid, deviceid);
1753 } else if (deviceid == RTL_PCI_8188EE_DID) {
1754 rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
1755 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1756 "Find adapter, Hardware type is 8188EE\n");
1676 } else { 1757 } else {
1677 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 1758 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1678 "Err: Unknown device - vid/did=%x/%x\n", 1759 "Err: Unknown device - vid/did=%x/%x\n",
@@ -1704,6 +1785,9 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1704 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); 1785 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1705 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); 1786 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1706 1787
1788 /* some ARM have no bridge_pdev and will crash here
1789 * so we should check if bridge_pdev is NULL
1790 */
1707 if (bridge_pdev) { 1791 if (bridge_pdev) {
1708 /*find bridge info if available */ 1792 /*find bridge info if available */
1709 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; 1793 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
@@ -1758,6 +1842,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1758 pcipriv->ndis_adapter.amd_l1_patch); 1842 pcipriv->ndis_adapter.amd_l1_patch);
1759 1843
1760 rtl_pci_parse_configuration(pdev, hw); 1844 rtl_pci_parse_configuration(pdev, hw);
1845 list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
1761 1846
1762 return true; 1847 return true;
1763} 1848}
@@ -1804,6 +1889,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
1804 pci_set_drvdata(pdev, hw); 1889 pci_set_drvdata(pdev, hw);
1805 1890
1806 rtlpriv = hw->priv; 1891 rtlpriv = hw->priv;
1892 rtlpriv->hw = hw;
1807 pcipriv = (void *)rtlpriv->priv; 1893 pcipriv = (void *)rtlpriv->priv;
1808 pcipriv->dev.pdev = pdev; 1894 pcipriv->dev.pdev = pdev;
1809 init_completion(&rtlpriv->firmware_loading_complete); 1895 init_completion(&rtlpriv->firmware_loading_complete);
@@ -1812,6 +1898,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
1812 rtlpriv->rtlhal.interface = INTF_PCI; 1898 rtlpriv->rtlhal.interface = INTF_PCI;
1813 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); 1899 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1814 rtlpriv->intf_ops = &rtl_pci_ops; 1900 rtlpriv->intf_ops = &rtl_pci_ops;
1901 rtlpriv->glb_var = &global_var;
1815 1902
1816 /* 1903 /*
1817 *init dbgp flags before all 1904 *init dbgp flags before all
@@ -1916,7 +2003,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
1916 2003
1917fail3: 2004fail3:
1918 rtl_deinit_core(hw); 2005 rtl_deinit_core(hw);
1919 _rtl_pci_io_handler_release(hw);
1920 2006
1921 if (rtlpriv->io.pci_mem_start != 0) 2007 if (rtlpriv->io.pci_mem_start != 0)
1922 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); 2008 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
@@ -1965,14 +2051,15 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1965 2051
1966 rtl_pci_deinit(hw); 2052 rtl_pci_deinit(hw);
1967 rtl_deinit_core(hw); 2053 rtl_deinit_core(hw);
1968 _rtl_pci_io_handler_release(hw);
1969 rtlpriv->cfg->ops->deinit_sw_vars(hw); 2054 rtlpriv->cfg->ops->deinit_sw_vars(hw);
1970 2055
1971 if (rtlpci->irq_alloc) { 2056 if (rtlpci->irq_alloc) {
2057 synchronize_irq(rtlpci->pdev->irq);
1972 free_irq(rtlpci->pdev->irq, hw); 2058 free_irq(rtlpci->pdev->irq, hw);
1973 rtlpci->irq_alloc = 0; 2059 rtlpci->irq_alloc = 0;
1974 } 2060 }
1975 2061
2062 list_del(&rtlpriv->list);
1976 if (rtlpriv->io.pci_mem_start != 0) { 2063 if (rtlpriv->io.pci_mem_start != 0) {
1977 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); 2064 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1978 pci_release_regions(pdev); 2065 pci_release_regions(pdev);
@@ -2034,6 +2121,7 @@ struct rtl_intf_ops rtl_pci_ops = {
2034 .read_efuse_byte = read_efuse_byte, 2121 .read_efuse_byte = read_efuse_byte,
2035 .adapter_start = rtl_pci_start, 2122 .adapter_start = rtl_pci_start,
2036 .adapter_stop = rtl_pci_stop, 2123 .adapter_stop = rtl_pci_stop,
2124 .check_buddy_priv = rtl_pci_check_buddy_priv,
2037 .adapter_tx = rtl_pci_tx, 2125 .adapter_tx = rtl_pci_tx,
2038 .flush = rtl_pci_flush, 2126 .flush = rtl_pci_flush,
2039 .reset_trx_ring = rtl_pci_reset_trx_ring, 2127 .reset_trx_ring = rtl_pci_reset_trx_ring,
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 65b08f50022e..d3262ec45d23 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -94,6 +94,7 @@
94#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */ 94#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
95#define RTL_PCI_8192DE_DID 0x8193 /*8192de */ 95#define RTL_PCI_8192DE_DID 0x8193 /*8192de */
96#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/ 96#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/
97#define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/
97 98
98/*8192 support 16 pages of IO registers*/ 99/*8192 support 16 pages of IO registers*/
99#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000 100#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
@@ -175,6 +176,7 @@ struct rtl_pci {
175 /*irq */ 176 /*irq */
176 u8 irq_alloc; 177 u8 irq_alloc;
177 u32 irq_mask[2]; 178 u32 irq_mask[2];
179 u32 sys_irq_mask;
178 180
179 /*Bcn control register setting */ 181 /*Bcn control register setting */
180 u32 reg_bcn_ctrl_val; 182 u32 reg_bcn_ctrl_val;
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 13ad33e85577..884bceae38a9 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -180,6 +180,9 @@ void rtl_ips_nic_off_wq_callback(void *data)
180 return; 180 return;
181 } 181 }
182 182
183 if (mac->p2p_in_use)
184 return;
185
183 if (mac->link_state > MAC80211_NOLINK) 186 if (mac->link_state > MAC80211_NOLINK)
184 return; 187 return;
185 188
@@ -189,6 +192,9 @@ void rtl_ips_nic_off_wq_callback(void *data)
189 if (rtlpriv->sec.being_setkey) 192 if (rtlpriv->sec.being_setkey)
190 return; 193 return;
191 194
195 if (rtlpriv->cfg->ops->bt_coex_off_before_lps)
196 rtlpriv->cfg->ops->bt_coex_off_before_lps(hw);
197
192 if (ppsc->inactiveps) { 198 if (ppsc->inactiveps) {
193 rtstate = ppsc->rfpwr_state; 199 rtstate = ppsc->rfpwr_state;
194 200
@@ -231,6 +237,9 @@ void rtl_ips_nic_off(struct ieee80211_hw *hw)
231 &rtlpriv->works.ips_nic_off_wq, MSECS(100)); 237 &rtlpriv->works.ips_nic_off_wq, MSECS(100));
232} 238}
233 239
240/* NOTICE: any opmode should exc nic_on, or disable without
241 * nic_on may something wrong, like adhoc TP
242 */
234void rtl_ips_nic_on(struct ieee80211_hw *hw) 243void rtl_ips_nic_on(struct ieee80211_hw *hw)
235{ 244{
236 struct rtl_priv *rtlpriv = rtl_priv(hw); 245 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -299,7 +308,7 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
299 struct rtl_priv *rtlpriv = rtl_priv(hw); 308 struct rtl_priv *rtlpriv = rtl_priv(hw);
300 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 309 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
301 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 310 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
302 u8 rpwm_val, fw_pwrmode; 311 bool enter_fwlps;
303 312
304 if (mac->opmode == NL80211_IFTYPE_ADHOC) 313 if (mac->opmode == NL80211_IFTYPE_ADHOC)
305 return; 314 return;
@@ -324,43 +333,31 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
324 */ 333 */
325 334
326 if ((ppsc->fwctrl_lps) && ppsc->report_linked) { 335 if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
327 bool fw_current_inps;
328 if (ppsc->dot11_psmode == EACTIVE) { 336 if (ppsc->dot11_psmode == EACTIVE) {
329 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 337 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
330 "FW LPS leave ps_mode:%x\n", 338 "FW LPS leave ps_mode:%x\n",
331 FW_PS_ACTIVE_MODE); 339 FW_PS_ACTIVE_MODE);
332 340 enter_fwlps = false;
333 rpwm_val = 0x0C; /* RF on */ 341 ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
334 fw_pwrmode = FW_PS_ACTIVE_MODE; 342 ppsc->smart_ps = 0;
335 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
336 &rpwm_val);
337 rtlpriv->cfg->ops->set_hw_reg(hw, 343 rtlpriv->cfg->ops->set_hw_reg(hw,
338 HW_VAR_H2C_FW_PWRMODE, 344 HW_VAR_FW_LPS_ACTION,
339 &fw_pwrmode); 345 (u8 *)(&enter_fwlps));
340 fw_current_inps = false; 346 if (ppsc->p2p_ps_info.opp_ps)
341 347 rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
342 rtlpriv->cfg->ops->set_hw_reg(hw,
343 HW_VAR_FW_PSMODE_STATUS,
344 (u8 *) (&fw_current_inps));
345 348
346 } else { 349 } else {
347 if (rtl_get_fwlps_doze(hw)) { 350 if (rtl_get_fwlps_doze(hw)) {
348 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 351 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
349 "FW LPS enter ps_mode:%x\n", 352 "FW LPS enter ps_mode:%x\n",
350 ppsc->fwctrl_psmode); 353 ppsc->fwctrl_psmode);
351 354 enter_fwlps = true;
352 rpwm_val = 0x02; /* RF off */ 355 ppsc->pwr_mode = ppsc->fwctrl_psmode;
353 fw_current_inps = true; 356 ppsc->smart_ps = 2;
354 rtlpriv->cfg->ops->set_hw_reg(hw, 357 rtlpriv->cfg->ops->set_hw_reg(hw,
355 HW_VAR_FW_PSMODE_STATUS, 358 HW_VAR_FW_LPS_ACTION,
356 (u8 *) (&fw_current_inps)); 359 (u8 *)(&enter_fwlps));
357 rtlpriv->cfg->ops->set_hw_reg(hw,
358 HW_VAR_H2C_FW_PWRMODE,
359 &ppsc->fwctrl_psmode);
360 360
361 rtlpriv->cfg->ops->set_hw_reg(hw,
362 HW_VAR_SET_RPWM,
363 &rpwm_val);
364 } else { 361 } else {
365 /* Reset the power save related parameters. */ 362 /* Reset the power save related parameters. */
366 ppsc->dot11_psmode = EACTIVE; 363 ppsc->dot11_psmode = EACTIVE;
@@ -642,3 +639,286 @@ void rtl_swlps_wq_callback(void *data)
642 rtlpriv->psc.state = ps; 639 rtlpriv->psc.state = ps;
643 } 640 }
644} 641}
642
643static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
644 unsigned int len)
645{
646 struct rtl_priv *rtlpriv = rtl_priv(hw);
647 struct ieee80211_mgmt *mgmt = (void *)data;
648 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
649 u8 *pos, *end, *ie;
650 u16 noa_len;
651 static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
652 u8 noa_num, index, i, noa_index = 0;
653 bool find_p2p_ie = false , find_p2p_ps_ie = false;
654 pos = (u8 *)mgmt->u.beacon.variable;
655 end = data + len;
656 ie = NULL;
657
658 while (pos + 1 < end) {
659 if (pos + 2 + pos[1] > end)
660 return;
661
662 if (pos[0] == 221 && pos[1] > 4) {
663 if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) {
664 ie = pos + 2+4;
665 break;
666 }
667 }
668 pos += 2 + pos[1];
669 }
670
671 if (ie == NULL)
672 return;
673 find_p2p_ie = true;
674 /*to find noa ie*/
675 while (ie + 1 < end) {
676 noa_len = READEF2BYTE(&ie[1]);
677 if (ie + 3 + ie[1] > end)
678 return;
679
680 if (ie[0] == 12) {
681 find_p2p_ps_ie = true;
682 if ((noa_len - 2) % 13 != 0) {
683 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
684 "P2P notice of absence: invalid length.%d\n",
685 noa_len);
686 return;
687 } else {
688 noa_num = (noa_len - 2) / 13;
689 }
690 noa_index = ie[3];
691 if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
692 P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
693 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
694 "update NOA ie.\n");
695 p2pinfo->noa_index = noa_index;
696 p2pinfo->opp_ps = (ie[4] >> 7);
697 p2pinfo->ctwindow = ie[4] & 0x7F;
698 p2pinfo->noa_num = noa_num;
699 index = 5;
700 for (i = 0; i < noa_num; i++) {
701 p2pinfo->noa_count_type[i] =
702 READEF1BYTE(ie+index);
703 index += 1;
704 p2pinfo->noa_duration[i] =
705 READEF4BYTE(ie+index);
706 index += 4;
707 p2pinfo->noa_interval[i] =
708 READEF4BYTE(ie+index);
709 index += 4;
710 p2pinfo->noa_start_time[i] =
711 READEF4BYTE(ie+index);
712 index += 4;
713 }
714
715 if (p2pinfo->opp_ps == 1) {
716 p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
717 /* Driver should wait LPS entering
718 * CTWindow
719 */
720 if (rtlpriv->psc.fw_current_inpsmode)
721 rtl_p2p_ps_cmd(hw,
722 P2P_PS_ENABLE);
723 } else if (p2pinfo->noa_num > 0) {
724 p2pinfo->p2p_ps_mode = P2P_PS_NOA;
725 rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
726 } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
727 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
728 }
729 }
730 break;
731 }
732 ie += 3 + noa_len;
733 }
734
735 if (find_p2p_ie == true) {
736 if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) &&
737 (find_p2p_ps_ie == false))
738 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
739 }
740}
741
742static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
743 unsigned int len)
744{
745 struct rtl_priv *rtlpriv = rtl_priv(hw);
746 struct ieee80211_mgmt *mgmt = (void *)data;
747 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
748 u8 noa_num, index, i, noa_index = 0;
749 u8 *pos, *end, *ie;
750 u16 noa_len;
751 static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09};
752
753 pos = (u8 *)&mgmt->u.action.category;
754 end = data + len;
755 ie = NULL;
756
757 if (pos[0] == 0x7f) {
758 if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0)
759 ie = pos + 3+4;
760 }
761
762 if (ie == NULL)
763 return;
764
765 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
766 /*to find noa ie*/
767 while (ie + 1 < end) {
768 noa_len = READEF2BYTE(&ie[1]);
769 if (ie + 3 + ie[1] > end)
770 return;
771
772 if (ie[0] == 12) {
773 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
774 RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
775 ie, noa_len);
776 if ((noa_len - 2) % 13 != 0) {
777 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
778 "P2P notice of absence: invalid length.%d\n",
779 noa_len);
780 return;
781 } else {
782 noa_num = (noa_len - 2) / 13;
783 }
784 noa_index = ie[3];
785 if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
786 P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
787 p2pinfo->noa_index = noa_index;
788 p2pinfo->opp_ps = (ie[4] >> 7);
789 p2pinfo->ctwindow = ie[4] & 0x7F;
790 p2pinfo->noa_num = noa_num;
791 index = 5;
792 for (i = 0; i < noa_num; i++) {
793 p2pinfo->noa_count_type[i] =
794 READEF1BYTE(ie+index);
795 index += 1;
796 p2pinfo->noa_duration[i] =
797 READEF4BYTE(ie+index);
798 index += 4;
799 p2pinfo->noa_interval[i] =
800 READEF4BYTE(ie+index);
801 index += 4;
802 p2pinfo->noa_start_time[i] =
803 READEF4BYTE(ie+index);
804 index += 4;
805 }
806
807 if (p2pinfo->opp_ps == 1) {
808 p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
809 /* Driver should wait LPS entering
810 * CTWindow
811 */
812 if (rtlpriv->psc.fw_current_inpsmode)
813 rtl_p2p_ps_cmd(hw,
814 P2P_PS_ENABLE);
815 } else if (p2pinfo->noa_num > 0) {
816 p2pinfo->p2p_ps_mode = P2P_PS_NOA;
817 rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE);
818 } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
819 rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
820 }
821 }
822 break;
823 }
824 ie += 3 + noa_len;
825 }
826}
827
828void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
829{
830 struct rtl_priv *rtlpriv = rtl_priv(hw);
831 struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
832 struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
833
834 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state);
835 switch (p2p_ps_state) {
836 case P2P_PS_DISABLE:
837 p2pinfo->p2p_ps_state = p2p_ps_state;
838 rtlpriv->cfg->ops->set_hw_reg(hw,
839 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
840 (u8 *)(&p2p_ps_state));
841
842 p2pinfo->noa_index = 0;
843 p2pinfo->ctwindow = 0;
844 p2pinfo->opp_ps = 0;
845 p2pinfo->noa_num = 0;
846 p2pinfo->p2p_ps_mode = P2P_PS_NONE;
847 if (rtlps->fw_current_inpsmode == true) {
848 if (rtlps->smart_ps == 0) {
849 rtlps->smart_ps = 2;
850 rtlpriv->cfg->ops->set_hw_reg(hw,
851 HW_VAR_H2C_FW_PWRMODE,
852 (u8 *)(&rtlps->pwr_mode));
853 }
854 }
855 break;
856 case P2P_PS_ENABLE:
857 if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
858 p2pinfo->p2p_ps_state = p2p_ps_state;
859
860 if (p2pinfo->ctwindow > 0) {
861 if (rtlps->smart_ps != 0) {
862 rtlps->smart_ps = 0;
863 rtlpriv->cfg->ops->set_hw_reg(hw,
864 HW_VAR_H2C_FW_PWRMODE,
865 (u8 *)(&rtlps->pwr_mode));
866 }
867 }
868 rtlpriv->cfg->ops->set_hw_reg(hw,
869 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
870 (u8 *)(&p2p_ps_state));
871 }
872 break;
873 case P2P_PS_SCAN:
874 case P2P_PS_SCAN_DONE:
875 case P2P_PS_ALLSTASLEEP:
876 if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) {
877 p2pinfo->p2p_ps_state = p2p_ps_state;
878 rtlpriv->cfg->ops->set_hw_reg(hw,
879 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
880 (u8 *)(&p2p_ps_state));
881 }
882 break;
883 default:
884 break;
885 }
886 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
887 "ctwindow %x oppps %x\n", p2pinfo->ctwindow, p2pinfo->opp_ps);
888 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
889 "count %x duration %x index %x interval %x start time %x noa num %x\n",
890 p2pinfo->noa_count_type[0], p2pinfo->noa_duration[0],
891 p2pinfo->noa_index, p2pinfo->noa_interval[0],
892 p2pinfo->noa_start_time[0], p2pinfo->noa_num);
893 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
894}
895
896void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
897{
898 struct rtl_priv *rtlpriv = rtl_priv(hw);
899 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
900 struct ieee80211_hdr *hdr = (void *)data;
901
902 if (!mac->p2p)
903 return;
904 if (mac->link_state != MAC80211_LINKED)
905 return;
906 /* min. beacon length + FCS_LEN */
907 if (len <= 40 + FCS_LEN)
908 return;
909
910 /* and only beacons from the associated BSSID, please */
911 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
912 return;
913
914 /* check if this really is a beacon */
915 if (!(ieee80211_is_beacon(hdr->frame_control) ||
916 ieee80211_is_probe_resp(hdr->frame_control) ||
917 ieee80211_is_action(hdr->frame_control)))
918 return;
919
920 if (ieee80211_is_action(hdr->frame_control))
921 rtl_p2p_action_ie(hw, data, len - FCS_LEN);
922 else
923 rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
924}
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 1357856998c2..4d682b753f50 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -47,5 +47,7 @@ void rtl_swlps_wq_callback(void *data);
47void rtl_swlps_rfon_wq_callback(void *data); 47void rtl_swlps_rfon_wq_callback(void *data);
48void rtl_swlps_rf_awake(struct ieee80211_hw *hw); 48void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
49void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); 49void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
50void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
51void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
50 52
51#endif 53#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
new file mode 100644
index 000000000000..5b194e97f4b3
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
@@ -0,0 +1,16 @@
1rtl8188ee-objs := \
2 dm.o \
3 fw.o \
4 hw.o \
5 led.o \
6 phy.o \
7 pwrseq.o \
8 pwrseqcmd.o \
9 rf.o \
10 sw.o \
11 table.o \
12 trx.o
13
14obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
15
16ccflags-y += -Idrivers/net/wireless/rtlwifi -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h
new file mode 100644
index 000000000000..c764fff9ebe6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h
@@ -0,0 +1,324 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_DEF_H__
31#define __RTL92C_DEF_H__
32
33#define HAL_RETRY_LIMIT_INFRA 48
34#define HAL_RETRY_LIMIT_AP_ADHOC 7
35
36#define RESET_DELAY_8185 20
37
38#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
39#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
40
41#define NUM_OF_FIRMWARE_QUEUE 10
42#define NUM_OF_PAGES_IN_FW 0x100
43#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
44#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
45#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
46#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
47#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
48#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
49#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
50#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
51#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
52#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
53
54#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
55#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
56#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
57#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
58#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
59
60#define MAX_LINES_HWCONFIG_TXT 1000
61#define MAX_BYTES_LINE_HWCONFIG_TXT 256
62
63#define SW_THREE_WIRE 0
64#define HW_THREE_WIRE 2
65
66#define BT_DEMO_BOARD 0
67#define BT_QA_BOARD 1
68#define BT_FPGA 2
69
70#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
71#define HAL_PRIME_CHNL_OFFSET_LOWER 1
72#define HAL_PRIME_CHNL_OFFSET_UPPER 2
73
74#define MAX_H2C_QUEUE_NUM 10
75
76#define RX_MPDU_QUEUE 0
77#define RX_CMD_QUEUE 1
78#define RX_MAX_QUEUE 2
79#define AC2QUEUEID(_AC) (_AC)
80
81#define C2H_RX_CMD_HDR_LEN 8
82#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
83 LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
84#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
85 LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
86#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
87 LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
88#define GET_C2H_CMD_CONTINUE(__prxhdr) \
89 LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
90#define GET_C2H_CMD_CONTENT(__prxhdr) \
91 ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
92
93#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
94 LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
95#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
96 LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
97#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
98 LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
99#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
100 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
101#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
102 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
103#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
104 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
105#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
106 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
107#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
108 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
109#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
110 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
111
112#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
113
114
115/* [15:12] IC version(CUT): A-cut=0, B-cut=1, C-cut=2, D-cut=3
116 * [7] Manufacturer: TSMC=0, UMC=1
117 * [6:4] RF type: 1T1R=0, 1T2R=1, 2T2R=2
118 * [3] Chip type: TEST=0, NORMAL=1
119 * [2:0] IC type: 81xxC=0, 8723=1, 92D=2
120 */
121#define CHIP_8723 BIT(0)
122#define CHIP_92D BIT(1)
123#define NORMAL_CHIP BIT(3)
124#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6)))
125#define RF_TYPE_1T2R BIT(4)
126#define RF_TYPE_2T2R BIT(5)
127#define CHIP_VENDOR_UMC BIT(7)
128#define B_CUT_VERSION BIT(12)
129#define C_CUT_VERSION BIT(13)
130#define D_CUT_VERSION ((BIT(12)|BIT(13)))
131#define E_CUT_VERSION BIT(14)
132
133
134/* MASK */
135#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
136#define CHIP_TYPE_MASK BIT(3)
137#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6))
138#define MANUFACTUER_MASK BIT(7)
139#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8))
140#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12))
141
142/* Get element */
143#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
144#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
145#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
146#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
147#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
148#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
149
150
151#define IS_81XXC(version) \
152 ((GET_CVID_IC_TYPE(version) == 0) ? true : false)
153#define IS_8723_SERIES(version) \
154 ((GET_CVID_IC_TYPE(version) == CHIP_8723) ? true : false)
155#define IS_92D(version) \
156 ((GET_CVID_IC_TYPE(version) == CHIP_92D) ? true : false)
157
158#define IS_NORMAL_CHIP(version) \
159 ((GET_CVID_CHIP_TYPE(version)) ? true : false)
160#define IS_NORMAL_CHIP92D(version) \
161 ((GET_CVID_CHIP_TYPE(version)) ? true : false)
162
163#define IS_1T1R(version) \
164 ((GET_CVID_RF_TYPE(version)) ? false : true)
165#define IS_1T2R(version) \
166 ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
167#define IS_2T2R(version) \
168 ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
169#define IS_CHIP_VENDOR_UMC(version) \
170 ((GET_CVID_MANUFACTUER(version)) ? true : false)
171
172#define IS_92C_SERIAL(version) \
173 ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
174#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
175 (IS_81XXC(version) ? ((IS_CHIP_VENDOR_UMC(version)) ? \
176 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) : false)
177#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
178 (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
179 ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true \
180 : false) : false) : false)
181
/* Silicon revision values reported for the RTL8188E. */
enum version_8188e {
	VERSION_TEST_CHIP_88E = 0x00,
	VERSION_NORMAL_CHIP_88E = 0x01,
	VERSION_UNKNOWN = 0xFF,
};

/* Classification of entries delivered on the RX path. */
enum rx_packet_type {
	NORMAL_RX,
	TX_REPORT1,
	TX_REPORT2,
	HIS_REPORT,
};

/* Loopback test modes for the MAC/baseband datapath. */
enum rtl819x_loopback_e {
	RTL819X_NO_LOOPBACK = 0,
	RTL819X_MAC_LOOPBACK = 1,
	RTL819X_DMA_LOOPBACK = 2,
	RTL819X_CCK_LOOPBACK = 3,
};

/* How RF register accesses are performed. */
enum rf_optype {
	RF_OP_BY_SW_3WIRE = 0,
	RF_OP_BY_FW,
	RF_OP_MAX
};

/* RF front-end power states. */
enum rf_power_state {
	RF_ON,
	RF_OFF,
	RF_SLEEP,
	RF_SHUT_DOWN,
};

/* Coarse power-save operating modes. */
enum power_save_mode {
	POWER_SAVE_MODE_ACTIVE,
	POWER_SAVE_MODE_SAVE,
};

/* Power-saving policy presets (spelling kept for compatibility). */
enum power_polocy_config {
	POWERCFG_MAX_POWER_SAVINGS,
	POWERCFG_GLOBAL_POWER_SAVINGS,
	POWERCFG_LOCAL_POWER_SAVINGS,
	POWERCFG_LENOVO,
};

/* Hardware interface strap selection for PCI(e) parts. */
enum interface_select_pci {
	INTF_SEL1_MINICARD,
	INTF_SEL0_PCIE,
	INTF_SEL2_RSV,
	INTF_SEL3_RSV,
};

/* Firmware-to-host (C2H) command identifiers. */
enum hal_fw_c2h_cmd_id {
	HAL_FW_C2H_CMD_Read_MACREG,
	HAL_FW_C2H_CMD_Read_BBREG,
	HAL_FW_C2H_CMD_Read_RFREG,
	HAL_FW_C2H_CMD_Read_EEPROM,
	HAL_FW_C2H_CMD_Read_EFUSE,
	HAL_FW_C2H_CMD_Read_CAM,
	HAL_FW_C2H_CMD_Get_BasicRate,
	HAL_FW_C2H_CMD_Get_DataRate,
	HAL_FW_C2H_CMD_Survey,
	HAL_FW_C2H_CMD_SurveyDone,
	HAL_FW_C2H_CMD_JoinBss,
	HAL_FW_C2H_CMD_AddSTA,
	HAL_FW_C2H_CMD_DelSTA,
	HAL_FW_C2H_CMD_AtimDone,
	HAL_FW_C2H_CMD_TX_Report,
	HAL_FW_C2H_CMD_CCX_Report,
	HAL_FW_C2H_CMD_DTM_Report,
	HAL_FW_C2H_CMD_TX_Rate_Statistics,
	HAL_FW_C2H_CMD_C2HLBK,
	HAL_FW_C2H_CMD_C2HDBG,
	HAL_FW_C2H_CMD_C2HFEEDBACK,
	HAL_FW_C2H_CMD_MAX
};

/* Wake-on-WLAN trigger configuration. */
enum wake_on_wlan_mode {
	ewowlandisable,
	ewakeonmagicpacketonly,
	ewakeonpatternmatchonly,
	ewakeonbothtypepacket
};

/* TX descriptor queue-select values. */
enum rtl_desc_qsel {
	QSLT_BK = 0x2,
	QSLT_BE = 0x0,
	QSLT_VI = 0x5,
	QSLT_VO = 0x7,
	QSLT_BEACON = 0x10,
	QSLT_HIGH = 0x11,
	QSLT_MGNT = 0x12,
	QSLT_CMD = 0x13,
};

/* Rate codes used in TX/RX descriptors (CCK, OFDM, then HT MCS). */
enum rtl_desc92c_rate {
	DESC92C_RATE1M = 0x00,
	DESC92C_RATE2M = 0x01,
	DESC92C_RATE5_5M = 0x02,
	DESC92C_RATE11M = 0x03,

	DESC92C_RATE6M = 0x04,
	DESC92C_RATE9M = 0x05,
	DESC92C_RATE12M = 0x06,
	DESC92C_RATE18M = 0x07,
	DESC92C_RATE24M = 0x08,
	DESC92C_RATE36M = 0x09,
	DESC92C_RATE48M = 0x0a,
	DESC92C_RATE54M = 0x0b,

	DESC92C_RATEMCS0 = 0x0c,
	DESC92C_RATEMCS1 = 0x0d,
	DESC92C_RATEMCS2 = 0x0e,
	DESC92C_RATEMCS3 = 0x0f,
	DESC92C_RATEMCS4 = 0x10,
	DESC92C_RATEMCS5 = 0x11,
	DESC92C_RATEMCS6 = 0x12,
	DESC92C_RATEMCS7 = 0x13,
	DESC92C_RATEMCS8 = 0x14,
	DESC92C_RATEMCS9 = 0x15,
	DESC92C_RATEMCS10 = 0x16,
	DESC92C_RATEMCS11 = 0x17,
	DESC92C_RATEMCS12 = 0x18,
	DESC92C_RATEMCS13 = 0x19,
	DESC92C_RATEMCS14 = 0x1a,
	DESC92C_RATEMCS15 = 0x1b,
	DESC92C_RATEMCS15_SG = 0x1c,
	DESC92C_RATEMCS32 = 0x20,
};

/* CCK PHY status report as delivered by the baseband. */
struct phy_sts_cck_8192s_t {
	u8 adc_pwdb_X[4];
	u8 sq_rpt;
	u8 cck_agc_rpt;
};

/* Host-to-card (H2C) command descriptor. */
struct h2c_cmd_8192c {
	u8 element_id;		/* H2C command ID */
	u32 cmd_len;		/* length of *p_cmdbuffer in bytes */
	u8 *p_cmdbuffer;	/* command payload */
};
323
324#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
new file mode 100644
index 000000000000..21a5cf060677
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -0,0 +1,1794 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../base.h"
32#include "../pci.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "dm.h"
37#include "fw.h"
38#include "trx.h"
39
40static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
41 0x7f8001fe, /* 0, +6.0dB */
42 0x788001e2, /* 1, +5.5dB */
43 0x71c001c7, /* 2, +5.0dB */
44 0x6b8001ae, /* 3, +4.5dB */
45 0x65400195, /* 4, +4.0dB */
46 0x5fc0017f, /* 5, +3.5dB */
47 0x5a400169, /* 6, +3.0dB */
48 0x55400155, /* 7, +2.5dB */
49 0x50800142, /* 8, +2.0dB */
50 0x4c000130, /* 9, +1.5dB */
51 0x47c0011f, /* 10, +1.0dB */
52 0x43c0010f, /* 11, +0.5dB */
53 0x40000100, /* 12, +0dB */
54 0x3c8000f2, /* 13, -0.5dB */
55 0x390000e4, /* 14, -1.0dB */
56 0x35c000d7, /* 15, -1.5dB */
57 0x32c000cb, /* 16, -2.0dB */
58 0x300000c0, /* 17, -2.5dB */
59 0x2d4000b5, /* 18, -3.0dB */
60 0x2ac000ab, /* 19, -3.5dB */
61 0x288000a2, /* 20, -4.0dB */
62 0x26000098, /* 21, -4.5dB */
63 0x24000090, /* 22, -5.0dB */
64 0x22000088, /* 23, -5.5dB */
65 0x20000080, /* 24, -6.0dB */
66 0x1e400079, /* 25, -6.5dB */
67 0x1c800072, /* 26, -7.0dB */
68 0x1b00006c, /* 27. -7.5dB */
69 0x19800066, /* 28, -8.0dB */
70 0x18000060, /* 29, -8.5dB */
71 0x16c0005b, /* 30, -9.0dB */
72 0x15800056, /* 31, -9.5dB */
73 0x14400051, /* 32, -10.0dB */
74 0x1300004c, /* 33, -10.5dB */
75 0x12000048, /* 34, -11.0dB */
76 0x11000044, /* 35, -11.5dB */
77 0x10000040, /* 36, -12.0dB */
78 0x0f00003c, /* 37, -12.5dB */
79 0x0e400039, /* 38, -13.0dB */
80 0x0d800036, /* 39, -13.5dB */
81 0x0cc00033, /* 40, -14.0dB */
82 0x0c000030, /* 41, -14.5dB */
83 0x0b40002d, /* 42, -15.0dB */
84};
85
86static const u8 cck_tbl_ch1_13[CCK_TABLE_SIZE][8] = {
87 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
88 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
89 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
90 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
91 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
92 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
93 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
94 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
95 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
96 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
97 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
98 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
99 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
100 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
101 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
102 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
103 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
104 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
105 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
106 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
107 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB*/
108 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB*/
109 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB*/
110 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB*/
111 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB*/
112 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB*/
113 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB*/
114 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB*/
115 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB*/
116 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB*/
117 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB*/
118 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB*/
119 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB*/
120};
121
122static const u8 cck_tbl_ch14[CCK_TABLE_SIZE][8] = {
123 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
124 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
125 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
126 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
127 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
128 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
129 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
130 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
131 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
132 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
133 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
134 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
135 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
136 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
137 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
138 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
139 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
140 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
141 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
142 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
143 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB*/
144 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB*/
145 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB*/
146 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB*/
147 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB*/
148 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB*/
149 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB*/
150 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB*/
151 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB*/
152 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB*/
153 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB*/
154 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB*/
155 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB*/
156};
157
/* Find the swing-table offset for thermal delta '_del' in direction '_dir':
 * scan thermal_threshold[_dir][] until the delta no longer fits, step back
 * one slot (unless at slot 0), and clamp to the last entry when the delta
 * exceeds every threshold in the table.
 */
#define CAL_SWING_OFF(_off, _dir, _size, _del) \
	do { \
		for (_off = 0; _off < _size; _off++) { \
			if (_del < thermal_threshold[_dir][_off]) { \
				if (_off != 0) \
					_off--; \
				break; \
			} \
		} \
		if (_off >= _size) \
			_off = _size - 1; \
	} while (0)
170
/* Program the OFDM TX IQ-imbalance correction registers for @rfpath.
 * When an IQK result is available (iqk_result_x != 0), the 10-bit signed
 * X/Y results are scaled by the current BB swing entry and packed into the
 * A/C/D correction elements; otherwise the raw swing-table value is written
 * and the extra correction bits are cleared.
 */
static void rtl88e_set_iqk_matrix(struct ieee80211_hw *hw,
				  u8 ofdm_index, u8 rfpath,
				  long iqk_result_x, long iqk_result_y)
{
	long ele_a = 0, ele_d, ele_c = 0, value32;

	/* D element: top 10 bits of the swing-table entry */
	ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000)>>22;

	if (iqk_result_x != 0) {
		/* sign-extend the 10-bit two's-complement X result */
		if ((iqk_result_x & 0x00000200) != 0)
			iqk_result_x = iqk_result_x | 0xFFFFFC00;
		ele_a = ((iqk_result_x * ele_d)>>8)&0x000003FF;

		/* sign-extend the 10-bit two's-complement Y result */
		if ((iqk_result_y & 0x00000200) != 0)
			iqk_result_y = iqk_result_y | 0xFFFFFC00;
		ele_c = ((iqk_result_y * ele_d)>>8)&0x000003FF;

		switch (rfpath) {
		case RF90_PATH_A:
			/* D, low 6 bits of C, and A packed into one dword */
			value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a;
			rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD,
				      value32);
			/* remaining high 4 bits of C */
			value32 = (ele_c & 0x000003C0) >> 6;
			rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
			value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
			rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), value32);
			break;
		case RF90_PATH_B:
			value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a;
			rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL,
				      MASKDWORD, value32);
			value32 = (ele_c & 0x000003C0) >> 6;
			rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32);
			value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
			rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), value32);
			break;
		default:
			break;
		}
	} else {
		/* no IQK data: write the plain swing value, clear corrections */
		switch (rfpath) {
		case RF90_PATH_A:
			rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD,
				      ofdmswing_table[ofdm_index]);
			rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
			rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), 0x00);
			break;
		case RF90_PATH_B:
			rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL, MASKDWORD,
				      ofdmswing_table[ofdm_index]);
			rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00);
			rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), 0x00);
			break;
		default:
			break;
		}
	}
}
229
230void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
231 u8 type, u8 *pdirection, u32 *poutwrite_val)
232{
233 struct rtl_priv *rtlpriv = rtl_priv(hw);
234 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
235 u8 pwr_val = 0;
236 u8 cck_base = rtldm->swing_idx_cck_base;
237 u8 cck_val = rtldm->swing_idx_cck;
238 u8 ofdm_base = rtldm->swing_idx_ofdm_base;
239 u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
240
241 if (type == 0) {
242 if (ofdm_val <= ofdm_base) {
243 *pdirection = 1;
244 pwr_val = ofdm_base - ofdm_val;
245 } else {
246 *pdirection = 2;
247 pwr_val = ofdm_val - ofdm_base;
248 }
249 } else if (type == 1) {
250 if (cck_val <= cck_base) {
251 *pdirection = 1;
252 pwr_val = cck_base - cck_val;
253 } else {
254 *pdirection = 2;
255 pwr_val = cck_val - cck_base;
256 }
257 }
258
259 if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
260 pwr_val = TXPWRTRACK_MAX_IDX;
261
262 *poutwrite_val = pwr_val | (pwr_val << 8) | (pwr_val << 16) |
263 (pwr_val << 24);
264}
265
266
/* Apply a pending TX power-tracking update.
 * TXAGC: if either swing flag is pending, rewrite the TX power level for
 * the current channel and clear both flags.
 * BBSWING: reload the 8-byte CCK swing entry into regs 0xa22..0xa29 and
 * reprogram the OFDM IQK matrix for @rfpath.
 */
static void rtl88e_chk_tx_track(struct ieee80211_hw *hw,
				enum pwr_track_control_method method,
				u8 rfpath, u8 index)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	int jj = rtldm->swing_idx_cck;	/* row into the CCK swing tables */
	int i;

	if (method == TXAGC) {
		if (rtldm->swing_flag_ofdm == true ||
		    rtldm->swing_flag_cck == true) {
			u8 chan = rtlphy->current_channel;
			rtl88e_phy_set_txpower_level(hw, chan);
			rtldm->swing_flag_ofdm = false;
			rtldm->swing_flag_cck = false;
		}
	} else if (method == BBSWING) {
		/* channel 14 uses a different CCK coefficient table */
		if (!rtldm->cck_inch14) {
			for (i = 0; i < 8; i++)
				rtl_write_byte(rtlpriv, 0xa22 + i,
					       cck_tbl_ch1_13[jj][i]);
		} else {
			for (i = 0; i < 8; i++)
				rtl_write_byte(rtlpriv, 0xa22 + i,
					       cck_tbl_ch14[jj][i]);
		}

		if (rfpath == RF90_PATH_A) {
			long x = rtlphy->iqk_matrix[index].value[0][0];
			long y = rtlphy->iqk_matrix[index].value[0][1];
			u8 indx = rtldm->swing_idx_ofdm[rfpath];
			rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y);
		} else if (rfpath == RF90_PATH_B) {
			/* NOTE(review): path B indexes iqk_matrix with the
			 * OFDM swing index while path A uses the 'index'
			 * argument -- asymmetric; confirm this is intended. */
			u8 indx = rtldm->swing_idx_ofdm[rfpath];
			long x = rtlphy->iqk_matrix[indx].value[0][4];
			long y = rtlphy->iqk_matrix[indx].value[0][5];
			rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y);
		}
	} else {
		return;
	}
}
311
312static void rtl88e_dm_diginit(struct ieee80211_hw *hw)
313{
314 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
316
317 dm_dig->dig_enable_flag = true;
318 dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
319 dm_dig->pre_igvalue = 0;
320 dm_dig->cursta_cstate = DIG_STA_DISCONNECT;
321 dm_dig->presta_cstate = DIG_STA_DISCONNECT;
322 dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
323 dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
324 dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
325 dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
326 dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
327 dm_dig->rx_gain_max = DM_DIG_MAX;
328 dm_dig->rx_gain_min = DM_DIG_MIN;
329 dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
330 dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
331 dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
332 dm_dig->pre_cck_cca_thres = 0xff;
333 dm_dig->cur_cck_cca_thres = 0x83;
334 dm_dig->forbidden_igi = DM_DIG_MIN;
335 dm_dig->large_fa_hit = 0;
336 dm_dig->recover_cnt = 0;
337 dm_dig->dig_min_0 = 0x25;
338 dm_dig->dig_min_1 = 0x25;
339 dm_dig->media_connect_0 = false;
340 dm_dig->media_connect_1 = false;
341 rtlpriv->dm.dm_initialgain_enable = true;
342}
343
344static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
345{
346 struct rtl_priv *rtlpriv = rtl_priv(hw);
347 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
348 long rssi_val_min = 0;
349
350 if ((dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
351 (dm_dig->cursta_cstate == DIG_STA_CONNECT)) {
352 if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
353 rssi_val_min =
354 (rtlpriv->dm.entry_min_undec_sm_pwdb >
355 rtlpriv->dm.undec_sm_pwdb) ?
356 rtlpriv->dm.undec_sm_pwdb :
357 rtlpriv->dm.entry_min_undec_sm_pwdb;
358 else
359 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
360 } else if (dm_dig->cursta_cstate == DIG_STA_CONNECT ||
361 dm_dig->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
362 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
363 } else if (dm_dig->curmultista_cstate ==
364 DIG_MULTISTA_CONNECT) {
365 rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
366 }
367 return (u8)rssi_val_min;
368}
369
/* Read the baseband's OFDM and CCK false-alarm / CCA counters into
 * rtlpriv->falsealm_cnt, compute the aggregate totals, then toggle the
 * hold/reset bits so the hardware counts a fresh interval.
 */
static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
	u32 ret_value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct false_alarm_statistics *alm_cnt = &(rtlpriv->falsealm_cnt);

	/* presumably latches the OFDM counters for a stable read --
	 * vendor-defined sequence, TODO confirm against the datasheet */
	rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1);
	rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1);

	ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
	alm_cnt->cnt_fast_fsync_fail = (ret_value&0xffff);
	alm_cnt->cnt_sb_search_fail = ((ret_value&0xffff0000)>>16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
	alm_cnt->cnt_ofdm_cca = (ret_value&0xffff);
	alm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
	alm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
	alm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
	alm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
	/* total OFDM failures is the sum of all individual fail counters */
	alm_cnt->cnt_ofdm_fail = alm_cnt->cnt_parity_fail +
				 alm_cnt->cnt_rate_illegal +
				 alm_cnt->cnt_crc8_fail +
				 alm_cnt->cnt_mcs_fail +
				 alm_cnt->cnt_fast_fsync_fail +
				 alm_cnt->cnt_sb_search_fail;

	ret_value = rtl_get_bbreg(hw, REG_SC_CNT, MASKDWORD);
	alm_cnt->cnt_bw_lsc = (ret_value & 0xffff);
	alm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16);

	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(12), 1);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);

	/* CCK fail count is split over two byte-wide register fields */
	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
	alm_cnt->cnt_cck_fail = ret_value;

	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
	alm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;

	/* CCK CCA count: the register's two low bytes are byte-swapped */
	ret_value = rtl_get_bbreg(hw, RCCK0_CCA_CNT, MASKDWORD);
	alm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) |
			       ((ret_value&0xFF00)>>8);

	alm_cnt->cnt_all = alm_cnt->cnt_fast_fsync_fail +
			   alm_cnt->cnt_sb_search_fail +
			   alm_cnt->cnt_parity_fail +
			   alm_cnt->cnt_rate_illegal +
			   alm_cnt->cnt_crc8_fail +
			   alm_cnt->cnt_mcs_fail +
			   alm_cnt->cnt_cck_fail;
	alm_cnt->cnt_cca_all = alm_cnt->cnt_ofdm_cca + alm_cnt->cnt_cck_cca;

	/* toggle the reset/hold bits so counting restarts for the next
	 * interval (vendor-defined reset sequence) */
	rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 1);
	rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 0);
	rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 1);
	rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 0);
	rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
	rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 2);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
		 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 alm_cnt->cnt_parity_fail,
		 alm_cnt->cnt_rate_illegal,
		 alm_cnt->cnt_crc8_fail, alm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
		 alm_cnt->cnt_ofdm_fail,
		 alm_cnt->cnt_cck_fail, alm_cnt->cnt_all);
}
449
450static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
451{
452 struct rtl_priv *rtlpriv = rtl_priv(hw);
453 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
454 u8 cur_cck_cca_thresh;
455
456 if (dm_dig->cursta_cstate == DIG_STA_CONNECT) {
457 dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw);
458 if (dm_dig->rssi_val_min > 25) {
459 cur_cck_cca_thresh = 0xcd;
460 } else if ((dm_dig->rssi_val_min <= 25) &&
461 (dm_dig->rssi_val_min > 10)) {
462 cur_cck_cca_thresh = 0x83;
463 } else {
464 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
465 cur_cck_cca_thresh = 0x83;
466 else
467 cur_cck_cca_thresh = 0x40;
468 }
469
470 } else {
471 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
472 cur_cck_cca_thresh = 0x83;
473 else
474 cur_cck_cca_thresh = 0x40;
475 }
476
477 if (dm_dig->cur_cck_cca_thres != cur_cck_cca_thresh)
478 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
479
480 dm_dig->cur_cck_cca_thres = cur_cck_cca_thresh;
481 dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres;
482 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
483 "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
484}
485
/* Dynamic Initial Gain (DIG): tune the RX initial gain index (IGI)
 * from the measured minimum RSSI and the false-alarm counters so the
 * receiver keeps sensitivity without drowning in false detections.
 */
static void rtl88e_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_dig = &rtlpriv->dm_digtable;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 dig_min, dig_maxofmin;
	bool bfirstconnect;
	u8 dm_dig_max, dm_dig_min;
	u8 current_igi = dm_dig->cur_igvalue;

	/* Bail out when DIG is disabled or a scan is in progress. */
	if (rtlpriv->dm.dm_initialgain_enable == false)
		return;
	if (dm_dig->dig_enable_flag == false)
		return;
	if (mac->act_scanning == true)
		return;

	if (mac->link_state >= MAC80211_LINKED)
		dm_dig->cursta_cstate = DIG_STA_CONNECT;
	else
		dm_dig->cursta_cstate = DIG_STA_DISCONNECT;
	/* AP/IBSS modes are treated like the disconnected case. */
	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
		dm_dig->cursta_cstate = DIG_STA_DISCONNECT;

	dm_dig_max = DM_DIG_MAX;
	dm_dig_min = DM_DIG_MIN;
	dig_maxofmin = DM_DIG_MAX_AP;
	dig_min = dm_dig->dig_min_0;
	/* True exactly on the first DIG pass after association. */
	bfirstconnect = ((mac->link_state >= MAC80211_LINKED) ? true : false) &&
			(dm_dig->media_connect_0 == false);

	dm_dig->rssi_val_min =
		rtl88e_dm_initial_gain_min_pwdb(hw);

	if (mac->link_state >= MAC80211_LINKED) {
		/* Upper gain bound: min RSSI + 20, clamped to [min, max]. */
		if ((dm_dig->rssi_val_min + 20) > dm_dig_max)
			dm_dig->rx_gain_max = dm_dig_max;
		else if ((dm_dig->rssi_val_min + 20) < dm_dig_min)
			dm_dig->rx_gain_max = dm_dig_min;
		else
			dm_dig->rx_gain_max = dm_dig->rssi_val_min + 20;

		if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) {
			dig_min = dm_dig->antdiv_rssi_max;
		} else {
			/* Lower bound follows RSSI, bounded by the
			 * absolute minimum and the AP-mode cap.
			 */
			if (dm_dig->rssi_val_min < dm_dig_min)
				dig_min = dm_dig_min;
			else if (dm_dig->rssi_val_min < dig_maxofmin)
				dig_min = dig_maxofmin;
			else
				dig_min = dm_dig->rssi_val_min;
		}
	} else {
		dm_dig->rx_gain_max = dm_dig_max;
		dig_min = dm_dig_min;
		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
	}

	/* Heavy false alarms (>10000): raise the forbidden-IGI floor and
	 * hold it for recover_cnt passes before slowly releasing it.
	 */
	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
		dm_dig->large_fa_hit++;
		if (dm_dig->forbidden_igi < current_igi) {
			dm_dig->forbidden_igi = current_igi;
			dm_dig->large_fa_hit = 1;
		}

		if (dm_dig->large_fa_hit >= 3) {
			if ((dm_dig->forbidden_igi + 1) > dm_dig->rx_gain_max)
				dm_dig->rx_gain_min = dm_dig->rx_gain_max;
			else
				dm_dig->rx_gain_min = dm_dig->forbidden_igi + 1;
			dm_dig->recover_cnt = 3600;
		}
	} else {
		if (dm_dig->recover_cnt != 0) {
			dm_dig->recover_cnt--;
		} else {
			if (dm_dig->large_fa_hit == 0) {
				/* Slowly release the forbidden floor. */
				if ((dm_dig->forbidden_igi - 1) < dig_min) {
					dm_dig->forbidden_igi = dig_min;
					dm_dig->rx_gain_min = dig_min;
				} else {
					dm_dig->forbidden_igi--;
					dm_dig->rx_gain_min =
						 dm_dig->forbidden_igi + 1;
				}
			} else if (dm_dig->large_fa_hit == 3) {
				dm_dig->large_fa_hit = 0;
			}
		}
	}

	/* Step the IGI up/down according to the false-alarm level;
	 * on the very first pass after connect, seed it from RSSI.
	 */
	if (dm_dig->cursta_cstate == DIG_STA_CONNECT) {
		if (bfirstconnect) {
			current_igi = dm_dig->rssi_val_min;
		} else {
			if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
				current_igi += 2;
			else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
				current_igi++;
			else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
				current_igi--;
		}
	} else {
		if (rtlpriv->falsealm_cnt.cnt_all > 10000)
			current_igi += 2;
		else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
			current_igi++;
		else if (rtlpriv->falsealm_cnt.cnt_all < 500)
			current_igi--;
	}

	/* Clamp to the false-alarm driven limits. */
	if (current_igi > DM_DIG_FA_UPPER)
		current_igi = DM_DIG_FA_UPPER;
	else if (current_igi < DM_DIG_FA_LOWER)
		current_igi = DM_DIG_FA_LOWER;

	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
		current_igi = DM_DIG_FA_UPPER;

	/* Commit the new IGI to hardware and latch this pass's state. */
	dm_dig->cur_igvalue = current_igi;
	rtl88e_dm_write_dig(hw);
	dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ?
				   true : false);
	dm_dig->dig_min_0 = dig_min;

	rtl88e_dm_cck_packet_detection_thresh(hw);
}
615
616static void rtl88e_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
617{
618 struct rtl_priv *rtlpriv = rtl_priv(hw);
619
620 rtlpriv->dm.dynamic_txpower_enable = false;
621
622 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
623 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
624}
625
/* Dynamic tx-power control: derive the peer's smoothed signal strength
 * (undecorated PWDB) and map it onto a tx high-power level; reprogram
 * the channel tx power whenever the level changes.
 * NOTE(review): the rtl92c_ prefix is legacy naming — this is the 88E
 * path (it calls rtl88e_phy_set_txpower_level).
 */
static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undec_sm_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	/* High-power adjustment explicitly disabled. */
	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Not associated and no tracked peer entries: reset to normal. */
	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected\n");

		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;

		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Pick the PWDB source: weakest client in ad-hoc, own default
	 * port as STA, otherwise the weakest tracked entry.
	 */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undec_sm_pwdb =
			    rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		} else {
			undec_sm_pwdb =
			    rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		}
	} else {
		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;

		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n", undec_sm_pwdb);
	}

	/* Map PWDB to a power level with a hysteresis gap (-3/-5)
	 * between the thresholds so the level does not flap.
	 */
	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n");
	} else if ((undec_sm_pwdb <
		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n");
	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	/* Only touch the hardware when the level actually changed. */
	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel);
	}

	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
698
699void rtl88e_dm_write_dig(struct ieee80211_hw *hw)
700{
701 struct rtl_priv *rtlpriv = rtl_priv(hw);
702 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
703
704 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
705 "cur_igvalue = 0x%x, "
706 "pre_igvalue = 0x%x, back_val = %d\n",
707 dm_dig->cur_igvalue, dm_dig->pre_igvalue,
708 dm_dig->back_val);
709
710 if (dm_dig->cur_igvalue > 0x3f)
711 dm_dig->cur_igvalue = 0x3f;
712 if (dm_dig->pre_igvalue != dm_dig->cur_igvalue) {
713 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
714 dm_dig->cur_igvalue);
715
716 dm_dig->pre_igvalue = dm_dig->cur_igvalue;
717 }
718}
719
720static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
721{
722 struct rtl_priv *rtlpriv = rtl_priv(hw);
723 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
724 struct rtl_sta_info *drv_priv;
725 static u64 last_txok;
726 static u64 last_rx;
727 long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
728
729 if (rtlhal->oem_id == RT_CID_819x_HP) {
730 u64 cur_txok_cnt = 0;
731 u64 cur_rxok_cnt = 0;
732 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok;
733 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rx;
734 last_txok = cur_txok_cnt;
735 last_rx = cur_rxok_cnt;
736
737 if (cur_rxok_cnt > (cur_txok_cnt * 6))
738 rtl_write_dword(rtlpriv, REG_ARFR0, 0x8f015);
739 else
740 rtl_write_dword(rtlpriv, REG_ARFR0, 0xff015);
741 }
742
743 /* AP & ADHOC & MESH */
744 spin_lock_bh(&rtlpriv->locks.entry_list_lock);
745 list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
746 if (drv_priv->rssi_stat.undec_sm_pwdb < tmp_entry_min_pwdb)
747 tmp_entry_min_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
748 if (drv_priv->rssi_stat.undec_sm_pwdb > tmp_entry_max_pwdb)
749 tmp_entry_max_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
750 }
751 spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
752
753 /* If associated entry is found */
754 if (tmp_entry_max_pwdb != 0) {
755 rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb;
756 RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMaxPWDB = 0x%lx(%ld)\n",
757 tmp_entry_max_pwdb, tmp_entry_max_pwdb);
758 } else {
759 rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
760 }
761 /* If associated entry is found */
762 if (tmp_entry_min_pwdb != 0xff) {
763 rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb;
764 RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMinPWDB = 0x%lx(%ld)\n",
765 tmp_entry_min_pwdb, tmp_entry_min_pwdb);
766 } else {
767 rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
768 }
769 /* Indicate Rx signal strength to FW. */
770 if (!rtlpriv->dm.useramask)
771 rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
772}
773
774void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw)
775{
776 struct rtl_priv *rtlpriv = rtl_priv(hw);
777
778 rtlpriv->dm.current_turbo_edca = false;
779 rtlpriv->dm.is_any_nonbepkts = false;
780 rtlpriv->dm.is_cur_rdlstate = false;
781}
782
783static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
784{
785 struct rtl_priv *rtlpriv = rtl_priv(hw);
786 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
787 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
788 static u64 last_txok_cnt;
789 static u64 last_rxok_cnt;
790 static u32 last_bt_edca_ul;
791 static u32 last_bt_edca_dl;
792 u64 cur_txok_cnt = 0;
793 u64 cur_rxok_cnt = 0;
794 u32 edca_be_ul = 0x5ea42b;
795 u32 edca_be_dl = 0x5ea42b;
796 bool change_edca = false;
797
798 if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
799 (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
800 rtlpriv->dm.current_turbo_edca = false;
801 last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
802 last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
803 }
804
805 if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
806 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
807 change_edca = true;
808 }
809
810 if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
811 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
812 change_edca = true;
813 }
814
815 if (mac->link_state != MAC80211_LINKED) {
816 rtlpriv->dm.current_turbo_edca = false;
817 return;
818 }
819
820 if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
821 if (!(edca_be_ul & 0xffff0000))
822 edca_be_ul |= 0x005e0000;
823
824 if (!(edca_be_dl & 0xffff0000))
825 edca_be_dl |= 0x005e0000;
826 }
827
828 if ((change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
829 (!rtlpriv->dm.disable_framebursting))) {
830 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
831 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
832
833 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
834 if (!rtlpriv->dm.is_cur_rdlstate ||
835 !rtlpriv->dm.current_turbo_edca) {
836 rtl_write_dword(rtlpriv,
837 REG_EDCA_BE_PARAM,
838 edca_be_dl);
839 rtlpriv->dm.is_cur_rdlstate = true;
840 }
841 } else {
842 if (rtlpriv->dm.is_cur_rdlstate ||
843 !rtlpriv->dm.current_turbo_edca) {
844 rtl_write_dword(rtlpriv,
845 REG_EDCA_BE_PARAM,
846 edca_be_ul);
847 rtlpriv->dm.is_cur_rdlstate = false;
848 }
849 }
850 rtlpriv->dm.current_turbo_edca = true;
851 } else {
852 if (rtlpriv->dm.current_turbo_edca) {
853 u8 tmp = AC0_BE;
854 rtlpriv->cfg->ops->set_hw_reg(hw,
855 HW_VAR_AC_PARAM,
856 (u8 *)(&tmp));
857 rtlpriv->dm.current_turbo_edca = false;
858 }
859 }
860
861 rtlpriv->dm.is_any_nonbepkts = false;
862 last_txok_cnt = rtlpriv->stats.txbytesunicast;
863 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
864}
865
866static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
867 *hw)
868{
869 struct rtl_priv *rtlpriv = rtl_priv(hw);
870 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
871 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
872 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
873 u8 thermalvalue = 0, delta, delta_lck, delta_iqk, off;
874 u8 th_avg_cnt = 0;
875 u32 thermalvalue_avg = 0;
876 long ele_d, temp_cck;
877 char ofdm_index[2], cck_index = 0, ofdm_old[2] = {0, 0}, cck_old = 0;
878 int i = 0;
879 bool is2t = false;
880
881 u8 ofdm_min_index = 6, rf = (is2t) ? 2 : 1;
882 u8 index_for_channel;
883 enum _dec_inc {dec, power_inc};
884
885 /* 0.1 the following TWO tables decide the final index of
886 * OFDM/CCK swing table
887 */
888 char del_tbl_idx[2][15] = {
889 {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
890 {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10}
891 };
892 u8 thermal_threshold[2][15] = {
893 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 27},
894 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 25, 25, 25}
895 };
896
897 /*Initilization (7 steps in total) */
898 rtlpriv->dm.txpower_trackinginit = true;
899 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
900 "rtl88e_dm_txpower_tracking_callback_thermalmeter\n");
901
902 thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00);
903 if (!thermalvalue)
904 return;
905 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
906 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
907 thermalvalue, rtlpriv->dm.thermalvalue,
908 rtlefuse->eeprom_thermalmeter);
909
910 /*1. Query OFDM Default Setting: Path A*/
911 ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD) & MASKOFDM_D;
912 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
913 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
914 ofdm_old[0] = (u8) i;
915 rtldm->swing_idx_ofdm_base = (u8)i;
916 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
917 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
918 ROFDM0_XATXIQIMBAL,
919 ele_d, ofdm_old[0]);
920 break;
921 }
922 }
923
924 if (is2t) {
925 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBAL,
926 MASKDWORD) & MASKOFDM_D;
927 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
928 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
929 ofdm_old[1] = (u8)i;
930
931 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
932 DBG_LOUD,
933 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
934 ROFDM0_XBTXIQIMBAL, ele_d,
935 ofdm_old[1]);
936 break;
937 }
938 }
939 }
940 /*2.Query CCK default setting From 0xa24*/
941 temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
942 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
943 if (rtlpriv->dm.cck_inch14) {
944 if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) {
945 cck_old = (u8)i;
946 rtldm->swing_idx_cck_base = (u8)i;
947 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
948 "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
949 RCCK0_TXFILTER2, temp_cck, cck_old,
950 rtlpriv->dm.cck_inch14);
951 break;
952 }
953 } else {
954 if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) {
955 cck_old = (u8)i;
956 rtldm->swing_idx_cck_base = (u8)i;
957 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
958 "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
959 RCCK0_TXFILTER2, temp_cck, cck_old,
960 rtlpriv->dm.cck_inch14);
961 break;
962 }
963 }
964 }
965
966 /*3 Initialize ThermalValues of RFCalibrateInfo*/
967 if (!rtldm->thermalvalue) {
968 rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter;
969 rtlpriv->dm.thermalvalue_lck = thermalvalue;
970 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
971 for (i = 0; i < rf; i++)
972 rtlpriv->dm.ofdm_index[i] = ofdm_old[i];
973 rtlpriv->dm.cck_index = cck_old;
974 }
975
976 /*4 Calculate average thermal meter*/
977 rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
978 rtldm->thermalvalue_avg_index++;
979 if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_88E)
980 rtldm->thermalvalue_avg_index = 0;
981
982 for (i = 0; i < AVG_THERMAL_NUM_88E; i++) {
983 if (rtldm->thermalvalue_avg[i]) {
984 thermalvalue_avg += rtldm->thermalvalue_avg[i];
985 th_avg_cnt++;
986 }
987 }
988
989 if (th_avg_cnt)
990 thermalvalue = (u8)(thermalvalue_avg / th_avg_cnt);
991
992 /* 5 Calculate delta, delta_LCK, delta_IQK.*/
993 if (rtlhal->reloadtxpowerindex) {
994 delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
995 (thermalvalue - rtlefuse->eeprom_thermalmeter) :
996 (rtlefuse->eeprom_thermalmeter - thermalvalue);
997 rtlhal->reloadtxpowerindex = false;
998 rtlpriv->dm.done_txpower = false;
999 } else if (rtlpriv->dm.done_txpower) {
1000 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
1001 (thermalvalue - rtlpriv->dm.thermalvalue) :
1002 (rtlpriv->dm.thermalvalue - thermalvalue);
1003 } else {
1004 delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
1005 (thermalvalue - rtlefuse->eeprom_thermalmeter) :
1006 (rtlefuse->eeprom_thermalmeter - thermalvalue);
1007 }
1008 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
1009 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
1010 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
1011 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
1012 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
1013 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
1014
1015 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1016 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
1017 "eeprom_thermalmeter 0x%x delta 0x%x "
1018 "delta_lck 0x%x delta_iqk 0x%x\n",
1019 thermalvalue, rtlpriv->dm.thermalvalue,
1020 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
1021 delta_iqk);
1022 /* 6 If necessary, do LCK.*/
1023 if (delta_lck >= 8) {
1024 rtlpriv->dm.thermalvalue_lck = thermalvalue;
1025 rtl88e_phy_lc_calibrate(hw);
1026 }
1027
1028 /* 7 If necessary, move the index of swing table to adjust Tx power. */
1029 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
1030 delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
1031 (thermalvalue - rtlefuse->eeprom_thermalmeter) :
1032 (rtlefuse->eeprom_thermalmeter - thermalvalue);
1033
1034 /* 7.1 Get the final CCK_index and OFDM_index for each
1035 * swing table.
1036 */
1037 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
1038 CAL_SWING_OFF(off, power_inc, IDX_MAP, delta);
1039 for (i = 0; i < rf; i++)
1040 ofdm_index[i] = rtldm->ofdm_index[i] +
1041 del_tbl_idx[power_inc][off];
1042 cck_index = rtldm->cck_index +
1043 del_tbl_idx[power_inc][off];
1044 } else {
1045 CAL_SWING_OFF(off, dec, IDX_MAP, delta);
1046 for (i = 0; i < rf; i++)
1047 ofdm_index[i] = rtldm->ofdm_index[i] +
1048 del_tbl_idx[dec][off];
1049 cck_index = rtldm->cck_index + del_tbl_idx[dec][off];
1050 }
1051
1052 /* 7.2 Handle boundary conditions of index.*/
1053 for (i = 0; i < rf; i++) {
1054 if (ofdm_index[i] > OFDM_TABLE_SIZE-1)
1055 ofdm_index[i] = OFDM_TABLE_SIZE-1;
1056 else if (rtldm->ofdm_index[i] < ofdm_min_index)
1057 ofdm_index[i] = ofdm_min_index;
1058 }
1059
1060 if (cck_index > CCK_TABLE_SIZE - 1)
1061 cck_index = CCK_TABLE_SIZE - 1;
1062 else if (cck_index < 0)
1063 cck_index = 0;
1064
1065 /*7.3Configure the Swing Table to adjust Tx Power.*/
1066 if (rtlpriv->dm.txpower_track_control) {
1067 rtldm->done_txpower = true;
1068 rtldm->swing_idx_ofdm[RF90_PATH_A] =
1069 (u8)ofdm_index[RF90_PATH_A];
1070 if (is2t)
1071 rtldm->swing_idx_ofdm[RF90_PATH_B] =
1072 (u8)ofdm_index[RF90_PATH_B];
1073 rtldm->swing_idx_cck = cck_index;
1074 if (rtldm->swing_idx_ofdm_cur !=
1075 rtldm->swing_idx_ofdm[0]) {
1076 rtldm->swing_idx_ofdm_cur =
1077 rtldm->swing_idx_ofdm[0];
1078 rtldm->swing_flag_ofdm = true;
1079 }
1080
1081 if (rtldm->swing_idx_cck != rtldm->swing_idx_cck) {
1082 rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck;
1083 rtldm->swing_flag_cck = true;
1084 }
1085
1086 rtl88e_chk_tx_track(hw, TXAGC, 0, 0);
1087
1088 if (is2t)
1089 rtl88e_chk_tx_track(hw, BBSWING,
1090 RF90_PATH_B,
1091 index_for_channel);
1092 }
1093 }
1094
1095 if (delta_iqk >= 8) {
1096 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
1097 rtl88e_phy_iq_calibrate(hw, false);
1098 }
1099
1100 if (rtldm->txpower_track_control)
1101 rtldm->thermalvalue = thermalvalue;
1102 rtldm->txpowercount = 0;
1103 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
1104}
1105
1106static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
1107{
1108 struct rtl_priv *rtlpriv = rtl_priv(hw);
1109
1110 rtlpriv->dm.txpower_tracking = true;
1111 rtlpriv->dm.txpower_trackinginit = false;
1112 rtlpriv->dm.txpowercount = 0;
1113 rtlpriv->dm.txpower_track_control = true;
1114
1115 rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] = 12;
1116 rtlpriv->dm.swing_idx_ofdm_cur = 12;
1117 rtlpriv->dm.swing_flag_ofdm = false;
1118 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1119 " rtlpriv->dm.txpower_tracking = %d\n",
1120 rtlpriv->dm.txpower_tracking);
1121}
1122
1123void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1124{
1125 struct rtl_priv *rtlpriv = rtl_priv(hw);
1126 static u8 tm_trigger;
1127
1128 if (!rtlpriv->dm.txpower_tracking)
1129 return;
1130
1131 if (!tm_trigger) {
1132 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17)|BIT(16),
1133 0x03);
1134 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1135 "Trigger 88E Thermal Meter!!\n");
1136 tm_trigger = 1;
1137 return;
1138 } else {
1139 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1140 "Schedule TxPowerTracking !!\n");
1141 rtl88e_dm_txpower_tracking_callback_thermalmeter(hw);
1142 tm_trigger = 0;
1143 }
1144}
1145
1146void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1147{
1148 struct rtl_priv *rtlpriv = rtl_priv(hw);
1149 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1150
1151 p_ra->ratr_state = DM_RATR_STA_INIT;
1152 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1153
1154 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1155 rtlpriv->dm.useramask = true;
1156 else
1157 rtlpriv->dm.useramask = false;
1158}
1159
/* Rate-adaptive mask refresh: map the smoothed PWDB onto a coarse
 * RSSI level (high/middle/low, with per-state hysteresis thresholds)
 * and push a new rate table to the hardware when the level changes.
 */
static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *p_ra = &(rtlpriv->ra);
	struct ieee80211_sta *sta = NULL;
	u32 low_rssi, hi_rssi;

	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver is going to unload\n");
		return;
	}

	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 "driver does not control rate adaptive mask\n");
		return;
	}

	/* Only meaningful for an associated STA interface. */
	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		/* Thresholds depend on the previous state to provide
		 * hysteresis and avoid flapping between levels.
		 */
		switch (p_ra->pre_ratr_state) {
		case DM_RATR_STA_HIGH:
			hi_rssi = 50;
			low_rssi = 20;
			break;
		case DM_RATR_STA_MIDDLE:
			hi_rssi = 55;
			low_rssi = 20;
			break;
		case DM_RATR_STA_LOW:
			hi_rssi = 50;
			low_rssi = 25;
			break;
		default:
			hi_rssi = 50;
			low_rssi = 20;
			break;
		}

		if (rtlpriv->dm.undec_sm_pwdb > (long)hi_rssi)
			p_ra->ratr_state = DM_RATR_STA_HIGH;
		else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi)
			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			p_ra->ratr_state = DM_RATR_STA_LOW;

		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI = %ld\n",
				 rtlpriv->dm.undec_sm_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 p_ra->pre_ratr_state, p_ra->ratr_state);

			/* Station lookup must stay inside the RCU
			 * read-side critical section.
			 */
			rcu_read_lock();
			sta = rtl_find_sta(hw, mac->bssid);
			if (sta)
				rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							p_ra->ratr_state);
			rcu_read_unlock();

			p_ra->pre_ratr_state = p_ra->ratr_state;
		}
	}
}
1230
1231static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1232{
1233 struct rtl_priv *rtlpriv = rtl_priv(hw);
1234 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1235
1236 dm_pstable->pre_ccastate = CCA_MAX;
1237 dm_pstable->cur_ccasate = CCA_MAX;
1238 dm_pstable->pre_rfstate = RF_MAX;
1239 dm_pstable->cur_rfstate = RF_MAX;
1240 dm_pstable->rssi_val_min = 0;
1241}
1242
/* Program the default/optional RX antenna used while idle.  The
 * hardware is only touched when the requested antenna differs from
 * the one currently configured.
 */
static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw, u8 ant)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
	u32 def_ant, opt_ant;

	if (fat_tbl->rx_idle_ant != ant) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "need to update rx idle ant\n");
		/* NOTE(review): these ternaries compare rx_idle_ant (an
		 * antenna id, MAIN_ANT/AUX_ANT) against CG_TRX_HW_ANTDIV
		 * (a diversity *type*); rtlefuse->antenna_div_type looks
		 * like the intended operand — confirm against the vendor
		 * driver before changing.
		 */
		if (ant == MAIN_ANT) {
			def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
				  MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
			opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
				  AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
		} else {
			def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
				  AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
			opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
				  MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
		}

		if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) {
			/* TRX diversity: program RX mux, antenna select
			 * and the response-TX antenna.
			 */
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) |
				      BIT(4) | BIT(3), def_ant);
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) |
				      BIT(7) | BIT(6), opt_ant);
			rtl_set_bbreg(hw, DM_REG_ANTSEL_CTRL_11N, BIT(14) |
				      BIT(13) | BIT(12), def_ant);
			rtl_set_bbreg(hw, DM_REG_RESP_TX_11N, BIT(6) | BIT(7),
				      def_ant);
		} else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) {
			/* RX-only diversity: just the RX mux bits. */
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) |
				      BIT(4) | BIT(3), def_ant);
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) |
				      BIT(7) | BIT(6), opt_ant);
		}
	}
	fat_tbl->rx_idle_ant = ant;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n",
		 ((ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT")));
}
1286
1287static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw,
1288 u8 ant, u32 mac_id)
1289{
1290 struct rtl_priv *rtlpriv = rtl_priv(hw);
1291 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
1292 struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
1293 u8 target_ant;
1294
1295 if (ant == MAIN_ANT)
1296 target_ant = MAIN_ANT_CG_TRX;
1297 else
1298 target_ant = AUX_ANT_CG_TRX;
1299
1300 fat_tbl->antsel_a[mac_id] = target_ant & BIT(0);
1301 fat_tbl->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1;
1302 fat_tbl->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2;
1303 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n",
1304 ((ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT")));
1305 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n",
1306 fat_tbl->antsel_c[mac_id],
1307 fat_tbl->antsel_b[mac_id], fat_tbl->antsel_a[mac_id]);
1308}
1309
/* One-time MAC/BB register setup for CGCS RX-only hardware antenna
 * diversity.  Register write order follows the vendor sequence; do
 * not reorder.
 */
static void rtl88e_dm_rx_hw_antena_div_init(struct ieee80211_hw *hw)
{
	u32 value32;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 |
		      (BIT(23) | BIT(25)));
	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 1);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);
	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*CCK Setting*/
	rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1);
	rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1);
	/* Start idling on the main antenna. */
	rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT);
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201);
}
1330
/* One-time MAC/BB register setup for CG TX+RX hardware antenna
 * diversity.  Same sequence as the RX-only variant plus the TX
 * selector; write order follows the vendor sequence, do not reorder.
 */
static void rtl88e_dm_trx_hw_antenna_div_init(struct ieee80211_hw *hw)
{
	u32 value32;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 |
		      (BIT(23) | BIT(25)));
	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);
	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*CCK Setting*/
	rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1);
	rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1);
	/*TX Setting*/
	rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 0);
	/* Start idling on the main antenna. */
	rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT);
	rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201);
}
1354
/* Initialize fast antenna training (smart antenna): reset the
 * per-entry training statistics and program the MAC/BB for
 * software-driven antenna selection.  Register write order follows
 * the vendor sequence; do not reorder.
 */
static void rtl88e_dm_fast_training_init(struct ieee80211_hw *hw)
{
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
	u32 ant_combo = 2;	/* selectable antennas; fixed at 2 here */
	u32 value32, i;

	/* Reset the first 6 training-statistics entries. */
	for (i = 0; i < 6; i++) {
		fat_tbl->bssid[i] = 0;
		fat_tbl->ant_sum[i] = 0;
		fat_tbl->ant_cnt[i] = 0;
		fat_tbl->ant_ave[i] = 0;
	}
	fat_tbl->train_idx = 0;
	fat_tbl->fat_state = FAT_NORMAL_STATE;

	/*MAC Setting*/
	value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | (BIT(23) |
		      BIT(25)));
	value32 = rtl_get_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD);
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD, value32 | (BIT(16) |
		      BIT(17)));
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKLWORD, 0);
	rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, MASKDWORD, 0);

	/*Pin Setting*/
	rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0);
	rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1);

	/*OFDM Setting*/
	rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0);
	/*antenna mapping table*/
	/* With ant_combo fixed at 2 above, only the 2-antenna mapping
	 * branch is reachable; the 7-antenna branch is kept for the
	 * larger combo configuration.
	 */
	if (ant_combo == 2) {
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2);
	} else if (ant_combo == 7) {
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE2, 2);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE3, 3);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE0, 4);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE1, 5);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE2, 6);
		rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE3, 7);
	}

	/*TX Setting*/
	rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), 0);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), 1);
	rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(2) | BIT(1) | BIT(0),
		      (ant_combo - 1));

	rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
}
1413
1414static void rtl88e_dm_antenna_div_init(struct ieee80211_hw *hw)
1415{
1416 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1417
1418 if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
1419 rtl88e_dm_rx_hw_antena_div_init(hw);
1420 else if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
1421 rtl88e_dm_trx_hw_antenna_div_init(hw);
1422 else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)
1423 rtl88e_dm_fast_training_init(hw);
1424}
1425
1426void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
1427 u8 *pdesc, u32 mac_id)
1428{
1429 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1430 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
1431 struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
1432
1433 if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
1434 (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)) {
1435 SET_TX_DESC_ANTSEL_A(pdesc, fat_tbl->antsel_a[mac_id]);
1436 SET_TX_DESC_ANTSEL_B(pdesc, fat_tbl->antsel_b[mac_id]);
1437 SET_TX_DESC_ANTSEL_C(pdesc, fat_tbl->antsel_c[mac_id]);
1438 }
1439}
1440
1441void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw,
1442 u8 antsel_tr_mux, u32 mac_id, u32 rx_pwdb_all)
1443{
1444 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1445 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
1446 struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
1447
1448 if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) {
1449 if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
1450 fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all;
1451 fat_tbl->main_ant_cnt[mac_id]++;
1452 } else {
1453 fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all;
1454 fat_tbl->aux_ant_cnt[mac_id]++;
1455 }
1456 } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) {
1457 if (antsel_tr_mux == MAIN_ANT_CGCS_RX) {
1458 fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all;
1459 fat_tbl->main_ant_cnt[mac_id]++;
1460 } else {
1461 fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all;
1462 fat_tbl->aux_ant_cnt[mac_id]++;
1463 }
1464 }
1465}
1466
/* HW antenna diversity decision, run once per watchdog period.
 *
 * Averages the main/aux RSSI accumulators gathered by
 * rtl88e_dm_ant_sel_statistics() for entry 0 (the own connection) and,
 * in AP/IBSS mode, for every associated station; picks a TX antenna per
 * station (CG_TRX scheme only) and a single RX idle antenna, then clears
 * the accumulators for the next period.
 */
static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_dig = &rtlpriv->dm_digtable;
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct rtl_sta_info *drv_priv;
	struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
	u32 i, min_rssi = 0xff, ant_div_max_rssi = 0, max_rssi = 0;
	u32 local_min_rssi, local_max_rssi;
	u32 main_rssi, aux_rssi;
	u8 rx_idle_ant = 0, target_ant = 7;

	/* Entry 0: the interface's own association. Average each antenna's
	 * accumulated power; a zero count yields 0 rather than dividing. */
	i = 0;
	main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ?
		(fat_tbl->main_ant_sum[i] /
		fat_tbl->main_ant_cnt[i]) : 0;
	aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ?
		(fat_tbl->aux_ant_sum[i] / fat_tbl->aux_ant_cnt[i]) : 0;
	/* On a tie keep the current idle antenna; otherwise the stronger. */
	target_ant = (main_rssi == aux_rssi) ?
		fat_tbl->rx_idle_ant : ((main_rssi >= aux_rssi) ?
		MAIN_ANT : AUX_ANT);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "main_ant_sum %d main_ant_cnt %d\n",
		 fat_tbl->main_ant_sum[i], fat_tbl->main_ant_cnt[i]);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "aux_ant_sum %d aux_ant_cnt %d\n",
		 fat_tbl->aux_ant_sum[i],
		 fat_tbl->aux_ant_cnt[i]);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "main_rssi %d aux_rssi%d\n", main_rssi, aux_rssi);
	/* Track the strongest antenna reading; values >= 40 are excluded
	 * from antdiv_rssi_max (cap used by DIG) but not from rssi_max. */
	local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi;
	if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40))
		ant_div_max_rssi = local_max_rssi;
	if (local_max_rssi > max_rssi)
		max_rssi = local_max_rssi;

	/* If the idle antenna saw no traffic this period, mirror the other
	 * antenna's average so the min computation is not skewed by 0. */
	if ((fat_tbl->rx_idle_ant == MAIN_ANT) && (main_rssi == 0))
		main_rssi = aux_rssi;
	else if ((fat_tbl->rx_idle_ant == AUX_ANT) && (aux_rssi == 0))
		aux_rssi = main_rssi;

	/* The station with the weakest signal dictates the RX idle antenna. */
	local_min_rssi = (main_rssi > aux_rssi) ? aux_rssi : main_rssi;
	if (local_min_rssi < min_rssi) {
		min_rssi = local_min_rssi;
		rx_idle_ant = target_ant;
	}
	/* Per-station TX antenna selection exists only in the CG_TRX scheme. */
	if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
		rtl88e_dm_update_tx_ant(hw, target_ant, i);

	/* AP/IBSS: repeat the same decision for every associated station. */
	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) {
		spin_lock_bh(&rtlpriv->locks.entry_list_lock);
		list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
			i++;
			main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ?
				(fat_tbl->main_ant_sum[i] /
				fat_tbl->main_ant_cnt[i]) : 0;
			aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ?
				(fat_tbl->aux_ant_sum[i] /
				fat_tbl->aux_ant_cnt[i]) : 0;
			target_ant = (main_rssi == aux_rssi) ?
				fat_tbl->rx_idle_ant : ((main_rssi >=
				aux_rssi) ? MAIN_ANT : AUX_ANT);


			local_max_rssi = max_t(u32, main_rssi, aux_rssi);
			if ((local_max_rssi > ant_div_max_rssi) &&
			    (local_max_rssi < 40))
				ant_div_max_rssi = local_max_rssi;
			if (local_max_rssi > max_rssi)
				max_rssi = local_max_rssi;

			if ((fat_tbl->rx_idle_ant == MAIN_ANT) && !main_rssi)
				main_rssi = aux_rssi;
			else if ((fat_tbl->rx_idle_ant == AUX_ANT) &&
				 (aux_rssi == 0))
				aux_rssi = main_rssi;

			local_min_rssi = (main_rssi > aux_rssi) ?
				aux_rssi : main_rssi;
			if (local_min_rssi < min_rssi) {
				min_rssi = local_min_rssi;
				rx_idle_ant = target_ant;
			}
			if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
				rtl88e_dm_update_tx_ant(hw, target_ant, i);
		}
		spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
	}

	/* Reset the accumulators for the next watchdog period. */
	for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) {
		fat_tbl->main_ant_sum[i] = 0;
		fat_tbl->aux_ant_sum[i] = 0;
		fat_tbl->main_ant_cnt[i] = 0;
		fat_tbl->aux_ant_cnt[i] = 0;
	}

	rtl88e_dm_update_rx_idle_ant(hw, rx_idle_ant);

	/* Export results for the DIG algorithm. */
	dm_dig->antdiv_rssi_max = ant_div_max_rssi;
	dm_dig->rssi_max = max_rssi;
}
1570
/* Program the antenna-training target MAC address registers with the next
 * peer to train against.
 *
 * Advances fat_table.train_idx (wrapping at ASSOCIATE_ENTRY_NUM); index 0
 * always means our own MAC address, any other index selects the
 * train_idx-th station from the driver's entry list (non-STA modes only).
 * The chosen address is split across DM_REG_ANT_TRAIN_1 (low 4 bytes) and
 * DM_REG_ANT_TRAIN_2 (high 2 bytes).  Does nothing unless linked.
 */
static void rtl88e_set_next_mac_address_target(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct rtl_sta_info *drv_priv;
	struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
	u32 value32, i, j = 0;

	if (mac->link_state >= MAC80211_LINKED) {
		/* Try at most ASSOCIATE_ENTRY_NUM indices to find a target. */
		for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) {
			if ((fat_tbl->train_idx + 1) == ASSOCIATE_ENTRY_NUM)
				fat_tbl->train_idx = 0;
			else
				fat_tbl->train_idx++;

			if (fat_tbl->train_idx == 0) {
				/* Index 0: train against our own address. */
				value32 = (mac->mac_addr[5] << 8) |
				    mac->mac_addr[4];
				rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2,
					      MASKLWORD, value32);

				value32 = (mac->mac_addr[3] << 24) |
				    (mac->mac_addr[2] << 16) |
				    (mac->mac_addr[1] << 8) |
				    mac->mac_addr[0];
				rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1,
					      MASKDWORD, value32);
				break;
			}

			if (rtlpriv->mac80211.opmode !=
			    NL80211_IFTYPE_STATION) {
				spin_lock_bh(&rtlpriv->locks.entry_list_lock);
				/* Walk to the train_idx-th station entry.
				 * NOTE(review): 'j' is not reset between
				 * outer iterations, so after a miss the
				 * count carries over to the next list walk
				 * — verify this is intentional upstream. */
				list_for_each_entry(drv_priv,
						    &rtlpriv->entry_list,
						    list) {
					j++;
					if (j != fat_tbl->train_idx)
						continue;

					value32 = (drv_priv->mac_addr[5] << 8) |
					    drv_priv->mac_addr[4];
					rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2,
						      MASKLWORD, value32);

					value32 = (drv_priv->mac_addr[3]<<24) |
					    (drv_priv->mac_addr[2]<<16) |
					    (drv_priv->mac_addr[1]<<8) |
					    drv_priv->mac_addr[0];
					rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1,
						      MASKDWORD, value32);
					break;
				}
				spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
				/*find entry, break*/
				if (j == fat_tbl->train_idx)
					break;
			}
		}
	}
}
1633
/* Fast antenna training (smart antenna) state machine.
 *
 * FAT_TRAINING_STATE: evaluate the per-antenna RSSI averages collected
 * during the training window, commit the best of the 7 antenna settings
 * to the RX/TX antenna-control registers, store the 3-bit selection for
 * TX descriptors, clear the accumulators and return to normal state.
 *
 * FAT_NORMAL_STATE: pick the next training target MAC, enable training
 * mode in the baseband and arm the training timer so the evaluation pass
 * runs one watchdog period later.
 */
static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
	u32 i, max_rssi = 0;
	u8 target_ant = 2;
	bool bpkt_filter_match = false;

	if (fat_tbl->fat_state == FAT_TRAINING_STATE) {
		/* Average each of the 7 candidate antenna settings; any
		 * nonzero count means at least one frame matched. */
		for (i = 0; i < 7; i++) {
			if (fat_tbl->ant_cnt[i] == 0) {
				fat_tbl->ant_ave[i] = 0;
			} else {
				fat_tbl->ant_ave[i] = fat_tbl->ant_sum[i] /
					fat_tbl->ant_cnt[i];
				bpkt_filter_match = true;
			}

			if (fat_tbl->ant_ave[i] > max_rssi) {
				max_rssi = fat_tbl->ant_ave[i];
				target_ant = (u8) i;
			}
		}

		if (bpkt_filter_match == false) {
			/* No training frames received: just leave training
			 * mode without changing the antenna selection. */
			rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N,
				      BIT(16), 0);
			rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
		} else {
			/* Leave training mode and latch the winning 3-bit
			 * antenna setting into the RX/TX antenna controls. */
			rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N,
				      BIT(16), 0);
			rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) |
				      BIT(7) | BIT(6), target_ant);
			rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1);

			/* Split the selection into the per-descriptor
			 * antsel_a/b/c bits for the current training peer. */
			fat_tbl->antsel_a[fat_tbl->train_idx] =
				target_ant & BIT(0);
			fat_tbl->antsel_b[fat_tbl->train_idx] =
				(target_ant & BIT(1)) >> 1;
			fat_tbl->antsel_c[fat_tbl->train_idx] =
				(target_ant & BIT(2)) >> 2;

			if (target_ant == 0)
				rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
		}

		/* Reset accumulators for the next training round. */
		for (i = 0; i < 7; i++) {
			fat_tbl->ant_sum[i] = 0;
			fat_tbl->ant_cnt[i] = 0;
		}

		fat_tbl->fat_state = FAT_NORMAL_STATE;
		return;
	}

	if (fat_tbl->fat_state == FAT_NORMAL_STATE) {
		rtl88e_set_next_mac_address_target(hw);

		/* Enter training mode and schedule the evaluation pass. */
		fat_tbl->fat_state = FAT_TRAINING_STATE;
		rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 1);
		rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);

		mod_timer(&rtlpriv->works.fast_antenna_training_timer,
			  jiffies + MSECS(RTL_WATCH_DOG_TIME));
	}
}
1701
/* Timer callback for the fast-antenna-training timer; 'data' carries the
 * ieee80211_hw pointer that was armed when the timer was set up. */
void rtl88e_dm_fast_antenna_training_callback(unsigned long data)
{
	rtl88e_dm_fast_ant_training((struct ieee80211_hw *)data);
}
1708
1709static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
1710{
1711 struct rtl_priv *rtlpriv = rtl_priv(hw);
1712 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1713 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1714 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
1715 struct fast_ant_training *fat_tbl = &(rtldm->fat_table);
1716
1717 if (mac->link_state < MAC80211_LINKED) {
1718 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
1719 if (fat_tbl->becomelinked == true) {
1720 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
1721 "need to turn off HW AntDiv\n");
1722 rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
1723 rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
1724 BIT(15), 0);
1725 if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
1726 rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N,
1727 BIT(21), 0);
1728 fat_tbl->becomelinked =
1729 (mac->link_state == MAC80211_LINKED) ? true : false;
1730 }
1731 return;
1732 } else {
1733 if (fat_tbl->becomelinked == false) {
1734 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
1735 "Need to turn on HW AntDiv\n");
1736 rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
1737 rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
1738 BIT(15), 1);
1739 if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
1740 rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N,
1741 BIT(21), 1);
1742 fat_tbl->becomelinked =
1743 (mac->link_state >= MAC80211_LINKED) ? true : false;
1744 }
1745 }
1746
1747 if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
1748 (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV))
1749 rtl88e_dm_hw_ant_div(hw);
1750 else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)
1751 rtl88e_dm_fast_ant_training(hw);
1752}
1753
1754void rtl88e_dm_init(struct ieee80211_hw *hw)
1755{
1756 struct rtl_priv *rtlpriv = rtl_priv(hw);
1757
1758 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1759 rtl88e_dm_diginit(hw);
1760 rtl88e_dm_init_dynamic_txpower(hw);
1761 rtl88e_dm_init_edca_turbo(hw);
1762 rtl88e_dm_init_rate_adaptive_mask(hw);
1763 rtl88e_dm_init_txpower_tracking(hw);
1764 rtl92c_dm_init_dynamic_bb_powersaving(hw);
1765 rtl88e_dm_antenna_div_init(hw);
1766}
1767
1768void rtl88e_dm_watchdog(struct ieee80211_hw *hw)
1769{
1770 struct rtl_priv *rtlpriv = rtl_priv(hw);
1771 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1772 bool fw_current_inpsmode = false;
1773 bool fw_ps_awake = true;
1774
1775 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1776 (u8 *)(&fw_current_inpsmode));
1777 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1778 (u8 *)(&fw_ps_awake));
1779 if (ppsc->p2p_ps_info.p2p_ps_mode)
1780 fw_ps_awake = false;
1781
1782 if ((ppsc->rfpwr_state == ERFON) &&
1783 ((!fw_current_inpsmode) && fw_ps_awake) &&
1784 (!ppsc->rfchange_inprogress)) {
1785 rtl88e_dm_pwdb_monitor(hw);
1786 rtl88e_dm_dig(hw);
1787 rtl88e_dm_false_alarm_counter_statistics(hw);
1788 rtl92c_dm_dynamic_txpower(hw);
1789 rtl88e_dm_check_txpower_tracking(hw);
1790 rtl88e_dm_refresh_rate_adaptive_mask(hw);
1791 rtl88e_dm_check_edca_turbo(hw);
1792 rtl88e_dm_antenna_diversity(hw);
1793 }
1794}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
new file mode 100644
index 000000000000..0e07f72ea158
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
@@ -0,0 +1,326 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL88E_DM_H__
31#define __RTL88E_DM_H__
32
33#define MAIN_ANT 0
34#define AUX_ANT 1
35#define MAIN_ANT_CG_TRX 1
36#define AUX_ANT_CG_TRX 0
37#define MAIN_ANT_CGCS_RX 0
38#define AUX_ANT_CGCS_RX 1
39
40/*RF REG LIST*/
41#define DM_REG_RF_MODE_11N 0x00
42#define DM_REG_RF_0B_11N 0x0B
43#define DM_REG_CHNBW_11N 0x18
44#define DM_REG_T_METER_11N 0x24
45#define DM_REG_RF_25_11N 0x25
46#define DM_REG_RF_26_11N 0x26
47#define DM_REG_RF_27_11N 0x27
48#define DM_REG_RF_2B_11N 0x2B
49#define DM_REG_RF_2C_11N 0x2C
50#define DM_REG_RXRF_A3_11N 0x3C
51#define DM_REG_T_METER_92D_11N 0x42
52#define DM_REG_T_METER_88E_11N 0x42
53
54/*BB REG LIST*/
55/*PAGE 8 */
56#define DM_REG_BB_CTRL_11N 0x800
57#define DM_REG_RF_PIN_11N 0x804
58#define DM_REG_PSD_CTRL_11N 0x808
59#define DM_REG_TX_ANT_CTRL_11N 0x80C
60#define DM_REG_BB_PWR_SAV5_11N 0x818
61#define DM_REG_CCK_RPT_FORMAT_11N 0x824
62#define DM_REG_RX_DEFAULT_A_11N 0x858
63#define DM_REG_RX_DEFAULT_B_11N 0x85A
64#define DM_REG_BB_PWR_SAV3_11N 0x85C
65#define DM_REG_ANTSEL_CTRL_11N 0x860
66#define DM_REG_RX_ANT_CTRL_11N 0x864
67#define DM_REG_PIN_CTRL_11N 0x870
68#define DM_REG_BB_PWR_SAV1_11N 0x874
69#define DM_REG_ANTSEL_PATH_11N 0x878
70#define DM_REG_BB_3WIRE_11N 0x88C
71#define DM_REG_SC_CNT_11N 0x8C4
72#define DM_REG_PSD_DATA_11N 0x8B4
73/*PAGE 9*/
74#define DM_REG_ANT_MAPPING1_11N 0x914
75#define DM_REG_ANT_MAPPING2_11N 0x918
76/*PAGE A*/
77#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
78#define DM_REG_CCK_CCA_11N 0xA0A
79#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
80#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
81#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
82#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
83#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
84#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
85#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
86#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
87#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
88#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
89#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
90#define DM_REG_CCK_FA_RST_11N 0xA2C
91#define DM_REG_CCK_FA_MSB_11N 0xA58
92#define DM_REG_CCK_FA_LSB_11N 0xA5C
93#define DM_REG_CCK_CCA_CNT_11N 0xA60
94#define DM_REG_BB_PWR_SAV4_11N 0xA74
95/*PAGE B */
96#define DM_REG_LNA_SWITCH_11N 0xB2C
97#define DM_REG_PATH_SWITCH_11N 0xB30
98#define DM_REG_RSSI_CTRL_11N 0xB38
99#define DM_REG_CONFIG_ANTA_11N 0xB68
100#define DM_REG_RSSI_BT_11N 0xB9C
101/*PAGE C */
102#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
103#define DM_REG_RX_PATH_11N 0xC04
104#define DM_REG_TRMUX_11N 0xC08
105#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
106#define DM_REG_RXIQI_MATRIX_11N 0xC14
107#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
108#define DM_REG_IGI_A_11N 0xC50
109#define DM_REG_ANTDIV_PARA2_11N 0xC54
110#define DM_REG_IGI_B_11N 0xC58
111#define DM_REG_ANTDIV_PARA3_11N 0xC5C
112#define DM_REG_BB_PWR_SAV2_11N 0xC70
113#define DM_REG_RX_OFF_11N 0xC7C
114#define DM_REG_TXIQK_MATRIXA_11N 0xC80
115#define DM_REG_TXIQK_MATRIXB_11N 0xC88
116#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
117#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
118#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
119#define DM_REG_ANTDIV_PARA1_11N 0xCA4
120#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
121/*PAGE D */
122#define DM_REG_OFDM_FA_RSTD_11N 0xD00
123#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
124#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
125#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
126/*PAGE E */
127#define DM_REG_TXAGC_A_6_18_11N 0xE00
128#define DM_REG_TXAGC_A_24_54_11N 0xE04
129#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
130#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
131#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
132#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
133#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
134#define DM_REG_FPGA0_IQK_11N 0xE28
135#define DM_REG_TXIQK_TONE_A_11N 0xE30
136#define DM_REG_RXIQK_TONE_A_11N 0xE34
137#define DM_REG_TXIQK_PI_A_11N 0xE38
138#define DM_REG_RXIQK_PI_A_11N 0xE3C
139#define DM_REG_TXIQK_11N 0xE40
140#define DM_REG_RXIQK_11N 0xE44
141#define DM_REG_IQK_AGC_PTS_11N 0xE48
142#define DM_REG_IQK_AGC_RSP_11N 0xE4C
143#define DM_REG_BLUETOOTH_11N 0xE6C
144#define DM_REG_RX_WAIT_CCA_11N 0xE70
145#define DM_REG_TX_CCK_RFON_11N 0xE74
146#define DM_REG_TX_CCK_BBON_11N 0xE78
147#define DM_REG_OFDM_RFON_11N 0xE7C
148#define DM_REG_OFDM_BBON_11N 0xE80
149#define DM_REG_TX2RX_11N 0xE84
150#define DM_REG_TX2TX_11N 0xE88
151#define DM_REG_RX_CCK_11N 0xE8C
152#define DM_REG_RX_OFDM_11N 0xED0
153#define DM_REG_RX_WAIT_RIFS_11N 0xED4
154#define DM_REG_RX2RX_11N 0xED8
155#define DM_REG_STANDBY_11N 0xEDC
156#define DM_REG_SLEEP_11N 0xEE0
157#define DM_REG_PMPD_ANAEN_11N 0xEEC
158
159
160/*MAC REG LIST*/
161#define DM_REG_BB_RST_11N 0x02
162#define DM_REG_ANTSEL_PIN_11N 0x4C
163#define DM_REG_EARLY_MODE_11N 0x4D0
164#define DM_REG_RSSI_MONITOR_11N 0x4FE
165#define DM_REG_EDCA_VO_11N 0x500
166#define DM_REG_EDCA_VI_11N 0x504
167#define DM_REG_EDCA_BE_11N 0x508
168#define DM_REG_EDCA_BK_11N 0x50C
169#define DM_REG_TXPAUSE_11N 0x522
170#define DM_REG_RESP_TX_11N 0x6D8
171#define DM_REG_ANT_TRAIN_1 0x7b0
172#define DM_REG_ANT_TRAIN_2 0x7b4
173
174/*DIG Related*/
175#define DM_BIT_IGI_11N 0x0000007F
176
177#define HAL_DM_DIG_DISABLE BIT(0)
178#define HAL_DM_HIPWR_DISABLE BIT(1)
179
180#define OFDM_TABLE_LENGTH 43
181#define CCK_TABLE_LENGTH 33
182
183#define OFDM_TABLE_SIZE 43
184#define CCK_TABLE_SIZE 33
185
186#define BW_AUTO_SWITCH_HIGH_LOW 25
187#define BW_AUTO_SWITCH_LOW_HIGH 30
188
189#define DM_DIG_THRESH_HIGH 40
190#define DM_DIG_THRESH_LOW 35
191
192#define DM_FALSEALARM_THRESH_LOW 400
193#define DM_FALSEALARM_THRESH_HIGH 1000
194
195#define DM_DIG_MAX 0x3e
196#define DM_DIG_MIN 0x1e
197
198#define DM_DIG_MAX_AP 0x32
199#define DM_DIG_MIN_AP 0x20
200
201#define DM_DIG_FA_UPPER 0x3e
202#define DM_DIG_FA_LOWER 0x1e
203#define DM_DIG_FA_TH0 0x200
204#define DM_DIG_FA_TH1 0x300
205#define DM_DIG_FA_TH2 0x400
206
207#define DM_DIG_BACKOFF_MAX 12
208#define DM_DIG_BACKOFF_MIN -4
209#define DM_DIG_BACKOFF_DEFAULT 10
210
211#define RXPATHSELECTION_SS_TH_LOW 30
212#define RXPATHSELECTION_DIFF_TH 18
213
214#define DM_RATR_STA_INIT 0
215#define DM_RATR_STA_HIGH 1
216#define DM_RATR_STA_MIDDLE 2
217#define DM_RATR_STA_LOW 3
218
219#define CTS2SELF_THVAL 30
220#define REGC38_TH 20
221
222#define WAIOTTHVAL 25
223
224#define TXHIGHPWRLEVEL_NORMAL 0
225#define TXHIGHPWRLEVEL_LEVEL1 1
226#define TXHIGHPWRLEVEL_LEVEL2 2
227#define TXHIGHPWRLEVEL_BT1 3
228#define TXHIGHPWRLEVEL_BT2 4
229
230#define DM_TYPE_BYFW 0
231#define DM_TYPE_BYDRIVER 1
232
233#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
234#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
235#define TXPWRTRACK_MAX_IDX 6
236
/* State for software antenna switching (SW antenna diversity). */
struct swat_t {
	u8 failure_cnt;		/* consecutive unsuccessful switch attempts */
	u8 try_flag;		/* nonzero while a trial switch is in progress */
	u8 stop_trying;		/* set to abandon further trial switches */
	long pre_rssi;		/* RSSI recorded before the trial switch */
	long trying_threshold;	/* RSSI delta required to start a trial */
	u8 cur_antenna;		/* antenna currently selected */
	u8 pre_antenna;		/* antenna selected before the trial */
};
246
/* Fast-antenna-training state machine (see rtl88e_dm_fast_ant_training). */
enum FAT_STATE {
	FAT_NORMAL_STATE = 0,	/* idle; next pass arms a training window */
	FAT_TRAINING_STATE = 1,	/* collecting stats; next pass evaluates */
};
251
/* Operation selectors for dynamic initial gain (DIG) configuration. */
enum tag_dynamic_init_gain_operation_type_definition {
	DIG_TYPE_THRESH_HIGH = 0,
	DIG_TYPE_THRESH_LOW = 1,
	DIG_TYPE_BACKOFF = 2,
	DIG_TYPE_RX_GAIN_MIN = 3,
	DIG_TYPE_RX_GAIN_MAX = 4,
	DIG_TYPE_ENABLE = 5,
	DIG_TYPE_DISABLE = 6,
	DIG_OP_TYPE_MAX		/* sentinel: number of operation types */
};
262
/* CCK packet-detection / false-alarm threshold stages. */
enum tag_cck_packet_detection_threshold_type_definition {
	CCK_PD_STAGE_LOWRSSI = 0,
	CCK_PD_STAGE_HIGHRSSI = 1,
	CCK_FA_STAGE_LOW = 2,
	CCK_FA_STAGE_HIGH = 3,
	CCK_PD_STAGE_MAX = 4,	/* sentinel: number of stages */
};
270
/* CCA (clear channel assessment) RX-chain configuration. */
enum dm_1r_cca_e {
	CCA_1R = 0,	/* CCA on one RX chain */
	CCA_2R = 1,	/* CCA on two RX chains */
	CCA_MAX = 2,	/* sentinel */
};
276
/* RF power-saving mode selector. */
enum dm_rf_e {
	RF_SAVE = 0,	/* power-saving RF configuration */
	RF_NORMAL = 1,	/* normal RF configuration */
	RF_MAX = 2,	/* sentinel */
};
282
/* Software antenna-switch selection values. */
enum dm_sw_ant_switch_e {
	ANS_ANTENNA_B = 1,
	ANS_ANTENNA_A = 2,
	ANS_ANTENNA_MAX = 3,	/* sentinel */
};
288
/* DIG extension-port algorithm stages. */
enum dm_dig_ext_port_alg_e {
	DIG_EXT_PORT_STAGE_0 = 0,
	DIG_EXT_PORT_STAGE_1 = 1,
	DIG_EXT_PORT_STAGE_2 = 2,
	DIG_EXT_PORT_STAGE_3 = 3,
	DIG_EXT_PORT_STAGE_MAX = 4,	/* sentinel */
};
296
/* Connection states as seen by the DIG algorithm. */
enum dm_dig_connect_e {
	DIG_STA_DISCONNECT = 0,
	DIG_STA_CONNECT = 1,
	DIG_STA_BEFORE_CONNECT = 2,
	DIG_MULTISTA_DISCONNECT = 3,
	DIG_MULTISTA_CONNECT = 4,
	DIG_CONNECT_MAX		/* sentinel */
};
305
/* How TX-power tracking applies its compensation. */
enum pwr_track_control_method {
	BBSWING,	/* adjust via baseband swing index */
	TXAGC		/* adjust via TX AGC registers */
};
310
311void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
312 u8 *pdesc, u32 mac_id);
313void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux,
314 u32 mac_id, u32 rx_pwdb_all);
315void rtl88e_dm_fast_antenna_training_callback(unsigned long data);
316void rtl88e_dm_init(struct ieee80211_hw *hw);
317void rtl88e_dm_watchdog(struct ieee80211_hw *hw);
318void rtl88e_dm_write_dig(struct ieee80211_hw *hw);
319void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw);
320void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw);
321void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
322void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
323 u8 type, u8 *pdirection,
324 u32 *poutwrite_val);
325
326#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
new file mode 100644
index 000000000000..57e4cc5833a9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -0,0 +1,830 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "reg.h"
34#include "def.h"
35#include "fw.h"
36
37#include <linux/kmemleak.h>
38
39static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 u8 tmp;
43
44 if (enable) {
45 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
46 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
47
48 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
49 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
50
51 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
52 rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
53 } else {
54 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
55 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
56
57 rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
58 }
59}
60
61static void _rtl88e_fw_block_write(struct ieee80211_hw *hw,
62 const u8 *buffer, u32 size)
63{
64 struct rtl_priv *rtlpriv = rtl_priv(hw);
65 u32 blk_sz = sizeof(u32);
66 u8 *buf_ptr = (u8 *)buffer;
67 u32 *pu4BytePtr = (u32 *)buffer;
68 u32 i, offset, blk_cnt, remain;
69
70 blk_cnt = size / blk_sz;
71 remain = size % blk_sz;
72
73 for (i = 0; i < blk_cnt; i++) {
74 offset = i * blk_sz;
75 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
76 *(pu4BytePtr + i));
77 }
78
79 if (remain) {
80 offset = blk_cnt * blk_sz;
81 buf_ptr += offset;
82 for (i = 0; i < remain; i++) {
83 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
84 offset + i), *(buf_ptr + i));
85 }
86 }
87}
88
89static void _rtl88e_fw_page_write(struct ieee80211_hw *hw,
90 u32 page, const u8 *buffer, u32 size)
91{
92 struct rtl_priv *rtlpriv = rtl_priv(hw);
93 u8 value8;
94 u8 u8page = (u8) (page & 0x07);
95
96 value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
97
98 rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
99 _rtl88e_fw_block_write(hw, buffer, size);
100}
101
102static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
103{
104 u32 fwlen = *pfwlen;
105 u8 remain = (u8) (fwlen % 4);
106
107 remain = (remain == 0) ? 0 : (4 - remain);
108
109 while (remain > 0) {
110 pfwbuf[fwlen] = 0;
111 fwlen++;
112 remain--;
113 }
114
115 *pfwlen = fwlen;
116}
117
118static void _rtl88e_write_fw(struct ieee80211_hw *hw,
119 enum version_8188e version, u8 *buffer, u32 size)
120{
121 struct rtl_priv *rtlpriv = rtl_priv(hw);
122 u8 *buf_ptr = (u8 *)buffer;
123 u32 page_no, remain;
124 u32 page, offset;
125
126 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
127
128 _rtl88e_fill_dummy(buf_ptr, &size);
129
130 page_no = size / FW_8192C_PAGE_SIZE;
131 remain = size % FW_8192C_PAGE_SIZE;
132
133 if (page_no > 8) {
134 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
135 "Page numbers should not greater then 8\n");
136 }
137
138 for (page = 0; page < page_no; page++) {
139 offset = page * FW_8192C_PAGE_SIZE;
140 _rtl88e_fw_page_write(hw, page, (buf_ptr + offset),
141 FW_8192C_PAGE_SIZE);
142 }
143
144 if (remain) {
145 offset = page_no * FW_8192C_PAGE_SIZE;
146 page = page_no;
147 _rtl88e_fw_page_write(hw, page, (buf_ptr + offset), remain);
148 }
149}
150
/* Hand control to the downloaded firmware and wait for it to come up.
 *
 * Polls REG_MCUFWDL for the checksum-report bit, then sets MCUFWDL_RDY,
 * clears WINTINI_RDY, resets the 8051 and polls for WINTINI_RDY to be
 * raised by the running firmware.
 *
 * Return: 0 on success, -EIO if the checksum never reports or the
 * firmware never signals ready.
 */
static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err = -EIO;
	u32 counter = 0;
	u32 value32;

	/* Wait for the download checksum report. */
	do {
		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
	} while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
		 (!(value32 & FWDL_CHKSUM_RPT)));

	if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
			 value32);
		goto exit;
	}

	RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
		 "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);

	/* Mark download done and clear the firmware-init flag before
	 * releasing the 8051 from reset. */
	value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
	value32 |= MCUFWDL_RDY;
	value32 &= ~WINTINI_RDY;
	rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);

	rtl88e_firmware_selfreset(hw);
	counter = 0;

	/* Poll until the firmware raises WINTINI_RDY, or time out. */
	do {
		value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
		if (value32 & WINTINI_RDY) {
			RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
				 "Polling FW ready success!! REG_MCUFWDL:0x%08x.\n",
				 value32);
			err = 0;
			goto exit;
		}

		udelay(FW_8192C_POLLING_DELAY);

	} while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);

	RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
		 "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);

exit:
	return err;
}
201
/* Download the RTL8188E firmware image held in rtlhal->pfirmware.
 *
 * Skips over the rtl92c firmware header if one is present, resets the
 * 8051 if a previous download is still latched, streams the image and
 * releases the firmware to run.
 *
 * Return: 1 if no firmware image is loaded, otherwise 0.
 * NOTE(review): the result of _rtl88e_fw_free_to_go() is logged but
 * deliberately not returned — a firmware that fails to signal ready is
 * treated as best-effort.  Verify callers don't rely on a failure code.
 */
int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl92c_firmware_header *pfwheader;
	u8 *pfwdata;
	u32 fwsize;
	int err;
	enum version_8188e version = rtlhal->version;

	if (!rtlhal->pfirmware)
		return 1;

	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
	pfwdata = (u8 *)rtlhal->pfirmware;
	fwsize = rtlhal->fwsize;
	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
		 "normal Firmware SIZE %d\n", fwsize);

	/* If the image carries a header, log it and skip past it. */
	if (IS_FW_HEADER_EXIST(pfwheader)) {
		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
			 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
			 pfwheader->version, pfwheader->signature,
			 (int)sizeof(struct rtl92c_firmware_header));

		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
	}

	/* BIT(7) set means the MCU still holds a previous image; clear
	 * the register and reset the 8051 before re-downloading. */
	if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
		rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
		rtl88e_firmware_selfreset(hw);
	}
	_rtl88e_enable_fw_download(hw, true);
	_rtl88e_write_fw(hw, version, pfwdata, fwsize);
	_rtl88e_enable_fw_download(hw, false);

	err = _rtl88e_fw_free_to_go(hw);

	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
		 "Firmware is%s ready to run!\n", err ? " not" : "");
	return 0;
}
245
246static bool _rtl88e_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
247{
248 struct rtl_priv *rtlpriv = rtl_priv(hw);
249 u8 val_hmetfr;
250
251 val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
252 if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
253 return true;
254 return false;
255}
256
/* Low-level host-to-card (H2C) mailbox write.
 *
 * Serializes against concurrent callers via rtlhal->h2c_setinprogress
 * (guarded by the h2c_lock), waits for the current mailbox to be freed
 * by firmware, then writes @element_id plus up to 7 command bytes: up to
 * 3 bytes through the HMEBOX register, bytes 4-7 through the matching
 * HMEBOX_EXT register (written first, as firmware reads on the HMEBOX
 * write).  Mailboxes 0-3 are used round-robin via last_hmeboxnum.
 */
static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
				     u8 element_id, u32 cmd_len,
				     u8 *cmd_b)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 boxnum;
	u16 box_reg = 0, box_extreg = 0;
	u8 u1b_tmp;
	bool isfw_read = false;
	u8 buf_index = 0;
	bool write_sucess = false;
	u8 wait_h2c_limit = 100;
	u8 wait_writeh2c_limit = 100;
	u8 boxc[4], boxext[2];
	u32 h2c_waitcounter = 0;
	unsigned long flag;
	u8 idx;

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");

	/* Claim the H2C path: spin (dropping the lock around each 100 us
	 * sleep) until no other writer is in progress, giving up after
	 * ~100 ms. */
	while (true) {
		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
		if (rtlhal->h2c_setinprogress) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "H2C set in progress! Wait to set..element_id(%d).\n",
				 element_id);

			while (rtlhal->h2c_setinprogress) {
				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
						       flag);
				h2c_waitcounter++;
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wait 100 us (%d times)...\n",
					 h2c_waitcounter);
				udelay(100);

				if (h2c_waitcounter > 1000)
					return;
				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
						  flag);
			}
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
		} else {
			rtlhal->h2c_setinprogress = true;
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
			break;
		}
	}

	while (!write_sucess) {
		wait_writeh2c_limit--;
		if (wait_writeh2c_limit == 0) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Write H2C fail because no trigger for FW INT!\n");
			break;
		}

		/* Select the mailbox register pair for the current slot. */
		boxnum = rtlhal->last_hmeboxnum;
		switch (boxnum) {
		case 0:
			box_reg = REG_HMEBOX_0;
			box_extreg = REG_HMEBOX_EXT_0;
			break;
		case 1:
			box_reg = REG_HMEBOX_1;
			box_extreg = REG_HMEBOX_EXT_1;
			break;
		case 2:
			box_reg = REG_HMEBOX_2;
			box_extreg = REG_HMEBOX_EXT_2;
			break;
		case 3:
			box_reg = REG_HMEBOX_3;
			box_extreg = REG_HMEBOX_EXT_3;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}

		/* Poll (10 us steps, up to ~1 ms) for firmware to release
		 * this mailbox. */
		isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
		while (!isfw_read) {
			wait_h2c_limit--;
			if (wait_h2c_limit == 0) {
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wating too long for FW read "
					 "clear HMEBox(%d)!\n", boxnum);
				break;
			}

			udelay(10);

			isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
			u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Wating for FW read clear HMEBox(%d)!!! "
				 "0x130 = %2x\n", boxnum, u1b_tmp);
		}

		if (!isfw_read) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Write H2C register BOX[%d] fail!!!!! "
				 "Fw do not read.\n", boxnum);
			break;
		}

		/* Byte 0 of the box carries the command element id. */
		memset(boxc, 0, sizeof(boxc));
		memset(boxext, 0, sizeof(boxext));
		boxc[0] = element_id;
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "Write element_id box_reg(%4x) = %2x\n",
			 box_reg, element_id);

		switch (cmd_len) {
		case 1:
		case 2:
		case 3:
			/*boxc[0] &= ~(BIT(7));*/
			/* Short command: payload fits after the element id
			 * in the 4-byte box alone. */
			memcpy((u8 *)(boxc) + 1, cmd_b + buf_index, cmd_len);

			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg+idx, boxc[idx]);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			/*boxc[0] |= (BIT(7));*/
			/* Long command: bytes 4..cmd_len go in the ext box,
			 * which must be written before the main box. */
			memcpy((u8 *)(boxext), cmd_b + buf_index+3, cmd_len-3);
			memcpy((u8 *)(boxc) + 1, cmd_b + buf_index, 3);

			for (idx = 0; idx < 2; idx++) {
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxext[idx]);
			}

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxc[idx]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}

		write_sucess = true;

		/* Advance the round-robin mailbox index (0..3). */
		rtlhal->last_hmeboxnum = boxnum + 1;
		if (rtlhal->last_hmeboxnum == 4)
			rtlhal->last_hmeboxnum = 0;

		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "pHalData->last_hmeboxnum = %d\n",
			 rtlhal->last_hmeboxnum);
	}

	/* Release the H2C path. */
	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	rtlhal->h2c_setinprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
423
424void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw,
425 u8 element_id, u32 cmd_len, u8 *cmd_b)
426{
427 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
428 u32 tmp_cmdbuf[2];
429
430 if (rtlhal->fw_ready == false) {
431 RT_ASSERT(false, "fail H2C cmd - Fw download fail!!!\n");
432 return;
433 }
434
435 memset(tmp_cmdbuf, 0, 8);
436 memcpy(tmp_cmdbuf, cmd_b, cmd_len);
437 _rtl88e_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
438
439 return;
440}
441
442void rtl88e_firmware_selfreset(struct ieee80211_hw *hw)
443{
444 u8 u1b_tmp;
445 struct rtl_priv *rtlpriv = rtl_priv(hw);
446
447 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
448 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
449 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
450 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
451 "8051Reset88E(): 8051 reset success.\n");
452}
453
454void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
455{
456 struct rtl_priv *rtlpriv = rtl_priv(hw);
457 u8 u1_h2c_set_pwrmode[H2C_88E_PWEMODE_LENGTH] = { 0 };
458 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
459 u8 power_state = 0;
460
461 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
462 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
463 SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, 0);
464 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
465 (rtlpriv->mac80211.p2p) ?
466 ppsc->smart_ps : 1);
467 SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
468 ppsc->reg_max_lps_awakeintvl);
469 SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
470 if (mode == FW_PS_ACTIVE_MODE)
471 power_state |= FW_PWR_STATE_ACTIVE;
472 else
473 power_state |= FW_PWR_STATE_RF_OFF;
474 SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
475
476 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
477 "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
478 u1_h2c_set_pwrmode, H2C_88E_PWEMODE_LENGTH);
479 rtl88e_fill_h2c_cmd(hw, H2C_88E_SETPWRMODE, H2C_88E_PWEMODE_LENGTH,
480 u1_h2c_set_pwrmode);
481}
482
483void rtl88e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
484{
485 u8 u1_joinbssrpt_parm[1] = { 0 };
486
487 SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
488
489 rtl88e_fill_h2c_cmd(hw, H2C_88E_JOINBSSRPT, 1, u1_joinbssrpt_parm);
490}
491
492void rtl88e_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
493 u8 ap_offload_enable)
494{
495 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
496 u8 u1_apoffload_parm[H2C_88E_AP_OFFLOAD_LENGTH] = { 0 };
497
498 SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
499 SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid);
500 SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
501
502 rtl88e_fill_h2c_cmd(hw, H2C_88E_AP_OFFLOAD, H2C_88E_AP_OFFLOAD_LENGTH,
503 u1_apoffload_parm);
504}
505
506static bool _rtl88e_cmd_send_packet(struct ieee80211_hw *hw,
507 struct sk_buff *skb)
508{
509 struct rtl_priv *rtlpriv = rtl_priv(hw);
510 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
511 struct rtl8192_tx_ring *ring;
512 struct rtl_tx_desc *pdesc;
513 struct sk_buff *pskb = NULL;
514 unsigned long flags;
515
516 ring = &rtlpci->tx_ring[BEACON_QUEUE];
517
518 pskb = __skb_dequeue(&ring->queue);
519 if (pskb)
520 kfree_skb(pskb);
521
522 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
523
524 pdesc = &ring->desc[0];
525
526 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
527
528 __skb_queue_tail(&ring->queue, skb);
529
530 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
531
532 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
533
534 return true;
535}
536
/* Page indices (128-byte pages) of each frame template inside
 * reserved_page_packet[]; "->N" marks a template that spills into the
 * following page as well.
 */
#define BEACON_PG 0 /* ->1 */
#define PSPOLL_PG 2
#define NULL_PG 3
#define PROBERSP_PG 4 /* ->5 */

/* 6 pages * 128 bytes. */
#define TOTAL_RESERVED_PKT_LEN 768

/* Template frames downloaded into the firmware's reserved TX pages.
 * The addresses/BSSID/AID fields and the placeholder SSID (the ASCII
 * bytes spell "linksys_wlan") are patched at runtime by
 * rtl88e_set_fw_rsvdpagepkt() before the block is sent to the chip.
 * NOTE(review): trailing bytes of each page appear to be TX-descriptor
 * padding expected by the firmware -- confirm against the vendor
 * reference before editing any of the non-zero rows.
 */
static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
	/* page 0 beacon */
	0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 1 beacon */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 2 ps-poll */
	0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 3 null */
	0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 4 probe_resp */
	0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
	0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 5 probe_resp */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
653
654void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
655{
656 struct rtl_priv *rtlpriv = rtl_priv(hw);
657 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
658 struct sk_buff *skb = NULL;
659
660 u32 totalpacketlen;
661 u8 u1RsvdPageLoc[5] = { 0 };
662
663 u8 *beacon;
664 u8 *pspoll;
665 u8 *nullfunc;
666 u8 *probersp;
667 /*---------------------------------------------------------
668 * (1) beacon
669 *---------------------------------------------------------
670 */
671 beacon = &reserved_page_packet[BEACON_PG * 128];
672 SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
673 SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
674
675 /*-------------------------------------------------------
676 * (2) ps-poll
677 *--------------------------------------------------------
678 */
679 pspoll = &reserved_page_packet[PSPOLL_PG * 128];
680 SET_80211_PS_POLL_AID(pspoll, (mac->assoc_id | 0xc000));
681 SET_80211_PS_POLL_BSSID(pspoll, mac->bssid);
682 SET_80211_PS_POLL_TA(pspoll, mac->mac_addr);
683
684 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
685
686 /*--------------------------------------------------------
687 * (3) null data
688 *---------------------------------------------------------
689 */
690 nullfunc = &reserved_page_packet[NULL_PG * 128];
691 SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
692 SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
693 SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
694
695 SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
696
697 /*---------------------------------------------------------
698 * (4) probe response
699 *----------------------------------------------------------
700 */
701 probersp = &reserved_page_packet[PROBERSP_PG * 128];
702 SET_80211_HDR_ADDRESS1(probersp, mac->bssid);
703 SET_80211_HDR_ADDRESS2(probersp, mac->mac_addr);
704 SET_80211_HDR_ADDRESS3(probersp, mac->bssid);
705
706 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
707
708 totalpacketlen = TOTAL_RESERVED_PKT_LEN;
709
710 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
711 "rtl88e_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
712 &reserved_page_packet[0], totalpacketlen);
713 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
714 "rtl88e_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
715 u1RsvdPageLoc, 3);
716
717 skb = dev_alloc_skb(totalpacketlen);
718 if (!skb)
719 return;
720 kmemleak_not_leak(skb);
721 memcpy(skb_put(skb, totalpacketlen),
722 &reserved_page_packet, totalpacketlen);
723
724 if (_rtl88e_cmd_send_packet(hw, skb)) {
725 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
726 "Set RSVD page location to Fw.\n");
727 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
728 "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 3);
729 rtl88e_fill_h2c_cmd(hw, H2C_88E_RSVDPAGE,
730 sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
731 } else
732 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
733 "Set RSVD page location to Fw FAIL!!!!!!.\n");
734}
735
/* Should check whether the FW supports P2P before sending this. */
737static void rtl88e_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
738{
739 u8 u1_ctwindow_period[1] = {ctwindow};
740
741 rtl88e_fill_h2c_cmd(hw, H2C_88E_P2P_PS_CTW_CMD, 1, u1_ctwindow_period);
742}
743
/* Update the cached P2P power-save offload block for the given state,
 * program the NoA (Notice of Absence) hardware registers when enabling,
 * and push the resulting offload byte to the firmware.
 */
void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
	struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
	u8 i;
	u16 ctwindow;
	u32 start_time, tsf_low;

	switch (p2p_ps_state) {
	case P2P_PS_DISABLE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
		/* Clear the whole offload block so the firmware stops
		 * all P2P power-save handling.
		 */
		memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
		break;
	case P2P_PS_ENABLE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
		/* update CTWindow value. */
		if (p2pinfo->ctwindow > 0) {
			p2p_ps_offload->ctwindow_en = 1;
			ctwindow = p2pinfo->ctwindow;
			rtl88e_set_p2p_ctw_period_cmd(hw, ctwindow);
		}
		/* hw only support 2 set of NoA */
		for (i = 0; i < p2pinfo->noa_num; i++) {
			/* To control the register setting for which NOA*/
			rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
			if (i == 0)
				p2p_ps_offload->noa0_en = 1;
			else
				p2p_ps_offload->noa1_en = 1;

			/* config P2P NoA Descriptor Register */
			rtl_write_dword(rtlpriv, 0x5E0,
					p2pinfo->noa_duration[i]);
			rtl_write_dword(rtlpriv, 0x5E4,
					p2pinfo->noa_interval[i]);

			/*Get Current TSF value */
			tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);

			start_time = p2pinfo->noa_start_time[i];
			/* Push the NoA start time forward, one interval
			 * at a time, until it lies safely past the
			 * current TSF (50 * 1024 -- presumably 50 TU if
			 * TSF ticks in microseconds; TODO confirm).
			 * A count of 255 means "continuous" and is not
			 * decremented; a count of 1 skips the adjustment.
			 */
			if (p2pinfo->noa_count_type[i] != 1) {
				while (start_time <= (tsf_low + (50 * 1024))) {
					start_time += p2pinfo->noa_interval[i];
					if (p2pinfo->noa_count_type[i] != 255)
						p2pinfo->noa_count_type[i]--;
				}
			}
			rtl_write_dword(rtlpriv, 0x5E8, start_time);
			rtl_write_dword(rtlpriv, 0x5EC,
					p2pinfo->noa_count_type[i]);
		}

		/* Only turn the offload on when opportunistic PS or at
		 * least one NoA schedule is active.
		 */
		if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
			/* rst p2p circuit */
			rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));

			p2p_ps_offload->offload_en = 1;

			/* role = 1 when acting as P2P GO, 0 otherwise. */
			if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
				p2p_ps_offload->role = 1;
				p2p_ps_offload->allstasleep = 0;
			} else {
				p2p_ps_offload->role = 0;
			}

			p2p_ps_offload->discovery = 0;
		}
		break;
	case P2P_PS_SCAN:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
		p2p_ps_offload->discovery = 1;
		break;
	case P2P_PS_SCAN_DONE:
		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
		p2p_ps_offload->discovery = 0;
		p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
		break;
	default:
		break;
	}

	rtl88e_fill_h2c_cmd(hw, H2C_88E_P2P_PS_OFFLOAD, 1,
			    (u8 *)p2p_ps_offload);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
new file mode 100644
index 000000000000..854a9875cd5f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
@@ -0,0 +1,301 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 * Larry Finger <Larry.Finger@lwfinger.net>
26 *
27 *****************************************************************************/
28
29#ifndef __RTL92C__FW__H__
30#define __RTL92C__FW__H__
31
/* Firmware image geometry and download-polling parameters (the 88E
 * reuses the 92C values, hence the names).
 */
#define FW_8192C_SIZE 0x8000
#define FW_8192C_START_ADDRESS 0x1000
#define FW_8192C_END_ADDRESS 0x5FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 3000

/* A valid 88E firmware image starts with signature 0x88E1. */
#define IS_FW_HEADER_EXIST(_pfwhdr) \
	((_pfwhdr->signature&0xFFFF) == 0x88E1)
#define USE_OLD_WOWLAN_DEBUG_FW 0

/* Payload lengths (bytes) of the H2C commands built in fw.c. */
#define H2C_88E_RSVDPAGE_LOC_LEN 5
#define H2C_88E_PWEMODE_LENGTH 5
#define H2C_88E_JOINBSSRPT_LENGTH 1
#define H2C_88E_AP_OFFLOAD_LENGTH 3
#define H2C_88E_WOWLAN_LENGTH 3
#define H2C_88E_KEEP_ALIVE_CTRL_LENGTH 3
#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
#define H2C_88E_REMOTE_WAKE_CTRL_LEN 1
#else
#define H2C_88E_REMOTE_WAKE_CTRL_LEN 3
#endif
#define H2C_88E_AOAC_GLOBAL_INFO_LEN 2
#define H2C_88E_AOAC_RSVDPAGE_LOC_LEN 7
56
/* Fw PS state for RPWM.
 * BIT[2:0] = HW state
 * BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state
 * BIT[4] = sub-state
 */
#define FW_PS_GO_ON BIT(0)
#define FW_PS_TX_NULL BIT(1)
#define FW_PS_RF_ON BIT(2)
#define FW_PS_REGISTER_ACTIVE BIT(3)

/* Individual state bits used to compose the S0-S4 states below. */
#define FW_PS_DPS BIT(0)
#define FW_PS_LCLK (FW_PS_DPS)
#define FW_PS_RF_OFF BIT(1)
#define FW_PS_ALL_ON BIT(2)
#define FW_PS_ST_ACTIVE BIT(3)
#define FW_PS_ISR_ENABLE BIT(4)
#define FW_PS_IMR_ENABLE BIT(5)


#define FW_PS_ACK BIT(6)
#define FW_PS_TOGGLE BIT(7)

 /* 88E RPWM value*/
 /* BIT[0] = 1: 32k, 0: 40M*/
#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/
#define FW_PS_CLOCK_ON 0 /*40M*/

#define FW_PS_STATE_MASK (0x0F)
#define FW_PS_STATE_HW_MASK (0x07)
/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
#define FW_PS_STATE_INT_MASK (0x3F)

/* Field extractors for an RPWM/CPWM state byte. */
#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
#define FW_PS_ISR_VAL(x) ((x) & 0x70)
#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
#define FW_PS_KEEP_IMR(x) ((x) & 0x20)

/* Composite S0-S4 power states (92C-style naming). */
#define FW_PS_STATE_S0 (FW_PS_DPS)
#define FW_PS_STATE_S1 (FW_PS_LCLK)
#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))

/* On 88E only the clock-select bit distinguishes these states. */
#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON)
#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON)
#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON)
#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF)

#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
111
/* For 88E H2C PwrMode Cmd ID 5.*/
#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
#define FW_PWR_STATE_RF_OFF 0

/* Predicates over an RPWM/CPWM state byte. */
#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON))
#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)

#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))

/* Low-power state on 88E == 32k clock selected (FW_PS_CLOCK_OFF). */
#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \
	(FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF)

/* The verbatim duplicate definitions of FW_PWR_STATE_ACTIVE and
 * FW_PWR_STATE_RF_OFF that followed here have been removed; they were
 * identical re-definitions of the two macros above.
 */
129
/* Header prepended to the firmware image (shared layout with the 92C,
 * hence the name); IS_FW_HEADER_EXIST() checks the signature field.
 * NOTE(review): parsing presumably maps this struct directly onto the
 * little-endian blob; all fields are naturally aligned so there is no
 * padding on common ABIs, but confirm against the firmware loader.
 */
struct rtl92c_firmware_header {
	u16 signature;    /* 0x88E1 for a valid 88E image */
	u8 category;
	u8 function;
	u16 version;
	u8 subversion;
	u8 rsvd1;
	u8 month;
	u8 date;
	u8 hour;
	u8 minute;
	u16 ramcodesize;
	u16 rsvd2;
	u32 svnindex;
	u32 rsvd3;
	u32 rsvd4;
	u32 rsvd5;
};
148
/* Host-to-card (H2C) command IDs understood by the 88E firmware. */
enum rtl8192c_h2c_cmd {
	H2C_88E_RSVDPAGE = 0,
	H2C_88E_JOINBSSRPT = 1,
	H2C_88E_SCAN = 2,
	H2C_88E_KEEP_ALIVE_CTRL = 3,
	H2C_88E_DISCONNECT_DECISION = 4,
#if (USE_OLD_WOWLAN_DEBUG_FW == 1)
	H2C_88E_WO_WLAN = 5,
#endif
	H2C_88E_INIT_OFFLOAD = 6,
#if (USE_OLD_WOWLAN_DEBUG_FW == 1)
	H2C_88E_REMOTE_WAKE_CTRL = 7,
#endif
	H2C_88E_AP_OFFLOAD = 8,
	H2C_88E_BCN_RSVDPAGE = 9,
	H2C_88E_PROBERSP_RSVDPAGE = 10,

	/* Power-save command group, IDs 0x20-0x24. */
	H2C_88E_SETPWRMODE = 0x20,
	H2C_88E_PS_TUNING_PARA = 0x21,
	H2C_88E_PS_TUNING_PARA2 = 0x22,
	H2C_88E_PS_LPS_PARA = 0x23,
	/* Was written as "024", an octal constant (decimal 20, i.e.
	 * 0x14) that fell outside this 0x20-based group; the ID the
	 * sequence clearly intends is 0x24.
	 */
	H2C_88E_P2P_PS_OFFLOAD = 0x24,

#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
	H2C_88E_WO_WLAN = 0x80,
	H2C_88E_REMOTE_WAKE_CTRL = 0x81,
	H2C_88E_AOAC_GLOBAL_INFO = 0x82,
	H2C_88E_AOAC_RSVDPAGE = 0x83,
#endif
	/* Not defined in new 88E H2C CMD Format */
	H2C_88E_RA_MASK,
	H2C_88E_SELECTIVE_SUSPEND_ROF_CMD,
	H2C_88E_P2P_PS_MODE,
	H2C_88E_PSD_RESULT,
	/*Not defined CTW CMD for P2P yet*/
	H2C_88E_P2P_PS_CTW_CMD,
	MAX_88E_H2CCMD
};
187
/* Number of 128-byte pages needed to hold _len bytes (rounds up). */
#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))

/* WoWLAN H2C payload: one flag bit per wake condition in byte 0,
 * then GPIO number (byte 1) and GPIO pulse duration (byte 2).
 */
#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 4, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 5, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 7, 1, __value)
#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)


/* SETPWRMODE H2C payload layout (5 bytes): mode, RLBM/smart-PS,
 * awake interval, all-queue UAPSD, power state.
 */
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value)
#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value)
#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \
	LE_BITS_TO_1BYTE(__cmd, 0, 8)

/* JOINBSSRPT and RSVDPAGE-location payload setters. */
#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
	SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
	SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
235
/* AP_OFFLOAD */
#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)

/* Keep Alive Control*/
#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)

/*REMOTE_WAKE_CTRL */
#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value)
#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value)
#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value)
#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value)
#else
#define SET_88E_H2_REM_WAKE_ENC_ALG(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
#endif

/* GTK_OFFLOAD */
/* (AOAC global info: pairwise / group cipher algorithms.) */
#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value)
#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)

/* AOAC_RSVDPAGE_LOC */
#define SET_88E_H2CCMD_AOAC_RSVD_LOC_REM_WAKE_CTRL_INFO(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value)
#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value)
#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value)
#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value)
#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \
	SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value)
288
/* Download the firmware image to the chip (implemented in fw.c). */
int rtl88e_download_fw(struct ieee80211_hw *hw,
		       bool buse_wake_on_wlan_fw);
/* Queue an H2C command; drops it if the firmware is not ready. */
void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
			 u32 cmd_len, u8 *p_cmdbuffer);
/* Pulse the 8051 reset bit to restart the firmware CPU. */
void rtl88e_firmware_selfreset(struct ieee80211_hw *hw);
/* Send the SETPWRMODE H2C describing the desired LPS mode. */
void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
/* Report association status to the firmware. */
void rtl88e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
				      u8 mstatus);
/* Enable/disable the firmware AP-offload feature. */
void rtl88e_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 enable);
/* Download the reserved-page templates and report their locations. */
void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
/* Update the P2P power-save offload state in the firmware. */
void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
new file mode 100644
index 000000000000..b68cae3024fc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -0,0 +1,2530 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../regd.h"
34#include "../cam.h"
35#include "../ps.h"
36#include "../pci.h"
37#include "reg.h"
38#include "def.h"
39#include "phy.h"
40#include "dm.h"
41#include "fw.h"
42#include "led.h"
43#include "hw.h"
44#include "pwrseqcmd.h"
45#include "pwrseq.h"
46
47#define LLT_CONFIG 5
48
49static void _rtl88ee_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
50 u8 set_bits, u8 clear_bits)
51{
52 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
53 struct rtl_priv *rtlpriv = rtl_priv(hw);
54
55 rtlpci->reg_bcn_ctrl_val |= set_bits;
56 rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
57
58 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
59}
60
61static void _rtl88ee_stop_tx_beacon(struct ieee80211_hw *hw)
62{
63 struct rtl_priv *rtlpriv = rtl_priv(hw);
64 u8 tmp1byte;
65
66 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
67 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
68 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
69 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
70 tmp1byte &= ~(BIT(0));
71 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
72}
73
74static void _rtl88ee_resume_tx_beacon(struct ieee80211_hw *hw)
75{
76 struct rtl_priv *rtlpriv = rtl_priv(hw);
77 u8 tmp1byte;
78
79 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
80 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
81 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
82 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
83 tmp1byte |= BIT(0);
84 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
85}
86
/* Enable the beacon sub-functions by clearing BIT(1) of the shadowed
 * REG_BCN_CTRL register.
 */
static void _rtl88ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
{
	_rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
}
91
/* Drain the beacon TX ring: for every queued skb, release its
 * streaming DMA mapping (recorded in the descriptor) and free it,
 * advancing the ring index past each reclaimed entry.
 * NOTE(review): no locking here -- presumably the caller serializes
 * against the TX path; verify at the call sites.
 */
static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb = __skb_dequeue(&ring->queue);

		/* Unmap the buffer before freeing the skb itself. */
		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->get_desc(
				 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);
		kfree_skb(skb);
		ring->idx = (ring->idx + 1) % ring->entries;
	}
}
110
/* Disable the beacon sub-functions by setting BIT(1) of the shadowed
 * REG_BCN_CTRL register (inverse of _rtl88ee_enable_bcn_sub_func).
 */
static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
{
	_rtl88ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
}
115
116static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
117 u8 rpwm_val, bool need_turn_off_ckk)
118{
119 struct rtl_priv *rtlpriv = rtl_priv(hw);
120 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
121 bool support_remote_wake_up;
122 u32 count = 0, isr_regaddr, content;
123 bool schedule_timer = need_turn_off_ckk;
124
125 rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
126 (u8 *)(&support_remote_wake_up));
127 if (!rtlhal->fw_ready)
128 return;
129 if (!rtlpriv->psc.fw_current_inpsmode)
130 return;
131
132 while (1) {
133 spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
134 if (rtlhal->fw_clk_change_in_progress) {
135 while (rtlhal->fw_clk_change_in_progress) {
136 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
137 udelay(100);
138 if (++count > 1000)
139 return;
140 spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
141 }
142 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
143 } else {
144 rtlhal->fw_clk_change_in_progress = false;
145 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
146 }
147 }
148
149 if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
150 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
151 (u8 *)(&rpwm_val));
152 if (FW_PS_IS_ACK(rpwm_val)) {
153 isr_regaddr = REG_HISR;
154 content = rtl_read_dword(rtlpriv, isr_regaddr);
155 while (!(content & IMR_CPWM) && (count < 500)) {
156 udelay(50);
157 count++;
158 content = rtl_read_dword(rtlpriv, isr_regaddr);
159 }
160
161 if (content & IMR_CPWM) {
162 rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
163 rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
164 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
165 "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
166 rtlhal->fw_ps_state);
167 }
168 }
169
170 spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
171 rtlhal->fw_clk_change_in_progress = false;
172 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
173 if (schedule_timer) {
174 mod_timer(&rtlpriv->works.fw_clockoff_timer,
175 jiffies + MSECS(10));
176 }
177 } else {
178 spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
179 rtlhal->fw_clk_change_in_progress = false;
180 spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
181 }
182}
183
/* Ask the firmware to gate the hardware clock off (low-power clock state).
 *
 * Bails out unless: firmware is ready, we are in firmware power-save mode,
 * software is currently allowed to change the hw clock, and RF is not off
 * (or about to go off).  If any TX ring still holds frames, the clock-off
 * is deferred ~10 ms via fw_clockoff_timer instead of done now.
 */
static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
				      u8 rpwm_val)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring;
	enum rf_pwrstate rtstate;
	bool schedule_timer = false;
	u8 queue;

	if (!rtlhal->fw_ready)
		return;
	if (!rtlpriv->psc.fw_current_inpsmode)
		return;
	if (!rtlhal->allow_sw_to_change_hwclc)
		return;
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
	if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
		return;

	/* Pending TX on any ring?  Then retry later rather than gate now. */
	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
		ring = &rtlpci->tx_ring[queue];
		if (skb_queue_len(&ring->queue)) {
			schedule_timer = true;
			break;
		}
	}

	if (schedule_timer) {
		mod_timer(&rtlpriv->works.fw_clockoff_timer,
			  jiffies + MSECS(10));
		return;
	}

	if (FW_PS_STATE(rtlhal->fw_ps_state) !=
	    FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
		spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
		if (!rtlhal->fw_clk_change_in_progress) {
			/* Claim the clock-change token, then ack HISR and
			 * hand RPWM to the firmware outside the lock. */
			rtlhal->fw_clk_change_in_progress = true;
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
			rtl_write_word(rtlpriv, REG_HISR, 0x0100);
			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
						      (u8 *)(&rpwm_val));
			spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
			rtlhal->fw_clk_change_in_progress = false;
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
		} else {
			/* Another change is in flight — retry in ~10 ms. */
			spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
			mod_timer(&rtlpriv->works.fw_clockoff_timer,
				  jiffies + MSECS(10));
		}
	}
}
239
240static void _rtl88ee_set_fw_ps_rf_on(struct ieee80211_hw *hw)
241{
242 u8 rpwm_val = 0;
243
244 rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
245 _rtl88ee_set_fw_clock_on(hw, rpwm_val, true);
246}
247
248static void _rtl88ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw)
249{
250 u8 rpwm_val = 0;
251
252 rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR_88E;
253 _rtl88ee_set_fw_clock_off(hw, rpwm_val);
254}
255
/* fw_clockoff_timer callback: try to drop the firmware clock into the
 * low-power state (the clock-off path re-arms this timer itself while TX
 * rings are still non-empty).
 * @data: the struct ieee80211_hw pointer cast through unsigned long
 *        (legacy pre-timer_setup() timer API).
 */
void rtl88ee_fw_clk_off_timer_callback(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;

	_rtl88ee_set_fw_ps_rf_off_low_power(hw);
}
262
263static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
264{
265 struct rtl_priv *rtlpriv = rtl_priv(hw);
266 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
267 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
268 bool fw_current_inps = false;
269 u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
270
271 if (ppsc->low_power_enable) {
272 rpwm_val = (FW_PS_STATE_ALL_ON_88E|FW_PS_ACK);/* RF on */
273 _rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
274 rtlhal->allow_sw_to_change_hwclc = false;
275 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
276 (u8 *)(&fw_pwrmode));
277 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
278 (u8 *)(&fw_current_inps));
279 } else {
280 rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
281 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
282 (u8 *)(&rpwm_val));
283 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
284 (u8 *)(&fw_pwrmode));
285 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
286 (u8 *)(&fw_current_inps));
287 }
288}
289
290static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
291{
292 struct rtl_priv *rtlpriv = rtl_priv(hw);
293 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
294 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
295 bool fw_current_inps = true;
296 u8 rpwm_val;
297
298 if (ppsc->low_power_enable) {
299 rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */
300 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
301 (u8 *)(&fw_current_inps));
302 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
303 (u8 *)(&ppsc->fwctrl_psmode));
304 rtlhal->allow_sw_to_change_hwclc = true;
305 _rtl88ee_set_fw_clock_off(hw, rpwm_val);
306 } else {
307 rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */
308 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
309 (u8 *)(&fw_current_inps));
310 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
311 (u8 *)(&ppsc->fwctrl_psmode));
312 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
313 (u8 *)(&rpwm_val));
314 }
315}
316
/* Read a driver/hardware variable into @val.  The caller supplies a buffer
 * of the type the selected @variable expects (u32, bool, u64, ...).
 */
void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	switch (variable) {
	case HW_VAR_RCR:
		/* Cached RCR shadow, not a live register read. */
		*((u32 *)(val)) = rtlpci->receive_config;
		break;
	case HW_VAR_RF_STATE:
		*((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
		break;
	case HW_VAR_FWLPS_RF_ON:{
		enum rf_pwrstate rfstate;
		u32 val_rcr;

		rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
					      (u8 *)(&rfstate));
		if (rfstate == ERFOFF) {
			*((bool *)(val)) = true;
		} else {
			/* True only when RCR bits 16-18 are all clear. */
			val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
			val_rcr &= 0x00070000;
			if (val_rcr)
				*((bool *)(val)) = false;
			else
				*((bool *)(val)) = true;
		}
		break;
	}
	case HW_VAR_FW_PSMODE_STATUS:
		*((bool *)(val)) = ppsc->fw_current_inpsmode;
		break;
	case HW_VAR_CORRECT_TSF:{
		/* Assemble the 64-bit TSF from two 32-bit reads (high word
		 * read first).  NOTE(review): the pointer aliasing assumes
		 * a little-endian layout of the u64 — confirm for any
		 * big-endian target. */
		u64 tsf;
		u32 *ptsf_low = (u32 *)&tsf;
		u32 *ptsf_high = ((u32 *)&tsf) + 1;

		*ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
		*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);

		*((u64 *)(val)) = tsf;
		break; }
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not process %x\n", variable);
		break;
	}
}
367
368void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
369{
370 struct rtl_priv *rtlpriv = rtl_priv(hw);
371 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
372 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
373 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
374 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
375 u8 idx;
376
377 switch (variable) {
378 case HW_VAR_ETHER_ADDR:
379 for (idx = 0; idx < ETH_ALEN; idx++)
380 rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]);
381 break;
382 case HW_VAR_BASIC_RATE:{
383 u16 rate_cfg = ((u16 *)val)[0];
384 u8 rate_index = 0;
385 rate_cfg = rate_cfg & 0x15f;
386 rate_cfg |= 0x01;
387 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
388 rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff);
389 while (rate_cfg > 0x1) {
390 rate_cfg = (rate_cfg >> 1);
391 rate_index++;
392 }
393 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index);
394 break; }
395 case HW_VAR_BSSID:
396 for (idx = 0; idx < ETH_ALEN; idx++)
397 rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]);
398 break;
399 case HW_VAR_SIFS:
400 rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
401 rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
402
403 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
404 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
405
406 if (!mac->ht_enable)
407 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e);
408 else
409 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
410 *((u16 *)val));
411 break;
412 case HW_VAR_SLOT_TIME:{
413 u8 e_aci;
414
415 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
416 "HW_VAR_SLOT_TIME %x\n", val[0]);
417
418 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
419
420 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
421 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
422 (u8 *)(&e_aci));
423 }
424 break; }
425 case HW_VAR_ACK_PREAMBLE:{
426 u8 reg_tmp;
427 u8 short_preamble = (bool) (*(u8 *)val);
428 reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
429 if (short_preamble) {
430 reg_tmp |= 0x02;
431 rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
432 } else {
433 reg_tmp |= 0xFD;
434 rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
435 }
436 break; }
437 case HW_VAR_WPA_CONFIG:
438 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val));
439 break;
440 case HW_VAR_AMPDU_MIN_SPACE:{
441 u8 min_spacing_to_set;
442 u8 sec_min_space;
443
444 min_spacing_to_set = *((u8 *)val);
445 if (min_spacing_to_set <= 7) {
446 sec_min_space = 0;
447
448 if (min_spacing_to_set < sec_min_space)
449 min_spacing_to_set = sec_min_space;
450
451 mac->min_space_cfg = ((mac->min_space_cfg &
452 0xf8) | min_spacing_to_set);
453
454 *val = min_spacing_to_set;
455
456 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
457 "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
458 mac->min_space_cfg);
459
460 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
461 mac->min_space_cfg);
462 }
463 break; }
464 case HW_VAR_SHORTGI_DENSITY:{
465 u8 density_to_set;
466
467 density_to_set = *((u8 *)val);
468 mac->min_space_cfg |= (density_to_set << 3);
469
470 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
471 "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
472 mac->min_space_cfg);
473
474 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
475 mac->min_space_cfg);
476 break; }
477 case HW_VAR_AMPDU_FACTOR:{
478 u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
479 u8 factor;
480 u8 *reg = NULL;
481 u8 id = 0;
482
483 reg = regtoset_normal;
484
485 factor = *((u8 *)val);
486 if (factor <= 3) {
487 factor = (1 << (factor + 2));
488 if (factor > 0xf)
489 factor = 0xf;
490
491 for (id = 0; id < 4; id++) {
492 if ((reg[id] & 0xf0) > (factor << 4))
493 reg[id] = (reg[id] & 0x0f) |
494 (factor << 4);
495
496 if ((reg[id] & 0x0f) > factor)
497 reg[id] = (reg[id] & 0xf0) | (factor);
498
499 rtl_write_byte(rtlpriv, (REG_AGGLEN_LMT + id),
500 reg[id]);
501 }
502
503 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
504 "Set HW_VAR_AMPDU_FACTOR: %#x\n", factor);
505 }
506 break; }
507 case HW_VAR_AC_PARAM:{
508 u8 e_aci = *((u8 *)val);
509 rtl88e_dm_init_edca_turbo(hw);
510
511 if (rtlpci->acm_method != eAcmWay2_SW)
512 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
513 (u8 *)(&e_aci));
514 break; }
515 case HW_VAR_ACM_CTRL:{
516 u8 e_aci = *((u8 *)val);
517 union aci_aifsn *p_aci_aifsn =
518 (union aci_aifsn *)(&(mac->ac[0].aifs));
519 u8 acm = p_aci_aifsn->f.acm;
520 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
521
522 acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
523
524 if (acm) {
525 switch (e_aci) {
526 case AC0_BE:
527 acm_ctrl |= ACMHW_BEQEN;
528 break;
529 case AC2_VI:
530 acm_ctrl |= ACMHW_VIQEN;
531 break;
532 case AC3_VO:
533 acm_ctrl |= ACMHW_VOQEN;
534 break;
535 default:
536 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
537 "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
538 acm);
539 break;
540 }
541 } else {
542 switch (e_aci) {
543 case AC0_BE:
544 acm_ctrl &= (~ACMHW_BEQEN);
545 break;
546 case AC2_VI:
547 acm_ctrl &= (~ACMHW_VIQEN);
548 break;
549 case AC3_VO:
550 acm_ctrl &= (~ACMHW_BEQEN);
551 break;
552 default:
553 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
554 "switch case not process\n");
555 break;
556 }
557 }
558
559 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
560 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
561 acm_ctrl);
562 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
563 break; }
564 case HW_VAR_RCR:
565 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
566 rtlpci->receive_config = ((u32 *)(val))[0];
567 break;
568 case HW_VAR_RETRY_LIMIT:{
569 u8 retry_limit = ((u8 *)(val))[0];
570
571 rtl_write_word(rtlpriv, REG_RL,
572 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
573 retry_limit << RETRY_LIMIT_LONG_SHIFT);
574 break; }
575 case HW_VAR_DUAL_TSF_RST:
576 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
577 break;
578 case HW_VAR_EFUSE_BYTES:
579 rtlefuse->efuse_usedbytes = *((u16 *)val);
580 break;
581 case HW_VAR_EFUSE_USAGE:
582 rtlefuse->efuse_usedpercentage = *((u8 *)val);
583 break;
584 case HW_VAR_IO_CMD:
585 rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
586 break;
587 case HW_VAR_SET_RPWM:{
588 u8 rpwm_val;
589
590 rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
591 udelay(1);
592
593 if (rpwm_val & BIT(7)) {
594 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
595 (*(u8 *)val));
596 } else {
597 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
598 ((*(u8 *)val) | BIT(7)));
599 }
600 break; }
601 case HW_VAR_H2C_FW_PWRMODE:
602 rtl88e_set_fw_pwrmode_cmd(hw, (*(u8 *)val));
603 break;
604 case HW_VAR_FW_PSMODE_STATUS:
605 ppsc->fw_current_inpsmode = *((bool *)val);
606 break;
607 case HW_VAR_RESUME_CLK_ON:
608 _rtl88ee_set_fw_ps_rf_on(hw);
609 break;
610 case HW_VAR_FW_LPS_ACTION:{
611 bool enter_fwlps = *((bool *)val);
612
613 if (enter_fwlps)
614 _rtl88ee_fwlps_enter(hw);
615 else
616 _rtl88ee_fwlps_leave(hw);
617 break; }
618 case HW_VAR_H2C_FW_JOINBSSRPT:{
619 u8 mstatus = (*(u8 *)val);
620 u8 tmp, tmp_reg422, uval;
621 u8 count = 0, dlbcn_count = 0;
622 bool recover = false;
623
624 if (mstatus == RT_MEDIA_CONNECT) {
625 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
626
627 tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
628 rtl_write_byte(rtlpriv, REG_CR + 1, (tmp | BIT(0)));
629
630 _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3));
631 _rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0);
632
633 tmp_reg422 = rtl_read_byte(rtlpriv,
634 REG_FWHW_TXQ_CTRL + 2);
635 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
636 tmp_reg422 & (~BIT(6)));
637 if (tmp_reg422 & BIT(6))
638 recover = true;
639
640 do {
641 uval = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
642 rtl_write_byte(rtlpriv, REG_TDECTRL+2,
643 (uval | BIT(0)));
644 _rtl88ee_return_beacon_queue_skb(hw);
645
646 rtl88e_set_fw_rsvdpagepkt(hw, 0);
647 uval = rtl_read_byte(rtlpriv, REG_TDECTRL+2);
648 count = 0;
649 while (!(uval & BIT(0)) && count < 20) {
650 count++;
651 udelay(10);
652 uval = rtl_read_byte(rtlpriv,
653 REG_TDECTRL+2);
654 }
655 dlbcn_count++;
656 } while (!(uval & BIT(0)) && dlbcn_count < 5);
657
658 if (uval & BIT(0))
659 rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
660
661 _rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
662 _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4));
663
664 if (recover) {
665 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
666 tmp_reg422);
667 }
668 rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0))));
669 }
670 rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val));
671 break; }
672 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
673 rtl88e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
674 break;
675 case HW_VAR_AID:{
676 u16 u2btmp;
677 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
678 u2btmp &= 0xC000;
679 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
680 mac->assoc_id));
681 break; }
682 case HW_VAR_CORRECT_TSF:{
683 u8 btype_ibss = ((u8 *)(val))[0];
684
685 if (btype_ibss == true)
686 _rtl88ee_stop_tx_beacon(hw);
687
688 _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3));
689
690 rtl_write_dword(rtlpriv, REG_TSFTR,
691 (u32) (mac->tsf & 0xffffffff));
692 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
693 (u32) ((mac->tsf >> 32) & 0xffffffff));
694
695 _rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
696
697 if (btype_ibss == true)
698 _rtl88ee_resume_tx_beacon(hw);
699 break; }
700 default:
701 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
702 "switch case not process %x\n", variable);
703 break;
704 }
705}
706
707static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
708{
709 struct rtl_priv *rtlpriv = rtl_priv(hw);
710 bool status = true;
711 long count = 0;
712 u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
713 _LLT_OP(_LLT_WRITE_ACCESS);
714
715 rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
716
717 do {
718 value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
719 if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
720 break;
721
722 if (count > POLLING_LLT_THRESHOLD) {
723 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
724 "Failed to polling write LLT done at address %d!\n",
725 address);
726 status = false;
727 break;
728 }
729 } while (++count);
730
731 return status;
732}
733
734static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
735{
736 struct rtl_priv *rtlpriv = rtl_priv(hw);
737 unsigned short i;
738 u8 txpktbuf_bndy;
739 u8 maxpage;
740 bool status;
741
742 maxpage = 0xAF;
743 txpktbuf_bndy = 0xAB;
744
745 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x01);
746 rtl_write_dword(rtlpriv, REG_RQPN, 0x80730d29);
747
748
749 rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x25FF0000 | txpktbuf_bndy));
750 rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
751
752 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
753 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
754
755 rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
756 rtl_write_byte(rtlpriv, REG_PBP, 0x11);
757 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
758
759 for (i = 0; i < (txpktbuf_bndy - 1); i++) {
760 status = _rtl88ee_llt_write(hw, i, i + 1);
761 if (true != status)
762 return status;
763 }
764
765 status = _rtl88ee_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
766 if (true != status)
767 return status;
768
769 for (i = txpktbuf_bndy; i < maxpage; i++) {
770 status = _rtl88ee_llt_write(hw, i, (i + 1));
771 if (true != status)
772 return status;
773 }
774
775 status = _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy);
776 if (true != status)
777 return status;
778
779 return true;
780}
781
782static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw)
783{
784 struct rtl_priv *rtlpriv = rtl_priv(hw);
785 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
786 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
787 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
788
789 if (rtlpriv->rtlhal.up_first_time)
790 return;
791
792 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
793 rtl88ee_sw_led_on(hw, pLed0);
794 else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
795 rtl88ee_sw_led_on(hw, pLed0);
796 else
797 rtl88ee_sw_led_off(hw, pLed0);
798}
799
/* Power on and configure the MAC: run the HW power-on sequence, enable the
 * MAC function blocks, build the LLT (first bring-up only), program DMA
 * ring base addresses, and set RX/TX configuration.  Returns false if the
 * power-on sequence or LLT init fails.
 */
static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 bytetmp;
	u16 wordtmp;

	/*Disable XTAL OUTPUT for power saving. YJ, add, 111206. */
	bytetmp = rtl_read_byte(rtlpriv, REG_XCK_OUT_CTRL) & (~BIT(0));
	rtl_write_byte(rtlpriv, REG_XCK_OUT_CTRL, bytetmp);
	/*Auto Power Down to CHIP-off State*/
	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
	rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);

	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
	/* HW Power on sequence */
	if (!rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
					PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
					Rtl8188E_NIC_ENABLE_FLOW)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "init MAC Fail as rtl88_hal_pwrseqcmdparsing\n");
		return false;
	}

	bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
	rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);

	bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2);
	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+2, bytetmp|BIT(2));

	bytetmp = rtl_read_byte(rtlpriv, REG_WATCH_DOG+1);
	rtl_write_byte(rtlpriv, REG_WATCH_DOG+1, bytetmp|BIT(7));

	bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL_EXT+1);
	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL_EXT+1, bytetmp|BIT(1));

	/* Enable the TX report function and set its timing. */
	bytetmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
	rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, bytetmp|BIT(1)|BIT(0));
	rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL+1, 2);
	rtl_write_word(rtlpriv, REG_TX_RPT_TIME, 0xcdf0);

	/*Add for wake up online*/
	bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);

	rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp|BIT(3));
	bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG+1);
	rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+1, (bytetmp & (~BIT(4))));
	rtl_write_byte(rtlpriv, 0x367, 0x80);

	/* Enable the MAC function blocks (CR register). */
	rtl_write_word(rtlpriv, REG_CR, 0x2ff);
	rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
	rtl_write_byte(rtlpriv, REG_CR+2, 0x00);

	/* LLT needs building only when the MAC was not already running. */
	if (!rtlhal->mac_func_enable) {
		if (_rtl88ee_llt_table_init(hw) == false) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "LLT table init fail\n");
			return false;
		}
	}

	/* Clear any stale interrupt status. */
	rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
	rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);

	wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
	wordtmp &= 0xf;
	wordtmp |= 0xE771;
	rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);

	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
	rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xffff);
	rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);

	/* Program the low 32 bits of every ring's DMA base address. */
	rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_MGQ_DESA,
			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VOQ_DESA,
			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VIQ_DESA,
			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BEQ_DESA,
			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BKQ_DESA,
			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_HQ_DESA,
			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_RX_DESA,
			(u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
			DMA_BIT_MASK(32));

	/* if we want to support 64 bit DMA, we should set it here,
	 * but at the moment we do not support 64 bit DMA
	 */

	rtl_write_dword(rtlpriv, REG_INT_MIG, 0);

	rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0);/*Enable RX DMA */

	if (rtlhal->earlymode_enable) {/*Early mode enable*/
		bytetmp = rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL);
		bytetmp |= 0x1f;
		rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, bytetmp);
		rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL+3, 0x81);
	}
	_rtl88ee_gen_refresh_led_state(hw);
	return true;
}
914
915static void _rtl88ee_hw_configure(struct ieee80211_hw *hw)
916{
917 struct rtl_priv *rtlpriv = rtl_priv(hw);
918 u32 reg_prsr;
919
920 reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
921
922 rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
923 rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
924}
925
926static void _rtl88ee_enable_aspm_back_door(struct ieee80211_hw *hw)
927{
928 struct rtl_priv *rtlpriv = rtl_priv(hw);
929 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
930 u8 tmp1byte = 0;
931 u32 tmp4Byte = 0, count;
932
933 rtl_write_word(rtlpriv, 0x354, 0x8104);
934 rtl_write_word(rtlpriv, 0x358, 0x24);
935
936 rtl_write_word(rtlpriv, 0x350, 0x70c);
937 rtl_write_byte(rtlpriv, 0x352, 0x2);
938 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
939 count = 0;
940 while (tmp1byte && count < 20) {
941 udelay(10);
942 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
943 count++;
944 }
945 if (0 == tmp1byte) {
946 tmp4Byte = rtl_read_dword(rtlpriv, 0x34c);
947 rtl_write_dword(rtlpriv, 0x348, tmp4Byte|BIT(31));
948 rtl_write_word(rtlpriv, 0x350, 0xf70c);
949 rtl_write_byte(rtlpriv, 0x352, 0x1);
950 }
951
952 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
953 count = 0;
954 while (tmp1byte && count < 20) {
955 udelay(10);
956 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
957 count++;
958 }
959
960 rtl_write_word(rtlpriv, 0x350, 0x718);
961 rtl_write_byte(rtlpriv, 0x352, 0x2);
962 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
963 count = 0;
964 while (tmp1byte && count < 20) {
965 udelay(10);
966 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
967 count++;
968 }
969 if (ppsc->support_backdoor || (0 == tmp1byte)) {
970 tmp4Byte = rtl_read_dword(rtlpriv, 0x34c);
971 rtl_write_dword(rtlpriv, 0x348, tmp4Byte|BIT(11)|BIT(12));
972 rtl_write_word(rtlpriv, 0x350, 0xf718);
973 rtl_write_byte(rtlpriv, 0x352, 0x1);
974 }
975 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
976 count = 0;
977 while (tmp1byte && count < 20) {
978 udelay(10);
979 tmp1byte = rtl_read_byte(rtlpriv, 0x352);
980 count++;
981 }
982}
983
984void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
985{
986 struct rtl_priv *rtlpriv = rtl_priv(hw);
987 u8 sec_reg_value;
988
989 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
990 "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
991 rtlpriv->sec.pairwise_enc_algorithm,
992 rtlpriv->sec.group_enc_algorithm);
993
994 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
995 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
996 "not open hw encryption\n");
997 return;
998 }
999 sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
1000
1001 if (rtlpriv->sec.use_defaultkey) {
1002 sec_reg_value |= SCR_TXUSEDK;
1003 sec_reg_value |= SCR_RXUSEDK;
1004 }
1005
1006 sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
1007
1008 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
1009
1010 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1011 "The SECR-value %x\n", sec_reg_value);
1012 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
1013}
1014
/* Full hardware bring-up: MAC power-on, firmware download, MAC/BB/RF
 * configuration, security setup, antenna-diversity selection, and
 * calibration.  Returns 0 on success, 1 on MAC-init or FW-download
 * failure.
 */
int rtl88ee_hw_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	bool rtstatus = true;
	int err = 0;
	u8 tmp_u1b, u1byte;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
	rtlpriv->rtlhal.being_init_adapter = true;
	rtlpriv->intf_ops->disable_aspm(hw);

	/* Detect whether the MAC is already up (e.g. after a warm restart):
	 * clock bit set and CR holding a sane (non-0, non-0xEA) value. */
	tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
	u1byte = rtl_read_byte(rtlpriv, REG_CR);
	if ((tmp_u1b & BIT(3)) && (u1byte != 0 && u1byte != 0xEA)) {
		rtlhal->mac_func_enable = true;
	} else {
		rtlhal->mac_func_enable = false;
		rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_88E;
	}

	rtstatus = _rtl88ee_init_mac(hw);
	if (rtstatus != true) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
		err = 1;
		return err;
	}

	err = rtl88e_download_fw(hw, false);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Failed to download FW. Init HW without FW now..\n");
		err = 1;
		rtlhal->fw_ready = false;
		return err;
	} else {
		rtlhal->fw_ready = true;
	}
	/*fw related variable initialize */
	rtlhal->last_hmeboxnum = 0;
	rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_88E;
	rtlhal->fw_clk_change_in_progress = false;
	rtlhal->allow_sw_to_change_hwclc = false;
	ppsc->fw_current_inpsmode = false;

	rtl88e_phy_mac_config(hw);
	/* because last function modifies RCR, we update
	 * rcr var here, or TP will be unstable for receive_config
	 * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx
	 * RCR_APP_ICV will cause mac80211 disassoc for cisco 1252
	 */
	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);

	/* Baseband config, then force CCK and OFDM blocks on. */
	rtl88e_phy_bb_config(hw);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);

	rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
	rtl88e_phy_rf_config(hw);

	/* Cache channel/bandwidth RF register with channel bits cleared. */
	rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
						 RF_CHNLBW, RFREG_OFFSET_MASK);
	rtlphy->rfreg_chnlval[0] = rtlphy->rfreg_chnlval[0] & 0xfff00fff;

	_rtl88ee_hw_configure(hw);
	rtl_cam_reset_all_entry(hw);
	rtl88ee_enable_hw_security_config(hw);

	rtlhal->mac_func_enable = true;
	ppsc->rfpwr_state = ERFON;

	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
	_rtl88ee_enable_aspm_back_door(hw);
	rtlpriv->intf_ops->enable_aspm(hw);

	if (ppsc->rfpwr_state == ERFON) {
		/* Pick the idle RX antenna per diversity type/OEM. */
		if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
		    ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
		     (rtlhal->oem_id == RT_CID_819x_HP))) {
			rtl88e_phy_set_rfpath_switch(hw, true);
			rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
		} else {
			rtl88e_phy_set_rfpath_switch(hw, false);
			rtlpriv->dm.fat_table.rx_idle_ant = AUX_ANT;
		}
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "rx idle ant %s\n",
			 (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
			 ("MAIN_ANT") : ("AUX_ANT"));

		/* IQ calibration: full recovery path after first run. */
		if (rtlphy->iqk_initialized) {
			rtl88e_phy_iq_calibrate(hw, true);
		} else {
			rtl88e_phy_iq_calibrate(hw, false);
			rtlphy->iqk_initialized = true;
		}
		rtl88e_dm_check_txpower_tracking(hw);
		rtl88e_phy_lc_calibrate(hw);
	}

	/* Efuse 0x1FA: PA bias (bit 0) and low-voltage (bit 4) tweaks. */
	tmp_u1b = efuse_read_1byte(hw, 0x1FA);
	if (!(tmp_u1b & BIT(0))) {
		rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
	}

	if (!(tmp_u1b & BIT(4))) {
		tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
		tmp_u1b &= 0x0F;
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
		udelay(10);
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
	}
	rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
	rtl88e_dm_init(hw);
	rtlpriv->rtlhal.being_init_adapter = false;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
		 err);
	return 0;
}
1142
1143static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
1144{
1145 struct rtl_priv *rtlpriv = rtl_priv(hw);
1146 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1147 enum version_8188e version = VERSION_UNKNOWN;
1148 u32 value32;
1149
1150 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
1151 if (value32 & TRP_VAUX_EN) {
1152 version = (enum version_8188e) VERSION_TEST_CHIP_88E;
1153 } else {
1154 version = NORMAL_CHIP;
1155 version = version | ((value32 & TYPE_ID) ? RF_TYPE_2T2R : 0);
1156 version = version | ((value32 & VENDOR_ID) ?
1157 CHIP_VENDOR_UMC : 0);
1158 }
1159
1160 rtlphy->rf_type = RF_1T1R;
1161 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1162 "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
1163 "RF_2T2R" : "RF_1T1R");
1164
1165 return version;
1166}
1167
1168static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
1169 enum nl80211_iftype type)
1170{
1171 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
1173 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1174 bt_msr &= 0xfc;
1175
1176 if (type == NL80211_IFTYPE_UNSPECIFIED ||
1177 type == NL80211_IFTYPE_STATION) {
1178 _rtl88ee_stop_tx_beacon(hw);
1179 _rtl88ee_enable_bcn_sub_func(hw);
1180 } else if (type == NL80211_IFTYPE_ADHOC ||
1181 type == NL80211_IFTYPE_AP ||
1182 type == NL80211_IFTYPE_MESH_POINT) {
1183 _rtl88ee_resume_tx_beacon(hw);
1184 _rtl88ee_disable_bcn_sub_func(hw);
1185 } else {
1186 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1187 "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
1188 type);
1189 }
1190
1191 switch (type) {
1192 case NL80211_IFTYPE_UNSPECIFIED:
1193 bt_msr |= MSR_NOLINK;
1194 ledaction = LED_CTL_LINK;
1195 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1196 "Set Network type to NO LINK!\n");
1197 break;
1198 case NL80211_IFTYPE_ADHOC:
1199 bt_msr |= MSR_ADHOC;
1200 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1201 "Set Network type to Ad Hoc!\n");
1202 break;
1203 case NL80211_IFTYPE_STATION:
1204 bt_msr |= MSR_INFRA;
1205 ledaction = LED_CTL_LINK;
1206 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1207 "Set Network type to STA!\n");
1208 break;
1209 case NL80211_IFTYPE_AP:
1210 bt_msr |= MSR_AP;
1211 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1212 "Set Network type to AP!\n");
1213 break;
1214 case NL80211_IFTYPE_MESH_POINT:
1215 bt_msr |= MSR_ADHOC;
1216 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1217 "Set Network type to Mesh Point!\n");
1218 break;
1219 default:
1220 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1221 "Network type %d not support!\n", type);
1222 return 1;
1223 }
1224
1225 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1226 rtlpriv->cfg->ops->led_control(hw, ledaction);
1227 if ((bt_msr & 0xfc) == MSR_AP)
1228 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
1229 else
1230 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
1231 return 0;
1232}
1233
1234void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1235{
1236 struct rtl_priv *rtlpriv = rtl_priv(hw);
1237 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1238 u32 reg_rcr = rtlpci->receive_config;
1239
1240 if (rtlpriv->psc.rfpwr_state != ERFON)
1241 return;
1242
1243 if (check_bssid == true) {
1244 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1245 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1246 (u8 *)(&reg_rcr));
1247 _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4));
1248 } else if (check_bssid == false) {
1249 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1250 _rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0);
1251 rtlpriv->cfg->ops->set_hw_reg(hw,
1252 HW_VAR_RCR, (u8 *)(&reg_rcr));
1253 }
1254}
1255
1256int rtl88ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1257{
1258 struct rtl_priv *rtlpriv = rtl_priv(hw);
1259
1260 if (_rtl88ee_set_media_status(hw, type))
1261 return -EOPNOTSUPP;
1262
1263 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1264 if (type != NL80211_IFTYPE_AP &&
1265 type != NL80211_IFTYPE_MESH_POINT)
1266 rtl88ee_set_check_bssid(hw, true);
1267 } else {
1268 rtl88ee_set_check_bssid(hw, false);
1269 }
1270
1271 return 0;
1272}
1273
1274/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
1275void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
1276{
1277 struct rtl_priv *rtlpriv = rtl_priv(hw);
1278 rtl88e_dm_init_edca_turbo(hw);
1279 switch (aci) {
1280 case AC1_BK:
1281 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
1282 break;
1283 case AC0_BE:
1284 break;
1285 case AC2_VI:
1286 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
1287 break;
1288 case AC3_VO:
1289 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
1290 break;
1291 default:
1292 RT_ASSERT(false, "invalid aci: %d !\n", aci);
1293 break;
1294 }
1295}
1296
1297void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
1298{
1299 struct rtl_priv *rtlpriv = rtl_priv(hw);
1300 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1301
1302 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
1303 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
1304 rtlpci->irq_enabled = true;
1305 /* there are some C2H CMDs have been sent before system interrupt
1306 * is enabled, e.g., C2H, CPWM.
1307 * So we need to clear all C2H events that FW has notified, otherwise
1308 * FW won't schedule any commands anymore.
1309 */
1310 rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
1311 /*enable system interrupt*/
1312 rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
1313}
1314
/* Mask every hardware interrupt source and wait for any in-flight
 * interrupt handler to finish before returning.
 */
void rtl88ee_disable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
	rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
	rtlpci->irq_enabled = false;
	/* ensure no ISR is still running now that the sources are masked */
	synchronize_irq(rtlpci->pdev->irq);
}
1325
1326static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
1327{
1328 struct rtl_priv *rtlpriv = rtl_priv(hw);
1329 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1330 u8 u1b_tmp;
1331 u32 count = 0;
1332 rtlhal->mac_func_enable = false;
1333 rtlpriv->intf_ops->enable_aspm(hw);
1334
1335 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
1336 u1b_tmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
1337 rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, u1b_tmp & (~BIT(1)));
1338
1339 u1b_tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
1340 while (!(u1b_tmp & BIT(1)) && (count++ < 100)) {
1341 udelay(10);
1342 u1b_tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
1343 count++;
1344 }
1345 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
1346
1347 rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
1348 PWR_INTF_PCI_MSK,
1349 Rtl8188E_NIC_LPS_ENTER_FLOW);
1350
1351 rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
1352
1353 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
1354 rtl88e_firmware_selfreset(hw);
1355
1356 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
1357 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
1358 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
1359
1360 u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
1361 rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
1362
1363 rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
1364 PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
1365
1366 u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
1367 rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
1368 u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
1369 rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3)));
1370
1371 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
1372
1373 u1b_tmp = rtl_read_byte(rtlpriv, GPIO_IN);
1374 rtl_write_byte(rtlpriv, GPIO_OUT, u1b_tmp);
1375 rtl_write_byte(rtlpriv, GPIO_IO_SEL, 0x7F);
1376
1377 u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
1378 rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL, (u1b_tmp << 4) | u1b_tmp);
1379 u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL+1);
1380 rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL+1, u1b_tmp | 0x0F);
1381
1382 rtl_write_dword(rtlpriv, REG_GPIO_IO_SEL_2+2, 0x00080808);
1383}
1384
1385void rtl88ee_card_disable(struct ieee80211_hw *hw)
1386{
1387 struct rtl_priv *rtlpriv = rtl_priv(hw);
1388 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1389 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1390 enum nl80211_iftype opmode;
1391
1392 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
1393
1394 mac->link_state = MAC80211_NOLINK;
1395 opmode = NL80211_IFTYPE_UNSPECIFIED;
1396
1397 _rtl88ee_set_media_status(hw, opmode);
1398
1399 if (rtlpriv->rtlhal.driver_is_goingto_unload ||
1400 ppsc->rfoff_reason > RF_CHANGE_BY_PS)
1401 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1402
1403 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1404 _rtl88ee_poweroff_adapter(hw);
1405
1406 /* after power off we should do iqk again */
1407 rtlpriv->phy.iqk_initialized = false;
1408}
1409
1410void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
1411 u32 *p_inta, u32 *p_intb)
1412{
1413 struct rtl_priv *rtlpriv = rtl_priv(hw);
1414 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1415
1416 *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
1417 rtl_write_dword(rtlpriv, ISR, *p_inta);
1418
1419 *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
1420 rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
1421}
1422
/* Program beacon timing hardware: ATIM window, beacon interval,
 * beacon TX config and RX TSF offsets.
 */
void rtl88ee_set_beacon_related_registers(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u16 bcn_interval, atim_window;

	bcn_interval = mac->beacon_interval;
	atim_window = 2; /*FIX MERGE */
	rtl88ee_disable_interrupt(hw);
	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
	rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
	rtl_write_byte(rtlpriv, 0x606, 0x30);
	/* BIT(3) is cached in reg_bcn_ctrl_val so later rewrites of
	 * REG_BCN_CTRL preserve it.  NOTE(review): the meaning of BIT(3)
	 * is not visible here -- confirm against the 8188E register map. */
	rtlpci->reg_bcn_ctrl_val |= BIT(3);
	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
	/* NOTE(review): interrupts are disabled above but the matching
	 * enable call is commented out -- verify the caller re-enables
	 * interrupts after this function. */
	/*rtl88ee_enable_interrupt(hw);*/
}
1443
1444void rtl88ee_set_beacon_interval(struct ieee80211_hw *hw)
1445{
1446 struct rtl_priv *rtlpriv = rtl_priv(hw);
1447 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1448 u16 bcn_interval = mac->beacon_interval;
1449
1450 RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
1451 "beacon_interval:%d\n", bcn_interval);
1452 /*rtl88ee_disable_interrupt(hw);*/
1453 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1454 /*rtl88ee_enable_interrupt(hw);*/
1455}
1456
1457void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
1458 u32 add_msr, u32 rm_msr)
1459{
1460 struct rtl_priv *rtlpriv = rtl_priv(hw);
1461 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1462
1463 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
1464 "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
1465
1466 rtl88ee_disable_interrupt(hw);
1467 if (add_msr)
1468 rtlpci->irq_mask[0] |= add_msr;
1469 if (rm_msr)
1470 rtlpci->irq_mask[0] &= (~rm_msr);
1471 rtl88ee_enable_interrupt(hw);
1472}
1473
/* Map a 2.4 GHz channel number to its calibration group index:
 * channels are binned three per group, with channel 14 in its own
 * group (index 5).
 */
static inline u8 get_chnl_group(u8 chnl)
{
	return (chnl == 14) ? 5 : chnl / 3;
}
1484
1485static void set_diff0_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path,
1486 u32 i, u32 eadr)
1487{
1488 pwr2g->bw40_diff[path][i] = 0;
1489 if (hwinfo[eadr] == 0xFF) {
1490 pwr2g->bw20_diff[path][i] = 0x02;
1491 } else {
1492 pwr2g->bw20_diff[path][i] = (hwinfo[eadr]&0xf0)>>4;
1493 /*bit sign number to 8 bit sign number*/
1494 if (pwr2g->bw20_diff[path][i] & BIT(3))
1495 pwr2g->bw20_diff[path][i] |= 0xF0;
1496 }
1497
1498 if (hwinfo[eadr] == 0xFF) {
1499 pwr2g->ofdm_diff[path][i] = 0x04;
1500 } else {
1501 pwr2g->ofdm_diff[path][i] = (hwinfo[eadr] & 0x0f);
1502 /*bit sign number to 8 bit sign number*/
1503 if (pwr2g->ofdm_diff[path][i] & BIT(3))
1504 pwr2g->ofdm_diff[path][i] |= 0xF0;
1505 }
1506 pwr2g->cck_diff[path][i] = 0;
1507}
1508
1509static void set_diff0_5g(struct txpower_info_5g *pwr5g, u8 *hwinfo, u32 path,
1510 u32 i, u32 eadr)
1511{
1512 pwr5g->bw40_diff[path][i] = 0;
1513 if (hwinfo[eadr] == 0xFF) {
1514 pwr5g->bw20_diff[path][i] = 0;
1515 } else {
1516 pwr5g->bw20_diff[path][i] = (hwinfo[eadr]&0xf0)>>4;
1517 /*bit sign number to 8 bit sign number*/
1518 if (pwr5g->bw20_diff[path][i] & BIT(3))
1519 pwr5g->bw20_diff[path][i] |= 0xF0;
1520 }
1521
1522 if (hwinfo[eadr] == 0xFF) {
1523 pwr5g->ofdm_diff[path][i] = 0x04;
1524 } else {
1525 pwr5g->ofdm_diff[path][i] = (hwinfo[eadr] & 0x0f);
1526 /*bit sign number to 8 bit sign number*/
1527 if (pwr5g->ofdm_diff[path][i] & BIT(3))
1528 pwr5g->ofdm_diff[path][i] |= 0xF0;
1529 }
1530}
1531
1532static void set_diff1_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path,
1533 u32 i, u32 eadr)
1534{
1535 if (hwinfo[eadr] == 0xFF) {
1536 pwr2g->bw40_diff[path][i] = 0xFE;
1537 } else {
1538 pwr2g->bw40_diff[path][i] = (hwinfo[eadr]&0xf0)>>4;
1539 if (pwr2g->bw40_diff[path][i] & BIT(3))
1540 pwr2g->bw40_diff[path][i] |= 0xF0;
1541 }
1542
1543 if (hwinfo[eadr] == 0xFF) {
1544 pwr2g->bw20_diff[path][i] = 0xFE;
1545 } else {
1546 pwr2g->bw20_diff[path][i] = (hwinfo[eadr]&0x0f);
1547 if (pwr2g->bw20_diff[path][i] & BIT(3))
1548 pwr2g->bw20_diff[path][i] |= 0xF0;
1549 }
1550}
1551
1552static void set_diff1_5g(struct txpower_info_5g *pwr5g, u8 *hwinfo, u32 path,
1553 u32 i, u32 eadr)
1554{
1555 if (hwinfo[eadr] == 0xFF) {
1556 pwr5g->bw40_diff[path][i] = 0xFE;
1557 } else {
1558 pwr5g->bw40_diff[path][i] = (hwinfo[eadr]&0xf0)>>4;
1559 if (pwr5g->bw40_diff[path][i] & BIT(3))
1560 pwr5g->bw40_diff[path][i] |= 0xF0;
1561 }
1562
1563 if (hwinfo[eadr] == 0xFF) {
1564 pwr5g->bw20_diff[path][i] = 0xFE;
1565 } else {
1566 pwr5g->bw20_diff[path][i] = (hwinfo[eadr] & 0x0f);
1567 if (pwr5g->bw20_diff[path][i] & BIT(3))
1568 pwr5g->bw20_diff[path][i] |= 0xF0;
1569 }
1570}
1571
1572static void set_diff2_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path,
1573 u32 i, u32 eadr)
1574{
1575 if (hwinfo[eadr] == 0xFF) {
1576 pwr2g->ofdm_diff[path][i] = 0xFE;
1577 } else {
1578 pwr2g->ofdm_diff[path][i] = (hwinfo[eadr]&0xf0)>>4;
1579 if (pwr2g->ofdm_diff[path][i] & BIT(3))
1580 pwr2g->ofdm_diff[path][i] |= 0xF0;
1581 }
1582
1583 if (hwinfo[eadr] == 0xFF) {
1584 pwr2g->cck_diff[path][i] = 0xFE;
1585 } else {
1586 pwr2g->cck_diff[path][i] = (hwinfo[eadr]&0x0f);
1587 if (pwr2g->cck_diff[path][i] & BIT(3))
1588 pwr2g->cck_diff[path][i] |= 0xF0;
1589 }
1590}
1591
/* Parse the per-path TX power calibration tables out of the EFUSE
 * shadow map starting at EEPROM_TX_PWR_INX.  'eadr' is a running
 * cursor into hwinfo[]; the increments below must stay in exactly
 * this order because they define the EFUSE layout.  On autoload
 * failure (or an unprogrammed first byte) hard-coded defaults are
 * used instead.
 */
static void _rtl8188e_read_power_value_fromprom(struct ieee80211_hw *hw,
						struct txpower_info_2g *pwr2g,
						struct txpower_info_5g *pwr5g,
						bool autoload_fail,
						u8 *hwinfo)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 path, eadr = EEPROM_TX_PWR_INX, i;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "hal_ReadPowerValueFromPROM88E(): PROMContent[0x%x]= 0x%x\n",
		 (eadr+1), hwinfo[eadr+1]);
	/* An unprogrammed (0xFF) second byte means the whole power block
	 * is absent; treat it like an autoload failure. */
	if (0xFF == hwinfo[eadr+1])
		autoload_fail = true;

	if (autoload_fail) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "auto load fail : Use Default value!\n");
		for (path = 0; path < MAX_RF_PATH; path++) {
			/* 2.4G default value */
			for (i = 0; i < MAX_CHNL_GROUP_24G; i++) {
				pwr2g->index_cck_base[path][i] = 0x2D;
				pwr2g->index_bw40_base[path][i] = 0x2D;
			}
			for (i = 0; i < MAX_TX_COUNT; i++) {
				if (i == 0) {
					pwr2g->bw20_diff[path][0] = 0x02;
					pwr2g->ofdm_diff[path][0] = 0x04;
				} else {
					pwr2g->bw20_diff[path][i] = 0xFE;
					pwr2g->bw40_diff[path][i] = 0xFE;
					pwr2g->cck_diff[path][i] = 0xFE;
					pwr2g->ofdm_diff[path][i] = 0xFE;
				}
			}
		}
		return;
	}

	for (path = 0; path < MAX_RF_PATH; path++) {
		/*2.4G default value*/
		/* Base indexes: one byte per channel group, 0xFF falls
		 * back to the 0x2D default. */
		for (i = 0; i < MAX_CHNL_GROUP_24G; i++) {
			pwr2g->index_cck_base[path][i] = hwinfo[eadr++];
			if (pwr2g->index_cck_base[path][i] == 0xFF)
				pwr2g->index_cck_base[path][i] = 0x2D;
		}
		for (i = 0; i < MAX_CHNL_GROUP_24G; i++) {
			pwr2g->index_bw40_base[path][i] = hwinfo[eadr++];
			if (pwr2g->index_bw40_base[path][i] == 0xFF)
				pwr2g->index_bw40_base[path][i] = 0x2D;
		}
		/* TX count 0 consumes one diff byte; every other count
		 * consumes two (set_diff1 then set_diff2). */
		for (i = 0; i < MAX_TX_COUNT; i++) {
			if (i == 0) {
				set_diff0_2g(pwr2g, hwinfo, path, i, eadr);
				eadr++;
			} else {
				set_diff1_2g(pwr2g, hwinfo, path, i, eadr);
				eadr++;

				set_diff2_2g(pwr2g, hwinfo, path, i, eadr);
				eadr++;
			}
		}

		/*5G default value*/
		for (i = 0; i < MAX_CHNL_GROUP_5G; i++) {
			pwr5g->index_bw40_base[path][i] = hwinfo[eadr++];
			if (pwr5g->index_bw40_base[path][i] == 0xFF)
				pwr5g->index_bw40_base[path][i] = 0xFE;
		}

		for (i = 0; i < MAX_TX_COUNT; i++) {
			if (i == 0) {
				set_diff0_5g(pwr5g, hwinfo, path, i, eadr);
				eadr++;
			} else {
				set_diff1_5g(pwr5g, hwinfo, path, i, eadr);
				eadr++;
			}
		}

		/* 5G OFDM diffs for TX counts 1..3 are packed into the
		 * next two bytes (two nibbles, then one nibble). */
		if (hwinfo[eadr] == 0xFF) {
			pwr5g->ofdm_diff[path][1] = 0xFE;
			pwr5g->ofdm_diff[path][2] = 0xFE;
		} else {
			pwr5g->ofdm_diff[path][1] = (hwinfo[eadr] & 0xf0) >> 4;
			pwr5g->ofdm_diff[path][2] = (hwinfo[eadr] & 0x0f);
		}
		eadr++;

		if (hwinfo[eadr] == 0xFF)
			pwr5g->ofdm_diff[path][3] = 0xFE;
		else
			pwr5g->ofdm_diff[path][3] = (hwinfo[eadr]&0x0f);
		eadr++;

		/* Sign-extend (or mark undefined) the diffs decoded above. */
		for (i = 1; i < MAX_TX_COUNT; i++) {
			if (pwr5g->ofdm_diff[path][i] == 0xFF)
				pwr5g->ofdm_diff[path][i] = 0xFE;
			else if (pwr5g->ofdm_diff[path][i] & BIT(3))
				pwr5g->ofdm_diff[path][i] |= 0xF0;
		}
	}
}
1696
/* Translate the raw EFUSE power tables into the per-channel arrays the
 * driver actually uses (txpwrlevel_*), and pick up the thermal meter
 * and regulatory bytes.  Falls back to defaults on autoload failure.
 */
static void _rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
						 bool autoload_fail,
						 u8 *hwinfo)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct txpower_info_2g pwrinfo24g;
	struct txpower_info_5g pwrinfo5g;
	u8 rf_path, index;
	u8 i;
	int jj = EEPROM_RF_BOARD_OPTION_88E;
	int kk = EEPROM_THERMAL_METER_88E;

	_rtl8188e_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g,
					    autoload_fail, hwinfo);

	/* Expand the 6 channel-group entries into 14 per-channel values.
	 * NOTE(review): rf_path iterates 0..1 here even though the source
	 * tables are sized MAX_RF_PATH -- confirm that is intentional for
	 * this 1T1R part. */
	for (rf_path = 0; rf_path < 2; rf_path++) {
		for (i = 0; i < 14; i++) {
			index = get_chnl_group(i+1);

			rtlefuse->txpwrlevel_cck[rf_path][i] =
			    pwrinfo24g.index_cck_base[rf_path][index];
			/* Channel 14 (group 5) has no BW40 entry; reuse
			 * group 4's value. */
			if (i == 13)
				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
				    pwrinfo24g.index_bw40_base[rf_path][4];
			else
				rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
				    pwrinfo24g.index_bw40_base[rf_path][index];
			rtlefuse->txpwr_ht20diff[rf_path][i] =
			    pwrinfo24g.bw20_diff[rf_path][0];
			rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
			    pwrinfo24g.ofdm_diff[rf_path][0];
		}

		for (i = 0; i < 14; i++) {
			RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
				"RF(%d)-Ch(%d) [CCK / HT40_1S ] = "
				"[0x%x / 0x%x ]\n", rf_path, i,
				rtlefuse->txpwrlevel_cck[rf_path][i],
				rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
		}
	}

	/* Thermal meter byte, with a default when absent or invalid. */
	if (!autoload_fail)
		rtlefuse->eeprom_thermalmeter = hwinfo[kk];
	else
		rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;

	if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
		rtlefuse->apk_thermalmeterignore = true;
		rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
	}

	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
	RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
		"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);

	/* Regulatory setting lives in bits 0..2 of the board-option byte;
	 * 0xFF (unprogrammed) means "no restriction" (0). */
	if (!autoload_fail) {
		rtlefuse->eeprom_regulatory = hwinfo[jj] & 0x07;/*bit0~2*/
		if (hwinfo[jj] == 0xFF)
			rtlefuse->eeprom_regulatory = 0;
	} else {
		rtlefuse->eeprom_regulatory = 0;
	}
	RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
		"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
}
1764
1765static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
1766{
1767 struct rtl_priv *rtlpriv = rtl_priv(hw);
1768 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1769 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1770 struct rtl_pci_priv *rppriv = rtl_pcipriv(hw);
1771 u16 i, usvalue;
1772 u8 hwinfo[HWSET_MAX_SIZE];
1773 u16 eeprom_id;
1774 int jj = EEPROM_RF_BOARD_OPTION_88E;
1775 int kk = EEPROM_RF_FEATURE_OPTION_88E;
1776
1777 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
1778 rtl_efuse_shadow_map_update(hw);
1779
1780 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1781 HWSET_MAX_SIZE);
1782 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1783 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1784 "RTL819X Not boot from eeprom, check it !!");
1785 }
1786
1787 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
1788 hwinfo, HWSET_MAX_SIZE);
1789
1790 eeprom_id = *((u16 *)&hwinfo[0]);
1791 if (eeprom_id != RTL8188E_EEPROM_ID) {
1792 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1793 "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
1794 rtlefuse->autoload_failflag = true;
1795 } else {
1796 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
1797 rtlefuse->autoload_failflag = false;
1798 }
1799
1800 if (rtlefuse->autoload_failflag == true)
1801 return;
1802 /*VID DID SVID SDID*/
1803 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
1804 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
1805 rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
1806 rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
1807 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1808 "EEPROMId = 0x%4x\n", eeprom_id);
1809 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1810 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
1811 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1812 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
1813 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1814 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
1815 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1816 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
1817 /*customer ID*/
1818 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
1819 if (rtlefuse->eeprom_oemid == 0xFF)
1820 rtlefuse->eeprom_oemid = 0;
1821
1822 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1823 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
1824 /*EEPROM version*/
1825 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1826 /*mac address*/
1827 for (i = 0; i < 6; i += 2) {
1828 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1829 *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
1830 }
1831
1832 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1833 "dev_addr: %pM\n", rtlefuse->dev_addr);
1834 /*channel plan */
1835 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1836 /* set channel paln to world wide 13 */
1837 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1838 /*tx power*/
1839 _rtl88ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
1840 hwinfo);
1841 rtlefuse->txpwr_fromeprom = true;
1842
1843 rtl8188ee_read_bt_coexist_info_from_hwpg(hw,
1844 rtlefuse->autoload_failflag,
1845 hwinfo);
1846 /*board type*/
1847 rtlefuse->board_type = (((*(u8 *)&hwinfo[jj]) & 0xE0) >> 5);
1848 /*Wake on wlan*/
1849 rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6);
1850 /*parse xtal*/
1851 rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_88E];
1852 if (hwinfo[EEPROM_XTAL_88E])
1853 rtlefuse->crystalcap = 0x20;
1854 /*antenna diversity*/
1855 rtlefuse->antenna_div_cfg = (hwinfo[jj] & 0x18) >> 3;
1856 if (hwinfo[jj] == 0xFF)
1857 rtlefuse->antenna_div_cfg = 0;
1858 if (rppriv->bt_coexist.eeprom_bt_coexist != 0 &&
1859 rppriv->bt_coexist.eeprom_bt_ant_num == ANT_X1)
1860 rtlefuse->antenna_div_cfg = 0;
1861
1862 rtlefuse->antenna_div_type = hwinfo[EEPROM_RF_ANTENNA_OPT_88E];
1863 if (rtlefuse->antenna_div_type == 0xFF)
1864 rtlefuse->antenna_div_type = 0x01;
1865 if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV ||
1866 rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
1867 rtlefuse->antenna_div_cfg = 1;
1868
1869 if (rtlhal->oem_id == RT_CID_DEFAULT) {
1870 switch (rtlefuse->eeprom_oemid) {
1871 case EEPROM_CID_DEFAULT:
1872 if (rtlefuse->eeprom_did == 0x8179) {
1873 if (rtlefuse->eeprom_svid == 0x1025) {
1874 rtlhal->oem_id = RT_CID_819x_Acer;
1875 } else if ((rtlefuse->eeprom_svid == 0x10EC &&
1876 rtlefuse->eeprom_smid == 0x0179) ||
1877 (rtlefuse->eeprom_svid == 0x17AA &&
1878 rtlefuse->eeprom_smid == 0x0179)) {
1879 rtlhal->oem_id = RT_CID_819x_Lenovo;
1880 } else if (rtlefuse->eeprom_svid == 0x103c &&
1881 rtlefuse->eeprom_smid == 0x197d) {
1882 rtlhal->oem_id = RT_CID_819x_HP;
1883 } else {
1884 rtlhal->oem_id = RT_CID_DEFAULT;
1885 }
1886 } else {
1887 rtlhal->oem_id = RT_CID_DEFAULT;
1888 }
1889 break;
1890 case EEPROM_CID_TOSHIBA:
1891 rtlhal->oem_id = RT_CID_TOSHIBA;
1892 break;
1893 case EEPROM_CID_QMI:
1894 rtlhal->oem_id = RT_CID_819x_QMI;
1895 break;
1896 case EEPROM_CID_WHQL:
1897 default:
1898 rtlhal->oem_id = RT_CID_DEFAULT;
1899 break;
1900 }
1901 }
1902}
1903
1904static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
1905{
1906 struct rtl_priv *rtlpriv = rtl_priv(hw);
1907 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1908 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1909
1910 pcipriv->ledctl.led_opendrain = true;
1911
1912 switch (rtlhal->oem_id) {
1913 case RT_CID_819x_HP:
1914 pcipriv->ledctl.led_opendrain = true;
1915 break;
1916 case RT_CID_819x_Lenovo:
1917 case RT_CID_DEFAULT:
1918 case RT_CID_TOSHIBA:
1919 case RT_CID_CCX:
1920 case RT_CID_819x_Acer:
1921 case RT_CID_WHQL:
1922 default:
1923 break;
1924 }
1925 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1926 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
1927}
1928
1929void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
1930{
1931 struct rtl_priv *rtlpriv = rtl_priv(hw);
1932 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1933 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1934 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1935 u8 tmp_u1b;
1936
1937 rtlhal->version = _rtl88ee_read_chip_version(hw);
1938 if (get_rf_type(rtlphy) == RF_1T1R) {
1939 rtlpriv->dm.rfpath_rxenable[0] = true;
1940 } else {
1941 rtlpriv->dm.rfpath_rxenable[0] = true;
1942 rtlpriv->dm.rfpath_rxenable[1] = true;
1943 }
1944 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
1945 rtlhal->version);
1946 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
1947 if (tmp_u1b & BIT(4)) {
1948 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
1949 rtlefuse->epromtype = EEPROM_93C46;
1950 } else {
1951 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
1952 rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
1953 }
1954 if (tmp_u1b & BIT(5)) {
1955 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
1956 rtlefuse->autoload_failflag = false;
1957 _rtl88ee_read_adapter_info(hw);
1958 } else {
1959 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
1960 }
1961 _rtl88ee_hal_customized_behavior(hw);
1962}
1963
/* Legacy (non-RA-mask) rate adaptation: build one global rate bitmap
 * from the station's supported rates and the wireless mode, prune it
 * per mode/BT-coexist constraints, and write it to REG_ARFR0.
 */
static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
					  struct ieee80211_sta *sta)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rppriv = rtl_pcipriv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 ratr_value;
	u8 ratr_index = 0;
	u8 nmode = mac->ht_enable;
	/* mimo_ps is fixed at SMPS_OFF here; the STATIC branch below is
	 * therefore never taken in the current code. */
	u8 mimo_ps = IEEE80211_SMPS_OFF;
	u16 shortgi_rate;
	u32 tmp_ratr_value;
	u8 ctx40 = mac->bw_40;
	u16 cap = sta->ht_cap.cap;
	u8 short40 = (cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
	u8 short20 = (cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
	enum wireless_mode wirelessmode = mac->mode;

	/* Legacy rates: 5G rates occupy bits 4+, 2.4G start at bit 0. */
	if (rtlhal->current_bandtype == BAND_ON_5G)
		ratr_value = sta->supp_rates[1] << 4;
	else
		ratr_value = sta->supp_rates[0];
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		ratr_value = 0xfff;
	/* HT MCS 0-15 occupy bits 12..27. */
	ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
		       sta->ht_cap.mcs.rx_mask[0] << 12);
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		if (ratr_value & 0x0000000c)
			ratr_value &= 0x0000000d;
		else
			ratr_value &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_value &= 0x00000FF5;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		nmode = 1;
		if (mimo_ps == IEEE80211_SMPS_STATIC) {
			ratr_value &= 0x0007F005;
		} else {
			u32 ratr_mask;

			if (get_rf_type(rtlphy) == RF_1T2R ||
			    get_rf_type(rtlphy) == RF_1T1R)
				ratr_mask = 0x000ff005;
			else
				ratr_mask = 0x0f0ff005;

			ratr_value &= ratr_mask;
		}
		break;
	default:
		if (rtlphy->rf_type == RF_1T2R)
			ratr_value &= 0x000ff0ff;
		else
			ratr_value &= 0x0f0ff0ff;

		break;
	}

	/* Under heavy BT traffic on a shared antenna, restrict the
	 * usable rate set further. */
	if ((rppriv->bt_coexist.bt_coexistence) &&
	    (rppriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
	    (rppriv->bt_coexist.bt_cur_state) &&
	    (rppriv->bt_coexist.bt_ant_isolation) &&
	    ((rppriv->bt_coexist.bt_service == BT_SCO) ||
	     (rppriv->bt_coexist.bt_service == BT_BUSY)))
		ratr_value &= 0x0fffcfc0;
	else
		ratr_value &= 0x0FFFFFFF;

	if (nmode && ((ctx40 && short40) ||
		      (!ctx40 && short20))) {
		ratr_value |= 0x10000000;
		tmp_ratr_value = (ratr_value >> 12);

		/* Find the highest enabled MCS index. */
		for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
			if ((1 << shortgi_rate) & tmp_ratr_value)
				break;
		}

		/* NOTE(review): shortgi_rate is computed here but never
		 * written to any register or variable afterwards; either
		 * dead code or a missing register write -- compare with
		 * other rtlwifi chip variants. */
		shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
		    (shortgi_rate << 4) | (shortgi_rate);
	}

	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);

	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
2057
/* Per-station RA mask: build a rate bitmap from the station's
 * capabilities, prune it by wireless mode / RSSI level / bandwidth,
 * then hand it to firmware via an H2C RA_MASK command.
 */
static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
					 struct ieee80211_sta *sta, u8 rssi)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_sta_info *sta_entry = NULL;
	u32 ratr_bitmap;
	u8 ratr_index;
	u16 cap = sta->ht_cap.cap;
	u8 ctx40 = (cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
	u8 short40 = (cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
	u8 short20 = (cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
	enum wireless_mode wirelessmode = 0;
	bool shortgi = false;
	u8 rate_mask[5];
	u8 macid = 0;
	/* Fixed at SMPS_OFF; the STATIC branch below is currently dead. */
	u8 mimo_ps = IEEE80211_SMPS_OFF;

	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
	wirelessmode = sta_entry->wireless_mode;
	if (mac->opmode == NL80211_IFTYPE_STATION ||
	    mac->opmode == NL80211_IFTYPE_MESH_POINT)
		ctx40 = mac->bw_40;
	else if (mac->opmode == NL80211_IFTYPE_AP ||
		 mac->opmode == NL80211_IFTYPE_ADHOC)
		macid = sta->aid + 1;

	/* Legacy rates: 5G rates occupy bits 4+, 2.4G start at bit 0;
	 * HT MCS 0-15 occupy bits 12..27. */
	if (rtlhal->current_bandtype == BAND_ON_5G)
		ratr_bitmap = sta->supp_rates[1] << 4;
	else
		ratr_bitmap = sta->supp_rates[0];
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		ratr_bitmap = 0xfff;
	ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
			sta->ht_cap.mcs.rx_mask[0] << 12);
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		ratr_index = RATR_INX_WIRELESS_B;
		if (ratr_bitmap & 0x0000000c)
			ratr_bitmap &= 0x0000000d;
		else
			ratr_bitmap &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_index = RATR_INX_WIRELESS_GB;

		/* rssi here is a coarse level (1 = worst), used to trim
		 * low rates at good signal and high rates at bad signal. */
		if (rssi == 1)
			ratr_bitmap &= 0x00000f00;
		else if (rssi == 2)
			ratr_bitmap &= 0x00000ff0;
		else
			ratr_bitmap &= 0x00000ff5;
		break;
	case WIRELESS_MODE_A:
		ratr_index = RATR_INX_WIRELESS_A;
		ratr_bitmap &= 0x00000ff0;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		ratr_index = RATR_INX_WIRELESS_NGB;

		if (mimo_ps == IEEE80211_SMPS_STATIC) {
			if (rssi == 1)
				ratr_bitmap &= 0x00070000;
			else if (rssi == 2)
				ratr_bitmap &= 0x0007f000;
			else
				ratr_bitmap &= 0x0007f005;
		} else {
			/* Single-stream RF keeps MCS0-7 only; dual-stream
			 * keeps MCS0-15.  BW40 variants allow extra rates. */
			if (rtlphy->rf_type == RF_1T2R ||
			    rtlphy->rf_type == RF_1T1R) {
				if (ctx40) {
					if (rssi == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff015;
				} else {
					if (rssi == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff005;
				}
			} else {
				if (ctx40) {
					if (rssi == 1)
						ratr_bitmap &= 0x0f8f0000;
					else if (rssi == 2)
						ratr_bitmap &= 0x0f8ff000;
					else
						ratr_bitmap &= 0x0f8ff015;
				} else {
					if (rssi == 1)
						ratr_bitmap &= 0x0f8f0000;
					else if (rssi == 2)
						ratr_bitmap &= 0x0f8ff000;
					else
						ratr_bitmap &= 0x0f8ff005;
				}
			}
		}

		if ((ctx40 && short40) || (!ctx40 && short20)) {
			if (macid == 0)
				shortgi = true;
			else if (macid == 1)
				shortgi = false;
		}
		break;
	default:
		ratr_index = RATR_INX_WIRELESS_NGB;

		if (rtlphy->rf_type == RF_1T2R)
			ratr_bitmap &= 0x000ff0ff;
		else
			ratr_bitmap &= 0x0f0ff0ff;
		break;
	}
	sta_entry->ratr_index = ratr_index;

	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "ratr_bitmap :%x\n", ratr_bitmap);
	/* NOTE(review): this stores the 28-bit bitmap plus the 4-bit
	 * ratr_index into rate_mask[0..3] via a u32 store -- it assumes
	 * little-endian byte order and type-puns through the array.
	 * Confirm the H2C command expects that byte layout. */
	*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
				(ratr_index << 28);
	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
		 ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
		 rate_mask[2], rate_mask[3], rate_mask[4]);
	rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask);
	_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
2195
2196void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
2197 struct ieee80211_sta *sta, u8 rssi)
2198{
2199 struct rtl_priv *rtlpriv = rtl_priv(hw);
2200
2201 if (rtlpriv->dm.useramask)
2202 rtl88ee_update_hal_rate_mask(hw, sta, rssi);
2203 else
2204 rtl88ee_update_hal_rate_table(hw, sta);
2205}
2206
2207void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
2208{
2209 struct rtl_priv *rtlpriv = rtl_priv(hw);
2210 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2211 u16 sifs_timer;
2212
2213 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2214 (u8 *)&mac->slot_time);
2215 if (!mac->ht_enable)
2216 sifs_timer = 0x0a0a;
2217 else
2218 sifs_timer = 0x0e0e;
2219 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2220}
2221
2222bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
2223{
2224 struct rtl_priv *rtlpriv = rtl_priv(hw);
2225 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2226 enum rf_pwrstate state_toset;
2227 u32 u4tmp;
2228 bool actuallyset = false;
2229
2230 if (rtlpriv->rtlhal.being_init_adapter)
2231 return false;
2232
2233 if (ppsc->swrf_processing)
2234 return false;
2235
2236 spin_lock(&rtlpriv->locks.rf_ps_lock);
2237 if (ppsc->rfchange_inprogress) {
2238 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2239 return false;
2240 } else {
2241 ppsc->rfchange_inprogress = true;
2242 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2243 }
2244
2245 u4tmp = rtl_read_dword(rtlpriv, REG_GPIO_OUTPUT);
2246 state_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF;
2247
2248
2249 if ((ppsc->hwradiooff == true) && (state_toset == ERFON)) {
2250 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2251 "GPIOChangeRF - HW Radio ON, RF ON\n");
2252
2253 state_toset = ERFON;
2254 ppsc->hwradiooff = false;
2255 actuallyset = true;
2256 } else if ((ppsc->hwradiooff == false) && (state_toset == ERFOFF)) {
2257 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2258 "GPIOChangeRF - HW Radio OFF, RF OFF\n");
2259
2260 state_toset = ERFOFF;
2261 ppsc->hwradiooff = true;
2262 actuallyset = true;
2263 }
2264
2265 if (actuallyset) {
2266 spin_lock(&rtlpriv->locks.rf_ps_lock);
2267 ppsc->rfchange_inprogress = false;
2268 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2269 } else {
2270 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
2271 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2272
2273 spin_lock(&rtlpriv->locks.rf_ps_lock);
2274 ppsc->rfchange_inprogress = false;
2275 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2276 }
2277
2278 *valid = 1;
2279 return !ppsc->hwradiooff;
2280}
2281
/* Program one security key into the hardware CAM.
 *
 * @macaddr:     CAM address field (peer MAC, broadcast address, or a
 *               fixed default-key address chosen by the caller)
 * @key:         index into rtlpriv->sec.key_buf for pairwise keys
 * @id:          CAM entry index; also the key_buf index for group keys
 * @enc_algo:    CAM cipher code (CAM_WEP40/CAM_WEP104/CAM_TKIP/CAM_AES)
 * @is_pairwise: selects pairwise vs. group-key programming
 *
 * Note the asymmetry: pairwise entries read key material from
 * key_buf[key], group entries from key_buf[id].
 */
static void add_one_key(struct ieee80211_hw *hw, u8 *macaddr,
			struct rtl_mac *mac, u32 key, u32 id,
			u8 enc_algo, bool is_pairwise)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));

	RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "add one entry\n");
	if (is_pairwise) {
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set Pairwise key\n");

		rtl_cam_add_one_entry(hw, macaddr, key, id, enc_algo,
				      CAM_CONFIG_NO_USEDK,
				      rtlpriv->sec.key_buf[key]);
	} else {
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set group key\n");

		/* In IBSS the group key also serves as our own pairwise
		 * entry, programmed under the device's own address. */
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			rtl_cam_add_one_entry(hw, rtlefuse->dev_addr,
					      PAIRWISE_KEYIDX,
					      CAM_PAIRWISE_KEY_POSITION,
					      enc_algo,
					      CAM_CONFIG_NO_USEDK,
					      rtlpriv->sec.key_buf[id]);
		}

		rtl_cam_add_one_entry(hw, macaddr, key, id, enc_algo,
				      CAM_CONFIG_NO_USEDK,
				      rtlpriv->sec.key_buf[id]);
	}
}
2313
/* Install, delete or flush hardware CAM security keys.
 *
 * @key:       key index from the caller; re-used as the CAM entry id
 *             for WEP/default and group keys, and forced to
 *             PAIRWISE_KEYIDX for pairwise keys
 * @mac_ad:    peer MAC address the key belongs to
 * @is_group:  true for a group (broadcast/multicast) key
 * @enc_algo:  cipher in rtlwifi encoding; translated below to the CAM
 *             algorithm codes
 * @is_wepkey: true for WEP default keys
 * @clear_all: when true, wipe every CAM entry and cached key and do
 *             nothing else
 */
void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key,
		     u8 *mac_ad, bool is_group, u8 enc_algo,
		     bool is_wepkey, bool clear_all)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u8 *macaddr = mac_ad;
	u32 id = 0;
	bool is_pairwise = false;

	/* Fixed CAM addresses used for WEP/default keys 0..3. */
	static u8 cam_const_addr[4][6] = {
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	};
	/* Broadcast address used as the CAM address for group keys. */
	static u8 cam_const_broad[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (clear_all) {
		u8 idx = 0;
		u8 cam_offset = 0;
		u8 clear_number = 5;

		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");

		for (idx = 0; idx < clear_number; idx++) {
			rtl_cam_mark_invalid(hw, cam_offset + idx);
			rtl_cam_empty_entry(hw, cam_offset + idx);

			/* idx < 5 always holds (clear_number is 5); the
			 * guard mirrors the other rtlwifi chips. */
			if (idx < 5) {
				memset(rtlpriv->sec.key_buf[idx], 0,
				       MAX_KEY_LEN);
				rtlpriv->sec.key_len[idx] = 0;
			}
		}

	} else {
		/* Translate the rtlwifi cipher id to the CAM code. */
		switch (enc_algo) {
		case WEP40_ENCRYPTION:
			enc_algo = CAM_WEP40;
			break;
		case WEP104_ENCRYPTION:
			enc_algo = CAM_WEP104;
			break;
		case TKIP_ENCRYPTION:
			enc_algo = CAM_TKIP;
			break;
		case AESCCMP_ENCRYPTION:
			enc_algo = CAM_AES;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			enc_algo = CAM_TKIP;
			break;
		}

		/* Choose CAM address and entry id:
		 * - WEP/default keys use the fixed per-index address;
		 * - group keys use the broadcast address;
		 * - pairwise keys use the peer MAC, with a free CAM
		 *   entry in AP/mesh mode or the fixed pairwise slot
		 *   otherwise. */
		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
			macaddr = cam_const_addr[key];
			id = key;
		} else {
			if (is_group) {
				macaddr = cam_const_broad;
				id = key;
			} else {
				if (mac->opmode == NL80211_IFTYPE_AP ||
				    mac->opmode == NL80211_IFTYPE_MESH_POINT) {
					id = rtl_cam_get_free_entry(hw, mac_ad);
					if (id >= TOTAL_CAM_ENTRY) {
						RT_TRACE(rtlpriv, COMP_SEC,
							 DBG_EMERG,
							 "Can not find free hw security cam entry\n");
						return;
					}
				} else {
					id = CAM_PAIRWISE_KEY_POSITION;
				}

				key = PAIRWISE_KEYIDX;
				is_pairwise = true;
			}
		}

		/* A zero-length cached key means "delete". */
		if (rtlpriv->sec.key_len[key] == 0) {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 "delete one entry, id is %d\n", id);
			if (mac->opmode == NL80211_IFTYPE_AP ||
			    mac->opmode == NL80211_IFTYPE_MESH_POINT)
				rtl_cam_del_entry(hw, mac_ad);
			rtl_cam_delete_one_entry(hw, mac_ad, id);
		} else {
			add_one_key(hw, macaddr, mac, key, id, enc_algo,
				    is_pairwise);
		}
	}
}
2412
2413static void rtl8188ee_bt_var_init(struct ieee80211_hw *hw)
2414{
2415 struct rtl_pci_priv *rppriv = rtl_pcipriv(hw);
2416 struct bt_coexist_info coexist = rppriv->bt_coexist;
2417
2418 coexist.bt_coexistence = rppriv->bt_coexist.eeprom_bt_coexist;
2419 coexist.bt_ant_num = coexist.eeprom_bt_ant_num;
2420 coexist.bt_coexist_type = coexist.eeprom_bt_type;
2421
2422 if (coexist.reg_bt_iso == 2)
2423 coexist.bt_ant_isolation = coexist.eeprom_bt_ant_isol;
2424 else
2425 coexist.bt_ant_isolation = coexist.reg_bt_iso;
2426
2427 coexist.bt_radio_shared_type = coexist.eeprom_bt_radio_shared;
2428
2429 if (coexist.bt_coexistence) {
2430 if (coexist.reg_bt_sco == 1)
2431 coexist.bt_service = BT_OTHER_ACTION;
2432 else if (coexist.reg_bt_sco == 2)
2433 coexist.bt_service = BT_SCO;
2434 else if (coexist.reg_bt_sco == 4)
2435 coexist.bt_service = BT_BUSY;
2436 else if (coexist.reg_bt_sco == 5)
2437 coexist.bt_service = BT_OTHERBUSY;
2438 else
2439 coexist.bt_service = BT_IDLE;
2440
2441 coexist.bt_edca_ul = 0;
2442 coexist.bt_edca_dl = 0;
2443 coexist.bt_rssi_state = 0xff;
2444 }
2445}
2446
/* Initialize BT-coexistence state after the efuse/hwpg content has
 * been read.  @auto_load_fail and @hwinfo are accepted for interface
 * symmetry with other rtlwifi chips but are not consulted here: the
 * 8188EE only re-derives its settings from values already cached in
 * the bt_coexist structure. */
void rtl8188ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
					      bool auto_load_fail, u8 *hwinfo)
{
	rtl8188ee_bt_var_init(hw);
}
2452
/* Set the driver-forced BT-coexistence register defaults, consumed
 * later by rtl8188ee_bt_var_init(). */
void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *rppriv = rtl_pcipriv(hw);

	/* 0:Low, 1:High, 2:From Efuse. */
	rppriv->bt_coexist.reg_bt_iso = 2;
	/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
	rppriv->bt_coexist.reg_bt_sco = 3;
	/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
	/* NOTE(review): this store also targets reg_bt_sco and silently
	 * overwrites the 3 assigned just above, while the comment
	 * describes an A-MPDU control field.  Looks like a copy-paste
	 * slip -- confirm which field was intended before changing. */
	rppriv->bt_coexist.reg_bt_sco = 0;
}
2464
2465void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw)
2466{
2467 struct rtl_priv *rtlpriv = rtl_priv(hw);
2468 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2469 struct rtl_pci_priv *rppriv = rtl_pcipriv(hw);
2470 struct bt_coexist_info coexist = rppriv->bt_coexist;
2471 u8 u1_tmp;
2472
2473 if (coexist.bt_coexistence &&
2474 ((coexist.bt_coexist_type == BT_CSR_BC4) ||
2475 coexist.bt_coexist_type == BT_CSR_BC8)) {
2476 if (coexist.bt_ant_isolation)
2477 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
2478
2479 u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
2480 BIT_OFFSET_LEN_MASK_32(0, 1);
2481 u1_tmp = u1_tmp | ((coexist.bt_ant_isolation == 1) ?
2482 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
2483 ((coexist.bt_service == BT_SCO) ?
2484 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
2485 rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
2486
2487 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
2488 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+8, 0xffbd0040);
2489 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+0xc, 0x40000010);
2490
2491 /* Config to 1T1R. */
2492 if (rtlphy->rf_type == RF_1T1R) {
2493 u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
2494 u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
2495 rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
2496
2497 u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
2498 u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
2499 rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
2500 }
2501 }
2502}
2503
/* PCI suspend hook -- intentionally empty: no chip-specific suspend
 * handling is implemented for the 8188EE. */
void rtl88ee_suspend(struct ieee80211_hw *hw)
{
}
2507
/* PCI resume hook -- intentionally empty: no chip-specific resume
 * handling is implemented for the 8188EE. */
void rtl88ee_resume(struct ieee80211_hw *hw)
{
}
2511
/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
2513void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
2514 bool allow_all_da, bool write_into_reg)
2515{
2516 struct rtl_priv *rtlpriv = rtl_priv(hw);
2517 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2518
2519 if (allow_all_da) /* Set BIT0 */
2520 rtlpci->receive_config |= RCR_AAP;
2521 else /* Clear BIT0 */
2522 rtlpci->receive_config &= ~RCR_AAP;
2523
2524 if (write_into_reg)
2525 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2526
2527 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2528 "receive_config = 0x%08X, write_into_reg =%d\n",
2529 rtlpci->receive_config, write_into_reg);
2530}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
new file mode 100644
index 000000000000..b4460a41bd01
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
@@ -0,0 +1,68 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_HW_H__
31#define __RTL92CE_HW_H__
32
33void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
34void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
35void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
36 u32 *p_inta, u32 *p_intb);
37int rtl88ee_hw_init(struct ieee80211_hw *hw);
38void rtl88ee_card_disable(struct ieee80211_hw *hw);
39void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
40void rtl88ee_disable_interrupt(struct ieee80211_hw *hw);
41int rtl88ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
42void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
43void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci);
44void rtl88ee_set_beacon_related_registers(struct ieee80211_hw *hw);
45void rtl88ee_set_beacon_interval(struct ieee80211_hw *hw);
46void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
47 u32 add_msr, u32 rm_msr);
48void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
49void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
50 struct ieee80211_sta *sta, u8 rssi_level);
51void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw);
52bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
53void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw);
54void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
55 u8 *p_macaddr, bool is_group, u8 enc_algo,
56 bool is_wepkey, bool clear_all);
57
58void rtl8188ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
59 bool autoload_fail, u8 *hwinfo);
60void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
61void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
62void rtl88ee_suspend(struct ieee80211_hw *hw);
63void rtl88ee_resume(struct ieee80211_hw *hw);
64void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
65 bool allow_all_da, bool write_into_reg);
66void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
67
68#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/rtlwifi/rtl8188ee/led.c
new file mode 100644
index 000000000000..c81a9cb6894c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/led.c
@@ -0,0 +1,157 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "reg.h"
33#include "led.h"
34
35static void rtl88ee_init_led(struct ieee80211_hw *hw,
36 struct rtl_led *pled, enum rtl_led_pin ledpin)
37{
38 pled->hw = hw;
39 pled->ledpin = ledpin;
40 pled->ledon = false;
41}
42
43void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
44{
45 u8 ledcfg;
46 struct rtl_priv *rtlpriv = rtl_priv(hw);
47
48 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
49 "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
50
51 switch (pled->ledpin) {
52 case LED_PIN_GPIO0:
53 break;
54 case LED_PIN_LED0:
55 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
56 rtl_write_byte(rtlpriv, REG_LEDCFG2,
57 (ledcfg & 0xf0) | BIT(5) | BIT(6));
58 break;
59 case LED_PIN_LED1:
60 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
61 rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
62 break;
63 default:
64 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
65 "switch case not processed\n");
66 break;
67 }
68 pled->ledon = true;
69}
70
71void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
75 u8 ledcfg;
76 u8 val;
77
78 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
79 "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
80
81 switch (pled->ledpin) {
82 case LED_PIN_GPIO0:
83 break;
84 case LED_PIN_LED0:
85 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
86 ledcfg &= 0xf0;
87 val = ledcfg | BIT(3) | BIT(5) | BIT(6);
88 if (pcipriv->ledctl.led_opendrain == true) {
89 rtl_write_byte(rtlpriv, REG_LEDCFG2, val);
90 ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
91 val = ledcfg & 0xFE;
92 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, val);
93 } else {
94 rtl_write_byte(rtlpriv, REG_LEDCFG2, val);
95 }
96 break;
97 case LED_PIN_LED1:
98 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
99 ledcfg &= 0x10;
100 rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3)));
101 break;
102 default:
103 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
104 "switch case not processed\n");
105 break;
106 }
107 pled->ledon = false;
108}
109
110void rtl88ee_init_sw_leds(struct ieee80211_hw *hw)
111{
112 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
113
114 rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
115 rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
116}
117
118static void rtl88ee_sw_led_control(struct ieee80211_hw *hw,
119 enum led_ctl_mode ledaction)
120{
121 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
122 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
123
124 switch (ledaction) {
125 case LED_CTL_POWER_ON:
126 case LED_CTL_LINK:
127 case LED_CTL_NO_LINK:
128 rtl88ee_sw_led_on(hw, pLed0);
129 break;
130 case LED_CTL_POWER_OFF:
131 rtl88ee_sw_led_off(hw, pLed0);
132 break;
133 default:
134 break;
135 }
136}
137
138void rtl88ee_led_control(struct ieee80211_hw *hw,
139 enum led_ctl_mode ledaction)
140{
141 struct rtl_priv *rtlpriv = rtl_priv(hw);
142 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
143
144 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
145 (ledaction == LED_CTL_TX ||
146 ledaction == LED_CTL_RX ||
147 ledaction == LED_CTL_SITE_SURVEY ||
148 ledaction == LED_CTL_LINK ||
149 ledaction == LED_CTL_NO_LINK ||
150 ledaction == LED_CTL_START_TO_LINK ||
151 ledaction == LED_CTL_POWER_ON)) {
152 return;
153 }
154 RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n",
155 ledaction);
156 rtl88ee_sw_led_control(hw, ledaction);
157}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/rtlwifi/rtl8188ee/led.h
new file mode 100644
index 000000000000..4073f6f847b2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/led.h
@@ -0,0 +1,38 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
/* Fix: the include guard was copy-pasted from the rtl92ce driver
 * (__RTL92CE_LED_H__) and could collide with the real rtl92ce led.h
 * guard; use this chip's own name. */
#ifndef __RTL88EE_LED_H__
#define __RTL88EE_LED_H__

void rtl88ee_init_sw_leds(struct ieee80211_hw *hw);
void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl88ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
new file mode 100644
index 000000000000..e655c0473225
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -0,0 +1,2202 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "table.h"
39
40static void set_baseband_phy_config(struct ieee80211_hw *hw);
41static void set_baseband_agc_config(struct ieee80211_hw *hw);
42static void store_pwrindex_offset(struct ieee80211_hw *hw,
43 u32 regaddr, u32 bitmask,
44 u32 data);
45static bool check_cond(struct ieee80211_hw *hw, const u32 condition);
46
/* Read one RF register over the HSSI/LSSI serial interface.
 *
 * @rfpath: radio path to address (RF90_PATH_A/B)
 * @offset: RF register address; only the low 8 bits are used
 *
 * Returns the register contents, or 0xFFFFFFFF when I/O is blocked.
 *
 * The sequence -- program the read address into HSSI parameter-2 with
 * the read-edge bit toggled, delay, then fetch the result from either
 * the PI or SI read-back register depending on parameter-1 bit 8 --
 * follows the vendor-prescribed timing (the mdelay()s matter), so the
 * statement order must not be rearranged.
 */
static u32 rf_serial_read(struct ieee80211_hw *hw,
			  enum radio_path rfpath, u32 offset)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct bb_reg_def *phreg = &rtlphy->phyreg_def[rfpath];
	u32 newoffset;
	u32 tmplong, tmplong2;
	u8 rfpi_enable = 0;
	u32 ret;
	/* jj/kk are just local aliases for paths A and B. */
	int jj = RF90_PATH_A;
	int kk = RF90_PATH_B;

	offset &= 0xff;
	newoffset = offset;
	if (RT_CANNOT_IO(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
		return 0xFFFFFFFF;
	}
	/* Path A's parameter-2 register doubles as the clock source;
	 * other paths also need their own parameter-2 value. */
	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
	if (rfpath == jj)
		tmplong2 = tmplong;
	else
		tmplong2 = rtl_get_bbreg(hw, phreg->rfhssi_para2, MASKDWORD);
	/* Insert the register address (bits 30..23) and raise the
	 * read-edge bit. */
	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
		   (newoffset << 23) | BLSSIREADEDGE;
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong & (~BLSSIREADEDGE));
	mdelay(1);
	rtl_set_bbreg(hw, phreg->rfhssi_para2, MASKDWORD, tmplong2);
	mdelay(2);
	/* Parameter-1 bit 8 selects PI vs. SI read-back. */
	if (rfpath == jj)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
						 BIT(8));
	else if (rfpath == kk)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
						 BIT(8));
	if (rfpi_enable)
		ret = rtl_get_bbreg(hw, phreg->rf_rbpi, BLSSIREADBACKDATA);
	else
		ret = rtl_get_bbreg(hw, phreg->rf_rb, BLSSIREADBACKDATA);
	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]= 0x%x\n",
		 rfpath, phreg->rf_rb, ret);
	return ret;
}
92
93static void rf_serial_write(struct ieee80211_hw *hw,
94 enum radio_path rfpath, u32 offset,
95 u32 data)
96{
97 u32 data_and_addr;
98 u32 newoffset;
99 struct rtl_priv *rtlpriv = rtl_priv(hw);
100 struct rtl_phy *rtlphy = &(rtlpriv->phy);
101 struct bb_reg_def *phreg = &rtlphy->phyreg_def[rfpath];
102
103 if (RT_CANNOT_IO(hw)) {
104 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
105 return;
106 }
107 offset &= 0xff;
108 newoffset = offset;
109 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
110 rtl_set_bbreg(hw, phreg->rf3wire_offset, MASKDWORD, data_and_addr);
111 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]= 0x%x\n",
112 rfpath, phreg->rf3wire_offset, data_and_addr);
113}
114
115static u32 cal_bit_shift(u32 bitmask)
116{
117 u32 i;
118
119 for (i = 0; i <= 31; i++) {
120 if (((bitmask >> i) & 0x1) == 1)
121 break;
122 }
123 return i;
124}
125
126static bool config_bb_with_header(struct ieee80211_hw *hw,
127 u8 configtype)
128{
129 if (configtype == BASEBAND_CONFIG_PHY_REG)
130 set_baseband_phy_config(hw);
131 else if (configtype == BASEBAND_CONFIG_AGC_TAB)
132 set_baseband_agc_config(hw);
133 return true;
134}
135
/* Walk the power-grade (PG) PHY register table and store each
 * power-index offset.
 *
 * The table is a sequence of (addr, bitmask, data) triples with two
 * kinds of special entries:
 *  - addr values 0xf9..0xfe encode delays instead of registers;
 *  - addr values >= 0xcdcdcdcd open a conditional section: when the
 *    condition fails, everything up to the 0xDEAD sentinel in the
 *    second column is skipped.
 *
 * Only BASEBAND_CONFIG_PHY_REG is meaningful here; any other
 * @configtype is a no-op.  Always returns true.
 */
static bool config_bb_with_pgheader(struct ieee80211_hw *hw,
				    u8 configtype)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int i;
	u32 *table_pg;
	u16 tbl_page_len;
	u32 v1 = 0, v2 = 0;

	tbl_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN;
	table_pg = RTL8188EEPHY_REG_ARRAY_PG;

	if (configtype == BASEBAND_CONFIG_PHY_REG) {
		for (i = 0; i < tbl_page_len; i = i + 3) {
			v1 = table_pg[i];
			v2 = table_pg[i + 1];

			if (v1 < 0xcdcdcdcd) {
				/* 0xf9..0xfe in the address column are
				 * delay opcodes, not registers. */
				if (table_pg[i] == 0xfe)
					mdelay(50);
				else if (table_pg[i] == 0xfd)
					mdelay(5);
				else if (table_pg[i] == 0xfc)
					mdelay(1);
				else if (table_pg[i] == 0xfb)
					udelay(50);
				else if (table_pg[i] == 0xfa)
					udelay(5);
				else if (table_pg[i] == 0xf9)
					udelay(1);

				store_pwrindex_offset(hw, table_pg[i],
						      table_pg[i + 1],
						      table_pg[i + 2]);
				continue;
			} else {
				if (!check_cond(hw, table_pg[i])) {
					/*don't need the hw_body*/
					i += 2; /* skip the pair of expression*/
					v1 = table_pg[i];
					v2 = table_pg[i + 1];
					/* Skip triples until the 0xDEAD
					 * end-of-section sentinel. */
					while (v2 != 0xDEAD) {
						i += 3;
						v1 = table_pg[i];
						v2 = table_pg[i + 1];
					}
				}
			}
		}
	} else {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
			 "configtype != BaseBand_Config_PHY_REG\n");
	}
	return true;
}
191
192static bool config_parafile(struct ieee80211_hw *hw)
193{
194 struct rtl_priv *rtlpriv = rtl_priv(hw);
195 struct rtl_phy *rtlphy = &(rtlpriv->phy);
196 struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw));
197 bool rtstatus;
198
199 rtstatus = config_bb_with_header(hw, BASEBAND_CONFIG_PHY_REG);
200 if (rtstatus != true) {
201 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
202 return false;
203 }
204
205 if (fuse->autoload_failflag == false) {
206 rtlphy->pwrgroup_cnt = 0;
207 rtstatus = config_bb_with_pgheader(hw, BASEBAND_CONFIG_PHY_REG);
208 }
209 if (rtstatus != true) {
210 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
211 return false;
212 }
213 rtstatus = config_bb_with_header(hw, BASEBAND_CONFIG_AGC_TAB);
214 if (rtstatus != true) {
215 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
216 return false;
217 }
218 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
219 RFPGA0_XA_HSSIPARAMETER2, 0x200));
220
221 return true;
222}
223
224static void rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
225{
226 struct rtl_priv *rtlpriv = rtl_priv(hw);
227 struct rtl_phy *rtlphy = &(rtlpriv->phy);
228 int jj = RF90_PATH_A;
229 int kk = RF90_PATH_B;
230
231 rtlphy->phyreg_def[jj].rfintfs = RFPGA0_XAB_RFINTERFACESW;
232 rtlphy->phyreg_def[kk].rfintfs = RFPGA0_XAB_RFINTERFACESW;
233 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
234 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
235
236 rtlphy->phyreg_def[jj].rfintfi = RFPGA0_XAB_RFINTERFACERB;
237 rtlphy->phyreg_def[kk].rfintfi = RFPGA0_XAB_RFINTERFACERB;
238 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
239 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
240
241 rtlphy->phyreg_def[jj].rfintfo = RFPGA0_XA_RFINTERFACEOE;
242 rtlphy->phyreg_def[kk].rfintfo = RFPGA0_XB_RFINTERFACEOE;
243
244 rtlphy->phyreg_def[jj].rfintfe = RFPGA0_XA_RFINTERFACEOE;
245 rtlphy->phyreg_def[kk].rfintfe = RFPGA0_XB_RFINTERFACEOE;
246
247 rtlphy->phyreg_def[jj].rf3wire_offset = RFPGA0_XA_LSSIPARAMETER;
248 rtlphy->phyreg_def[kk].rf3wire_offset = RFPGA0_XB_LSSIPARAMETER;
249
250 rtlphy->phyreg_def[jj].rflssi_select = rFPGA0_XAB_RFPARAMETER;
251 rtlphy->phyreg_def[kk].rflssi_select = rFPGA0_XAB_RFPARAMETER;
252 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
253 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
254
255 rtlphy->phyreg_def[jj].rftxgain_stage = RFPGA0_TXGAINSTAGE;
256 rtlphy->phyreg_def[kk].rftxgain_stage = RFPGA0_TXGAINSTAGE;
257 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
258 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
259
260 rtlphy->phyreg_def[jj].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
261 rtlphy->phyreg_def[kk].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
262
263 rtlphy->phyreg_def[jj].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
264 rtlphy->phyreg_def[kk].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
265
266 rtlphy->phyreg_def[jj].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
267 rtlphy->phyreg_def[kk].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
268 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
269 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
270
271 rtlphy->phyreg_def[jj].rfagc_control1 = ROFDM0_XAAGCCORE1;
272 rtlphy->phyreg_def[kk].rfagc_control1 = ROFDM0_XBAGCCORE1;
273 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
274 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
275
276 rtlphy->phyreg_def[jj].rfagc_control2 = ROFDM0_XAAGCCORE2;
277 rtlphy->phyreg_def[kk].rfagc_control2 = ROFDM0_XBAGCCORE2;
278 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
279 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
280
281 rtlphy->phyreg_def[jj].rfrxiq_imbal = ROFDM0_XARXIQIMBAL;
282 rtlphy->phyreg_def[kk].rfrxiq_imbal = ROFDM0_XBRXIQIMBAL;
283 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBAL;
284 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBAL;
285
286 rtlphy->phyreg_def[jj].rfrx_afe = ROFDM0_XARXAFE;
287 rtlphy->phyreg_def[kk].rfrx_afe = ROFDM0_XBRXAFE;
288 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
289 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
290
291 rtlphy->phyreg_def[jj].rftxiq_imbal = ROFDM0_XATXIQIMBAL;
292 rtlphy->phyreg_def[kk].rftxiq_imbal = ROFDM0_XBTXIQIMBAL;
293 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBAL;
294 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBAL;
295
296 rtlphy->phyreg_def[jj].rftx_afe = ROFDM0_XATXAFE;
297 rtlphy->phyreg_def[kk].rftx_afe = ROFDM0_XBTXAFE;
298
299 rtlphy->phyreg_def[jj].rf_rb = RFPGA0_XA_LSSIREADBACK;
300 rtlphy->phyreg_def[kk].rf_rb = RFPGA0_XB_LSSIREADBACK;
301
302 rtlphy->phyreg_def[jj].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
303 rtlphy->phyreg_def[kk].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
304}
305
306static bool rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
307 u32 cmdtableidx, u32 cmdtablesz,
308 enum swchnlcmd_id cmdid,
309 u32 para1, u32 para2, u32 msdelay)
310{
311 struct swchnlcmd *pcmd;
312
313 if (cmdtable == NULL) {
314 RT_ASSERT(false, "cmdtable cannot be NULL.\n");
315 return false;
316 }
317
318 if (cmdtableidx >= cmdtablesz)
319 return false;
320
321 pcmd = cmdtable + cmdtableidx;
322 pcmd->cmdid = cmdid;
323 pcmd->para1 = para1;
324 pcmd->para2 = para2;
325 pcmd->msdelay = msdelay;
326 return true;
327}
328
/* Execute one step of the software channel-switch state machine.
 *
 * The switch is organized in three stages indexed by *stage:
 *   0 = pre-common commands, 1 = RF-dependent commands, 2 = post-common
 * commands; *step indexes within the current stage's table.  The command
 * tables are rebuilt locally on every call.  Returns true when the whole
 * sequence is complete; returns false after executing one command, in
 * which case *delay holds the ms to wait before the next call and *step
 * has been advanced.  Note *delay is only written on the false path.
 */
static bool chnl_step_by_step(struct ieee80211_hw *hw,
			      u8 channel, u8 *stage, u8 *step,
			      u32 *delay)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
	u32 precommoncmdcnt;
	struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
	u32 postcommoncmdcnt;
	struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
	u32 rfdependcmdcnt;
	struct swchnlcmd *currentcmd = NULL;
	u8 rfpath;
	u8 num_total_rfpath = rtlphy->num_total_rfpath;

	/* Stage 0 table: set tx power, then end marker. */
	precommoncmdcnt = 0;
	rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					MAX_PRECMD_CNT,
					CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
	rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
					MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 2 table: end marker only. */
	postcommoncmdcnt = 0;

	rtl88e_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
					MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);

	/* Stage 1 table: program RF channel register, then end marker. */
	rfdependcmdcnt = 0;

	RT_ASSERT((channel >= 1 && channel <= 14),
		  "illegal channel for Zebra: %d\n", channel);

	rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
					RF_CHNLBW, channel, 10);

	rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
					MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
					0);

	do {
		/* Pick the command at the current (stage, step). */
		switch (*stage) {
		case 0:
			currentcmd = &precommoncmd[*step];
			break;
		case 1:
			currentcmd = &rfdependcmd[*step];
			break;
		case 2:
			currentcmd = &postcommoncmd[*step];
			break;
		}

		/* End marker: advance to next stage, or finish after
		 * the last (post-common) stage.
		 */
		if (currentcmd->cmdid == CMDID_END) {
			if ((*stage) == 2) {
				return true;
			} else {
				(*stage)++;
				(*step) = 0;
				continue;
			}
		}

		switch (currentcmd->cmdid) {
		case CMDID_SET_TXPOWEROWER_LEVEL:
			rtl88e_phy_set_txpower_level(hw, channel);
			break;
		case CMDID_WRITEPORT_ULONG:
			rtl_write_dword(rtlpriv, currentcmd->para1,
					currentcmd->para2);
			break;
		case CMDID_WRITEPORT_USHORT:
			rtl_write_word(rtlpriv, currentcmd->para1,
				       (u16) currentcmd->para2);
			break;
		case CMDID_WRITEPORT_UCHAR:
			rtl_write_byte(rtlpriv, currentcmd->para1,
				       (u8) currentcmd->para2);
			break;
		case CMDID_RF_WRITEREG:
			/* Update the cached channel field (low 10 bits)
			 * of each RF path and write it out.
			 */
			for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
				rtlphy->rfreg_chnlval[rfpath] =
				    ((rtlphy->rfreg_chnlval[rfpath] &
				      0xfffffc00) | currentcmd->para2);

				rtl_set_rfreg(hw, (enum radio_path)rfpath,
					      currentcmd->para1,
					      RFREG_OFFSET_MASK,
					      rtlphy->rfreg_chnlval[rfpath]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}

		/* Exactly one command is executed per invocation. */
		break;
	} while (true);

	(*delay) = currentcmd->msdelay;
	(*step)++;
	return false;
}
434
435static long rtl88e_pwr_idx_dbm(struct ieee80211_hw *hw,
436 enum wireless_mode wirelessmode,
437 u8 txpwridx)
438{
439 long offset;
440 long pwrout_dbm;
441
442 switch (wirelessmode) {
443 case WIRELESS_MODE_B:
444 offset = -7;
445 break;
446 case WIRELESS_MODE_G:
447 case WIRELESS_MODE_N_24G:
448 offset = -8;
449 break;
450 default:
451 offset = -8;
452 break;
453 }
454 pwrout_dbm = txpwridx / 2 + offset;
455 return pwrout_dbm;
456}
457
458static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
459{
460 struct rtl_priv *rtlpriv = rtl_priv(hw);
461 struct rtl_phy *rtlphy = &(rtlpriv->phy);
462 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
463
464 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
465 "--->Cmd(%#x), set_io_inprogress(%d)\n",
466 rtlphy->current_io_type, rtlphy->set_io_inprogress);
467 switch (rtlphy->current_io_type) {
468 case IO_CMD_RESUME_DM_BY_SCAN:
469 dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
470 /*rtl92c_dm_write_dig(hw);*/
471 rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel);
472 rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83);
473 break;
474 case IO_CMD_PAUSE_DM_BY_SCAN:
475 rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue;
476 dm_digtable->cur_igvalue = 0x17;
477 rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
478 break;
479 default:
480 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
481 "switch case not processed\n");
482 break;
483 }
484 rtlphy->set_io_inprogress = false;
485 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
486 "(%#x)\n", rtlphy->current_io_type);
487}
488
489u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
490{
491 struct rtl_priv *rtlpriv = rtl_priv(hw);
492 u32 returnvalue, originalvalue, bitshift;
493
494 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
495 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
496 originalvalue = rtl_read_dword(rtlpriv, regaddr);
497 bitshift = cal_bit_shift(bitmask);
498 returnvalue = (originalvalue & bitmask) >> bitshift;
499
500 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
501 "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n", bitmask,
502 regaddr, originalvalue);
503
504 return returnvalue;
505}
506
507void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
508 u32 regaddr, u32 bitmask, u32 data)
509{
510 struct rtl_priv *rtlpriv = rtl_priv(hw);
511 u32 originalvalue, bitshift;
512
513 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
514 "regaddr(%#x), bitmask(%#x),data(%#x)\n",
515 regaddr, bitmask, data);
516
517 if (bitmask != MASKDWORD) {
518 originalvalue = rtl_read_dword(rtlpriv, regaddr);
519 bitshift = cal_bit_shift(bitmask);
520 data = ((originalvalue & (~bitmask)) | (data << bitshift));
521 }
522
523 rtl_write_dword(rtlpriv, regaddr, data);
524
525 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
526 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
527 regaddr, bitmask, data);
528}
529
530u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
531 enum radio_path rfpath, u32 regaddr, u32 bitmask)
532{
533 struct rtl_priv *rtlpriv = rtl_priv(hw);
534 u32 original_value, readback_value, bitshift;
535 unsigned long flags;
536
537 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
538 "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
539 regaddr, rfpath, bitmask);
540
541 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
542
543
544 original_value = rf_serial_read(hw, rfpath, regaddr);
545 bitshift = cal_bit_shift(bitmask);
546 readback_value = (original_value & bitmask) >> bitshift;
547
548 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
549
550 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
551 "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
552 regaddr, rfpath, bitmask, original_value);
553
554 return readback_value;
555}
556
557void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
558 enum radio_path rfpath,
559 u32 regaddr, u32 bitmask, u32 data)
560{
561 struct rtl_priv *rtlpriv = rtl_priv(hw);
562 u32 original_value, bitshift;
563 unsigned long flags;
564
565 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
566 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
567 regaddr, bitmask, data, rfpath);
568
569 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
570
571 if (bitmask != RFREG_OFFSET_MASK) {
572 original_value = rf_serial_read(hw, rfpath, regaddr);
573 bitshift = cal_bit_shift(bitmask);
574 data = ((original_value & (~bitmask)) |
575 (data << bitshift));
576 }
577
578 rf_serial_write(hw, rfpath, regaddr, data);
579
580
581 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
582
583 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
584 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
585 regaddr, bitmask, data, rfpath);
586}
587
588static bool config_mac_with_header(struct ieee80211_hw *hw)
589{
590 struct rtl_priv *rtlpriv = rtl_priv(hw);
591 u32 i;
592 u32 arraylength;
593 u32 *ptrarray;
594
595 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n");
596 arraylength = RTL8188EEMAC_1T_ARRAYLEN;
597 ptrarray = RTL8188EEMAC_1T_ARRAY;
598 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
599 "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength);
600 for (i = 0; i < arraylength; i = i + 2)
601 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
602 return true;
603}
604
605bool rtl88e_phy_mac_config(struct ieee80211_hw *hw)
606{
607 struct rtl_priv *rtlpriv = rtl_priv(hw);
608 bool rtstatus = config_mac_with_header(hw);
609
610 rtl_write_byte(rtlpriv, 0x04CA, 0x0B);
611 return rtstatus;
612}
613
614bool rtl88e_phy_bb_config(struct ieee80211_hw *hw)
615{
616 bool rtstatus = true;
617 struct rtl_priv *rtlpriv = rtl_priv(hw);
618 u16 regval;
619 u8 reg_hwparafile = 1;
620 u32 tmp;
621 rtl88e_phy_init_bb_rf_register_definition(hw);
622 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
623 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
624 regval | BIT(13) | BIT(0) | BIT(1));
625
626 rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
627 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
628 FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
629 FEN_BB_GLB_RSTN | FEN_BBRSTB);
630 tmp = rtl_read_dword(rtlpriv, 0x4c);
631 rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23));
632 if (reg_hwparafile == 1)
633 rtstatus = config_parafile(hw);
634 return rtstatus;
635}
636
/* Configure the RF front-end; thin wrapper around the RF6052 setup. */
bool rtl88e_phy_rf_config(struct ieee80211_hw *hw)
{
	return rtl88e_phy_rf6052_config(hw);
}
641
642static bool check_cond(struct ieee80211_hw *hw,
643 const u32 condition)
644{
645 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
646 struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw));
647 u32 _board = fuse->board_type; /*need efuse define*/
648 u32 _interface = rtlhal->interface;
649 u32 _platform = 0x08;/*SupportPlatform */
650 u32 cond = condition;
651
652 if (condition == 0xCDCDCDCD)
653 return true;
654
655 cond = condition & 0xFF;
656 if ((_board & cond) == 0 && cond != 0x1F)
657 return false;
658
659 cond = condition & 0xFF00;
660 cond = cond >> 8;
661 if ((_interface & cond) == 0 && cond != 0x07)
662 return false;
663
664 cond = condition & 0xFF0000;
665 cond = cond >> 16;
666 if ((_platform & cond) == 0 && cond != 0x0F)
667 return false;
668 return true;
669}
670
671static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw,
672 u32 addr, u32 data, enum radio_path rfpath,
673 u32 regaddr)
674{
675 if (addr == 0xffe) {
676 mdelay(50);
677 } else if (addr == 0xfd) {
678 mdelay(5);
679 } else if (addr == 0xfc) {
680 mdelay(1);
681 } else if (addr == 0xfb) {
682 udelay(50);
683 } else if (addr == 0xfa) {
684 udelay(5);
685 } else if (addr == 0xf9) {
686 udelay(1);
687 } else {
688 rtl_set_rfreg(hw, rfpath, regaddr,
689 RFREG_OFFSET_MASK,
690 data);
691 udelay(1);
692 }
693}
694
695static void rtl88_config_s(struct ieee80211_hw *hw,
696 u32 addr, u32 data)
697{
698 u32 content = 0x1000; /*RF Content: radio_a_txt*/
699 u32 maskforphyset = (u32)(content & 0xE000);
700
701 _rtl8188e_config_rf_reg(hw, addr, data, RF90_PATH_A,
702 addr | maskforphyset);
703}
704
705static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw,
706 u32 addr, u32 data)
707{
708 if (addr == 0xfe) {
709 mdelay(50);
710 } else if (addr == 0xfd) {
711 mdelay(5);
712 } else if (addr == 0xfc) {
713 mdelay(1);
714 } else if (addr == 0xfb) {
715 udelay(50);
716 } else if (addr == 0xfa) {
717 udelay(5);
718 } else if (addr == 0xf9) {
719 udelay(1);
720 } else {
721 rtl_set_bbreg(hw, addr, MASKDWORD, data);
722 udelay(1);
723 }
724}
725
726
/* Advance to the next (offset, value) pair of the register table:
 * bump the index by two and reload v1/v2.  Relies on a variable named
 * array_table being in scope at the expansion site.
 */
#define NEXT_PAIR(v1, v2, i) \
	do { \
		i += 2; v1 = array_table[i]; \
		v2 = array_table[i + 1]; \
	} while (0)
732
/* Load the AGC table into the baseband.  Entries are (offset, value)
 * pairs; an offset >= 0xCDCDCDCD starts a conditional branch whose
 * applicability is decided by check_cond().  Inside a branch, values
 * 0xDEAD/0xCDEF/0xCDCD act as terminators.  The index arithmetic via
 * NEXT_PAIR is deliberately stateful; i is rewound by 2 in the skip
 * path to compensate for the loop's own i += 2.
 */
static void set_baseband_agc_config(struct ieee80211_hw *hw)
{
	int i;
	u32 *array_table;
	u16 arraylen;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 v1 = 0, v2 = 0;

	arraylen = RTL8188EEAGCTAB_1TARRAYLEN;
	array_table = RTL8188EEAGCTAB_1TARRAY;

	for (i = 0; i < arraylen; i += 2) {
		v1 = array_table[i];
		v2 = array_table[i + 1];
		if (v1 < 0xCDCDCDCD) {
			/* Unconditional entry: write and move on. */
			rtl_set_bbreg(hw, array_table[i], MASKDWORD,
				      array_table[i + 1]);
			udelay(1);
			continue;
		} else {/*This line is the start line of branch.*/
			if (!check_cond(hw, array_table[i])) {
				/*Discard the following (offset, data) pairs*/
				NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD && v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < arraylen - 2) {
					NEXT_PAIR(v1, v2, i);
				}
				i -= 2; /* compensate for loop's += 2*/
			} else {
				/* Configure matched pairs and skip to end */
				NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD && v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < arraylen - 2) {
					rtl_set_bbreg(hw, array_table[i],
						      MASKDWORD,
						      array_table[i + 1]);
					udelay(1);
					NEXT_PAIR(v1, v2, i);
				}

				/* Skip any remaining alternative branch
				 * entries until the 0xDEAD terminator.
				 */
				while (v2 != 0xDEAD && i < arraylen - 2)
					NEXT_PAIR(v1, v2, i);
			}
		}
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
			 array_table[i],
			 array_table[i + 1]);
	}
}
783
/* Load the PHY register table into the baseband.  Same table format
 * and conditional-branch handling as set_baseband_agc_config(), but
 * each entry goes through _rtl8188e_config_bb_reg(), which interprets
 * delay sentinel addresses (0xfe..0xf9).
 */
static void set_baseband_phy_config(struct ieee80211_hw *hw)
{
	int i;
	u32 *array_table;
	u16 arraylen;
	u32 v1 = 0, v2 = 0;

	arraylen = RTL8188EEPHY_REG_1TARRAYLEN;
	array_table = RTL8188EEPHY_REG_1TARRAY;

	for (i = 0; i < arraylen; i += 2) {
		v1 = array_table[i];
		v2 = array_table[i + 1];
		if (v1 < 0xcdcdcdcd) {
			/* Unconditional entry. */
			_rtl8188e_config_bb_reg(hw, v1, v2);
		} else {/*This line is the start line of branch.*/
			if (!check_cond(hw, array_table[i])) {
				/*Discard the following (offset, data) pairs*/
				NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD &&
				       v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < arraylen - 2)
					NEXT_PAIR(v1, v2, i);
				i -= 2; /* prevent from for-loop += 2*/
			} else {
				/* Configure matched pairs and skip to end */
				NEXT_PAIR(v1, v2, i);
				while (v2 != 0xDEAD &&
				       v2 != 0xCDEF &&
				       v2 != 0xCDCD && i < arraylen - 2) {
					_rtl8188e_config_bb_reg(hw, v1, v2);
					NEXT_PAIR(v1, v2, i);
				}

				/* Skip the rest of the branch up to the
				 * 0xDEAD terminator.
				 */
				while (v2 != 0xDEAD && i < arraylen - 2)
					NEXT_PAIR(v1, v2, i);
			}
		}
	}
}
824
/* Record one MCS tx-power offset parsed from the PHY_REG_PG table.
 * Each recognized register address maps to a fixed column of
 * mcs_offset[pwrgroup_cnt][]; pwrgroup_cnt advances when the last
 * register of a group is seen (A_MCS15_MCS12 for 1T1R, B_MCS15_MCS12
 * otherwise).
 */
static void store_pwrindex_offset(struct ieee80211_hw *hw,
				  u32 regaddr, u32 bitmask,
				  u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	if (regaddr == RTXAGC_A_RATE18_06) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0]);
	}
	if (regaddr == RTXAGC_A_RATE54_24) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1]);
	}
	if (regaddr == RTXAGC_A_CCK1_MCS32) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6]);
	}
	/* The shared B_CCK11/A_CCK2_11 register is disambiguated by the
	 * bitmask: high bytes belong to path A, low byte to path B.
	 */
	if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7]);
	}
	if (regaddr == RTXAGC_A_MCS03_MCS00) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2]);
	}
	if (regaddr == RTXAGC_A_MCS07_MCS04) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3]);
	}
	if (regaddr == RTXAGC_A_MCS11_MCS08) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4]);
	}
	if (regaddr == RTXAGC_A_MCS15_MCS12) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5] = data;
		if (get_rf_type(rtlphy) == RF_1T1R)
			rtlphy->pwrgroup_cnt++;
		/* NOTE(review): this trace runs after the 1T1R increment
		 * above, so it reads the NEXT group's slot [5] — the
		 * logged value/index look wrong for 1T1R; trace-only,
		 * no functional impact.  Verify against vendor driver.
		 */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5]);
	}
	if (regaddr == RTXAGC_B_RATE18_06) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8]);
	}
	if (regaddr == RTXAGC_B_RATE54_24) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9]);
	}
	if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14]);
	}
	if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15]);
	}
	if (regaddr == RTXAGC_B_MCS03_MCS00) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10]);
	}
	if (regaddr == RTXAGC_B_MCS07_MCS04) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11]);
	}
	if (regaddr == RTXAGC_B_MCS11_MCS08) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12]);
	}
	if (regaddr == RTXAGC_B_MCS15_MCS12) {
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13]);
		if (get_rf_type(rtlphy) != RF_1T1R)
			rtlphy->pwrgroup_cnt++;
	}
}
949
/* Advance to the next (offset, value) pair of the RF table: bump the
 * index by two and reload v1/v2.  Relies on a variable named a_table
 * being in scope at the expansion site.
 */
#define READ_NEXT_RF_PAIR(v1, v2, i) \
	do { \
		i += 2; v1 = a_table[i]; \
		v2 = a_table[i + 1]; \
	} while (0)
955
/* Load the radio-A register table for the given RF path.  Same
 * (offset, value) pair format with conditional branches as the BB/AGC
 * tables; only RF90_PATH_A is implemented on this 1T1R chip.  An HP
 * OEM board gets one extra register write.  Always returns true.
 */
bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
					  enum radio_path rfpath)
{
	int i;
	u32 *a_table;
	u16 a_len;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 v1 = 0, v2 = 0;

	a_len = RTL8188EE_RADIOA_1TARRAYLEN;
	a_table = RTL8188EE_RADIOA_1TARRAY;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", a_len);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
	switch (rfpath) {
	case RF90_PATH_A:
		for (i = 0; i < a_len; i = i + 2) {
			v1 = a_table[i];
			v2 = a_table[i + 1];
			if (v1 < 0xcdcdcdcd) {
				/* Unconditional entry. */
				rtl88_config_s(hw, v1, v2);
			} else {/*This line is the start line of branch.*/
				if (!check_cond(hw, a_table[i])) {
					/* Discard the following (offset, data)
					 * pairs
					 */
					READ_NEXT_RF_PAIR(v1, v2, i);
					while (v2 != 0xDEAD && v2 != 0xCDEF &&
					       v2 != 0xCDCD && i < a_len - 2)
						READ_NEXT_RF_PAIR(v1, v2, i);
					i -= 2; /* prevent from for-loop += 2*/
				} else {
					/* Configure matched pairs and skip to
					 * end of if-else.
					 */
					READ_NEXT_RF_PAIR(v1, v2, i);
					while (v2 != 0xDEAD && v2 != 0xCDEF &&
					       v2 != 0xCDCD && i < a_len - 2) {
						rtl88_config_s(hw, v1, v2);
						READ_NEXT_RF_PAIR(v1, v2, i);
					}

					while (v2 != 0xDEAD && i < a_len - 2)
						READ_NEXT_RF_PAIR(v1, v2, i);
				}
			}
		}

		/* HP boards need an extra RF 0x52 tweak. */
		if (rtlhal->oem_id == RT_CID_819x_HP)
			rtl88_config_s(hw, 0x52, 0x7E4BD);

		break;

	case RF90_PATH_B:
	case RF90_PATH_C:
	case RF90_PATH_D:
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not processed\n");
		break;
	}
	return true;
}
1020
1021void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
1022{
1023 struct rtl_priv *rtlpriv = rtl_priv(hw);
1024 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1025
1026 rtlphy->default_initialgain[0] = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
1027 MASKBYTE0);
1028 rtlphy->default_initialgain[1] = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1,
1029 MASKBYTE0);
1030 rtlphy->default_initialgain[2] = rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1,
1031 MASKBYTE0);
1032 rtlphy->default_initialgain[3] = rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1,
1033 MASKBYTE0);
1034
1035 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1036 "Default initial gain (c50 = 0x%x, c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n",
1037 rtlphy->default_initialgain[0],
1038 rtlphy->default_initialgain[1],
1039 rtlphy->default_initialgain[2],
1040 rtlphy->default_initialgain[3]);
1041
1042 rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
1043 MASKBYTE0);
1044 rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
1045 MASKDWORD);
1046
1047 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1048 "Default framesync (0x%x) = 0x%x\n",
1049 ROFDM0_RXDETECTOR3, rtlphy->framesync);
1050}
1051
1052void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
1053{
1054 struct rtl_priv *rtlpriv = rtl_priv(hw);
1055 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1056 u8 level;
1057 long dbm;
1058
1059 level = rtlphy->cur_cck_txpwridx;
1060 dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_B, level);
1061 level = rtlphy->cur_ofdm24g_txpwridx;
1062 if (rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_G, level) > dbm)
1063 dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_G, level);
1064 level = rtlphy->cur_ofdm24g_txpwridx;
1065 if (rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_N_24G, level) > dbm)
1066 dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_N_24G, level);
1067 *powerlevel = dbm;
1068}
1069
/* Look up the per-path tx power indices for @channel from efuse data.
 * The ht20 and legacy-ht difference fields are 4-bit signed values
 * stored in a u8: a raw value > 0x0f is negative and is converted via
 * two's complement before being applied to the HT40 1S base power.
 */
static void _rtl88e_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
				      u8 *cckpower, u8 *ofdm, u8 *bw20_pwr,
				      u8 *bw40_pwr)
{
	struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw));
	u8 i = (channel - 1);	/* efuse tables are 0-based */
	u8 rf_path = 0;
	int jj = RF90_PATH_A;
	int kk = RF90_PATH_B;

	for (rf_path = 0; rf_path < 2; rf_path++) {
		if (rf_path == jj) {
			cckpower[jj] = fuse->txpwrlevel_cck[jj][i];
			/* ht20 diff: sign-extend 4-bit field (-8..7). */
			if (fuse->txpwr_ht20diff[jj][i] > 0x0f) /*-8~7 */
				bw20_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i] -
					(~(fuse->txpwr_ht20diff[jj][i]) + 1);
			else
				bw20_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i] +
					fuse->txpwr_ht20diff[jj][i];
			/* legacy-vs-HT diff: same sign-extension rule. */
			if (fuse->txpwr_legacyhtdiff[jj][i] > 0xf)
				ofdm[jj] = fuse->txpwrlevel_ht40_1s[jj][i] -
					(~(fuse->txpwr_legacyhtdiff[jj][i])+1);
			else
				ofdm[jj] = fuse->txpwrlevel_ht40_1s[jj][i] +
					fuse->txpwr_legacyhtdiff[jj][i];
			bw40_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i];

		} else if (rf_path == kk) {
			/* NOTE(review): path B applies the diffs without
			 * the negative-value handling used for path A —
			 * presumably acceptable on this 1T1R part; verify
			 * against the vendor reference driver.
			 */
			cckpower[kk] = fuse->txpwrlevel_cck[kk][i];
			bw20_pwr[kk] = fuse->txpwrlevel_ht40_1s[kk][i] +
				fuse->txpwr_ht20diff[kk][i];
			ofdm[kk] = fuse->txpwrlevel_ht40_1s[kk][i] +
				fuse->txpwr_legacyhtdiff[kk][i];
			bw40_pwr[kk] = fuse->txpwrlevel_ht40_1s[kk][i];
		}
	}
}
1107
1108static void _rtl88e_ccxpower_index_check(struct ieee80211_hw *hw,
1109 u8 channel, u8 *cckpower,
1110 u8 *ofdm, u8 *bw20_pwr,
1111 u8 *bw40_pwr)
1112{
1113 struct rtl_priv *rtlpriv = rtl_priv(hw);
1114 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1115
1116 rtlphy->cur_cck_txpwridx = cckpower[0];
1117 rtlphy->cur_ofdm24g_txpwridx = ofdm[0];
1118 rtlphy->cur_bw20_txpwridx = bw20_pwr[0];
1119 rtlphy->cur_bw40_txpwridx = bw40_pwr[0];
1120}
1121
1122void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
1123{
1124 struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw));
1125 u8 cckpower[MAX_TX_COUNT] = {0}, ofdm[MAX_TX_COUNT] = {0};
1126 u8 bw20_pwr[MAX_TX_COUNT] = {0}, bw40_pwr[MAX_TX_COUNT] = {0};
1127
1128 if (fuse->txpwr_fromeprom == false)
1129 return;
1130 _rtl88e_get_txpower_index(hw, channel, &cckpower[0], &ofdm[0],
1131 &bw20_pwr[0], &bw40_pwr[0]);
1132 _rtl88e_ccxpower_index_check(hw, channel, &cckpower[0], &ofdm[0],
1133 &bw20_pwr[0], &bw40_pwr[0]);
1134 rtl88e_phy_rf6052_set_cck_txpower(hw, &cckpower[0]);
1135 rtl88e_phy_rf6052_set_ofdm_txpower(hw, &ofdm[0], &bw20_pwr[0],
1136 &bw40_pwr[0], channel);
1137}
1138
1139void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1140{
1141 struct rtl_priv *rtlpriv = rtl_priv(hw);
1142 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1143 enum io_type iotype;
1144
1145 if (!is_hal_stop(rtlhal)) {
1146 switch (operation) {
1147 case SCAN_OPT_BACKUP:
1148 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1149 rtlpriv->cfg->ops->set_hw_reg(hw,
1150 HW_VAR_IO_CMD,
1151 (u8 *)&iotype);
1152 break;
1153 case SCAN_OPT_RESTORE:
1154 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1155 rtlpriv->cfg->ops->set_hw_reg(hw,
1156 HW_VAR_IO_CMD,
1157 (u8 *)&iotype);
1158 break;
1159 default:
1160 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1161 "Unknown Scan Backup operation.\n");
1162 break;
1163 }
1164 }
1165}
1166
/* Apply the bandwidth stored in rtlphy->current_chan_bw to the MAC and
 * baseband, then the RF, and clear the in-progress flag.  Register
 * write order follows the vendor sequence: MAC opmode/RSC first, then
 * the BB RFMOD / sideband registers, then the RF6052 bandwidth.
 */
void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u8 reg_bw_opmode;
	u8 reg_prsr_rsc;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 "Switch to %s bandwidth\n",
		 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
		 "20MHz" : "40MHz");

	if (is_hal_stop(rtlhal)) {
		rtlphy->set_bwmode_inprogress = false;
		return;
	}

	reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
	reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);

	/* MAC side: bandwidth opmode and (for 40 MHz) the response
	 * sub-channel selection.
	 */
	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		reg_bw_opmode |= BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		reg_bw_opmode &= ~BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		reg_prsr_rsc =
		    (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
		rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}

	/* Baseband side: RFMOD width bit and 40 MHz sideband setup. */
	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
		/* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);*/
		break;
	case HT_CHANNEL_WIDTH_20_40:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);

		rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
			      (mac->cur_40_prime_sc >> 1));
		rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
		/*rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);*/

		rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
			      (mac->cur_40_prime_sc ==
			       HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}
	rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
	rtlphy->set_bwmode_inprogress = false;
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
1235
1236void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
1237 enum nl80211_channel_type ch_type)
1238{
1239 struct rtl_priv *rtlpriv = rtl_priv(hw);
1240 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1241 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1242 u8 tmp_bw = rtlphy->current_chan_bw;
1243
1244 if (rtlphy->set_bwmode_inprogress)
1245 return;
1246 rtlphy->set_bwmode_inprogress = true;
1247 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1248 rtl88e_phy_set_bw_mode_callback(hw);
1249 } else {
1250 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1251 "FALSE driver sleep or unload\n");
1252 rtlphy->set_bwmode_inprogress = false;
1253 rtlphy->current_chan_bw = tmp_bw;
1254 }
1255}
1256
/* Drive the software channel-switch state machine for the current
 * channel.  Each chnl_step_by_step() call executes one command; a
 * false return with a positive delay sleeps and exits (to be resumed
 * on the next invocation), a zero delay continues immediately, and a
 * true return marks the switch complete.  Note @delay is only written
 * by chnl_step_by_step() on the false path, which is the only path
 * that reads it here.
 */
void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 delay;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 "switch to channel%d\n", rtlphy->current_channel);
	if (is_hal_stop(rtlhal))
		return;
	do {
		if (!rtlphy->sw_chnl_inprogress)
			break;
		if (!chnl_step_by_step(hw, rtlphy->current_channel,
				       &rtlphy->sw_chnl_stage,
				       &rtlphy->sw_chnl_step, &delay)) {
			if (delay > 0)
				mdelay(delay);
			else
				continue;
		} else {
			/* All stages finished. */
			rtlphy->sw_chnl_inprogress = false;
		}
		break;
	} while (true);
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
1285
1286u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
1287{
1288 struct rtl_priv *rtlpriv = rtl_priv(hw);
1289 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1290 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1291
1292 if (rtlphy->sw_chnl_inprogress)
1293 return 0;
1294 if (rtlphy->set_bwmode_inprogress)
1295 return 0;
1296 RT_ASSERT((rtlphy->current_channel <= 14),
1297 "WIRELESS_MODE_G but channel>14");
1298 rtlphy->sw_chnl_inprogress = true;
1299 rtlphy->sw_chnl_stage = 0;
1300 rtlphy->sw_chnl_step = 0;
1301 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1302 rtl88e_phy_sw_chnl_callback(hw);
1303 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1304 "sw_chnl_inprogress false schdule workitem current channel %d\n",
1305 rtlphy->current_channel);
1306 rtlphy->sw_chnl_inprogress = false;
1307 } else {
1308 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1309 "sw_chnl_inprogress false driver sleep or unload\n");
1310 rtlphy->sw_chnl_inprogress = false;
1311 }
1312 return 1;
1313}
1314
/* Run one path-A TX IQ calibration attempt and check the result
 * registers.  The 0xe3x writes set the IQK tone/PI parameters, 0xe4c
 * sets LO calibration, and the 0xe48 f9->f8 transition triggers the
 * one-shot calibration.  Returns bit 0 set when TX IQK succeeded
 * (per the status bit in 0xeac and sanity ranges on e94/e9c).
 * @config_pathb is unused on this 1T1R chip but kept for interface
 * symmetry with the 2T variants.
 */
static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c;
	u8 result = 0x00;

	/* Path-A IQK tone and PI settings. */
	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c);
	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a);
	rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000);

	/* LO calibration setting, then one-shot trigger. */
	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);

	/* Success: fail-bit clear and TX power readbacks not at the
	 * known-bad values.
	 */
	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	return result;
}
1341
/* Run one path-B IQ calibration attempt.  The 0xe60 0x2->0x0 write
 * triggers the one-shot calibration.  Returns bit 0 set when TX IQK
 * succeeded; bit 1 additionally set when RX IQK also succeeded (RX is
 * only evaluated after a TX pass, hence the early return).
 */
static u8 _rtl88e_phy_path_b_iqk(struct ieee80211_hw *hw)
{
	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
	u8 result = 0x00;

	/* One-shot trigger for path-B IQK. */
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
	mdelay(IQK_DELAY_TIME);
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);

	/* TX check: fail-bit clear and readbacks outside known-bad
	 * values; bail out before the RX check on TX failure.
	 */
	if (!(reg_eac & BIT(31)) &&
	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;
	/* RX check. */
	if (!(reg_eac & BIT(30)) &&
	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1368
/* Run the two-stage RX IQ calibration for RF path A.
 *
 * Stage 1 performs a TX IQK to obtain the TX IMR setting; its result is
 * folded into the RTX_IQK register before stage 2 performs the actual RX
 * IQK with a modified RF mode table.
 *
 * @config_pathb is unused; kept for signature symmetry with the 2T paths.
 *
 * Returns a bitmask: 0x01 = stage-1 TX IQK ok, 0x02 = stage-2 RX IQK ok.
 * Returns early after stage 1 fails, so possible values are 0x00, 0x01
 * and 0x03 (full success).
 */
static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u32temp;
	u8 result = 0x00;
	int jj = RF90_PATH_A;

	/*Get TXIMR Setting*/
	/*Modify RX IQK mode table*/
	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
	rtl_set_rfreg(hw, jj, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
	rtl_set_rfreg(hw, jj, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
	rtl_set_rfreg(hw, jj, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
	rtl_set_rfreg(hw, jj, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b);
	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);

	/*IQK Setting*/
	rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00);
	rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x81004800);

	/*path a IQK setting*/
	rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c1c);
	rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x30008c1c);
	rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160804);
	rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28160000);

	/*LO calibration Setting*/
	rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911);
	/*one shot, path A LOK & iqk*/
	rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);


	/* Stage 1 pass/fail check; failure patterns are 0x142/0x42. */
	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;

	/* Build the TX IQK value for stage 2 from the stage-1 results. */
	u32temp = 0x80007C00 | (reg_e94&0x3FF0000) |
		  ((reg_e9c&0x3FF0000) >> 16);
	rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32temp);
	/*RX IQK*/
	/*Modify RX IQK mode table*/
	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000);
	rtl_set_rfreg(hw, jj, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0);
	rtl_set_rfreg(hw, jj, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000);
	rtl_set_rfreg(hw, jj, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f);
	rtl_set_rfreg(hw, jj, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa);
	rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000);

	/*IQK Setting*/
	rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800);

	/*path a IQK setting*/
	rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x30008c1c);
	rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c1c);
	rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160c05);
	rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28160c05);

	/*LO calibration Setting*/
	rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911);
	/*one shot, path A LOK & iqk*/
	rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);
	reg_ea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD);

	/* Stage 2 pass/fail check; failure patterns are 0x132/0x36. */
	if (!(reg_eac & BIT(27)) &&
	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1454
/* Commit the chosen IQK result set into the OFDM compensation registers.
 *
 * @iqk_ok:  whether the calibration that produced @result succeeded;
 *           nothing is written when false.
 * @result:  the 4x8 candidate table filled by the IQK passes.
 * @final:   row of @result to apply; 0xFF means "no valid result" and
 *           the function returns without touching the hardware.
 * @btxonly: when true only the TX imbalance/AFE registers are updated
 *           and the RX part is skipped.
 *
 * The stored X/Y values are 10-bit signed quantities: bit 9 (0x200) is
 * the sign bit, hence the manual sign extension with 0xFFFFFC00 before
 * scaling against the previous register content.
 */
static void fill_iqk(struct ieee80211_hw *hw, bool iqk_ok, long result[][8],
		     u8 final, bool btxonly)
{
	u32 oldval_0, x, tx0_a, reg;
	long y, tx0_c;

	if (final == 0xFF) {
		return;
	} else if (iqk_ok) {
		/* Previous TX0 A coefficient lives in bits 31..22. */
		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBAL,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final][0];
		/* Sign-extend the 10-bit value. */
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;
		tx0_a = (x * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, 0x3FF, tx0_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(31),
			      ((x * oldval_0 >> 7) & 0x1));
		y = result[final][1];
		if ((y & 0x00000200) != 0)
			y |= 0xFFFFFC00;
		tx0_c = (y * oldval_0) >> 8;
		/* The C coefficient is split: high nibble to XCTXAFE,
		 * low 6 bits into XATXIQIMBAL. */
		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
			      ((tx0_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, 0x003F0000,
			      (tx0_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(29),
			      ((y * oldval_0 >> 7) & 0x1));
		if (btxonly)
			return;
		/* RX compensation from result columns 2 and 3. */
		reg = result[final][2];
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBAL, 0x3FF, reg);
		reg = result[final][3] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBAL, 0xFC00, reg);
		reg = (result[final][3] >> 6) & 0xF;
		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
	}
}
1493
1494static void save_adda_reg(struct ieee80211_hw *hw,
1495 const u32 *addareg, u32 *backup,
1496 u32 registernum)
1497{
1498 u32 i;
1499
1500 for (i = 0; i < registernum; i++)
1501 backup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1502}
1503
1504static void save_mac_reg(struct ieee80211_hw *hw, const u32 *macreg,
1505 u32 *macbackup)
1506{
1507 struct rtl_priv *rtlpriv = rtl_priv(hw);
1508 u32 i;
1509
1510 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1511 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1512 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1513}
1514
1515static void reload_adda(struct ieee80211_hw *hw, const u32 *addareg,
1516 u32 *backup, u32 reg_num)
1517{
1518 u32 i;
1519
1520 for (i = 0; i < reg_num; i++)
1521 rtl_set_bbreg(hw, addareg[i], MASKDWORD, backup[i]);
1522}
1523
1524static void reload_mac(struct ieee80211_hw *hw, const u32 *macreg,
1525 u32 *macbackup)
1526{
1527 struct rtl_priv *rtlpriv = rtl_priv(hw);
1528 u32 i;
1529
1530 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1531 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1532 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1533}
1534
1535static void _rtl88e_phy_path_adda_on(struct ieee80211_hw *hw,
1536 const u32 *addareg, bool is_patha_on,
1537 bool is2t)
1538{
1539 u32 pathon;
1540 u32 i;
1541
1542 pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1543 if (false == is2t) {
1544 pathon = 0x0bdb25a0;
1545 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1546 } else {
1547 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
1548 }
1549
1550 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1551 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
1552}
1553
1554static void _rtl88e_phy_mac_setting_calibration(struct ieee80211_hw *hw,
1555 const u32 *macreg,
1556 u32 *macbackup)
1557{
1558 struct rtl_priv *rtlpriv = rtl_priv(hw);
1559 u32 i = 0;
1560
1561 rtl_write_byte(rtlpriv, macreg[i], 0x3F);
1562
1563 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
1564 rtl_write_byte(rtlpriv, macreg[i],
1565 (u8) (macbackup[i] & (~BIT(3))));
1566 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
1567}
1568
/* Put RF path A into standby around the path-B calibration: disable the
 * IQK engine (0xe28 = 0), park path A (0x840), then re-enable the IQK
 * engine.  Write order matters. */
static void _rtl88e_phy_path_a_standby(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
1575
1576static void _rtl88e_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1577{
1578 u32 mode;
1579
1580 mode = pi_mode ? 0x01000100 : 0x01000000;
1581 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1582 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1583}
1584
/* Compare two IQK candidate rows @c1 and @c2 of @result for similarity.
 *
 * Each per-column difference larger than MAX_TOLERANCE marks that column
 * in @bitmap; columns 2 and 6 (the RX results of each path) get special
 * handling: if one candidate's RX pair sums to zero it is considered
 * invalid and the other candidate is copied into row 3 (the "merged"
 * row).
 *
 * Returns true when the rows agree everywhere (caller may then use
 * either), false when row 3 was patched or the rows genuinely diverge.
 *
 * NOTE(review): this is the 8188E driver, but the 2T decision uses
 * IS_92C_SERIAL(rtlhal->version) — presumably inherited from the
 * rtl8192c code this was derived from; confirm it is intended here.
 */
static bool sim_comp(struct ieee80211_hw *hw, long result[][8], u8 c1, u8 c2)
{
	u32 i, j, diff, bitmap, bound;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	u8 final[2] = {0xFF, 0xFF};
	bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);

	/* 2T parts compare all 8 columns, 1T parts only the first 4. */
	if (is2t)
		bound = 8;
	else
		bound = 4;

	bitmap = 0;

	for (i = 0; i < bound; i++) {
		diff = (result[c1][i] > result[c2][i]) ?
		       (result[c1][i] - result[c2][i]) :
		       (result[c2][i] - result[c1][i]);

		if (diff > MAX_TOLERANCE) {
			/* Columns 2/6 are each path's first RX column; a
			 * zero RX pair marks an invalid candidate. */
			if ((i == 2 || i == 6) && !bitmap) {
				if (result[c1][i] + result[c1][i + 1] == 0)
					final[(i / 4)] = c2;
				else if (result[c2][i] + result[c2][i + 1] == 0)
					final[(i / 4)] = c1;
				else
					bitmap = bitmap | (1 << i);
			} else {
				bitmap = bitmap | (1 << i);
			}
		}
	}

	if (bitmap == 0) {
		/* No hard mismatch: copy any per-path winner into the
		 * merged row 3 (TX columns only: j stops 2 short). */
		for (i = 0; i < (bound / 4); i++) {
			if (final[i] != 0xFF) {
				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
					result[3][j] = result[final[i]][j];
				bresult = false;
			}
		}
		return bresult;
	} else if (!(bitmap & 0x0F)) {
		/* Path A columns agree: keep c1's path-A values. */
		for (i = 0; i < 4; i++)
			result[3][i] = result[c1][i];
		return false;
	} else if (!(bitmap & 0xF0) && is2t) {
		/* Path B columns agree: keep c1's path-B values. */
		for (i = 4; i < 8; i++)
			result[3][i] = result[c1][i];
		return false;
	} else {
		return false;
	}
}
1640
1641static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
1642 long result[][8], u8 t, bool is2t)
1643{
1644 struct rtl_priv *rtlpriv = rtl_priv(hw);
1645 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1646 u32 i;
1647 u8 patha_ok, pathb_ok;
1648 const u32 adda_reg[IQK_ADDA_REG_NUM] = {
1649 0x85c, 0xe6c, 0xe70, 0xe74,
1650 0xe78, 0xe7c, 0xe80, 0xe84,
1651 0xe88, 0xe8c, 0xed0, 0xed4,
1652 0xed8, 0xedc, 0xee0, 0xeec
1653 };
1654 const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1655 0x522, 0x550, 0x551, 0x040
1656 };
1657 const u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
1658 ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, RFPGA0_XCD_RFINTERFACESW,
1659 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0x800
1660 };
1661 const u32 retrycount = 2;
1662
1663 if (t == 0) {
1664 save_adda_reg(hw, adda_reg, rtlphy->adda_backup, 16);
1665 save_mac_reg(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
1666 save_adda_reg(hw, iqk_bb_reg, rtlphy->iqk_bb_backup,
1667 IQK_BB_REG_NUM);
1668 }
1669 _rtl88e_phy_path_adda_on(hw, adda_reg, true, is2t);
1670 if (t == 0) {
1671 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
1672 RFPGA0_XA_HSSIPARAMETER1, BIT(8));
1673 }
1674
1675 if (!rtlphy->rfpi_enable)
1676 _rtl88e_phy_pi_mode_switch(hw, true);
1677 /*BB Setting*/
1678 rtl_set_bbreg(hw, 0x800, BIT(24), 0x00);
1679 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1680 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1681 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1682
1683 rtl_set_bbreg(hw, 0x870, BIT(10), 0x01);
1684 rtl_set_bbreg(hw, 0x870, BIT(26), 0x01);
1685 rtl_set_bbreg(hw, 0x860, BIT(10), 0x00);
1686 rtl_set_bbreg(hw, 0x864, BIT(10), 0x00);
1687
1688 if (is2t) {
1689 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1690 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1691 }
1692 _rtl88e_phy_mac_setting_calibration(hw, iqk_mac_reg,
1693 rtlphy->iqk_mac_backup);
1694 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
1695 if (is2t)
1696 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
1697
1698 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1699 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1700 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800);
1701 for (i = 0; i < retrycount; i++) {
1702 patha_ok = _rtl88e_phy_path_a_iqk(hw, is2t);
1703 if (patha_ok == 0x01) {
1704 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1705 "Path A Tx IQK Success!!\n");
1706 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1707 0x3FF0000) >> 16;
1708 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1709 0x3FF0000) >> 16;
1710 break;
1711 }
1712 }
1713
1714 for (i = 0; i < retrycount; i++) {
1715 patha_ok = _rtl88e_phy_path_a_rx_iqk(hw, is2t);
1716 if (patha_ok == 0x03) {
1717 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1718 "Path A Rx IQK Success!!\n");
1719 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1720 0x3FF0000) >> 16;
1721 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1722 0x3FF0000) >> 16;
1723 break;
1724 } else {
1725 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1726 "Path a RX iqk fail!!!\n");
1727 }
1728 }
1729
1730 if (0 == patha_ok) {
1731 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1732 "Path A IQK Success!!\n");
1733 }
1734 if (is2t) {
1735 _rtl88e_phy_path_a_standby(hw);
1736 _rtl88e_phy_path_adda_on(hw, adda_reg, false, is2t);
1737 for (i = 0; i < retrycount; i++) {
1738 pathb_ok = _rtl88e_phy_path_b_iqk(hw);
1739 if (pathb_ok == 0x03) {
1740 result[t][4] = (rtl_get_bbreg(hw,
1741 0xeb4, MASKDWORD) &
1742 0x3FF0000) >> 16;
1743 result[t][5] =
1744 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1745 0x3FF0000) >> 16;
1746 result[t][6] =
1747 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1748 0x3FF0000) >> 16;
1749 result[t][7] =
1750 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1751 0x3FF0000) >> 16;
1752 break;
1753 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1754 result[t][4] = (rtl_get_bbreg(hw,
1755 0xeb4, MASKDWORD) &
1756 0x3FF0000) >> 16;
1757 }
1758 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1759 0x3FF0000) >> 16;
1760 }
1761 }
1762
1763 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1764
1765 if (t != 0) {
1766 if (!rtlphy->rfpi_enable)
1767 _rtl88e_phy_pi_mode_switch(hw, false);
1768 reload_adda(hw, adda_reg, rtlphy->adda_backup, 16);
1769 reload_mac(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
1770 reload_adda(hw, iqk_bb_reg, rtlphy->iqk_bb_backup,
1771 IQK_BB_REG_NUM);
1772
1773 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1774 if (is2t)
1775 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1776 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
1777 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
1778 }
1779 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "88ee IQK Finish!!\n");
1780}
1781
/* Perform the LC (inductor/capacitor) calibration of the RF PLL.
 *
 * If any of the TX-path bits in 0xd03 (mask 0x70) are set, the current
 * RF mode(s) are saved and the RF is forced into a safe mode for the
 * calibration; otherwise TX is paused via REG_TXPAUSE.  The calibration
 * itself is started by setting bit 0x08000 in RF register 0x18 and is
 * given 100 ms to complete, after which the original state is restored.
 */
static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
	u8 tmpreg;
	u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int jj = RF90_PATH_A;
	int kk = RF90_PATH_B;

	tmpreg = rtl_read_byte(rtlpriv, 0xd03);

	/* Either mask the TX-path bits or pause TX entirely. */
	if ((tmpreg & 0x70) != 0)
		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
	else
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);

	if ((tmpreg & 0x70) != 0) {
		/* Save current RF mode(s) and force the calibration mode. */
		rf_a_mode = rtl_get_rfreg(hw, jj, 0x00, MASK12BITS);

		if (is2t)
			rf_b_mode = rtl_get_rfreg(hw, kk, 0x00,
						  MASK12BITS);

		rtl_set_rfreg(hw, jj, 0x00, MASK12BITS,
			      (rf_a_mode & 0x8FFFF) | 0x10000);

		if (is2t)
			rtl_set_rfreg(hw, kk, 0x00, MASK12BITS,
				      (rf_b_mode & 0x8FFFF) | 0x10000);
	}
	lc_cal = rtl_get_rfreg(hw, jj, 0x18, MASK12BITS);

	/* Start LC calibration (bit 0x08000 of RF reg 0x18). */
	rtl_set_rfreg(hw, jj, 0x18, MASK12BITS, lc_cal | 0x08000);

	mdelay(100);

	/* Restore the original TX state and RF mode(s). */
	if ((tmpreg & 0x70) != 0) {
		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
		rtl_set_rfreg(hw, jj, 0x00, MASK12BITS, rf_a_mode);

		if (is2t)
			rtl_set_rfreg(hw, kk, 0x00, MASK12BITS,
				      rf_b_mode);
	} else {
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
	}
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
1829
/* Route the RF output to the MAIN or AUX antenna.
 *
 * @bmain: true selects the MAIN antenna, false AUX.
 * @is2t:  2T parts only need the XB interface-OE bits; 1T parts program
 *         the full single-antenna switching registers and, when hardware
 *         antenna diversity (CGCS_RX_HW_ANTDIV) is fused in, the
 *         RAM64X16 diversity bit as well.
 */
static void rfpath_switch(struct ieee80211_hw *hw,
			  bool bmain, bool is2t)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw));
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");

	/* When the HAL is stopped the LED/antenna mux must be forced on
	 * before the BB register is reachable. */
	if (is_hal_stop(rtlhal)) {
		u8 u1btmp;
		u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0);
		rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7));
		rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
	}
	if (is2t) {
		if (bmain)
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(6), 0x1);
		else
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
				      BIT(5) | BIT(6), 0x2);
	} else {
		rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0);
		rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201);

		/* We use the RF definition of MAIN and AUX, left antenna and
		 * right antenna repectively.
		 * Default output at AUX.
		 */
		if (bmain) {
			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(14) |
				      BIT(13) | BIT(12), 0);
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(5) |
				      BIT(4) | BIT(3), 0);
			if (fuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
				rtl_set_bbreg(hw, RCONFIG_RAM64X16, BIT(31), 0);
		} else {
			rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(14) |
				      BIT(13) | BIT(12), 1);
			rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(5) |
				      BIT(4) | BIT(3), 1);
			if (fuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
				rtl_set_bbreg(hw, RCONFIG_RAM64X16, BIT(31), 1);
		}
	}
}
1876
1877#undef IQK_ADDA_REG_NUM
1878#undef IQK_DELAY_TIME
1879
/* Top-level IQ calibration entry point.
 *
 * @recovery: when true only restores the previously saved BB registers
 *            and returns (used to undo calibration state).
 *
 * Runs up to three calibration passes, picks a "final" result row when
 * two passes agree (sim_comp), commits it via fill_iqk(), stores it in
 * the iqk_matrix cache and finally snapshots the BB registers for later
 * recovery.
 */
void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	long result[4][8];
	u8 i, final;
	bool patha_ok;
	long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0;
	bool is12simular, is13simular, is23simular;
	u32 iqk_bb_reg[9] = {
		ROFDM0_XARXIQIMBAL,
		ROFDM0_XBRXIQIMBAL,
		ROFDM0_ECCATHRES,
		ROFDM0_AGCRSSITABLE,
		ROFDM0_XATXIQIMBAL,
		ROFDM0_XBTXIQIMBAL,
		ROFDM0_XCTXAFE,
		ROFDM0_XDTXAFE,
		ROFDM0_RXIQEXTANTA
	};

	if (recovery) {
		reload_adda(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
		return;
	}

	/* result is long[4][8] == 32 elements. */
	memset(result, 0, 32 * sizeof(long));
	final = 0xff;
	patha_ok = false;
	is12simular = false;
	is23simular = false;
	is13simular = false;
	for (i = 0; i < 3; i++) {
		if (get_rf_type(rtlphy) == RF_2T2R)
			_rtl88e_phy_iq_calibrate(hw, result, i, true);
		else
			_rtl88e_phy_iq_calibrate(hw, result, i, false);
		if (i == 1) {
			is12simular = sim_comp(hw, result, 0, 1);
			if (is12simular) {
				final = 0;
				break;
			}
		}
		if (i == 2) {
			is13simular = sim_comp(hw, result, 0, 2);
			if (is13simular) {
				final = 0;
				break;
			}
			is23simular = sim_comp(hw, result, 1, 2);
			if (is23simular) {
				final = 1;
			} else {
				/* NOTE(review): this inner loop reuses the
				 * outer loop counter i, leaving it at 8 and
				 * ending the outer loop — harmless only
				 * because i == 2 is the last iteration. */
				for (i = 0; i < 8; i++)
					reg_tmp += result[3][i];

				/* Use the merged row 3 if sim_comp filled
				 * it with anything non-zero. */
				if (reg_tmp != 0)
					final = 3;
				else
					final = 0xFF;
			}
		}
	}
	/* NOTE(review): only the last iteration (row 3) survives this
	 * loop; it is equivalent to copying result[3][...] directly and
	 * matters only for the reg_e94/reg_ea4 checks below when no
	 * final row was selected. */
	for (i = 0; i < 4; i++) {
		reg_e94 = result[i][0];
		reg_e9c = result[i][1];
		reg_ea4 = result[i][2];
		reg_eb4 = result[i][4];
		reg_ebc = result[i][5];
	}
	if (final != 0xff) {
		/* Cache the chosen TX results for later power tracking. */
		reg_e94 = result[final][0];
		rtlphy->reg_e94 = reg_e94;
		reg_e9c = result[final][1];
		rtlphy->reg_e9c = reg_e9c;
		reg_ea4 = result[final][2];
		reg_eb4 = result[final][4];
		rtlphy->reg_eb4 = reg_eb4;
		reg_ebc = result[final][5];
		rtlphy->reg_ebc = reg_ebc;
		patha_ok = true;
	} else {
		/* No usable result: fall back to unity gain defaults. */
		rtlphy->reg_e94 = 0x100;
		rtlphy->reg_eb4 = 0x100;
		rtlphy->reg_ebc = 0x0;
		rtlphy->reg_e9c = 0x0;
	}
	if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
		fill_iqk(hw, patha_ok, result, final, (reg_ea4 == 0));
	if (final != 0xFF) {
		for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
			rtlphy->iqk_matrix[0].value[0][i] = result[final][i];
		rtlphy->iqk_matrix[0].iqk_done = true;
	}
	/* Snapshot the post-calibration BB state for recovery mode. */
	save_adda_reg(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
}
1977
1978void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw)
1979{
1980 struct rtl_priv *rtlpriv = rtl_priv(hw);
1981 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1982 struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
1983 bool start_conttx = false, singletone = false;
1984 u32 timeout = 2000, timecount = 0;
1985
1986 if (start_conttx || singletone)
1987 return;
1988
1989 while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
1990 udelay(50);
1991 timecount += 50;
1992 }
1993
1994 rtlphy->lck_inprogress = true;
1995 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1996 "LCK:Start!!! currentband %x delay %d ms\n",
1997 rtlhal->current_bandtype, timecount);
1998
1999 _rtl88e_phy_lc_calibrate(hw, false);
2000
2001 rtlphy->lck_inprogress = false;
2002}
2003
/* Public antenna-switch wrapper; the 8188E is a 1T part, so is2t is
 * always false here. */
void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
	rfpath_switch(hw, bmain, false);
}
2008
2009bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
2010{
2011 struct rtl_priv *rtlpriv = rtl_priv(hw);
2012 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2013 bool postprocessing = false;
2014
2015 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2016 "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
2017 iotype, rtlphy->set_io_inprogress);
2018 do {
2019 switch (iotype) {
2020 case IO_CMD_RESUME_DM_BY_SCAN:
2021 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2022 "[IO CMD] Resume DM after scan.\n");
2023 postprocessing = true;
2024 break;
2025 case IO_CMD_PAUSE_DM_BY_SCAN:
2026 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2027 "[IO CMD] Pause DM before scan.\n");
2028 postprocessing = true;
2029 break;
2030 default:
2031 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2032 "switch case not processed\n");
2033 break;
2034 }
2035 } while (false);
2036 if (postprocessing && !rtlphy->set_io_inprogress) {
2037 rtlphy->set_io_inprogress = true;
2038 rtlphy->current_io_type = iotype;
2039 } else {
2040 return false;
2041 }
2042 rtl88e_phy_set_io(hw);
2043 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
2044 return true;
2045}
2046
/* Power the RF section back on after sleep: re-enable the SPS regulator,
 * pulse SYS_FUNC_EN (0xE3 -> 0xE2 -> 0xE3) to reset the BB/RF blocks,
 * then release the TX pause.  Write order matters. */
static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	/*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);*/
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
2058
/* Put the RF section to sleep: pause all TX, shut the path-A RF chain
 * down, disable the BB via SYS_FUNC_EN and lower the SPS regulator.
 * Write order matters (TX must stop before the RF is powered off). */
static void _rtl88ee_phy_set_rf_sleep(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int jj = RF90_PATH_A;

	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
	rtl_set_rfreg(hw, jj, 0x00, RFREG_OFFSET_MASK, 0x00);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
}
2069
2070static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
2071 enum rf_pwrstate rfpwr_state)
2072{
2073 struct rtl_priv *rtlpriv = rtl_priv(hw);
2074 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2075 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2076 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2077 struct rtl8192_tx_ring *ring = NULL;
2078 bool bresult = true;
2079 u8 i, queue_id;
2080
2081 switch (rfpwr_state) {
2082 case ERFON:{
2083 if ((ppsc->rfpwr_state == ERFOFF) &&
2084 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
2085 bool rtstatus;
2086 u32 init = 0;
2087 do {
2088 init++;
2089 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2090 "IPS Set eRf nic enable\n");
2091 rtstatus = rtl_ps_enable_nic(hw);
2092 } while ((rtstatus != true) && (init < 10));
2093 RT_CLEAR_PS_LEVEL(ppsc,
2094 RT_RF_OFF_LEVL_HALT_NIC);
2095 } else {
2096 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2097 "Set ERFON sleeped:%d ms\n",
2098 jiffies_to_msecs(jiffies - ppsc->
2099 last_sleep_jiffies));
2100 ppsc->last_awake_jiffies = jiffies;
2101 rtl88ee_phy_set_rf_on(hw);
2102 }
2103 if (mac->link_state == MAC80211_LINKED)
2104 rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
2105 else
2106 rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
2107 break; }
2108 case ERFOFF:{
2109 for (queue_id = 0, i = 0;
2110 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2111 ring = &pcipriv->dev.tx_ring[queue_id];
2112 if (skb_queue_len(&ring->queue) == 0) {
2113 queue_id++;
2114 continue;
2115 } else {
2116 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2117 "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
2118 (i + 1), queue_id,
2119 skb_queue_len(&ring->queue));
2120
2121 udelay(10);
2122 i++;
2123 }
2124 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2125 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2126 "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
2127 MAX_DOZE_WAITING_TIMES_9x,
2128 queue_id,
2129 skb_queue_len(&ring->queue));
2130 break;
2131 }
2132 }
2133 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
2134 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2135 "IPS Set eRf nic disable\n");
2136 rtl_ps_disable_nic(hw);
2137 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2138 } else {
2139 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
2140 rtlpriv->cfg->ops->led_control(hw,
2141 LED_CTL_NO_LINK);
2142 } else {
2143 rtlpriv->cfg->ops->led_control(hw,
2144 LED_CTL_POWER_OFF);
2145 }
2146 }
2147 break; }
2148 case ERFSLEEP:{
2149 if (ppsc->rfpwr_state == ERFOFF)
2150 break;
2151 for (queue_id = 0, i = 0;
2152 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2153 ring = &pcipriv->dev.tx_ring[queue_id];
2154 if (skb_queue_len(&ring->queue) == 0) {
2155 queue_id++;
2156 continue;
2157 } else {
2158 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2159 "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
2160 (i + 1), queue_id,
2161 skb_queue_len(&ring->queue));
2162
2163 udelay(10);
2164 i++;
2165 }
2166 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2167 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2168 "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
2169 MAX_DOZE_WAITING_TIMES_9x,
2170 queue_id,
2171 skb_queue_len(&ring->queue));
2172 break;
2173 }
2174 }
2175 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2176 "Set ERFSLEEP awaked:%d ms\n",
2177 jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
2178 ppsc->last_sleep_jiffies = jiffies;
2179 _rtl88ee_phy_set_rf_sleep(hw);
2180 break; }
2181 default:
2182 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2183 "switch case not processed\n");
2184 bresult = false;
2185 break;
2186 }
2187 if (bresult)
2188 ppsc->rfpwr_state = rfpwr_state;
2189 return bresult;
2190}
2191
2192bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
2193 enum rf_pwrstate rfpwr_state)
2194{
2195 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2196 bool bresult;
2197
2198 if (rfpwr_state == ppsc->rfpwr_state)
2199 return false;
2200 bresult = _rtl88ee_phy_set_rf_power_state(hw, rfpwr_state);
2201 return bresult;
2202}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
new file mode 100644
index 000000000000..f1acd6d27e44
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
@@ -0,0 +1,236 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_PHY_H__
31#define __RTL92C_PHY_H__
32
33/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/
34#define MAX_TX_COUNT 4
35
36#define MAX_PRECMD_CNT 16
37#define MAX_RFDEPENDCMD_CNT 16
38#define MAX_POSTCMD_CNT 16
39
40#define MAX_DOZE_WAITING_TIMES_9x 64
41
42#define RT_CANNOT_IO(hw) false
43#define HIGHPOWER_RADIOA_ARRAYLEN 22
44
45#define IQK_ADDA_REG_NUM 16
46#define IQK_BB_REG_NUM 9
47#define MAX_TOLERANCE 5
48#define IQK_DELAY_TIME 10
49#define IDX_MAP 15
50
51#define APK_BB_REG_NUM 5
52#define APK_AFE_REG_NUM 16
53#define APK_CURVE_REG_NUM 4
54#define PATH_NUM 2
55
56#define LOOP_LIMIT 5
57#define MAX_STALL_TIME 50
58#define ANTENNADIVERSITYVALUE 0x80
59#define MAX_TXPWR_IDX_NMODE_92S 63
60#define RESET_CNT_LIMIT 3
61
62#define IQK_ADDA_REG_NUM 16
63#define IQK_MAC_REG_NUM 4
64
65#define RF6052_MAX_PATH 2
66
67#define CT_OFFSET_MAC_ADDR 0X16
68
69#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
70#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
71#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
72#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
73#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
74
75#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
76#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
77
78#define CT_OFFSET_CHANNEL_PLAH 0x75
79#define CT_OFFSET_THERMAL_METER 0x78
80#define CT_OFFSET_RF_OPTION 0x79
81#define CT_OFFSET_VERSION 0x7E
82#define CT_OFFSET_CUSTOMER_ID 0x7F
83
84#define RTL92C_MAX_PATH_NUM 2
85
/* Command identifiers for the software channel-switch sequencer. */
enum swchnlcmd_id {
	CMDID_END,
	CMDID_SET_TXPOWEROWER_LEVEL,
	CMDID_BBREGWRITE10,
	CMDID_WRITEPORT_ULONG,
	CMDID_WRITEPORT_USHORT,
	CMDID_WRITEPORT_UCHAR,
	CMDID_RF_WRITEREG,
};

/* One step of a channel-switch sequence: the action to perform, its
 * two parameters and the delay (ms) to wait after executing it. */
struct swchnlcmd {
	enum swchnlcmd_id cmdid;
	u32 para1;
	u32 para2;
	u32 msdelay;
};
102
103enum hw90_block_e {
104 HW90_BLOCK_MAC = 0,
105 HW90_BLOCK_PHY0 = 1,
106 HW90_BLOCK_PHY1 = 2,
107 HW90_BLOCK_RF = 3,
108 HW90_BLOCK_MAXIMUM = 4,
109};
110
111enum baseband_config_type {
112 BASEBAND_CONFIG_PHY_REG = 0,
113 BASEBAND_CONFIG_AGC_TAB = 1,
114};
115
116enum ra_offset_area {
117 RA_OFFSET_LEGACY_OFDM1,
118 RA_OFFSET_LEGACY_OFDM2,
119 RA_OFFSET_HT_OFDM1,
120 RA_OFFSET_HT_OFDM2,
121 RA_OFFSET_HT_OFDM3,
122 RA_OFFSET_HT_OFDM4,
123 RA_OFFSET_HT_CCK,
124};
125
126enum antenna_path {
127 ANTENNA_NONE,
128 ANTENNA_D,
129 ANTENNA_C,
130 ANTENNA_CD,
131 ANTENNA_B,
132 ANTENNA_BD,
133 ANTENNA_BC,
134 ANTENNA_BCD,
135 ANTENNA_A,
136 ANTENNA_AD,
137 ANTENNA_AC,
138 ANTENNA_ACD,
139 ANTENNA_AB,
140 ANTENNA_ABD,
141 ANTENNA_ABC,
142 ANTENNA_ABCD
143};
144
145struct r_antenna_select_ofdm {
146 u32 r_tx_antenna:4;
147 u32 r_ant_l:4;
148 u32 r_ant_non_ht:4;
149 u32 r_ant_ht1:4;
150 u32 r_ant_ht2:4;
151 u32 r_ant_ht_s1:4;
152 u32 r_ant_non_ht_s1:4;
153 u32 ofdm_txsc:2;
154 u32 reserved:2;
155};
156
157struct r_antenna_select_cck {
158 u8 r_cckrx_enable_2:2;
159 u8 r_cckrx_enable:2;
160 u8 r_ccktx_enable:4;
161};
162
163
164struct efuse_contents {
165 u8 mac_addr[ETH_ALEN];
166 u8 cck_tx_power_idx[6];
167 u8 ht40_1s_tx_power_idx[6];
168 u8 ht40_2s_tx_power_idx_diff[3];
169 u8 ht20_tx_power_idx_diff[3];
170 u8 ofdm_tx_power_idx_diff[3];
171 u8 ht40_max_power_offset[3];
172 u8 ht20_max_power_offset[3];
173 u8 channel_plan;
174 u8 thermal_meter;
175 u8 rf_option[5];
176 u8 version;
177 u8 oem_id;
178 u8 regulatory;
179};
180
181struct tx_power_struct {
182 u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
183 u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
184 u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
185 u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
186 u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
187 u8 legacy_ht_txpowerdiff;
188 u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
189 u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
190 u8 pwrgroup_cnt;
191 u32 mcs_original_offset[4][16];
192};
193
/* Antenna-diversity schemes, as encoded in the efuse rf_option field.
 * NOTE(review): the leading-underscore tag name treads on the
 * implementation-reserved namespace, but renaming would touch users
 * outside this file. */
enum _ANT_DIV_TYPE {
	NO_ANTDIV = 0xFF,
	CG_TRX_HW_ANTDIV = 0x01,
	CGCS_RX_HW_ANTDIV = 0x02,
	FIXED_HW_ANTDIV = 0x03,
	CG_TRX_SMART_ANTDIV = 0x04,
	CGCS_RX_SW_ANTDIV = 0x05,
};
202
203extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
204 u32 regaddr, u32 bitmask);
205extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
206 u32 regaddr, u32 bitmask, u32 data);
207extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
208 enum radio_path rfpath, u32 regaddr,
209 u32 bitmask);
210extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
211 enum radio_path rfpath, u32 regaddr,
212 u32 bitmask, u32 data);
213extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
214extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
215extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
216extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
217extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
218 long *powerlevel);
219extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
220extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
221 u8 operation);
222extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
223extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
224 enum nl80211_channel_type ch_type);
225extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
226extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
227extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
228void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
229void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
230bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
231 enum radio_path rfpath);
232bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
233extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
234 enum rf_pwrstate rfpwr_state);
235
236#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
new file mode 100644
index 000000000000..6dc4e3a954f6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
@@ -0,0 +1,109 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "pwrseqcmd.h"
31#include "pwrseq.h"
32
33/* Power-sequencing tables. The HAL walks each array entry by entry with
34 * rtl88_hal_pwrseqcmdparsing() until the PWR_CMD_END sentinel is reached. */
34/* Power-on flow: CARDEMU (card emulation) -> ACT (active) */
35struct wlan_pwr_cfg rtl8188e_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS +
36 RTL8188E_TRANS_END_STEPS] = {
37 RTL8188E_TRANS_CARDEMU_TO_ACT
38 RTL8188E_TRANS_END
39};
40
41/* Radio-off flow: ACT -> CARDEMU */
42struct wlan_pwr_cfg rtl8188e_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
43 + RTL8188E_TRANS_END_STEPS] = {
44 RTL8188E_TRANS_ACT_TO_CARDEMU
45 RTL8188E_TRANS_END
46};
47
48/* Card-disable flow: ACT -> CARDEMU -> CARDDIS (card disabled) */
49struct wlan_pwr_cfg rtl8188e_card_disable_flow
50 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
51 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
52 RTL8188E_TRANS_END_STEPS] = {
53 RTL8188E_TRANS_ACT_TO_CARDEMU
54 RTL8188E_TRANS_CARDEMU_TO_CARDDIS
55 RTL8188E_TRANS_END
56};
57
58/* Card-enable flow: CARDDIS -> CARDEMU -> ACT */
59struct wlan_pwr_cfg rtl8188e_card_enable_flow
60 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + /* NOTE(review): size macros reused from the disable flow; they only upper-bound the actual step count */
61 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
62 RTL8188E_TRANS_END_STEPS] = {
63 RTL8188E_TRANS_CARDDIS_TO_CARDEMU
64 RTL8188E_TRANS_CARDEMU_TO_ACT
65 RTL8188E_TRANS_END
66};
67
68/* Suspend flow: ACT -> CARDEMU -> SUS (suspend) */
69struct wlan_pwr_cfg rtl8188e_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
70 + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS
71 + RTL8188E_TRANS_END_STEPS] = {
72 RTL8188E_TRANS_ACT_TO_CARDEMU
73 RTL8188E_TRANS_CARDEMU_TO_SUS
74 RTL8188E_TRANS_END
75};
76
77/* Resume flow: SUS -> CARDEMU -> ACT */
78struct wlan_pwr_cfg rtl8188e_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
79 + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS
80 + RTL8188E_TRANS_END_STEPS] = {
81 RTL8188E_TRANS_SUS_TO_CARDEMU
82 RTL8188E_TRANS_CARDEMU_TO_ACT
83 RTL8188E_TRANS_END
84};
85
86/* HW power-down flow: ACT -> CARDEMU -> PDN (power down) */
87struct wlan_pwr_cfg rtl8188e_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
88 + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS
89 + RTL8188E_TRANS_END_STEPS] = {
90 RTL8188E_TRANS_ACT_TO_CARDEMU
91 RTL8188E_TRANS_CARDEMU_TO_PDN
92 RTL8188E_TRANS_END
93};
94
95/* Enter-LPS flow: ACT -> LPS (low power state) */
96struct wlan_pwr_cfg rtl8188e_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS
97 + RTL8188E_TRANS_END_STEPS] = {
98 /* "FW behavior": presumably mirrors what the firmware does -- TODO confirm */
99 RTL8188E_TRANS_ACT_TO_LPS
100 RTL8188E_TRANS_END
101};
102
103/* Leave-LPS flow: LPS -> ACT */
104struct wlan_pwr_cfg rtl8188e_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS
105 + RTL8188E_TRANS_END_STEPS] = {
106 /* "FW behavior": presumably mirrors what the firmware does -- TODO confirm */
107 RTL8188E_TRANS_LPS_TO_ACT
108 RTL8188E_TRANS_END
109};
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
new file mode 100644
index 000000000000..028ec6dd52b4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -0,0 +1,327 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_PWRSEQ_H__
31#define __RTL8723E_PWRSEQ_H__
32
33#include "pwrseqcmd.h"
34/*
35 Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
36 There are 6 HW Power States:
37 0: POFF--Power Off
38 1: PDN--Power Down
39 2: CARDEMU--Card Emulation
40 3: ACT--Active Mode
41 4: LPS--Low Power State
42 5: SUS--Suspend
43
44 The transision from different states are defined below
45 TRANS_CARDEMU_TO_ACT
46 TRANS_ACT_TO_CARDEMU
47 TRANS_CARDEMU_TO_SUS
48 TRANS_SUS_TO_CARDEMU
49 TRANS_CARDEMU_TO_PDN
50 TRANS_ACT_TO_LPS
51 TRANS_LPS_TO_ACT
52
53 TRANS_END
54 PWR SEQ Version: rtl8188e_PwrSeq_V09.h
55*/
56
57#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
58#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
59#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
60#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10
61#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10
62#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10
63#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15
64#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15
65#define RTL8188E_TRANS_END_STEPS 1
66
67
68#define RTL8188E_TRANS_CARDEMU_TO_ACT \
69 /* format */ \
70 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\
71 {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
72 /* wait till 0x04[17] = 1 power ready*/ \
73 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
74 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
75 /* 0x02[1:0] = 0 reset BB*/ \
76 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0)|BIT(1), 0}, \
77 {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
78 /*0x24[23] = 2b'01 schmit trigger */ \
79 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
80 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
81 /* 0x04[15] = 0 disable HWPDN (control by DRV)*/ \
82 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
83 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
84 /*0x04[12:11] = 2b'00 disable WL suspend*/ \
85 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), 0}, \
86 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
87 /*0x04[8] = 1 polling until return 0*/ \
88 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
89 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
90 /*wait till 0x04[8] = 0*/ \
91 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \
92 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
93 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*LDO normal mode*/\
94 {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
95 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*SDIO Driving*/\
96
97#define RTL8188E_TRANS_ACT_TO_CARDEMU \
98 /* format */ \
99 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\
100 {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
101 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/\
102 {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
103 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*LDO Sleep mode*/\
104 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
105 /*0x04[9] = 1 turn off MAC by HW state machine*/ \
106 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
107 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
108 /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
109 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \
110
111
112#define RTL8188E_TRANS_CARDEMU_TO_SUS \
113 /* format */ \
114 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\
115 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
116 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
117 /*0x04[12:11] = 2b'01enable WL suspend*/ \
118 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \
119 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
120 /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \
121 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)},\
122 {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
123 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
124 /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\
125 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT(7)}, \
126 {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
127 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
128 /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
129 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
130 {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
131 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
132 /*Set USB suspend enable local register 0xfe10[4]= 1 */ \
133 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
134 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
135 /*Set SDIO suspend local register*/ \
136 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
137 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
138 /*wait power state to suspend*/ \
139 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
140
141#define RTL8188E_TRANS_SUS_TO_CARDEMU \
142 /* format */ \
143 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
144 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
145 /*Set SDIO suspend local register*/ \
146 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
147 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
148 /*wait power state to suspend*/ \
149 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
150 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
151 /*0x04[12:11] = 2b'01enable WL suspend*/ \
152 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},
153
154#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
155 /* format */ \
156 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
157 {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
158 /*0x24[23] = 2b'01 schmit trigger */ \
159 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
160 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
161 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
162 /*0x04[12:11] = 2b'01 enable WL suspend*/ \
163 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \
164 {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
165 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
166 /* 0x04[31:30] = 2b'10 enable bandgap mbias in suspend */\
167 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \
168 {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
169 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \
170 /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
171 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
172 {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
173 /*Set USB suspend enable local register 0xfe10[4]= 1 */ \
174 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
175 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
176 /*Set SDIO suspend local register*/ \
177 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
178 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
179 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend; fix: PWR_BASEADDR_SDIO was missing, so base/cmd/msk/value were shifted and the poll mask became 0 (always-true no-op)*/
180
181#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
182 /* format */ \
183 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
184 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
185 PWR_BASEADDR_SDIO,\
186 PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
187 {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
188 PWR_BASEADDR_SDIO,\
189 PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
190 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
191 PWR_BASEADDR_MAC, \
192 PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \
193 /*0x04[12:11] = 2b'01enable WL suspend*/
194
195
196#define RTL8188E_TRANS_CARDEMU_TO_PDN \
197 /* format */ \
198 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
199 {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
200 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/ \
201 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
202 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/
203
204
205#define RTL8188E_TRANS_PDN_TO_CARDEMU \
206 /* format */ \
207 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
208 {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
209 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/
210
211
212#define RTL8188E_TRANS_ACT_TO_LPS \
213 /* format */ \
214 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\
215 {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
216 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
217 {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
218 /*zero if no pkt is tx*/\
219 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
220 {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
221 /*Should be zero if no packet is transmitting*/ \
222 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
223 {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
224 /*Should be zero if no packet is transmitting*/ \
225 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
226 {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
227 /*Should be zero if no packet is transmitting*/ \
228 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
229 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
230 /*CCK and OFDM are disabled, and clock are gated*/ \
231 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
232 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
233 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/\
234 {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
235 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
236 {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
237 /*check if removed later*/ \
238 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
239 {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
240 /*Respond TxOK to scheduler*/ \
241 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, \
242
243
244#define RTL8188E_TRANS_LPS_TO_ACT \
245 /* format */ \
246 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
247 {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
248 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \
249 {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
250 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \
251 {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
252 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \
253 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
254 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \
255 {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
256 /*. 0x08[4] = 0 switch TSF to 40M*/ \
257 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
258 {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
259 /*Polling 0x109[7]= 0 TSF in 40M*/ \
260 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \
261 {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
262 /*. 0x29[7:6] = 2b'00 enable BB clock*/ \
263 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \
264 {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
265 /*. 0x101[1] = 1*/\
266 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
267 {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
268 /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
269 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
270 {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
271 /*. 0x02[1:0] = 2b'11 enable BB macro*/\
272 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)}, \
273 {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
274 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
275
276
277#define RTL8188E_TRANS_END \
278 /* format */ \
279 /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\
280 {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
281 0, PWR_CMD_END, 0, 0}
282
283extern struct wlan_pwr_cfg rtl8188e_power_on_flow
284 [RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS +
285 RTL8188E_TRANS_END_STEPS];
286extern struct wlan_pwr_cfg rtl8188e_radio_off_flow
287 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
288 RTL8188E_TRANS_END_STEPS];
289extern struct wlan_pwr_cfg rtl8188e_card_disable_flow
290 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
291 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
292 RTL8188E_TRANS_END_STEPS];
293extern struct wlan_pwr_cfg rtl8188e_card_enable_flow
294 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
295 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
296 RTL8188E_TRANS_END_STEPS];
297extern struct wlan_pwr_cfg rtl8188e_suspend_flow
298 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
299 RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS +
300 RTL8188E_TRANS_END_STEPS];
301extern struct wlan_pwr_cfg rtl8188e_resume_flow
302 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
303 RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS +
304 RTL8188E_TRANS_END_STEPS];
305extern struct wlan_pwr_cfg rtl8188e_hwpdn_flow
306 [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
307 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
308 RTL8188E_TRANS_END_STEPS];
309extern struct wlan_pwr_cfg rtl8188e_enter_lps_flow
310 [RTL8188E_TRANS_ACT_TO_LPS_STEPS +
311 RTL8188E_TRANS_END_STEPS];
312extern struct wlan_pwr_cfg rtl8188e_leave_lps_flow
313 [RTL8188E_TRANS_LPS_TO_ACT_STEPS +
314 RTL8188E_TRANS_END_STEPS];
315
316/* RTL8188E Power Configuration CMD flows for the PCIe interface */
317#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188e_power_on_flow
318#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188e_radio_off_flow
319#define Rtl8188E_NIC_DISABLE_FLOW rtl8188e_card_disable_flow
320#define Rtl8188E_NIC_ENABLE_FLOW rtl8188e_card_enable_flow
321#define Rtl8188E_NIC_SUSPEND_FLOW rtl8188e_suspend_flow
322#define Rtl8188E_NIC_RESUME_FLOW rtl8188e_resume_flow
323#define Rtl8188E_NIC_PDN_FLOW rtl8188e_hwpdn_flow
324#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188e_enter_lps_flow
325#define Rtl8188E_NIC_LPS_LEAVE_FLOW rtl8188e_leave_lps_flow
326
327#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c
new file mode 100644
index 000000000000..a9cfa13be3a8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c
@@ -0,0 +1,140 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "pwrseq.h"
31
32
33/* Description:
34 * This routine deal with the Power Configuration CMDs
35 * parsing for RTL8723/RTL8188E Series IC.
36 * Assumption:
37 * We should follow specific format which was released from HW SD.
38 *
39 * 2011.07.07, added by Roger.
40 */
41
42bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
43 u8 fab_version, u8 interface_type,
44 struct wlan_pwr_cfg pwrcfgcmd[])
45{
 /* Execute the power-sequence table pwrcfgcmd[] one entry at a time.
  * An entry is acted on only when its cut/fab/interface masks match the
  * caller-supplied cut_version/fab_version/interface_type.  The walk ends
  * at the PWR_CMD_END sentinel (returns true) or when a PWR_CMD_POLLING
  * entry exhausts its retries (returns false).  The table MUST contain a
  * PWR_CMD_END entry: there is no bounds check on ary_idx. */
46 struct wlan_pwr_cfg cmd = {0};
47 bool polling_bit = false;
48 u32 ary_idx = 0;
49 u8 val = 0;
50 u32 offset = 0;
51 u32 polling_count = 0; /* NOTE(review): shared across ALL polling entries in the table, not reset per entry */
52 u32 max_polling_cnt = 5000;
53
54 do {
55 cmd = pwrcfgcmd[ary_idx];
56 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
57 "rtl88_hal_pwrseqcmdparsing(): offset(%#x), cut_msk(%#x), fab_msk(%#x),"
58 "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), val(%#x)\n",
59 GET_PWR_CFG_OFFSET(cmd),
60 GET_PWR_CFG_CUT_MASK(cmd),
61 GET_PWR_CFG_FAB_MASK(cmd),
62 GET_PWR_CFG_INTF_MASK(cmd),
63 GET_PWR_CFG_BASE(cmd),
64 GET_PWR_CFG_CMD(cmd),
65 GET_PWR_CFG_MASK(cmd),
66 GET_PWR_CFG_VALUE(cmd));
67
 /* Skip entries that do not apply to this chip cut / fab / bus. */
68 if ((GET_PWR_CFG_FAB_MASK(cmd) & fab_version) &&
69 (GET_PWR_CFG_CUT_MASK(cmd) & cut_version) &&
70 (GET_PWR_CFG_INTF_MASK(cmd) & interface_type)) {
71 switch (GET_PWR_CFG_CMD(cmd)) {
72 case PWR_CMD_READ:
 /* Trace-only: no register is actually read here. */
73 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
74 "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
75 break;
76 case PWR_CMD_WRITE: {
77 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
78 "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
79 offset = GET_PWR_CFG_OFFSET(cmd);
80
81 /* Read-modify-write: update only the masked bits. */
82 val = rtl_read_byte(rtlpriv, offset);
83 val &= (~(GET_PWR_CFG_MASK(cmd)));
84 val |= (GET_PWR_CFG_VALUE(cmd) &
85 GET_PWR_CFG_MASK(cmd));
86
87 /*Write the val back to sytem register*/
88 rtl_write_byte(rtlpriv, offset, val);
89 }
90 break;
91 case PWR_CMD_POLLING:
92 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
93 "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
94 polling_bit = false;
95 offset = GET_PWR_CFG_OFFSET(cmd);
96
 /* Poll (10 us back-off per retry) until the masked
  * register equals the expected value, or the shared
  * retry budget is exhausted. */
97 do {
98 val = rtl_read_byte(rtlpriv, offset);
99
100 val = val & GET_PWR_CFG_MASK(cmd);
101 if (val == (GET_PWR_CFG_VALUE(cmd) &
102 GET_PWR_CFG_MASK(cmd)))
103 polling_bit = true;
104 else
105 udelay(10);
106
107 if (polling_count++ > max_polling_cnt) {
108 RT_TRACE(rtlpriv, COMP_INIT,
109 DBG_LOUD,
110 "polling fail in pwrseqcmd\n");
111 return false;
112 }
113 } while (!polling_bit);
114
115 break;
116 case PWR_CMD_DELAY:
117 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
118 "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
 /* For delay entries the offset field carries the
  * duration; the value field selects us vs. ms. */
119 if (GET_PWR_CFG_VALUE(cmd) == PWRSEQ_DELAY_US)
120 udelay(GET_PWR_CFG_OFFSET(cmd));
121 else
122 mdelay(GET_PWR_CFG_OFFSET(cmd));
123 break;
124 case PWR_CMD_END:
125 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
126 "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
127 return true;
128 break; /* not reached: follows the return above */
129 default:
130 RT_ASSERT(false,
131 "rtl88_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
132 break;
133 }
134 }
135
136 ary_idx++;
137 } while (1);
138
139 return true; /* not reached: the loop exits only via the returns above */
140}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h
new file mode 100644
index 000000000000..d9ae280bb1a2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h
@@ -0,0 +1,97 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_PWRSEQCMD_H__
31#define __RTL8723E_PWRSEQCMD_H__
32
33#include "../wifi.h"
34/*---------------------------------------------*/
35/* The value of cmd: 4 bits */
36/*---------------------------------------------*/
37#define PWR_CMD_READ 0x00
38#define PWR_CMD_WRITE 0x01
39#define PWR_CMD_POLLING 0x02
40#define PWR_CMD_DELAY 0x03
41#define PWR_CMD_END 0x04
42
43/* define the base address of each block */
44#define PWR_BASEADDR_MAC 0x00
45#define PWR_BASEADDR_USB 0x01
46#define PWR_BASEADDR_PCIE 0x02
47#define PWR_BASEADDR_SDIO 0x03
48
49#define PWR_INTF_SDIO_MSK BIT(0)
50#define PWR_INTF_USB_MSK BIT(1)
51#define PWR_INTF_PCI_MSK BIT(2)
52#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
53
54#define PWR_FAB_TSMC_MSK BIT(0)
55#define PWR_FAB_UMC_MSK BIT(1)
56#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
57
58#define PWR_CUT_TESTCHIP_MSK BIT(0)
59#define PWR_CUT_A_MSK BIT(1)
60#define PWR_CUT_B_MSK BIT(2)
61#define PWR_CUT_C_MSK BIT(3)
62#define PWR_CUT_D_MSK BIT(4)
63#define PWR_CUT_E_MSK BIT(5)
64#define PWR_CUT_F_MSK BIT(6)
65#define PWR_CUT_G_MSK BIT(7)
66#define PWR_CUT_ALL_MSK 0xFF
67
68enum pwrseq_delay_unit {
69 PWRSEQ_DELAY_US,
70 PWRSEQ_DELAY_MS,
71};
72
73struct wlan_pwr_cfg { /* one step of a power-sequencing table */
74 u16 offset; /* register offset to act on; for PWR_CMD_DELAY it holds the delay amount */
75 u8 cut_msk; /* chip cut versions this step applies to (PWR_CUT_*_MSK) */
76 u8 fab_msk:4; /* fab vendors this step applies to (PWR_FAB_*_MSK) */
77 u8 interface_msk:4; /* bus interfaces this step applies to (PWR_INTF_*_MSK) */
78 u8 base:4; /* address block (PWR_BASEADDR_*); only traced, not acted on, by rtl88_hal_pwrseqcmdparsing() */
79 u8 cmd:4; /* action to perform (PWR_CMD_*) */
80 u8 msk; /* bit mask applied for write/polling commands */
81 u8 value; /* value written or polled for; for PWR_CMD_DELAY it selects the unit (enum pwrseq_delay_unit) */
82};
83
84#define GET_PWR_CFG_OFFSET(__PWR) (__PWR.offset)
85#define GET_PWR_CFG_CUT_MASK(__PWR) (__PWR.cut_msk)
86#define GET_PWR_CFG_FAB_MASK(__PWR) (__PWR.fab_msk)
87#define GET_PWR_CFG_INTF_MASK(__PWR) (__PWR.interface_msk)
88#define GET_PWR_CFG_BASE(__PWR) (__PWR.base)
89#define GET_PWR_CFG_CMD(__PWR) (__PWR.cmd)
90#define GET_PWR_CFG_MASK(__PWR) (__PWR.msk)
91#define GET_PWR_CFG_VALUE(__PWR) (__PWR.value)
92
93bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
94 u8 fab_version, u8 interface_type,
95 struct wlan_pwr_cfg pwrcfgcmd[]);
96
97#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
new file mode 100644
index 000000000000..d849abf7d94a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
@@ -0,0 +1,2258 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_REG_H__
31#define __RTL92C_REG_H__
32
33#define TXPKT_BUF_SELECT 0x69
34#define RXPKT_BUF_SELECT 0xA5
35#define DISABLE_TRXPKT_BUF_ACCESS 0x0
36
37#define REG_SYS_ISO_CTRL 0x0000
38#define REG_SYS_FUNC_EN 0x0002
39#define REG_APS_FSMCO 0x0004
40#define REG_SYS_CLKR 0x0008
41#define REG_9346CR 0x000A
42#define REG_EE_VPD 0x000C
43#define REG_AFE_MISC 0x0010
44#define REG_SPS0_CTRL 0x0011
45#define REG_SPS_OCP_CFG 0x0018
46#define REG_RSV_CTRL 0x001C
47#define REG_RF_CTRL 0x001F
48#define REG_LDOA15_CTRL 0x0020
49#define REG_LDOV12D_CTRL 0x0021
50#define REG_LDOHCI12_CTRL 0x0022
51#define REG_LPLDO_CTRL 0x0023
52#define REG_AFE_XTAL_CTRL 0x0024
53#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test
54 * chip, 1.4v for MP chip
55 */
56#define REG_AFE_PLL_CTRL 0x0028
57#define REG_EFUSE_CTRL 0x0030
58#define REG_EFUSE_TEST 0x0034
59#define REG_PWR_DATA 0x0038
60#define REG_CAL_TIMER 0x003C
61#define REG_ACLK_MON 0x003E
62#define REG_GPIO_MUXCFG 0x0040
63#define REG_GPIO_IO_SEL 0x0042
64#define REG_MAC_PINMUX_CFG 0x0043
65#define REG_GPIO_PIN_CTRL 0x0044
66#define REG_GPIO_INTM 0x0048
67#define REG_LEDCFG0 0x004C
68#define REG_LEDCFG1 0x004D
69#define REG_LEDCFG2 0x004E
70#define REG_LEDCFG3 0x004F
71#define REG_FSIMR 0x0050
72#define REG_FSISR 0x0054
73#define REG_HSIMR 0x0058
74#define REG_HSISR 0x005c
75#define REG_GPIO_PIN_CTRL_2 0x0060
76#define REG_GPIO_IO_SEL_2 0x0062
77#define REG_GPIO_OUTPUT 0x006c
78#define REG_AFE_XTAL_CTRL_EXT 0x0078
79#define REG_XCK_OUT_CTRL 0x007c
80#define REG_MCUFWDL 0x0080
81#define REG_WOL_EVENT 0x0081
82#define REG_MCUTSTCFG 0x0084
83
84
85#define REG_HIMR 0x00B0
86#define REG_HISR 0x00B4
87#define REG_HIMRE 0x00B8
88#define REG_HISRE 0x00BC
89
90#define REG_EFUSE_ACCESS 0x00CF
91
92#define REG_BIST_SCAN 0x00D0
93#define REG_BIST_RPT 0x00D4
94#define REG_BIST_ROM_RPT 0x00D8
95#define REG_USB_SIE_INTF 0x00E0
96#define REG_PCIE_MIO_INTF 0x00E4
97#define REG_PCIE_MIO_INTD 0x00E8
98#define REG_HPON_FSM 0x00EC
99#define REG_SYS_CFG 0x00F0
100
101#define REG_CR 0x0100
102#define REG_PBP 0x0104
103#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
104#define REG_TRXDMA_CTRL 0x010C
105#define REG_TRXFF_BNDY 0x0114
106#define REG_TRXFF_STATUS 0x0118
107#define REG_RXFF_PTR 0x011C
108
109#define REG_CPWM 0x012F
110#define REG_FWIMR 0x0130
111#define REG_FWISR 0x0134
112#define REG_PKTBUF_DBG_CTRL 0x0140
113#define REG_PKTBUF_DBG_DATA_L 0x0144
114#define REG_PKTBUF_DBG_DATA_H 0x0148
115#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
116
117#define REG_TC0_CTRL 0x0150
118#define REG_TC1_CTRL 0x0154
119#define REG_TC2_CTRL 0x0158
120#define REG_TC3_CTRL 0x015C
121#define REG_TC4_CTRL 0x0160
122#define REG_TCUNIT_BASE 0x0164
123#define REG_MBIST_START 0x0174
124#define REG_MBIST_DONE 0x0178
125#define REG_MBIST_FAIL 0x017C
126#define REG_32K_CTRL 0x0194
127#define REG_C2HEVT_MSG_NORMAL 0x01A0
128#define REG_C2HEVT_CLEAR 0x01AF
129#define REG_C2HEVT_MSG_TEST 0x01B8
130#define REG_MCUTST_1 0x01c0
131#define REG_FMETHR 0x01C8
132#define REG_HMETFR 0x01CC
133#define REG_HMEBOX_0 0x01D0
134#define REG_HMEBOX_1 0x01D4
135#define REG_HMEBOX_2 0x01D8
136#define REG_HMEBOX_3 0x01DC
137
138#define REG_LLT_INIT 0x01E0
139#define REG_BB_ACCEESS_CTRL 0x01E8
140#define REG_BB_ACCESS_DATA 0x01EC
141
142#define REG_HMEBOX_EXT_0 0x01F0
143#define REG_HMEBOX_EXT_1 0x01F4
144#define REG_HMEBOX_EXT_2 0x01F8
145#define REG_HMEBOX_EXT_3 0x01FC
146
147#define REG_RQPN 0x0200
148#define REG_FIFOPAGE 0x0204
149#define REG_TDECTRL 0x0208
150#define REG_TXDMA_OFFSET_CHK 0x020C
151#define REG_TXDMA_STATUS 0x0210
152#define REG_RQPN_NPQ 0x0214
153
154#define REG_RXDMA_AGG_PG_TH 0x0280
155#define REG_FW_UPD_RDPTR 0x0284 /* FW shall update this
156 * register before FW * write
157 * RXPKT_RELEASE_POLL to 1
158 */
159#define REG_RXDMA_CONTROL 0x0286 /* Control the RX DMA.*/
160#define REG_RXPKT_NUM 0x0287 /* The number of packets
161 * in RXPKTBUF.
162 */
163#define REG_PCIE_CTRL_REG 0x0300
164#define REG_INT_MIG 0x0304
165#define REG_BCNQ_DESA 0x0308
166#define REG_HQ_DESA 0x0310
167#define REG_MGQ_DESA 0x0318
168#define REG_VOQ_DESA 0x0320
169#define REG_VIQ_DESA 0x0328
170#define REG_BEQ_DESA 0x0330
171#define REG_BKQ_DESA 0x0338
172#define REG_RX_DESA 0x0340
173
174#define REG_DBI 0x0348
175#define REG_MDIO 0x0354
176#define REG_DBG_SEL 0x0360
177#define REG_PCIE_HRPWM 0x0361
178#define REG_PCIE_HCPWM 0x0363
179#define REG_UART_CTRL 0x0364
180#define REG_WATCH_DOG 0x0368
181#define REG_UART_TX_DESA 0x0370
182#define REG_UART_RX_DESA 0x0378
183
184
185#define REG_HDAQ_DESA_NODEF 0x0000
186#define REG_CMDQ_DESA_NODEF 0x0000
187
188#define REG_VOQ_INFORMATION 0x0400
189#define REG_VIQ_INFORMATION 0x0404
190#define REG_BEQ_INFORMATION 0x0408
191#define REG_BKQ_INFORMATION 0x040C
192#define REG_MGQ_INFORMATION 0x0410
193#define REG_HGQ_INFORMATION 0x0414
194#define REG_BCNQ_INFORMATION 0x0418
195#define REG_TXPKT_EMPTY 0x041A
196
197
198#define REG_CPU_MGQ_INFORMATION 0x041C
199#define REG_FWHW_TXQ_CTRL 0x0420
200#define REG_HWSEQ_CTRL 0x0423
201#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
202#define REG_TXPKTBUF_MGQ_BDNY 0x0425
203#define REG_MULTI_BCNQ_EN 0x0426
204#define REG_MULTI_BCNQ_OFFSET 0x0427
205#define REG_SPEC_SIFS 0x0428
206#define REG_RL 0x042A
207#define REG_DARFRC 0x0430
208#define REG_RARFRC 0x0438
209#define REG_RRSR 0x0440
210#define REG_ARFR0 0x0444
211#define REG_ARFR1 0x0448
212#define REG_ARFR2 0x044C
213#define REG_ARFR3 0x0450
214#define REG_AGGLEN_LMT 0x0458
215#define REG_AMPDU_MIN_SPACE 0x045C
216#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
217#define REG_FAST_EDCA_CTRL 0x0460
218#define REG_RD_RESP_PKT_TH 0x0463
219#define REG_INIRTS_RATE_SEL 0x0480
220#define REG_INIDATA_RATE_SEL 0x0484
221#define REG_POWER_STATUS 0x04A4
222#define REG_POWER_STAGE1 0x04B4
223#define REG_POWER_STAGE2 0x04B8
224#define REG_PKT_LIFE_TIME 0x04C0
225#define REG_STBC_SETTING 0x04C4
226#define REG_PROT_MODE_CTRL 0x04C8
227#define REG_BAR_MODE_CTRL 0x04CC
228#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
229#define REG_EARLY_MODE_CONTROL 0x04D0
230#define REG_NQOS_SEQ 0x04DC
231#define REG_QOS_SEQ 0x04DE
232#define REG_NEED_CPU_HANDLE 0x04E0
233#define REG_PKT_LOSE_RPT 0x04E1
234#define REG_PTCL_ERR_STATUS 0x04E2
235#define REG_TX_RPT_CTRL 0x04EC
236#define REG_TX_RPT_TIME 0x04F0
237#define REG_DUMMY 0x04FC
238
239#define REG_EDCA_VO_PARAM 0x0500
240#define REG_EDCA_VI_PARAM 0x0504
241#define REG_EDCA_BE_PARAM 0x0508
242#define REG_EDCA_BK_PARAM 0x050C
243#define REG_BCNTCFG 0x0510
244#define REG_PIFS 0x0512
245#define REG_RDG_PIFS 0x0513
246#define REG_SIFS_CTX 0x0514
247#define REG_SIFS_TRX 0x0516
248#define REG_AGGR_BREAK_TIME 0x051A
249#define REG_SLOT 0x051B
250#define REG_TX_PTCL_CTRL 0x0520
251#define REG_TXPAUSE 0x0522
252#define REG_DIS_TXREQ_CLR 0x0523
253#define REG_RD_CTRL 0x0524
254#define REG_TBTT_PROHIBIT 0x0540
255#define REG_RD_NAV_NXT 0x0544
256#define REG_NAV_PROT_LEN 0x0546
257#define REG_BCN_CTRL 0x0550
258#define REG_USTIME_TSF 0x0551
259#define REG_MBID_NUM 0x0552
260#define REG_DUAL_TSF_RST 0x0553
261#define REG_BCN_INTERVAL 0x0554
262#define REG_MBSSID_BCN_SPACE 0x0554
263#define REG_DRVERLYINT 0x0558
264#define REG_BCNDMATIM 0x0559
265#define REG_ATIMWND 0x055A
266#define REG_BCN_MAX_ERR 0x055D
267#define REG_RXTSF_OFFSET_CCK 0x055E
268#define REG_RXTSF_OFFSET_OFDM 0x055F
269#define REG_TSFTR 0x0560
270#define REG_INIT_TSFTR 0x0564
271#define REG_PSTIMER 0x0580
272#define REG_TIMER0 0x0584
273#define REG_TIMER1 0x0588
274#define REG_ACMHWCTRL 0x05C0
275#define REG_ACMRSTCTRL 0x05C1
276#define REG_ACMAVG 0x05C2
277#define REG_VO_ADMTIME 0x05C4
278#define REG_VI_ADMTIME 0x05C6
279#define REG_BE_ADMTIME 0x05C8
280#define REG_EDCA_RANDOM_GEN 0x05CC
281#define REG_SCH_TXCMD 0x05D0
282
283#define REG_APSD_CTRL 0x0600
284#define REG_BWOPMODE 0x0603
285#define REG_TCR 0x0604
286#define REG_RCR 0x0608
287#define REG_RX_PKT_LIMIT 0x060C
288#define REG_RX_DLK_TIME 0x060D
289#define REG_RX_DRVINFO_SZ 0x060F
290
291#define REG_MACID 0x0610
292#define REG_BSSID 0x0618
293#define REG_MAR 0x0620
294#define REG_MBIDCAMCFG 0x0628
295
296#define REG_USTIME_EDCA 0x0638
297#define REG_MAC_SPEC_SIFS 0x063A
298#define REG_RESP_SIFS_CCK 0x063C
299#define REG_RESP_SIFS_OFDM 0x063E
300#define REG_ACKTO 0x0640
301#define REG_CTS2TO 0x0641
302#define REG_EIFS 0x0642
303
304#define REG_NAV_CTRL 0x0650
305#define REG_BACAMCMD 0x0654
306#define REG_BACAMCONTENT 0x0658
307#define REG_LBDLY 0x0660
308#define REG_FWDLY 0x0661
309#define REG_RXERR_RPT 0x0664
310#define REG_TRXPTCL_CTL 0x0668
311
312#define REG_CAMCMD 0x0670
313#define REG_CAMWRITE 0x0674
314#define REG_CAMREAD 0x0678
315#define REG_CAMDBG 0x067C
316#define REG_SECCFG 0x0680
317
318#define REG_WOW_CTRL 0x0690
319#define REG_PSSTATUS 0x0691
320#define REG_PS_RX_INFO 0x0692
321#define REG_UAPSD_TID 0x0693
322#define REG_LPNAV_CTRL 0x0694
323#define REG_WKFMCAM_NUM 0x0698
324#define REG_WKFMCAM_RWD 0x069C
325#define REG_RXFLTMAP0 0x06A0
326#define REG_RXFLTMAP1 0x06A2
327#define REG_RXFLTMAP2 0x06A4
328#define REG_BCN_PSR_RPT 0x06A8
329#define REG_CALB32K_CTRL 0x06AC
330#define REG_PKT_MON_CTRL 0x06B4
331#define REG_BT_COEX_TABLE 0x06C0
332#define REG_WMAC_RESP_TXINFO 0x06D8
333
334#define REG_USB_INFO 0xFE17
335#define REG_USB_SPECIAL_OPTION 0xFE55
336#define REG_USB_DMA_AGG_TO 0xFE5B
337#define REG_USB_AGG_TO 0xFE5C
338#define REG_USB_AGG_TH 0xFE5D
339
340#define REG_TEST_USB_TXQS 0xFE48
341#define REG_TEST_SIE_VID 0xFE60
342#define REG_TEST_SIE_PID 0xFE62
343#define REG_TEST_SIE_OPTIONAL 0xFE64
344#define REG_TEST_SIE_CHIRP_K 0xFE65
345#define REG_TEST_SIE_PHY 0xFE66
346#define REG_TEST_SIE_MAC_ADDR 0xFE70
347#define REG_TEST_SIE_STRING 0xFE80
348
349#define REG_NORMAL_SIE_VID 0xFE60
350#define REG_NORMAL_SIE_PID 0xFE62
351#define REG_NORMAL_SIE_OPTIONAL 0xFE64
352#define REG_NORMAL_SIE_EP 0xFE65
353#define REG_NORMAL_SIE_PHY 0xFE68
354#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
355#define REG_NORMAL_SIE_STRING 0xFE80
356
357#define CR9346 REG_9346CR
358#define MSR (REG_CR + 2)
359#define ISR REG_HISR
360#define TSFR REG_TSFTR
361
362#define MACIDR0 REG_MACID
363#define MACIDR4 (REG_MACID + 4)
364
365#define PBP REG_PBP
366
367#define IDR0 MACIDR0
368#define IDR4 MACIDR4
369
370#define UNUSED_REGISTER 0x1BF
371#define DCAM UNUSED_REGISTER
372#define PSR UNUSED_REGISTER
373#define BBADDR UNUSED_REGISTER
374#define PHYDATAR UNUSED_REGISTER
375
376#define INVALID_BBRF_VALUE 0x12345678
377
378#define MAX_MSS_DENSITY_2T 0x13
379#define MAX_MSS_DENSITY_1T 0x0A
380
381#define CMDEEPROM_EN BIT(5)
382#define CMDEEPROM_SEL BIT(4)
383#define CMD9346CR_9356SEL BIT(4)
384#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
385#define AUTOLOAD_EFUSE CMDEEPROM_EN
386
387#define GPIOSEL_GPIO 0
388#define GPIOSEL_ENBT BIT(5)
389
390#define GPIO_IN REG_GPIO_PIN_CTRL
391#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
392#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
393#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
394
395/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
396#define HSIMR_GPIO12_0_INT_EN BIT(0)
397#define HSIMR_SPS_OCP_INT_EN BIT(5)
398#define HSIMR_RON_INT_EN BIT(6)
399#define HSIMR_PDN_INT_EN BIT(7)
400#define HSIMR_GPIO9_INT_EN BIT(25)
401
402
/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
#define HSISR_GPIO12_0_INT BIT(0)
#define HSISR_SPS_OCP_INT BIT(5)
/* NOTE(review): the "_EN" suffix looks copy-pasted from the HSIMR mask
 * defines above — this is a status bit, not an enable bit. Name kept
 * for compatibility.
 */
#define HSISR_RON_INT_EN BIT(6)
#define HSISR_PDNINT BIT(7)
#define HSISR_GPIO9_INT BIT(25)
409
410#define MSR_NOLINK 0x00
411#define MSR_ADHOC 0x01
412#define MSR_INFRA 0x02
413#define MSR_AP 0x03
414
415#define RRSR_RSC_OFFSET 21
416#define RRSR_SHORT_OFFSET 23
417#define RRSR_RSC_BW_40M 0x600000
418#define RRSR_RSC_UPSUBCHNL 0x400000
419#define RRSR_RSC_LOWSUBCHNL 0x200000
420#define RRSR_SHORT 0x800000
421#define RRSR_1M BIT(0)
422#define RRSR_2M BIT(1)
423#define RRSR_5_5M BIT(2)
424#define RRSR_11M BIT(3)
425#define RRSR_6M BIT(4)
426#define RRSR_9M BIT(5)
427#define RRSR_12M BIT(6)
428#define RRSR_18M BIT(7)
429#define RRSR_24M BIT(8)
430#define RRSR_36M BIT(9)
431#define RRSR_48M BIT(10)
432#define RRSR_54M BIT(11)
433#define RRSR_MCS0 BIT(12)
434#define RRSR_MCS1 BIT(13)
435#define RRSR_MCS2 BIT(14)
436#define RRSR_MCS3 BIT(15)
437#define RRSR_MCS4 BIT(16)
438#define RRSR_MCS5 BIT(17)
439#define RRSR_MCS6 BIT(18)
440#define RRSR_MCS7 BIT(19)
441#define BRSR_ACKSHORTPMB BIT(23)
442
443#define RATR_1M 0x00000001
444#define RATR_2M 0x00000002
445#define RATR_55M 0x00000004
446#define RATR_11M 0x00000008
447#define RATR_6M 0x00000010
448#define RATR_9M 0x00000020
449#define RATR_12M 0x00000040
450#define RATR_18M 0x00000080
451#define RATR_24M 0x00000100
452#define RATR_36M 0x00000200
453#define RATR_48M 0x00000400
454#define RATR_54M 0x00000800
455#define RATR_MCS0 0x00001000
456#define RATR_MCS1 0x00002000
457#define RATR_MCS2 0x00004000
458#define RATR_MCS3 0x00008000
459#define RATR_MCS4 0x00010000
460#define RATR_MCS5 0x00020000
461#define RATR_MCS6 0x00040000
462#define RATR_MCS7 0x00080000
463#define RATR_MCS8 0x00100000
464#define RATR_MCS9 0x00200000
465#define RATR_MCS10 0x00400000
466#define RATR_MCS11 0x00800000
467#define RATR_MCS12 0x01000000
468#define RATR_MCS13 0x02000000
469#define RATR_MCS14 0x04000000
470#define RATR_MCS15 0x08000000
471
472#define RATE_1M BIT(0)
473#define RATE_2M BIT(1)
474#define RATE_5_5M BIT(2)
475#define RATE_11M BIT(3)
476#define RATE_6M BIT(4)
477#define RATE_9M BIT(5)
478#define RATE_12M BIT(6)
479#define RATE_18M BIT(7)
480#define RATE_24M BIT(8)
481#define RATE_36M BIT(9)
482#define RATE_48M BIT(10)
483#define RATE_54M BIT(11)
484#define RATE_MCS0 BIT(12)
485#define RATE_MCS1 BIT(13)
486#define RATE_MCS2 BIT(14)
487#define RATE_MCS3 BIT(15)
488#define RATE_MCS4 BIT(16)
489#define RATE_MCS5 BIT(17)
490#define RATE_MCS6 BIT(18)
491#define RATE_MCS7 BIT(19)
492#define RATE_MCS8 BIT(20)
493#define RATE_MCS9 BIT(21)
494#define RATE_MCS10 BIT(22)
495#define RATE_MCS11 BIT(23)
496#define RATE_MCS12 BIT(24)
497#define RATE_MCS13 BIT(25)
498#define RATE_MCS14 BIT(26)
499#define RATE_MCS15 BIT(27)
500
501#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
502#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \
503 RATR_24M | RATR_36M | RATR_48M | RATR_54M)
504#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
505 RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \
506 RATR_MCS6 | RATR_MCS7)
507#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
508 RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
509 RATR_MCS14 | RATR_MCS15)
510
511#define BW_OPMODE_20MHZ BIT(2)
512#define BW_OPMODE_5G BIT(1)
513#define BW_OPMODE_11J BIT(0)
514
515#define CAM_VALID BIT(15)
516#define CAM_NOTVALID 0x0000
517#define CAM_USEDK BIT(5)
518
519#define CAM_NONE 0x0
520#define CAM_WEP40 0x01
521#define CAM_TKIP 0x02
522#define CAM_AES 0x04
523#define CAM_WEP104 0x05
524
525#define TOTAL_CAM_ENTRY 32
526#define HALF_CAM_ENTRY 16
527
#define CAM_WRITE BIT(16)
#define CAM_READ 0x00000000
/* "POLLINIG" is a historical typo kept for source compatibility;
 * CAM_POLLING is the correctly spelled alias.
 */
#define CAM_POLLINIG BIT(31)
#define CAM_POLLING CAM_POLLINIG
531
532#define SCR_USEDK 0x01
533#define SCR_TXSEC_ENABLE 0x02
534#define SCR_RXSEC_ENABLE 0x04
535
536#define WOW_PMEN BIT(0)
537#define WOW_WOMEN BIT(1)
538#define WOW_MAGIC BIT(2)
539#define WOW_UWF BIT(3)
540
541/*********************************************
542* 8188 IMR/ISR bits
543**********************************************/
544#define IMR_DISABLED 0x0
545/* IMR DW0(0x0060-0063) Bit 0-31 */
546#define IMR_TXCCK BIT(30) /* TXRPT interrupt when CCX bit of
547 * the packet is set
548 */
549#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
550#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires,
551 * this bit is set to 1
552 */
553#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires,
554 * this bit is set to 1
555 */
556#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
557#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
558#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle ind int */
559#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
560#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
561#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is
562 * true, this bit is set to 1)
563 */
564#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Int Extension for Win7 */
565#define IMR_ATIMEND BIT(12) /* CTWidnow End or ATIM Window End */
566#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is
567 * true, this bit is set to 1)
568 */
569#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status,
570 * Write 1 clear
571 */
572#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status,
573 * Write 1 clear
574 */
575#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status,
576 * Write 1 clear
577 */
578#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
579#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
580#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
581#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
582#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
583#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
584#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
585#define IMR_ROK BIT(0) /* Receive DMA OK */
586
587/* IMR DW1(0x00B4-00B7) Bit 0-31 */
588#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
589#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
590#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
591#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
592#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
593#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
594#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
595#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */
596#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */
597#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */
598#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */
599#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */
600#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrup 2 */
601#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */
602#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
603#define IMR_TXERR BIT(11) /* Tx Err Flag Int Status,
604 * write 1 clear.
605 */
606#define IMR_RXERR BIT(10) /* Rx Err Flag INT Status,
607 * Write 1 clear
608 */
609#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
610#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
611
612
613#define HWSET_MAX_SIZE 512
614#define EFUSE_MAX_SECTION 64
615#define EFUSE_REAL_CONTENT_LEN 256
616#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header,
617 * dummy 7 bytes frome CP
618 * test and reserved 1byte.
619 */
620
621#define EEPROM_DEFAULT_TSSI 0x0
622#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
623#define EEPROM_DEFAULT_CRYSTALCAP 0x5
624#define EEPROM_DEFAULT_BOARDTYPE 0x02
625#define EEPROM_DEFAULT_TXPOWER 0x1010
626#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
627
/* Default calibration/power values used when the efuse/EEPROM content
 * is absent or invalid. (The redundant second definition of
 * EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF — it appeared twice with the same
 * value 0x3 — has been removed.)
 */
#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF	0x3
#define EEPROM_DEFAULT_THERMALMETER		0x18
#define EEPROM_DEFAULT_ANTTXPOWERDIFF		0x0
#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP	0x5
#define EEPROM_DEFAULT_TXPOWERLEVEL		0x22
#define EEPROM_DEFAULT_HT40_2SDIFF		0x0
#define EEPROM_DEFAULT_HT20_DIFF		2
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET	0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET	0
638
639#define RF_OPTION1 0x79
640#define RF_OPTION2 0x7A
641#define RF_OPTION3 0x7B
642#define RF_OPTION4 0x7C
643
644#define EEPROM_DEFAULT_PID 0x1234
645#define EEPROM_DEFAULT_VID 0x5678
646#define EEPROM_DEFAULT_CUSTOMERID 0xAB
647#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
648#define EEPROM_DEFAULT_VERSION 0
649
650#define EEPROM_CHANNEL_PLAN_FCC 0x0
651#define EEPROM_CHANNEL_PLAN_IC 0x1
652#define EEPROM_CHANNEL_PLAN_ETSI 0x2
653#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
654#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
655#define EEPROM_CHANNEL_PLAN_MKK 0x5
656#define EEPROM_CHANNEL_PLAN_MKK1 0x6
657#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
658#define EEPROM_CHANNEL_PLAN_TELEC 0x8
659#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
660#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
661#define EEPROM_CHANNEL_PLAN_NCC 0xB
662#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
663
664#define EEPROM_CID_DEFAULT 0x0
665#define EEPROM_CID_TOSHIBA 0x4
666#define EEPROM_CID_CCX 0x10
667#define EEPROM_CID_QMI 0x0D
668#define EEPROM_CID_WHQL 0xFE
669
670#define RTL8188E_EEPROM_ID 0x8129
671
672#define EEPROM_HPON 0x02
673#define EEPROM_CLK 0x06
674#define EEPROM_TESTR 0x08
675
676#define EEPROM_TXPOWERCCK 0x10
677#define EEPROM_TXPOWERHT40_1S 0x16
678#define EEPROM_TXPOWERHT20DIFF 0x1B
679#define EEPROM_TXPOWER_OFDMDIFF 0x1B
680
681#define EEPROM_TX_PWR_INX 0x10
682
683#define EEPROM_CHANNELPLAN 0xB8
684#define EEPROM_XTAL_88E 0xB9
685#define EEPROM_THERMAL_METER_88E 0xBA
686#define EEPROM_IQK_LCK_88E 0xBB
687
688#define EEPROM_RF_BOARD_OPTION_88E 0xC1
689#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
690#define EEPROM_RF_BT_SETTING_88E 0xC3
691#define EEPROM_VERSION 0xC4
692#define EEPROM_CUSTOMER_ID 0xC5
693#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
694
695#define EEPROM_MAC_ADDR 0xD0
696#define EEPROM_VID 0xD6
697#define EEPROM_DID 0xD8
698#define EEPROM_SVID 0xDA
699#define EEPROM_SMID 0xDC
700
/* REG_TXPAUSE per-queue TX stop bits.
 * NOTE(review): "STOPBECON" (beacon) and "STOPHIGHT" (high queue) are
 * historical typos; names kept unchanged for source compatibility.
 */
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
#define STOPVO BIT(3)
#define STOPVI BIT(2)
#define STOPBE BIT(1)
#define STOPBK BIT(0)
708
709#define RCR_APPFCS BIT(31)
710#define RCR_APP_MIC BIT(30)
711#define RCR_APP_ICV BIT(29)
712#define RCR_APP_PHYST_RXFF BIT(28)
713#define RCR_APP_BA_SSN BIT(27)
714#define RCR_ENMBID BIT(24)
715#define RCR_LSIGEN BIT(23)
716#define RCR_MFBEN BIT(22)
717#define RCR_HTC_LOC_CTRL BIT(14)
718#define RCR_AMF BIT(13)
719#define RCR_ACF BIT(12)
720#define RCR_ADF BIT(11)
721#define RCR_AICV BIT(9)
722#define RCR_ACRC32 BIT(8)
723#define RCR_CBSSID_BCN BIT(7)
724#define RCR_CBSSID_DATA BIT(6)
725#define RCR_CBSSID RCR_CBSSID_DATA
726#define RCR_APWRMGT BIT(5)
727#define RCR_ADD3 BIT(4)
728#define RCR_AB BIT(3)
729#define RCR_AM BIT(2)
730#define RCR_APM BIT(1)
731#define RCR_AAP BIT(0)
732#define RCR_MXDMA_OFFSET 8
733#define RCR_FIFO_OFFSET 13
734
#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524

/* NOTE(review): the five REG_USB_* defines below are byte-identical
 * re-definitions of the ones earlier in this file (0xFE17..0xFE5D).
 * Identical macro redefinition is legal C and therefore harmless, but
 * these copies are redundant — candidates for removal.
 */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
#define REG_USB_AGG_TO 0xFE5C
#define REG_USB_AGG_TH 0xFE5D
743
744#define REG_USB_VID 0xFE60
745#define REG_USB_PID 0xFE62
746#define REG_USB_OPTIONAL 0xFE64
747#define REG_USB_CHIRP_K 0xFE65
748#define REG_USB_PHY 0xFE66
749#define REG_USB_MAC_ADDR 0xFE70
750#define REG_USB_HRPWM 0xFE58
751#define REG_USB_HCPWM 0xFE57
752
753#define SW18_FPWM BIT(3)
754
755#define ISO_MD2PP BIT(0)
756#define ISO_UA2USB BIT(1)
757#define ISO_UD2CORE BIT(2)
758#define ISO_PA2PCIE BIT(3)
759#define ISO_PD2CORE BIT(4)
760#define ISO_IP2MAC BIT(5)
761#define ISO_DIOP BIT(6)
762#define ISO_DIOE BIT(7)
763#define ISO_EB2CORE BIT(8)
764#define ISO_DIOR BIT(9)
765
766#define PWC_EV25V BIT(14)
767#define PWC_EV12V BIT(15)
768
769#define FEN_BBRSTB BIT(0)
770#define FEN_BB_GLB_RSTN BIT(1)
771#define FEN_USBA BIT(2)
772#define FEN_UPLL BIT(3)
773#define FEN_USBD BIT(4)
774#define FEN_DIO_PCIE BIT(5)
775#define FEN_PCIEA BIT(6)
776#define FEN_PPLL BIT(7)
777#define FEN_PCIED BIT(8)
778#define FEN_DIOE BIT(9)
779#define FEN_CPUEN BIT(10)
780#define FEN_DCORE BIT(11)
781#define FEN_ELDR BIT(12)
782#define FEN_DIO_RF BIT(13)
783#define FEN_HWPDN BIT(14)
784#define FEN_MREGEN BIT(15)
785
786#define PFM_LDALL BIT(0)
787#define PFM_ALDN BIT(1)
788#define PFM_LDKP BIT(2)
789#define PFM_WOWL BIT(3)
790#define ENPDN BIT(4)
791#define PDN_PL BIT(5)
792#define APFM_ONMAC BIT(8)
793#define APFM_OFF BIT(9)
794#define APFM_RSM BIT(10)
795#define AFSM_HSUS BIT(11)
796#define AFSM_PCIE BIT(12)
797#define APDM_MAC BIT(13)
798#define APDM_HOST BIT(14)
799#define APDM_HPDN BIT(15)
800#define RDY_MACON BIT(16)
801#define SUS_HOST BIT(17)
802#define ROP_ALD BIT(20)
803#define ROP_PWR BIT(21)
804#define ROP_SPS BIT(22)
805#define SOP_MRST BIT(25)
806#define SOP_FUSE BIT(26)
807#define SOP_ABG BIT(27)
808#define SOP_AMB BIT(28)
809#define SOP_RCK BIT(29)
810#define SOP_A8M BIT(30)
811#define XOP_BTCK BIT(31)
812
813#define ANAD16V_EN BIT(0)
814#define ANA8M BIT(1)
815#define MACSLP BIT(4)
816#define LOADER_CLK_EN BIT(5)
817#define _80M_SSC_DIS BIT(7)
818#define _80M_SSC_EN_HO BIT(8)
819#define PHY_SSC_RSTB BIT(9)
820#define SEC_CLK_EN BIT(10)
821#define MAC_CLK_EN BIT(11)
822#define SYS_CLK_EN BIT(12)
823#define RING_CLK_EN BIT(13)
824
825#define BOOT_FROM_EEPROM BIT(4)
826#define EEPROM_EN BIT(5)
827
828#define AFE_BGEN BIT(0)
829#define AFE_MBEN BIT(1)
830#define MAC_ID_EN BIT(7)
831
832#define WLOCK_ALL BIT(0)
833#define WLOCK_00 BIT(1)
834#define WLOCK_04 BIT(2)
835#define WLOCK_08 BIT(3)
836#define WLOCK_40 BIT(4)
837#define R_DIS_PRST_0 BIT(5)
838#define R_DIS_PRST_1 BIT(6)
839#define LOCK_ALL_EN BIT(7)
840
841#define RF_EN BIT(0)
842#define RF_RSTB BIT(1)
843#define RF_SDMRSTB BIT(2)
844
845#define LDA15_EN BIT(0)
846#define LDA15_STBY BIT(1)
847#define LDA15_OBUF BIT(2)
848#define LDA15_REG_VOS BIT(3)
849#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
850
851#define LDV12_EN BIT(0)
852#define LDV12_SDBY BIT(1)
853#define LPLDO_HSM BIT(2)
854#define LPLDO_LSM_DIS BIT(3)
855#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
856
857#define XTAL_EN BIT(0)
858#define XTAL_BSEL BIT(1)
859#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
860#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
861#define XTAL_GATE_USB BIT(8)
862#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
863#define XTAL_GATE_AFE BIT(11)
864#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
865#define XTAL_RF_GATE BIT(14)
866#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
867#define XTAL_GATE_DIG BIT(17)
868#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
869#define XTAL_BT_GATE BIT(20)
870#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
871#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
872
873#define CKDLY_AFE BIT(26)
874#define CKDLY_USB BIT(27)
875#define CKDLY_DIG BIT(28)
876#define CKDLY_BT BIT(29)
877
878#define APLL_EN BIT(0)
879#define APLL_320_EN BIT(1)
880#define APLL_FREF_SEL BIT(2)
881#define APLL_EDGE_SEL BIT(3)
882#define APLL_WDOGB BIT(4)
883#define APLL_LPFEN BIT(5)
884
885#define APLL_REF_CLK_13MHZ 0x1
886#define APLL_REF_CLK_19_2MHZ 0x2
887#define APLL_REF_CLK_20MHZ 0x3
888#define APLL_REF_CLK_25MHZ 0x4
889#define APLL_REF_CLK_26MHZ 0x5
890#define APLL_REF_CLK_38_4MHZ 0x6
891#define APLL_REF_CLK_40MHZ 0x7
892
893#define APLL_320EN BIT(14)
894#define APLL_80EN BIT(15)
895#define APLL_1MEN BIT(24)
896
897#define ALD_EN BIT(18)
898#define EF_PD BIT(19)
899#define EF_FLAG BIT(31)
900
901#define EF_TRPT BIT(7)
902#define LDOE25_EN BIT(31)
903
904#define RSM_EN BIT(0)
905#define TIMER_EN BIT(4)
906
907#define TRSW0EN BIT(2)
908#define TRSW1EN BIT(3)
909#define EROM_EN BIT(4)
910#define ENBT BIT(5)
911#define ENUART BIT(8)
912#define UART_910 BIT(9)
913#define ENPMAC BIT(10)
914#define SIC_SWRST BIT(11)
915#define ENSIC BIT(12)
916#define SIC_23 BIT(13)
917#define ENHDP BIT(14)
918#define SIC_LBK BIT(15)
919
920#define LED0PL BIT(4)
921#define LED1PL BIT(12)
922#define LED0DIS BIT(7)
923
924#define MCUFWDL_EN BIT(0)
925#define MCUFWDL_RDY BIT(1)
926#define FWDL_CHKSUM_RPT BIT(2)
927#define MACINI_RDY BIT(3)
928#define BBINI_RDY BIT(4)
929#define RFINI_RDY BIT(5)
930#define WINTINI_RDY BIT(6)
931#define CPRST BIT(23)
932
933#define XCLK_VLD BIT(0)
934#define ACLK_VLD BIT(1)
935#define UCLK_VLD BIT(2)
936#define PCLK_VLD BIT(3)
937#define PCIRSTB BIT(4)
938#define V15_VLD BIT(5)
939#define TRP_B15V_EN BIT(7)
940#define SIC_IDLE BIT(8)
941#define BD_MAC2 BIT(9)
942#define BD_MAC1 BIT(10)
943#define IC_MACPHY_MODE BIT(11)
944#define VENDOR_ID BIT(19)
945#define PAD_HWPD_IDN BIT(22)
946#define TRP_VAUX_EN BIT(23)
947#define TRP_BT_EN BIT(24)
948#define BD_PKG_SEL BIT(25)
949#define BD_HCI_SEL BIT(26)
950#define TYPE_ID BIT(27)
951
952#define CHIP_VER_RTL_MASK 0xF000
953#define CHIP_VER_RTL_SHIFT 12
954
955#define REG_LBMODE (REG_CR + 3)
956
957#define HCI_TXDMA_EN BIT(0)
958#define HCI_RXDMA_EN BIT(1)
959#define TXDMA_EN BIT(2)
960#define RXDMA_EN BIT(3)
961#define PROTOCOL_EN BIT(4)
962#define SCHEDULE_EN BIT(5)
963#define MACTXEN BIT(6)
964#define MACRXEN BIT(7)
965#define ENSWBCN BIT(8)
966#define ENSEC BIT(9)
967
968#define _NETTYPE(x) (((x) & 0x3) << 16)
969#define MASK_NETTYPE 0x30000
970#define NT_NO_LINK 0x0
971#define NT_LINK_AD_HOC 0x1
972#define NT_LINK_AP 0x2
973#define NT_AS_AP 0x3
974
975#define _LBMODE(x) (((x) & 0xF) << 24)
976#define MASK_LBMODE 0xF000000
977#define LOOPBACK_NORMAL 0x0
978#define LOOPBACK_IMMEDIATELY 0xB
979#define LOOPBACK_MAC_DELAY 0x3
980#define LOOPBACK_PHY 0x1
981#define LOOPBACK_DMA 0x7
982
983#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
984#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
985#define _PSRX_MASK 0xF
986#define _PSTX_MASK 0xF0
987#define _PSRX(x) (x)
988#define _PSTX(x) ((x) << 4)
989
990#define PBP_64 0x0
991#define PBP_128 0x1
992#define PBP_256 0x2
993#define PBP_512 0x3
994#define PBP_1024 0x4
995
996#define RXDMA_ARBBW_EN BIT(0)
997#define RXSHFT_EN BIT(1)
998#define RXDMA_AGG_EN BIT(2)
999#define QS_VO_QUEUE BIT(8)
1000#define QS_VI_QUEUE BIT(9)
1001#define QS_BE_QUEUE BIT(10)
1002#define QS_BK_QUEUE BIT(11)
1003#define QS_MANAGER_QUEUE BIT(12)
1004#define QS_HIGH_QUEUE BIT(13)
1005
1006#define HQSEL_VOQ BIT(0)
1007#define HQSEL_VIQ BIT(1)
1008#define HQSEL_BEQ BIT(2)
1009#define HQSEL_BKQ BIT(3)
1010#define HQSEL_MGTQ BIT(4)
1011#define HQSEL_HIQ BIT(5)
1012
1013#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
1014#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
1015#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
1016#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
1017#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
1018#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
1019
1020#define QUEUE_LOW 1
1021#define QUEUE_NORMAL 2
1022#define QUEUE_HIGH 3
1023
1024#define _LLT_NO_ACTIVE 0x0
1025#define _LLT_WRITE_ACCESS 0x1
1026#define _LLT_READ_ACCESS 0x2
1027
1028#define _LLT_INIT_DATA(x) ((x) & 0xFF)
1029#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
1030#define _LLT_OP(x) (((x) & 0x3) << 30)
1031#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
1032
1033#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
1034#define BB_WRITE_EN BIT(30)
1035#define BB_READ_EN BIT(31)
1036
1037#define _HPQ(x) ((x) & 0xFF)
1038#define _LPQ(x) (((x) & 0xFF) << 8)
1039#define _PUBQ(x) (((x) & 0xFF) << 16)
1040#define _NPQ(x) ((x) & 0xFF)
1041
1042#define HPQ_PUBLIC_DIS BIT(24)
1043#define LPQ_PUBLIC_DIS BIT(25)
1044#define LD_RQPN BIT(31)
1045
1046#define BCN_VALID BIT(16)
1047#define BCN_HEAD(x) (((x) & 0xFF) << 8)
1048#define BCN_HEAD_MASK 0xFF00
1049
1050#define BLK_DESC_NUM_SHIFT 4
1051#define BLK_DESC_NUM_MASK 0xF
1052
1053#define DROP_DATA_EN BIT(9)
1054
1055#define EN_AMPDU_RTY_NEW BIT(7)
1056
1057#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
1058
1059#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
1060#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
1061
1062#define RATE_REG_BITMAP_ALL 0xFFFFF
1063
1064#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
1065
1066#define _RRSR_RSC(x) (((x) & 0x3) << 21)
1067#define RRSR_RSC_RESERVED 0x0
1068#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
1069#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
1070#define RRSR_RSC_DUPLICATE_MODE 0x3
1071
1072#define USE_SHORT_G1 BIT(20)
1073
1074#define _AGGLMT_MCS0(x) ((x) & 0xF)
1075#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
1076#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
1077#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
1078#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
1079#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
1080#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
1081#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
1082
1083#define RETRY_LIMIT_SHORT_SHIFT 8
1084#define RETRY_LIMIT_LONG_SHIFT 0
1085
1086#define _DARF_RC1(x) ((x) & 0x1F)
1087#define _DARF_RC2(x) (((x) & 0x1F) << 8)
1088#define _DARF_RC3(x) (((x) & 0x1F) << 16)
1089#define _DARF_RC4(x) (((x) & 0x1F) << 24)
1090#define _DARF_RC5(x) ((x) & 0x1F)
1091#define _DARF_RC6(x) (((x) & 0x1F) << 8)
1092#define _DARF_RC7(x) (((x) & 0x1F) << 16)
1093#define _DARF_RC8(x) (((x) & 0x1F) << 24)
1094
1095#define _RARF_RC1(x) ((x) & 0x1F)
1096#define _RARF_RC2(x) (((x) & 0x1F) << 8)
1097#define _RARF_RC3(x) (((x) & 0x1F) << 16)
1098#define _RARF_RC4(x) (((x) & 0x1F) << 24)
1099#define _RARF_RC5(x) ((x) & 0x1F)
1100#define _RARF_RC6(x) (((x) & 0x1F) << 8)
1101#define _RARF_RC7(x) (((x) & 0x1F) << 16)
1102#define _RARF_RC8(x) (((x) & 0x1F) << 24)
1103
1104#define AC_PARAM_TXOP_LIMIT_OFFSET 16
1105#define AC_PARAM_ECW_MAX_OFFSET 12
1106#define AC_PARAM_ECW_MIN_OFFSET 8
1107#define AC_PARAM_AIFS_OFFSET 0
1108
1109#define _AIFS(x) (x)
1110#define _ECW_MAX_MIN(x) ((x) << 8)
1111#define _TXOP_LIMIT(x) ((x) << 16)
1112
1113#define _BCNIFS(x) ((x) & 0xFF)
1114#define _BCNECW(x) ((((x) & 0xF)) << 8)
1115
1116#define _LRL(x) ((x) & 0x3F)
1117#define _SRL(x) (((x) & 0x3F) << 8)
1118
#define _SIFS_CCK_CTX(x)	((x) & 0xFF)
/* Fixed: the original macros ended with a stray ';', which made them
 * syntax errors when used inside an expression (e.g. when OR-ing the
 * CTX and TRX halves together to build the SIFS register value).
 * Function-like macros must never carry a trailing semicolon.
 */
#define _SIFS_CCK_TRX(x)	(((x) & 0xFF) << 8)

#define _SIFS_OFDM_CTX(x)	((x) & 0xFF)
#define _SIFS_OFDM_TRX(x)	(((x) & 0xFF) << 8)
1124
1125#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
1126
1127#define DIS_EDCA_CNT_DWN BIT(11)
1128
1129#define EN_MBSSID BIT(1)
1130#define EN_TXBCN_RPT BIT(2)
1131#define EN_BCN_FUNCTION BIT(3)
1132
1133#define TSFTR_RST BIT(0)
1134#define TSFTR1_RST BIT(1)
1135
1136#define STOP_BCNQ BIT(6)
1137
1138#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
1139#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
1140
1141#define ACMHW_HWEN BIT(0)
1142#define ACMHW_BEQEN BIT(1)
1143#define ACMHW_VIQEN BIT(2)
1144#define ACMHW_VOQEN BIT(3)
1145#define ACMHW_BEQSTATUS BIT(4)
1146#define ACMHW_VIQSTATUS BIT(5)
1147#define ACMHW_VOQSTATUS BIT(6)
1148
1149#define APSDOFF BIT(6)
1150#define APSDOFF_STATUS BIT(7)
1151
1152#define BW_20MHZ BIT(2)
1153
1154#define RATE_BITMAP_ALL 0xFFFFF
1155
1156#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
1157
1158#define TSFRST BIT(0)
1159#define DIS_GCLK BIT(1)
1160#define PAD_SEL BIT(2)
1161#define PWR_ST BIT(6)
1162#define PWRBIT_OW_EN BIT(7)
1163#define ACRC BIT(8)
1164#define CFENDFORM BIT(9)
1165#define ICV BIT(10)
1166
1167#define AAP BIT(0)
1168#define APM BIT(1)
1169#define AM BIT(2)
1170#define AB BIT(3)
1171#define ADD3 BIT(4)
1172#define APWRMGT BIT(5)
1173#define CBSSID BIT(6)
1174#define CBSSID_DATA BIT(6)
1175#define CBSSID_BCN BIT(7)
1176#define ACRC32 BIT(8)
1177#define AICV BIT(9)
1178#define ADF BIT(11)
1179#define ACF BIT(12)
1180#define AMF BIT(13)
1181#define HTC_LOC_CTRL BIT(14)
1182#define UC_DATA_EN BIT(16)
1183#define BM_DATA_EN BIT(17)
1184#define MFBEN BIT(22)
1185#define LSIGEN BIT(23)
1186#define ENMBID BIT(24)
1187#define APP_BASSN BIT(27)
1188#define APP_PHYSTS BIT(28)
1189#define APP_ICV BIT(29)
1190#define APP_MIC BIT(30)
1191#define APP_FCS BIT(31)
1192
1193#define _MIN_SPACE(x) ((x) & 0x7)
1194#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
1195
1196#define RXERR_TYPE_OFDM_PPDU 0
1197#define RXERR_TYPE_OFDM_FALSE_ALARM 1
1198#define RXERR_TYPE_OFDM_MPDU_OK 2
1199#define RXERR_TYPE_OFDM_MPDU_FAIL 3
1200#define RXERR_TYPE_CCK_PPDU 4
1201#define RXERR_TYPE_CCK_FALSE_ALARM 5
1202#define RXERR_TYPE_CCK_MPDU_OK 6
1203#define RXERR_TYPE_CCK_MPDU_FAIL 7
1204#define RXERR_TYPE_HT_PPDU 8
1205#define RXERR_TYPE_HT_FALSE_ALARM 9
1206#define RXERR_TYPE_HT_MPDU_TOTAL 10
1207#define RXERR_TYPE_HT_MPDU_OK 11
1208#define RXERR_TYPE_HT_MPDU_FAIL 12
1209#define RXERR_TYPE_RX_FULL_DROP 15
1210
1211#define RXERR_COUNTER_MASK 0xFFFFF
1212#define RXERR_RPT_RST BIT(27)
/* Select which RX error counter type (RXERR_TYPE_*) is reported via
 * bits 31:28 of the RXERR report register.
 * Cast to unsigned before shifting: type values >= 8 would otherwise
 * shift a 1 into the sign bit of a signed int, which is undefined
 * behavior in C.
 */
#define _RXERR_RPT_SEL(type)		((unsigned int)(type) << 28)
1214
1215#define SCR_TXUSEDK BIT(0)
1216#define SCR_RXUSEDK BIT(1)
1217#define SCR_TXENCENABLE BIT(2)
1218#define SCR_RXDECENABLE BIT(3)
1219#define SCR_SKBYA2 BIT(4)
1220#define SCR_NOSKMC BIT(5)
1221#define SCR_TXBCUSEDK BIT(6)
1222#define SCR_RXBCUSEDK BIT(7)
1223
1224#define USB_IS_HIGH_SPEED 0
1225#define USB_IS_FULL_SPEED 1
1226#define USB_SPEED_MASK BIT(5)
1227
1228#define USB_NORMAL_SIE_EP_MASK 0xF
1229#define USB_NORMAL_SIE_EP_SHIFT 4
1230
1231#define USB_TEST_EP_MASK 0x30
1232#define USB_TEST_EP_SHIFT 4
1233
1234#define USB_AGG_EN BIT(3)
1235
1236#define MAC_ADDR_LEN 6
1237#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/
1238
1239#define POLLING_LLT_THRESHOLD 20
1240#define POLLING_READY_TIMEOUT_COUNT 3000
1241
1242#define MAX_MSS_DENSITY_2T 0x13
1243#define MAX_MSS_DENSITY_1T 0x0A
1244
1245#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
1246#define EPROM_CMD_CONFIG 0x3
1247#define EPROM_CMD_LOAD 1
1248
1249#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
1250
1251#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1252
1253#define RPMAC_RESET 0x100
1254#define RPMAC_TXSTART 0x104
1255#define RPMAC_TXLEGACYSIG 0x108
1256#define RPMAC_TXHTSIG1 0x10c
1257#define RPMAC_TXHTSIG2 0x110
1258#define RPMAC_PHYDEBUG 0x114
1259#define RPMAC_TXPACKETNUM 0x118
1260#define RPMAC_TXIDLE 0x11c
1261#define RPMAC_TXMACHEADER0 0x120
1262#define RPMAC_TXMACHEADER1 0x124
1263#define RPMAC_TXMACHEADER2 0x128
1264#define RPMAC_TXMACHEADER3 0x12c
1265#define RPMAC_TXMACHEADER4 0x130
1266#define RPMAC_TXMACHEADER5 0x134
1267#define RPMAC_TXDADATYPE 0x138
1268#define RPMAC_TXRANDOMSEED 0x13c
1269#define RPMAC_CCKPLCPPREAMBLE 0x140
1270#define RPMAC_CCKPLCPHEADER 0x144
1271#define RPMAC_CCKCRC16 0x148
1272#define RPMAC_OFDMRXCRC32OK 0x170
1273#define RPMAC_OFDMRXCRC32Er 0x174
1274#define RPMAC_OFDMRXPARITYER 0x178
1275#define RPMAC_OFDMRXCRC8ER 0x17c
1276#define RPMAC_CCKCRXRC16ER 0x180
1277#define RPMAC_CCKCRXRC32ER 0x184
1278#define RPMAC_CCKCRXRC32OK 0x188
1279#define RPMAC_TXSTATUS 0x18c
1280
1281#define RFPGA0_RFMOD 0x800
1282
1283#define RFPGA0_TXINFO 0x804
1284#define RFPGA0_PSDFUNCTION 0x808
1285
1286#define RFPGA0_TXGAINSTAGE 0x80c
1287
1288#define RFPGA0_RFTIMING1 0x810
1289#define RFPGA0_RFTIMING2 0x814
1290
1291#define RFPGA0_XA_HSSIPARAMETER1 0x820
1292#define RFPGA0_XA_HSSIPARAMETER2 0x824
1293#define RFPGA0_XB_HSSIPARAMETER1 0x828
1294#define RFPGA0_XB_HSSIPARAMETER2 0x82c
1295
1296#define RFPGA0_XA_LSSIPARAMETER 0x840
1297#define RFPGA0_XB_LSSIPARAMETER 0x844
1298
1299#define RFPGA0_RFWAKEUPPARAMETER 0x850
1300#define RFPGA0_RFSLEEPUPPARAMETER 0x854
1301
1302#define RFPGA0_XAB_SWITCHCONTROL 0x858
1303#define RFPGA0_XCD_SWITCHCONTROL 0x85c
1304
1305#define RFPGA0_XA_RFINTERFACEOE 0x860
1306#define RFPGA0_XB_RFINTERFACEOE 0x864
1307
1308#define RFPGA0_XAB_RFINTERFACESW 0x870
1309#define RFPGA0_XCD_RFINTERFACESW 0x874
1310
1311#define rFPGA0_XAB_RFPARAMETER 0x878
1312#define rFPGA0_XCD_RFPARAMETER 0x87c
1313
1314#define RFPGA0_ANALOGPARAMETER1 0x880
1315#define RFPGA0_ANALOGPARAMETER2 0x884
1316#define RFPGA0_ANALOGPARAMETER3 0x888
1317#define RFPGA0_ANALOGPARAMETER4 0x88c
1318
1319#define RFPGA0_XA_LSSIREADBACK 0x8a0
1320#define RFPGA0_XB_LSSIREADBACK 0x8a4
1321#define RFPGA0_XC_LSSIREADBACK 0x8a8
1322#define RFPGA0_XD_LSSIREADBACK 0x8ac
1323
1324#define RFPGA0_PSDREPORT 0x8b4
1325#define TRANSCEIVEA_HSPI_READBACK 0x8b8
1326#define TRANSCEIVEB_HSPI_READBACK 0x8bc
1327#define REG_SC_CNT 0x8c4
1328#define RFPGA0_XAB_RFINTERFACERB 0x8e0
1329#define RFPGA0_XCD_RFINTERFACERB 0x8e4
1330
1331#define RFPGA1_RFMOD 0x900
1332
1333#define RFPGA1_TXBLOCK 0x904
1334#define RFPGA1_DEBUGSELECT 0x908
1335#define RFPGA1_TXINFO 0x90c
1336
1337#define RCCK0_SYSTEM 0xa00
1338
1339#define RCCK0_AFESETTING 0xa04
1340#define RCCK0_CCA 0xa08
1341
1342#define RCCK0_RXAGC1 0xa0c
1343#define RCCK0_RXAGC2 0xa10
1344
1345#define RCCK0_RXHP 0xa14
1346
1347#define RCCK0_DSPPARAMETER1 0xa18
1348#define RCCK0_DSPPARAMETER2 0xa1c
1349
1350#define RCCK0_TXFILTER1 0xa20
1351#define RCCK0_TXFILTER2 0xa24
1352#define RCCK0_DEBUGPORT 0xa28
1353#define RCCK0_FALSEALARMREPORT 0xa2c
1354#define RCCK0_TRSSIREPORT 0xa50
1355#define RCCK0_RXREPORT 0xa54
1356#define RCCK0_FACOUNTERLOWER 0xa5c
1357#define RCCK0_FACOUNTERUPPER 0xa58
1358#define RCCK0_CCA_CNT 0xa60
1359
1360
1361/* PageB(0xB00) */
1362#define RPDP_ANTA 0xb00
1363#define RPDP_ANTA_4 0xb04
1364#define RPDP_ANTA_8 0xb08
1365#define RPDP_ANTA_C 0xb0c
1366#define RPDP_ANTA_10 0xb10
1367#define RPDP_ANTA_14 0xb14
1368#define RPDP_ANTA_18 0xb18
1369#define RPDP_ANTA_1C 0xb1c
1370#define RPDP_ANTA_20 0xb20
1371#define RPDP_ANTA_24 0xb24
1372
1373#define RCONFIG_PMPD_ANTA 0xb28
1374#define RCONFIG_RAM64X16 0xb2c
1375
1376#define RBNDA 0xb30
1377#define RHSSIPAR 0xb34
1378
1379#define RCONFIG_ANTA 0xb68
1380#define RCONFIG_ANTB 0xb6c
1381
1382#define RPDP_ANTB 0xb70
1383#define RPDP_ANTB_4 0xb74
1384#define RPDP_ANTB_8 0xb78
1385#define RPDP_ANTB_C 0xb7c
1386#define RPDP_ANTB_10 0xb80
1387#define RPDP_ANTB_14 0xb84
1388#define RPDP_ANTB_18 0xb88
1389#define RPDP_ANTB_1C 0xb8c
1390#define RPDP_ANTB_20 0xb90
1391#define RPDP_ANTB_24 0xb94
1392
1393#define RCONFIG_PMPD_ANTB 0xb98
1394
1395#define RBNDB 0xba0
1396
1397#define RAPK 0xbd8
1398#define rPm_Rx0_AntA 0xbdc
1399#define rPm_Rx1_AntA 0xbe0
1400#define rPm_Rx2_AntA 0xbe4
1401#define rPm_Rx3_AntA 0xbe8
1402#define rPm_Rx0_AntB 0xbec
1403#define rPm_Rx1_AntB 0xbf0
1404#define rPm_Rx2_AntB 0xbf4
1405#define rPm_Rx3_AntB 0xbf8
1406
1407/*Page C*/
1408#define ROFDM0_LSTF 0xc00
1409
1410#define ROFDM0_TRXPATHENABLE 0xc04
1411#define ROFDM0_TRMUXPAR 0xc08
1412#define ROFDM0_TRSWISOLATION 0xc0c
1413
1414#define ROFDM0_XARXAFE 0xc10
1415#define ROFDM0_XARXIQIMBAL 0xc14
1416#define ROFDM0_XBRXAFE 0xc18
1417#define ROFDM0_XBRXIQIMBAL 0xc1c
1418#define ROFDM0_XCRXAFE 0xc20
1419#define ROFDM0_XCRXIQIMBAL 0xc24
1420#define ROFDM0_XDRXAFE 0xc28
1421#define ROFDM0_XDRXIQIMBAL 0xc2c
1422
1423#define ROFDM0_RXDETECTOR1 0xc30
1424#define ROFDM0_RXDETECTOR2 0xc34
1425#define ROFDM0_RXDETECTOR3 0xc38
1426#define ROFDM0_RXDETECTOR4 0xc3c
1427
1428#define ROFDM0_RXDSP 0xc40
1429#define ROFDM0_CFOANDDAGC 0xc44
1430#define ROFDM0_CCADROPTHRES 0xc48
1431#define ROFDM0_ECCATHRES 0xc4c
1432
1433#define ROFDM0_XAAGCCORE1 0xc50
1434#define ROFDM0_XAAGCCORE2 0xc54
1435#define ROFDM0_XBAGCCORE1 0xc58
1436#define ROFDM0_XBAGCCORE2 0xc5c
1437#define ROFDM0_XCAGCCORE1 0xc60
1438#define ROFDM0_XCAGCCORE2 0xc64
1439#define ROFDM0_XDAGCCORE1 0xc68
1440#define ROFDM0_XDAGCCORE2 0xc6c
1441
1442#define ROFDM0_AGCPARAMETER1 0xc70
1443#define ROFDM0_AGCPARAMETER2 0xc74
1444#define ROFDM0_AGCRSSITABLE 0xc78
1445#define ROFDM0_HTSTFAGC 0xc7c
1446
1447#define ROFDM0_XATXIQIMBAL 0xc80
1448#define ROFDM0_XATXAFE 0xc84
1449#define ROFDM0_XBTXIQIMBAL 0xc88
1450#define ROFDM0_XBTXAFE 0xc8c
1451#define ROFDM0_XCTXIQIMBAL 0xc90
1452#define ROFDM0_XCTXAFE 0xc94
1453#define ROFDM0_XDTXIQIMBAL 0xc98
1454#define ROFDM0_XDTXAFE 0xc9c
1455
1456#define ROFDM0_RXIQEXTANTA 0xca0
1457#define ROFDM0_TXCOEFF1 0xca4
1458#define ROFDM0_TXCOEFF2 0xca8
1459#define ROFDM0_TXCOEFF3 0xcac
1460#define ROFDM0_TXCOEFF4 0xcb0
1461#define ROFDM0_TXCOEFF5 0xcb4
1462#define ROFDM0_TXCOEFF6 0xcb8
1463
1464#define ROFDM0_RXHPPARAMETER 0xce0
1465#define ROFDM0_TXPSEUDONOISEWGT 0xce4
1466#define ROFDM0_FRAMESYNC 0xcf0
1467#define ROFDM0_DFSREPORT 0xcf4
1468
1469
1470#define ROFDM1_LSTF 0xd00
1471#define ROFDM1_TRXPATHENABLE 0xd04
1472
1473#define ROFDM1_CF0 0xd08
1474#define ROFDM1_CSI1 0xd10
1475#define ROFDM1_SBD 0xd14
1476#define ROFDM1_CSI2 0xd18
1477#define ROFDM1_CFOTRACKING 0xd2c
1478#define ROFDM1_TRXMESAURE1 0xd34
1479#define ROFDM1_INTFDET 0xd3c
1480#define ROFDM1_PSEUDONOISESTATEAB 0xd50
1481#define ROFDM1_PSEUDONOISESTATECD 0xd54
1482#define ROFDM1_RXPSEUDONOISEWGT 0xd58
1483
1484#define ROFDM_PHYCOUNTER1 0xda0
1485#define ROFDM_PHYCOUNTER2 0xda4
1486#define ROFDM_PHYCOUNTER3 0xda8
1487
1488#define ROFDM_SHORTCFOAB 0xdac
1489#define ROFDM_SHORTCFOCD 0xdb0
1490#define ROFDM_LONGCFOAB 0xdb4
1491#define ROFDM_LONGCFOCD 0xdb8
1492#define ROFDM_TAILCF0AB 0xdbc
1493#define ROFDM_TAILCF0CD 0xdc0
1494#define ROFDM_PWMEASURE1 0xdc4
1495#define ROFDM_PWMEASURE2 0xdc8
1496#define ROFDM_BWREPORT 0xdcc
1497#define ROFDM_AGCREPORT 0xdd0
1498#define ROFDM_RXSNR 0xdd4
1499#define ROFDM_RXEVMCSI 0xdd8
1500#define ROFDM_SIGREPORT 0xddc
1501
1502#define RTXAGC_A_RATE18_06 0xe00
1503#define RTXAGC_A_RATE54_24 0xe04
1504#define RTXAGC_A_CCK1_MCS32 0xe08
1505#define RTXAGC_A_MCS03_MCS00 0xe10
1506#define RTXAGC_A_MCS07_MCS04 0xe14
1507#define RTXAGC_A_MCS11_MCS08 0xe18
1508#define RTXAGC_A_MCS15_MCS12 0xe1c
1509
1510#define RTXAGC_B_RATE18_06 0x830
1511#define RTXAGC_B_RATE54_24 0x834
1512#define RTXAGC_B_CCK1_55_MCS32 0x838
1513#define RTXAGC_B_MCS03_MCS00 0x83c
1514#define RTXAGC_B_MCS07_MCS04 0x848
1515#define RTXAGC_B_MCS11_MCS08 0x84c
1516#define RTXAGC_B_MCS15_MCS12 0x868
1517#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
1518
1519#define RFPGA0_IQK 0xe28
1520#define RTX_IQK_TONE_A 0xe30
1521#define RRX_IQK_TONE_A 0xe34
1522#define RTX_IQK_PI_A 0xe38
1523#define RRX_IQK_PI_A 0xe3c
1524
1525#define RTX_IQK 0xe40
1526#define RRX_IQK 0xe44
1527#define RIQK_AGC_PTS 0xe48
1528#define RIQK_AGC_RSP 0xe4c
1529#define RTX_IQK_TONE_B 0xe50
1530#define RRX_IQK_TONE_B 0xe54
1531#define RTX_IQK_PI_B 0xe58
1532#define RRX_IQK_PI_B 0xe5c
1533#define RIQK_AGC_CONT 0xe60
1534
1535#define RBLUE_TOOTH 0xe6c
1536#define RRX_WAIT_CCA 0xe70
1537#define RTX_CCK_RFON 0xe74
1538#define RTX_CCK_BBON 0xe78
1539#define RTX_OFDM_RFON 0xe7c
1540#define RTX_OFDM_BBON 0xe80
1541#define RTX_TO_RX 0xe84
1542#define RTX_TO_TX 0xe88
1543#define RRX_CCK 0xe8c
1544
1545#define RTX_POWER_BEFORE_IQK_A 0xe94
1546#define RTX_POWER_AFTER_IQK_A 0xe9c
1547
1548#define RRX_POWER_BEFORE_IQK_A 0xea0
1549#define RRX_POWER_BEFORE_IQK_A_2 0xea4
1550#define RRX_POWER_AFTER_IQK_A 0xea8
1551#define RRX_POWER_AFTER_IQK_A_2 0xeac
1552
1553#define RTX_POWER_BEFORE_IQK_B 0xeb4
1554#define RTX_POWER_AFTER_IQK_B 0xebc
1555
1556#define RRX_POWER_BEFORE_IQK_B 0xec0
1557#define RRX_POWER_BEFORE_IQK_B_2 0xec4
1558#define RRX_POWER_AFTER_IQK_B 0xec8
1559#define RRX_POWER_AFTER_IQK_B_2 0xecc
1560
1561#define RRX_OFDM 0xed0
1562#define RRX_WAIT_RIFS 0xed4
1563#define RRX_TO_RX 0xed8
1564#define RSTANDBY 0xedc
1565#define RSLEEP 0xee0
1566#define RPMPD_ANAEN 0xeec
1567
1568#define RZEBRA1_HSSIENABLE 0x0
1569#define RZEBRA1_TRXENABLE1 0x1
1570#define RZEBRA1_TRXENABLE2 0x2
1571#define RZEBRA1_AGC 0x4
1572#define RZEBRA1_CHARGEPUMP 0x5
1573#define RZEBRA1_CHANNEL 0x7
1574
1575#define RZEBRA1_TXGAIN 0x8
1576#define RZEBRA1_TXLPF 0x9
1577#define RZEBRA1_RXLPF 0xb
1578#define RZEBRA1_RXHPFCORNER 0xc
1579
1580#define RGLOBALCTRL 0
1581#define RRTL8256_TXLPF 19
1582#define RRTL8256_RXLPF 11
1583#define RRTL8258_TXLPF 0x11
1584#define RRTL8258_RXLPF 0x13
1585#define RRTL8258_RSSILPF 0xa
1586
1587#define RF_AC 0x00
1588
1589#define RF_IQADJ_G1 0x01
1590#define RF_IQADJ_G2 0x02
1591#define RF_POW_TRSW 0x05
1592
1593#define RF_GAIN_RX 0x06
1594#define RF_GAIN_TX 0x07
1595
1596#define RF_TXM_IDAC 0x08
1597#define RF_BS_IQGEN 0x0F
1598
1599#define RF_MODE1 0x10
1600#define RF_MODE2 0x11
1601
1602#define RF_RX_AGC_HP 0x12
1603#define RF_TX_AGC 0x13
1604#define RF_BIAS 0x14
1605#define RF_IPA 0x15
1606#define RF_POW_ABILITY 0x17
1607#define RF_MODE_AG 0x18
1608#define RRFCHANNEL 0x18
1609#define RF_CHNLBW 0x18
1610#define RF_TOP 0x19
1611
1612#define RF_RX_G1 0x1A
1613#define RF_RX_G2 0x1B
1614
1615#define RF_RX_BB2 0x1C
1616#define RF_RX_BB1 0x1D
1617
1618#define RF_RCK1 0x1E
1619#define RF_RCK2 0x1F
1620
1621#define RF_TX_G1 0x20
1622#define RF_TX_G2 0x21
1623#define RF_TX_G3 0x22
1624
1625#define RF_TX_BB1 0x23
1626#define RF_T_METER 0x42
1627
1628#define RF_SYN_G1 0x25
1629#define RF_SYN_G2 0x26
1630#define RF_SYN_G3 0x27
1631#define RF_SYN_G4 0x28
1632#define RF_SYN_G5 0x29
1633#define RF_SYN_G6 0x2A
1634#define RF_SYN_G7 0x2B
1635#define RF_SYN_G8 0x2C
1636
1637#define RF_RCK_OS 0x30
1638#define RF_TXPA_G1 0x31
1639#define RF_TXPA_G2 0x32
1640#define RF_TXPA_G3 0x33
1641
1642#define RF_TX_BIAS_A 0x35
1643#define RF_TX_BIAS_D 0x36
1644#define RF_LOBF_9 0x38
1645#define RF_RXRF_A3 0x3C
1646#define RF_TRSW 0x3F
1647
1648#define RF_TXRF_A2 0x41
1649#define RF_TXPA_G4 0x46
1650#define RF_TXPA_A4 0x4B
1651
1652#define RF_WE_LUT 0xEF
1653
1654#define BBBRESETB 0x100
1655#define BGLOBALRESETB 0x200
1656#define BOFDMTXSTART 0x4
1657#define BCCKTXSTART 0x8
1658#define BCRC32DEBUG 0x100
1659#define BPMACLOOPBACK 0x10
1660#define BTXLSIG 0xffffff
1661#define BOFDMTXRATE 0xf
1662#define BOFDMTXRESERVED 0x10
1663#define BOFDMTXLENGTH 0x1ffe0
1664#define BOFDMTXPARITY 0x20000
1665#define BTXHTSIG1 0xffffff
1666#define BTXHTMCSRATE 0x7f
1667#define BTXHTBW 0x80
1668#define BTXHTLENGTH 0xffff00
1669#define BTXHTSIG2 0xffffff
1670#define BTXHTSMOOTHING 0x1
1671#define BTXHTSOUNDING 0x2
1672#define BTXHTRESERVED 0x4
1673#define BTXHTAGGREATION 0x8
1674#define BTXHTSTBC 0x30
1675#define BTXHTADVANCECODING 0x40
1676#define BTXHTSHORTGI 0x80
1677#define BTXHTNUMBERHT_LTF 0x300
1678#define BTXHTCRC8 0x3fc00
1679#define BCOUNTERRESET 0x10000
1680#define BNUMOFOFDMTX 0xffff
1681#define BNUMOFCCKTX 0xffff0000
1682#define BTXIDLEINTERVAL 0xffff
1683#define BOFDMSERVICE 0xffff0000
1684#define BTXMACHEADER 0xffffffff
1685#define BTXDATAINIT 0xff
1686#define BTXHTMODE 0x100
1687#define BTXDATATYPE 0x30000
1688#define BTXRANDOMSEED 0xffffffff
1689#define BCCKTXPREAMBLE 0x1
1690#define BCCKTXSFD 0xffff0000
1691#define BCCKTXSIG 0xff
1692#define BCCKTXSERVICE 0xff00
1693#define BCCKLENGTHEXT 0x8000
1694#define BCCKTXLENGHT 0xffff0000
1695#define BCCKTXCRC16 0xffff
1696#define BCCKTXSTATUS 0x1
1697#define BOFDMTXSTATUS 0x2
/* True when the register offset lies inside the 92S baseband register
 * window (0x800 through 0xfff inclusive).
 * The parameter is parenthesized at every use so that compound
 * arguments (e.g. "base + off" or "addr & 0xfff") expand correctly;
 * the relational operators would otherwise bind tighter than a
 * lower-precedence operator inside the argument.
 */
#define IS_BB_REG_OFFSET_92S(_offset) \
	(((_offset) >= 0x800) && ((_offset) <= 0xfff))
1700
1701#define BRFMOD 0x1
1702#define BJAPANMODE 0x2
1703#define BCCKTXSC 0x30
1704#define BCCKEN 0x1000000
1705#define BOFDMEN 0x2000000
1706
1707#define BOFDMRXADCPHASE 0x10000
1708#define BOFDMTXDACPHASE 0x40000
1709#define BXATXAGC 0x3f
1710
1711#define BXBTXAGC 0xf00
1712#define BXCTXAGC 0xf000
1713#define BXDTXAGC 0xf0000
1714
1715#define BPASTART 0xf0000000
1716#define BTRSTART 0x00f00000
1717#define BRFSTART 0x0000f000
1718#define BBBSTART 0x000000f0
1719#define BBBCCKSTART 0x0000000f
1720#define BPAEND 0xf
1721#define BTREND 0x0f000000
1722#define BRFEND 0x000f0000
1723#define BCCAMASK 0x000000f0
1724#define BR2RCCAMASK 0x00000f00
1725#define BHSSI_R2TDELAY 0xf8000000
1726#define BHSSI_T2RDELAY 0xf80000
1727#define BCONTXHSSI 0x400
1728#define BIGFROMCCK 0x200
1729#define BAGCADDRESS 0x3f
1730#define BRXHPTX 0x7000
1731#define BRXHP2RX 0x38000
1732#define BRXHPCCKINI 0xc0000
1733#define BAGCTXCODE 0xc00000
1734#define BAGCRXCODE 0x300000
1735
1736#define B3WIREDATALENGTH 0x800
1737#define B3WIREADDREAALENGTH 0x400
1738
1739#define B3WIRERFPOWERDOWN 0x1
1740#define B5GPAPEPOLARITY 0x40000000
1741#define B2GPAPEPOLARITY 0x80000000
1742#define BRFSW_TXDEFAULTANT 0x3
1743#define BRFSW_TXOPTIONANT 0x30
1744#define BRFSW_RXDEFAULTANT 0x300
1745#define BRFSW_RXOPTIONANT 0x3000
1746#define BRFSI_3WIREDATA 0x1
1747#define BRFSI_3WIRECLOCK 0x2
1748#define BRFSI_3WIRELOAD 0x4
1749#define BRFSI_3WIRERW 0x8
1750#define BRFSI_3WIRE 0xf
1751
1752#define BRFSI_RFENV 0x10
1753
1754#define BRFSI_TRSW 0x20
1755#define BRFSI_TRSWB 0x40
1756#define BRFSI_ANTSW 0x100
1757#define BRFSI_ANTSWB 0x200
1758#define BRFSI_PAPE 0x400
1759#define BRFSI_PAPE5G 0x800
1760#define BBANDSELECT 0x1
1761#define BHTSIG2_GI 0x80
1762#define BHTSIG2_SMOOTHING 0x01
1763#define BHTSIG2_SOUNDING 0x02
1764#define BHTSIG2_AGGREATON 0x08
1765#define BHTSIG2_STBC 0x30
1766#define BHTSIG2_ADVCODING 0x40
1767#define BHTSIG2_NUMOFHTLTF 0x300
1768#define BHTSIG2_CRC8 0x3fc
1769#define BHTSIG1_MCS 0x7f
1770#define BHTSIG1_BANDWIDTH 0x80
1771#define BHTSIG1_HTLENGTH 0xffff
1772#define BLSIG_RATE 0xf
1773#define BLSIG_RESERVED 0x10
1774#define BLSIG_LENGTH 0x1fffe
1775#define BLSIG_PARITY 0x20
1776#define BCCKRXPHASE 0x4
1777
1778#define BLSSIREADADDRESS 0x7f800000
1779#define BLSSIREADEDGE 0x80000000
1780
1781#define BLSSIREADBACKDATA 0xfffff
1782
1783#define BLSSIREADOKFLAG 0x1000
1784#define BCCKSAMPLERATE 0x8
1785#define BREGULATOR0STANDBY 0x1
1786#define BREGULATORPLLSTANDBY 0x2
1787#define BREGULATOR1STANDBY 0x4
1788#define BPLLPOWERUP 0x8
1789#define BDPLLPOWERUP 0x10
1790#define BDA10POWERUP 0x20
1791#define BAD7POWERUP 0x200
1792#define BDA6POWERUP 0x2000
1793#define BXTALPOWERUP 0x4000
1794#define B40MDCLKPOWERUP 0x8000
1795#define BDA6DEBUGMODE 0x20000
1796#define BDA6SWING 0x380000
1797
1798#define BADCLKPHASE 0x4000000
1799#define B80MCLKDELAY 0x18000000
1800#define BAFEWATCHDOGENABLE 0x20000000
1801
1802#define BXTALCAP01 0xc0000000
1803#define BXTALCAP23 0x3
1804#define BXTALCAP92X 0x0f000000
1805#define BXTALCAP 0x0f000000
1806
1807#define BINTDIFCLKENABLE 0x400
1808#define BEXTSIGCLKENABLE 0x800
1809#define BBANDGAP_MBIAS_POWERUP 0x10000
1810#define BAD11SH_GAIN 0xc0000
1811#define BAD11NPUT_RANGE 0x700000
1812#define BAD110P_CURRENT 0x3800000
1813#define BLPATH_LOOPBACK 0x4000000
1814#define BQPATH_LOOPBACK 0x8000000
1815#define BAFE_LOOPBACK 0x10000000
1816#define BDA10_SWING 0x7e0
1817#define BDA10_REVERSE 0x800
1818#define BDA_CLK_SOURCE 0x1000
1819#define BDA7INPUT_RANGE 0x6000
1820#define BDA7_GAIN 0x38000
1821#define BDA7OUTPUT_CM_MODE 0x40000
1822#define BDA7INPUT_CM_MODE 0x380000
1823#define BDA7CURRENT 0xc00000
1824#define BREGULATOR_ADJUST 0x7000000
1825#define BAD11POWERUP_ATTX 0x1
1826#define BDA10PS_ATTX 0x10
1827#define BAD11POWERUP_ATRX 0x100
1828#define BDA10PS_ATRX 0x1000
1829#define BCCKRX_AGC_FORMAT 0x200
1830#define BPSDFFT_SAMPLE_POINT 0xc000
1831#define BPSD_AVERAGE_NUM 0x3000
1832#define BIQPATH_CONTROL 0xc00
1833#define BPSD_FREQ 0x3ff
1834#define BPSD_ANTENNA_PATH 0x30
1835#define BPSD_IQ_SWITCH 0x40
1836#define BPSD_RX_TRIGGER 0x400000
1837#define BPSD_TX_TRIGGERCW 0x80000000
1838#define BPSD_SINE_TONE_SCALE 0x7f000000
1839#define BPSD_REPORT 0xffff
1840
1841#define BOFDM_TXSC 0x30000000
1842#define BCCK_TXON 0x1
1843#define BOFDM_TXON 0x2
1844#define BDEBUG_PAGE 0xfff
1845#define BDEBUG_ITEM 0xff
1846#define BANTL 0x10
1847#define BANT_NONHT 0x100
1848#define BANT_HT1 0x1000
1849#define BANT_HT2 0x10000
1850#define BANT_HT1S1 0x100000
1851#define BANT_NONHTS1 0x1000000
1852
1853#define BCCK_BBMODE 0x3
1854#define BCCK_TXPOWERSAVING 0x80
1855#define BCCK_RXPOWERSAVING 0x40
1856
1857#define BCCK_SIDEBAND 0x10
1858
1859#define BCCK_SCRAMBLE 0x8
1860#define BCCK_ANTDIVERSITY 0x8000
1861#define BCCK_CARRIER_RECOVERY 0x4000
1862#define BCCK_TXRATE 0x3000
1863#define BCCK_DCCANCEL 0x0800
1864#define BCCK_ISICANCEL 0x0400
1865#define BCCK_MATCH_FILTER 0x0200
1866#define BCCK_EQUALIZER 0x0100
1867#define BCCK_PREAMBLE_DETECT 0x800000
1868#define BCCK_FAST_FALSECCA 0x400000
1869#define BCCK_CH_ESTSTART 0x300000
1870#define BCCK_CCA_COUNT 0x080000
1871#define BCCK_CS_LIM 0x070000
1872#define BCCK_BIST_MODE 0x80000000
1873#define BCCK_CCAMASK 0x40000000
1874#define BCCK_TX_DAC_PHASE 0x4
1875#define BCCK_RX_ADC_PHASE 0x20000000
1876#define BCCKR_CP_MODE 0x0100
1877#define BCCK_TXDC_OFFSET 0xf0
1878#define BCCK_RXDC_OFFSET 0xf
1879#define BCCK_CCA_MODE 0xc000
1880#define BCCK_FALSECS_LIM 0x3f00
1881#define BCCK_CS_RATIO 0xc00000
1882#define BCCK_CORGBIT_SEL 0x300000
1883#define BCCK_PD_LIM 0x0f0000
1884#define BCCK_NEWCCA 0x80000000
1885#define BCCK_RXHP_OF_IG 0x8000
1886#define BCCK_RXIG 0x7f00
1887#define BCCK_LNA_POLARITY 0x800000
1888#define BCCK_RX1ST_BAIN 0x7f0000
1889#define BCCK_RF_EXTEND 0x20000000
1890#define BCCK_RXAGC_SATLEVEL 0x1f000000
1891#define BCCK_RXAGC_SATCOUNT 0xe0
1892#define BCCKRXRFSETTLE 0x1f
1893#define BCCK_FIXED_RXAGC 0x8000
1894#define BCCK_ANTENNA_POLARITY 0x2000
1895#define BCCK_TXFILTER_TYPE 0x0c00
1896#define BCCK_RXAGC_REPORTTYPE 0x0300
1897#define BCCK_RXDAGC_EN 0x80000000
1898#define BCCK_RXDAGC_PERIOD 0x20000000
1899#define BCCK_RXDAGC_SATLEVEL 0x1f000000
1900#define BCCK_TIMING_RECOVERY 0x800000
1901#define BCCK_TXC0 0x3f0000
1902#define BCCK_TXC1 0x3f000000
1903#define BCCK_TXC2 0x3f
1904#define BCCK_TXC3 0x3f00
1905#define BCCK_TXC4 0x3f0000
1906#define BCCK_TXC5 0x3f000000
1907#define BCCK_TXC6 0x3f
1908#define BCCK_TXC7 0x3f00
1909#define BCCK_DEBUGPORT 0xff0000
1910#define BCCK_DAC_DEBUG 0x0f000000
1911#define BCCK_FALSEALARM_ENABLE 0x8000
1912#define BCCK_FALSEALARM_READ 0x4000
1913#define BCCK_TRSSI 0x7f
1914#define BCCK_RXAGC_REPORT 0xfe
1915#define BCCK_RXREPORT_ANTSEL 0x80000000
1916#define BCCK_RXREPORT_MFOFF 0x40000000
1917#define BCCK_RXREPORT_SQLOSS 0x20000000
1918#define BCCK_RXREPORT_PKTLOSS 0x10000000
1919#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
1920#define BCCK_RXREPORT_RATEERROR 0x04000000
1921#define BCCK_RXREPORT_RXRATE 0x03000000
1922#define BCCK_RXFA_COUNTER_LOWER 0xff
1923#define BCCK_RXFA_COUNTER_UPPER 0xff000000
1924#define BCCK_RXHPAGC_START 0xe000
1925#define BCCK_RXHPAGC_FINAL 0x1c00
1926#define BCCK_RXFALSEALARM_ENABLE 0x8000
1927#define BCCK_FACOUNTER_FREEZE 0x4000
1928#define BCCK_TXPATH_SEL 0x10000000
1929#define BCCK_DEFAULT_RXPATH 0xc000000
1930#define BCCK_OPTION_RXPATH 0x3000000
1931
1932#define BNUM_OFSTF 0x3
1933#define BSHIFT_L 0xc0
1934#define BGI_TH 0xc
1935#define BRXPATH_A 0x1
1936#define BRXPATH_B 0x2
1937#define BRXPATH_C 0x4
1938#define BRXPATH_D 0x8
1939#define BTXPATH_A 0x1
1940#define BTXPATH_B 0x2
1941#define BTXPATH_C 0x4
1942#define BTXPATH_D 0x8
1943#define BTRSSI_FREQ 0x200
1944#define BADC_BACKOFF 0x3000
1945#define BDFIR_BACKOFF 0xc000
1946#define BTRSSI_LATCH_PHASE 0x10000
1947#define BRX_LDC_OFFSET 0xff
1948#define BRX_QDC_OFFSET 0xff00
1949#define BRX_DFIR_MODE 0x1800000
1950#define BRX_DCNF_TYPE 0xe000000
1951#define BRXIQIMB_A 0x3ff
1952#define BRXIQIMB_B 0xfc00
1953#define BRXIQIMB_C 0x3f0000
1954#define BRXIQIMB_D 0xffc00000
1955#define BDC_DC_NOTCH 0x60000
1956#define BRXNB_NOTCH 0x1f000000
1957#define BPD_TH 0xf
1958#define BPD_TH_OPT2 0xc000
1959#define BPWED_TH 0x700
1960#define BIFMF_WIN_L 0x800
1961#define BPD_OPTION 0x1000
1962#define BMF_WIN_L 0xe000
1963#define BBW_SEARCH_L 0x30000
1964#define BWIN_ENH_L 0xc0000
1965#define BBW_TH 0x700000
1966#define BED_TH2 0x3800000
1967#define BBW_OPTION 0x4000000
1968#define BRADIO_TH 0x18000000
1969#define BWINDOW_L 0xe0000000
1970#define BSBD_OPTION 0x1
1971#define BFRAME_TH 0x1c
1972#define BFS_OPTION 0x60
1973#define BDC_SLOPE_CHECK 0x80
1974#define BFGUARD_COUNTER_DC_L 0xe00
1975#define BFRAME_WEIGHT_SHORT 0x7000
1976#define BSUB_TUNE 0xe00000
1977#define BFRAME_DC_LENGTH 0xe000000
1978#define BSBD_START_OFFSET 0x30000000
1979#define BFRAME_TH_2 0x7
1980#define BFRAME_GI2_TH 0x38
1981#define BGI2_SYNC_EN 0x40
1982#define BSARCH_SHORT_EARLY 0x300
1983#define BSARCH_SHORT_LATE 0xc00
1984#define BSARCH_GI2_LATE 0x70000
1985#define BCFOANTSUM 0x1
1986#define BCFOACC 0x2
1987#define BCFOSTARTOFFSET 0xc
1988#define BCFOLOOPBACK 0x70
1989#define BCFOSUMWEIGHT 0x80
1990#define BDAGCENABLE 0x10000
1991#define BTXIQIMB_A 0x3ff
1992#define BTXIQIMB_B 0xfc00
1993#define BTXIQIMB_C 0x3f0000
1994#define BTXIQIMB_D 0xffc00000
1995#define BTXIDCOFFSET 0xff
1996#define BTXIQDCOFFSET 0xff00
1997#define BTXDFIRMODE 0x10000
1998#define BTXPESUDO_NOISEON 0x4000000
1999#define BTXPESUDO_NOISE_A 0xff
2000#define BTXPESUDO_NOISE_B 0xff00
2001#define BTXPESUDO_NOISE_C 0xff0000
2002#define BTXPESUDO_NOISE_D 0xff000000
2003#define BCCA_DROPOPTION 0x20000
2004#define BCCA_DROPTHRES 0xfff00000
2005#define BEDCCA_H 0xf
2006#define BEDCCA_L 0xf0
2007#define BLAMBDA_ED 0x300
2008#define BRX_INITIALGAIN 0x7f
2009#define BRX_ANTDIV_EN 0x80
2010#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
2011#define BRX_HIGHPOWER_FLOW 0x8000
2012#define BRX_AGC_FREEZE_THRES 0xc0000
2013#define BRX_FREEZESTEP_AGC1 0x300000
2014#define BRX_FREEZESTEP_AGC2 0xc00000
2015#define BRX_FREEZESTEP_AGC3 0x3000000
2016#define BRX_FREEZESTEP_AGC0 0xc000000
2017#define BRXRSSI_CMP_EN 0x10000000
2018#define BRXQUICK_AGCEN 0x20000000
2019#define BRXAGC_FREEZE_THRES_MODE 0x40000000
2020#define BRX_OVERFLOW_CHECKTYPE 0x80000000
2021#define BRX_AGCSHIFT 0x7f
2022#define BTRSW_TRI_ONLY 0x80
2023#define BPOWER_THRES 0x300
2024#define BRXAGC_EN 0x1
2025#define BRXAGC_TOGETHER_EN 0x2
2026#define BRXAGC_MIN 0x4
2027#define BRXHP_INI 0x7
2028#define BRXHP_TRLNA 0x70
2029#define BRXHP_RSSI 0x700
2030#define BRXHP_BBP1 0x7000
2031#define BRXHP_BBP2 0x70000
2032#define BRXHP_BBP3 0x700000
2033#define BRSSI_H 0x7f0000
2034#define BRSSI_GEN 0x7f000000
2035#define BRXSETTLE_TRSW 0x7
2036#define BRXSETTLE_LNA 0x38
2037#define BRXSETTLE_RSSI 0x1c0
2038#define BRXSETTLE_BBP 0xe00
2039#define BRXSETTLE_RXHP 0x7000
2040#define BRXSETTLE_ANTSW_RSSI 0x38000
2041#define BRXSETTLE_ANTSW 0xc0000
2042#define BRXPROCESS_TIME_DAGC 0x300000
2043#define BRXSETTLE_HSSI 0x400000
2044#define BRXPROCESS_TIME_BBPPW 0x800000
2045#define BRXANTENNA_POWER_SHIFT 0x3000000
2046#define BRSSI_TABLE_SELECT 0xc000000
2047#define BRXHP_FINAL 0x7000000
2048#define BRXHPSETTLE_BBP 0x7
2049#define BRXHTSETTLE_HSSI 0x8
2050#define BRXHTSETTLE_RXHP 0x70
2051#define BRXHTSETTLE_BBPPW 0x80
2052#define BRXHTSETTLE_IDLE 0x300
2053#define BRXHTSETTLE_RESERVED 0x1c00
2054#define BRXHT_RXHP_EN 0x8000
2055#define BRXAGC_FREEZE_THRES 0x30000
2056#define BRXAGC_TOGETHEREN 0x40000
2057#define BRXHTAGC_MIN 0x80000
2058#define BRXHTAGC_EN 0x100000
2059#define BRXHTDAGC_EN 0x200000
2060#define BRXHT_RXHP_BBP 0x1c00000
2061#define BRXHT_RXHP_FINAL 0xe0000000
2062#define BRXPW_RADIO_TH 0x3
2063#define BRXPW_RADIO_EN 0x4
2064#define BRXMF_HOLD 0x3800
2065#define BRXPD_DELAY_TH1 0x38
2066#define BRXPD_DELAY_TH2 0x1c0
2067#define BRXPD_DC_COUNT_MAX 0x600
2068#define BRXPD_DELAY_TH 0x8000
2069#define BRXPROCESS_DELAY 0xf0000
2070#define BRXSEARCHRANGE_GI2_EARLY 0x700000
2071#define BRXFRAME_FUARD_COUNTER_L 0x3800000
2072#define BRXSGI_GUARD_L 0xc000000
2073#define BRXSGI_SEARCH_L 0x30000000
2074#define BRXSGI_TH 0xc0000000
2075#define BDFSCNT0 0xff
2076#define BDFSCNT1 0xff00
2077#define BDFSFLAG 0xf0000
2078#define BMF_WEIGHT_SUM 0x300000
2079#define BMINIDX_TH 0x7f000000
2080#define BDAFORMAT 0x40000
2081#define BTXCH_EMU_ENABLE 0x01000000
2082#define BTRSW_ISOLATION_A 0x7f
2083#define BTRSW_ISOLATION_B 0x7f00
2084#define BTRSW_ISOLATION_C 0x7f0000
2085#define BTRSW_ISOLATION_D 0x7f000000
2086#define BEXT_LNA_GAIN 0x7c00
2087
2088#define BSTBC_EN 0x4
2089#define BANTENNA_MAPPING 0x10
2090#define BNSS 0x20
2091#define BCFO_ANTSUM_ID 0x200
2092#define BPHY_COUNTER_RESET 0x8000000
2093#define BCFO_REPORT_GET 0x4000000
2094#define BOFDM_CONTINUE_TX 0x10000000
2095#define BOFDM_SINGLE_CARRIER 0x20000000
2096#define BOFDM_SINGLE_TONE 0x40000000
2097#define BHT_DETECT 0x100
2098#define BCFOEN 0x10000
2099#define BCFOVALUE 0xfff00000
2100#define BSIGTONE_RE 0x3f
2101#define BSIGTONE_IM 0x7f00
2102#define BCOUNTER_CCA 0xffff
2103#define BCOUNTER_PARITYFAIL 0xffff0000
2104#define BCOUNTER_RATEILLEGAL 0xffff
2105#define BCOUNTER_CRC8FAIL 0xffff0000
2106#define BCOUNTER_MCSNOSUPPORT 0xffff
2107#define BCOUNTER_FASTSYNC 0xffff
2108#define BSHORTCFO 0xfff
2109#define BSHORTCFOT_LENGTH 12
2110#define BSHORTCFOF_LENGTH 11
2111#define BLONGCFO 0x7ff
2112#define BLONGCFOT_LENGTH 11
2113#define BLONGCFOF_LENGTH 11
2114#define BTAILCFO 0x1fff
2115#define BTAILCFOT_LENGTH 13
2116#define BTAILCFOF_LENGTH 12
2117#define BNOISE_EN_PWDB 0xffff
2118#define BCC_POWER_DB 0xffff0000
2119#define BMOISE_PWDB 0xffff
2120#define BPOWERMEAST_LENGTH 10
2121#define BPOWERMEASF_LENGTH 3
2122#define BRX_HT_BW 0x1
2123#define BRXSC 0x6
2124#define BRX_HT 0x8
2125#define BNB_INTF_DET_ON 0x1
2126#define BINTF_WIN_LEN_CFG 0x30
2127#define BNB_INTF_TH_CFG 0x1c0
2128#define BRFGAIN 0x3f
2129#define BTABLESEL 0x40
2130#define BTRSW 0x80
2131#define BRXSNR_A 0xff
2132#define BRXSNR_B 0xff00
2133#define BRXSNR_C 0xff0000
2134#define BRXSNR_D 0xff000000
2135#define BSNR_EVMT_LENGTH 8
2136#define BSNR_EVMF_LENGTH 1
2137#define BCSI1ST 0xff
2138#define BCSI2ND 0xff00
2139#define BRXEVM1ST 0xff0000
2140#define BRXEVM2ND 0xff000000
2141#define BSIGEVM 0xff
2142#define BPWDB 0xff00
2143#define BSGIEN 0x10000
2144
2145#define BSFACTOR_QMA1 0xf
2146#define BSFACTOR_QMA2 0xf0
2147#define BSFACTOR_QMA3 0xf00
2148#define BSFACTOR_QMA4 0xf000
2149#define BSFACTOR_QMA5 0xf0000
2150#define BSFACTOR_QMA6 0xf0000
2151#define BSFACTOR_QMA7 0xf00000
2152#define BSFACTOR_QMA8 0xf000000
2153#define BSFACTOR_QMA9 0xf0000000
2154#define BCSI_SCHEME 0x100000
2155
2156#define BNOISE_LVL_TOP_SET 0x3
2157#define BCHSMOOTH 0x4
2158#define BCHSMOOTH_CFG1 0x38
2159#define BCHSMOOTH_CFG2 0x1c0
2160#define BCHSMOOTH_CFG3 0xe00
2161#define BCHSMOOTH_CFG4 0x7000
2162#define BMRCMODE 0x800000
2163#define BTHEVMCFG 0x7000000
2164
2165#define BLOOP_FIT_TYPE 0x1
2166#define BUPD_CFO 0x40
2167#define BUPD_CFO_OFFDATA 0x80
2168#define BADV_UPD_CFO 0x100
2169#define BADV_TIME_CTRL 0x800
2170#define BUPD_CLKO 0x1000
2171#define BFC 0x6000
2172#define BTRACKING_MODE 0x8000
2173#define BPHCMP_ENABLE 0x10000
2174#define BUPD_CLKO_LTF 0x20000
2175#define BCOM_CH_CFO 0x40000
2176#define BCSI_ESTI_MODE 0x80000
2177#define BADV_UPD_EQZ 0x100000
2178#define BUCHCFG 0x7000000
2179#define BUPDEQZ 0x8000000
2180
2181#define BRX_PESUDO_NOISE_ON 0x20000000
2182#define BRX_PESUDO_NOISE_A 0xff
2183#define BRX_PESUDO_NOISE_B 0xff00
2184#define BRX_PESUDO_NOISE_C 0xff0000
2185#define BRX_PESUDO_NOISE_D 0xff000000
2186#define BRX_PESUDO_NOISESTATE_A 0xffff
2187#define BRX_PESUDO_NOISESTATE_B 0xffff0000
2188#define BRX_PESUDO_NOISESTATE_C 0xffff
2189#define BRX_PESUDO_NOISESTATE_D 0xffff0000
2190
2191#define BZEBRA1_HSSIENABLE 0x8
2192#define BZEBRA1_TRXCONTROL 0xc00
2193#define BZEBRA1_TRXGAINSETTING 0x07f
2194#define BZEBRA1_RXCOUNTER 0xc00
2195#define BZEBRA1_TXCHANGEPUMP 0x38
2196#define BZEBRA1_RXCHANGEPUMP 0x7
2197#define BZEBRA1_CHANNEL_NUM 0xf80
2198#define BZEBRA1_TXLPFBW 0x400
2199#define BZEBRA1_RXLPFBW 0x600
2200
2201#define BRTL8256REG_MODE_CTRL1 0x100
2202#define BRTL8256REG_MODE_CTRL0 0x40
2203#define BRTL8256REG_TXLPFBW 0x18
2204#define BRTL8256REG_RXLPFBW 0x600
2205
2206#define BRTL8258_TXLPFBW 0xc
2207#define BRTL8258_RXLPFBW 0xc00
2208#define BRTL8258_RSSILPFBW 0xc0
2209
2210#define BBYTE0 0x1
2211#define BBYTE1 0x2
2212#define BBYTE2 0x4
2213#define BBYTE3 0x8
2214#define BWORD0 0x3
2215#define BWORD1 0xc
2216#define BWORD 0xf
2217
2218#define MASKBYTE0 0xff
2219#define MASKBYTE1 0xff00
2220#define MASKBYTE2 0xff0000
2221#define MASKBYTE3 0xff000000
2222#define MASKHWORD 0xffff0000
2223#define MASKLWORD 0x0000ffff
2224#define MASKDWORD 0xffffffff
2225#define MASK12BITS 0xfff
2226#define MASKH4BITS 0xf0000000
2227#define MASKOFDM_D 0xffc00000
2228#define MASKCCK 0x3f3f3f3f
2229
2230#define MASK4BITS 0x0f
2231#define MASK20BITS 0xfffff
2232#define RFREG_OFFSET_MASK 0xfffff
2233
2234#define BENABLE 0x1
2235#define BDISABLE 0x0
2236
2237#define LEFT_ANTENNA 0x0
2238#define RIGHT_ANTENNA 0x1
2239
2240#define TCHECK_TXSTATUS 500
2241#define TUPDATE_RXCOUNTER 100
2242
2243#define REG_UN_USED_REGISTER 0x01bf
2244
2245/* WOL bit information */
2246#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
2247#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
2248#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
2249#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
2250#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
2251
2252#define WOL_REASON_PTK_UPDATE BIT(0)
2253#define WOL_REASON_GTK_UPDATE BIT(1)
2254#define WOL_REASON_DISASSOC BIT(2)
2255#define WOL_REASON_DEAUTH BIT(3)
2256#define WOL_REASON_FW_DISCONNECT BIT(4)
2257
2258#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c
new file mode 100644
index 000000000000..4faafdbab9c6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c
@@ -0,0 +1,467 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
38{
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
41
42 switch (bandwidth) {
43 case HT_CHANNEL_WIDTH_20:
44 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
45 0xfffff3ff) | BIT(10) | BIT(11));
46 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
47 rtlphy->rfreg_chnlval[0]);
48 break;
49 case HT_CHANNEL_WIDTH_20_40:
50 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
51 0xfffff3ff) | BIT(10));
52 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
53 rtlphy->rfreg_chnlval[0]);
54 break;
55 default:
56 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
57 "unknown bandwidth: %#X\n", bandwidth);
58 break;
59 }
60}
61
/*
 * Program the CCK (1M/2~11M) TX power registers for both RF paths.
 *
 * plevel holds the per-path CCK power index.  The 8-bit index is
 * replicated into all four bytes of a 32-bit AGC word per path,
 * clamped to RF6052_MAX_TX_PWR, adjusted by thermal power tracking,
 * and then written byte-wise into the CCK rate AGC registers.
 */
void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
				       u8 *plevel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u32 tx_agc[2] = {0, 0}, tmpval;
	bool turbo_scanoff = false;
	u8 idx1, idx2;
	u8 *ptr;
	u8 direction;
	u32 pwrtrac_value;

	/* Any non-default regulatory setting disables the fixed
	 * "turbo scan" power level below. */
	if (rtlefuse->eeprom_regulatory != 0)
		turbo_scanoff = true;

	if (mac->act_scanning == true) {
		/* While scanning, default to max power on both paths... */
		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;

		/* ...unless turbo scan is off: then use the configured level. */
		if (turbo_scanoff) {
			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
				tx_agc[idx1] = plevel[idx1] |
					       (plevel[idx1] << 8) |
					       (plevel[idx1] << 16) |
					       (plevel[idx1] << 24);
			}
		}
	} else {
		/* Normal operation: replicate the power index into all
		 * four byte lanes of the AGC word. */
		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
			tx_agc[idx1] = plevel[idx1] | (plevel[idx1] << 8) |
				       (plevel[idx1] << 16) |
				       (plevel[idx1] << 24);
		}

		if (rtlefuse->eeprom_regulatory == 0) {
			/* Apply the eeprom MCS offsets for bytes 0/1. */
			tmpval = (rtlphy->mcs_offset[0][6]) +
				 (rtlphy->mcs_offset[0][7] << 8);
			tx_agc[RF90_PATH_A] += tmpval;

			/* NOTE(review): sibling rtlwifi drivers use << 8 here
			 * (byte 1); << 24 puts offset [15] into byte 3 —
			 * confirm against vendor reference code. */
			tmpval = (rtlphy->mcs_offset[0][14]) +
				 (rtlphy->mcs_offset[0][15] << 24);
			tx_agc[RF90_PATH_B] += tmpval;
		}
	}

	/* Clamp each of the four per-rate bytes to the RF6052 maximum. */
	for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
		ptr = (u8 *)(&(tx_agc[idx1]));
		for (idx2 = 0; idx2 < 4; idx2++) {
			if (*ptr > RF6052_MAX_TX_PWR)
				*ptr = RF6052_MAX_TX_PWR;
			ptr++;
		}
	}
	/* Thermal power tracking: 1 = raise, 2 = lower.
	 * NOTE(review): assumes the helper always writes *direction —
	 * confirm; otherwise it is read uninitialized here. */
	rtl88e_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
	if (direction == 1) {
		tx_agc[0] += pwrtrac_value;
		tx_agc[1] += pwrtrac_value;
	} else if (direction == 2) {
		tx_agc[0] -= pwrtrac_value;
		tx_agc[1] -= pwrtrac_value;
	}
	/* Byte 0 of path A -> 1M CCK rate (written into byte lane 1 of
	 * RTXAGC_A_CCK1_MCS32 via MASKBYTE1). */
	tmpval = tx_agc[RF90_PATH_A] & 0xff;
	rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_A_CCK1_MCS32);

	/* Bytes 1..3 of path A -> 2/5.5/11M rates. */
	tmpval = tx_agc[RF90_PATH_A] >> 8;

	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK11_A_CCK2_11);

	/* Byte 3 of path B -> 11M rate (byte lane 0 of the shared reg). */
	tmpval = tx_agc[RF90_PATH_B] >> 24;
	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK11_A_CCK2_11);

	/* Bytes 0..2 of path B -> 1/2/5.5M rates. */
	tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
	rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK1_55_MCS32);
}
154
155static void rtl88e_phy_get_power_base(struct ieee80211_hw *hw,
156 u8 *pwrlvlofdm, u8 *pwrlvlbw20,
157 u8 *pwrlvlbw40, u8 channel,
158 u32 *ofdmbase, u32 *mcsbase)
159{
160 struct rtl_priv *rtlpriv = rtl_priv(hw);
161 struct rtl_phy *rtlphy = &(rtlpriv->phy);
162 u32 base0, base1;
163 u8 i, powerlevel[2];
164
165 for (i = 0; i < 2; i++) {
166 base0 = pwrlvlofdm[i];
167
168 base0 = (base0 << 24) | (base0 << 16) |
169 (base0 << 8) | base0;
170 *(ofdmbase + i) = base0;
171 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
172 "[OFDM power base index rf(%c) = 0x%x]\n",
173 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
174 }
175
176 for (i = 0; i < 2; i++) {
177 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
178 powerlevel[i] = pwrlvlbw20[i];
179 else
180 powerlevel[i] = pwrlvlbw40[i];
181 base1 = powerlevel[i];
182 base1 = (base1 << 24) |
183 (base1 << 16) | (base1 << 8) | base1;
184
185 *(mcsbase + i) = base1;
186
187 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
188 "[MCS power base index rf(%c) = 0x%x]\n",
189 ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
190 }
191}
192
/*
 * Compute the 32-bit TX power word for one rate-register index on both
 * RF paths, honoring the eeprom regulatory mode:
 *   0 - "Realtek better performance": base + eeprom MCS offset
 *   1 - Realtek regulatory: per-channel-group MCS offset + base
 *   2 - "Better regulatory": base only
 *   3 - customer limits from the per-channel pwrgroup tables
 * A BT-coexistence high-power level then subtracts a fixed back-off.
 * Results are stored in outval[0] (path A) and outval[1] (path B).
 */
static void get_txpwr_by_reg(struct ieee80211_hw *hw, u8 chan, u8 index,
			     u32 *base0, u32 *base1, u32 *outval)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 i, chg = 0, pwr_lim[4], pwr_diff = 0, cust_pwr_dif;
	u32 writeval, cust_lim, rf, tmp;
	u8 ch = chan - 1;	/* channel number -> 0-based table index */
	u8 j;

	for (rf = 0; rf < 2; rf++) {
		/* Path B's offsets live 8 entries after path A's. */
		j = index + (rf ? 8 : 0);
		/* Indices 0/1 are legacy OFDM rates, 2+ are MCS rates. */
		tmp = ((index < 2) ? base0[rf] : base1[rf]);
		switch (rtlefuse->eeprom_regulatory) {
		case 0:
			chg = 0;

			writeval = rtlphy->mcs_offset[chg][j] + tmp;

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, "
				"writeval(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeval);
			break;
		case 1:
			/* Channel group: chan/3, with 14 as its own group. */
			if (rtlphy->pwrgroup_cnt == 1) {
				chg = 0;
			} else {
				chg = chan / 3;
				if (chan == 14)
					chg = 5;
			}
			writeval = rtlphy->mcs_offset[chg][j] + tmp;

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeval);
			break;
		case 2:
			/* No offset applied at all. */
			writeval = ((index < 2) ? base0[rf] : base1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Better regulatory, writeval(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeval);
			break;
		case 3:
			chg = 0;

			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 40MHz rf(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'),
					rtlefuse->pwrgroup_ht40[rf][ch]);
			} else {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 20MHz rf(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'),
					rtlefuse->pwrgroup_ht20[rf][ch]);
			}

			/* Extra headroom for legacy vs HT20 rates. */
			if (index < 2)
				pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][ch];
			else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
				pwr_diff = rtlefuse->txpwr_ht20diff[rf][ch];

			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
				cust_pwr_dif = rtlefuse->pwrgroup_ht40[rf][ch];
			else
				cust_pwr_dif = rtlefuse->pwrgroup_ht20[rf][ch];

			/* Remaining customer power budget after rate diff. */
			if (pwr_diff > cust_pwr_dif)
				pwr_diff = 0;
			else
				pwr_diff = cust_pwr_dif - pwr_diff;

			/* Cap each 7-bit per-byte offset at the budget. */
			for (i = 0; i < 4; i++) {
				pwr_lim[i] = (u8)((rtlphy->mcs_offset[chg][j] &
						  (0x7f << (i * 8))) >> (i * 8));

				if (pwr_lim[i] > pwr_diff)
					pwr_lim[i] = pwr_diff;
			}

			cust_lim = (pwr_lim[3] << 24) | (pwr_lim[2] << 16) |
				   (pwr_lim[1] << 8) | (pwr_lim[0]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer's limit rf(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), cust_lim);

			writeval = cust_lim + tmp;

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer, writeval rf(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeval);
			break;
		default:
			/* Unknown mode: behave like case 0. */
			chg = 0;
			writeval = rtlphy->mcs_offset[chg][j] + tmp;

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, writeval "
				"rf(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeval);
			break;
		}

		/* BT coexistence back-off: 6 or 12 units per byte lane. */
		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
			writeval = writeval - 0x06060606;
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_BT2)
			writeval -= 0x0c0c0c0c;
		*(outval + rf) = writeval;
	}
}
309
310static void write_ofdm_pwr(struct ieee80211_hw *hw, u8 index, u32 *pvalue)
311{
312 struct rtl_priv *rtlpriv = rtl_priv(hw);
313 u16 regoffset_a[6] = {
314 RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
315 RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
316 RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
317 };
318 u16 regoffset_b[6] = {
319 RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
320 RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
321 RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
322 };
323 u8 i, rf, pwr_val[4];
324 u32 writeval;
325 u16 regoffset;
326
327 for (rf = 0; rf < 2; rf++) {
328 writeval = pvalue[rf];
329 for (i = 0; i < 4; i++) {
330 pwr_val[i] = (u8) ((writeval & (0x7f <<
331 (i * 8))) >> (i * 8));
332
333 if (pwr_val[i] > RF6052_MAX_TX_PWR)
334 pwr_val[i] = RF6052_MAX_TX_PWR;
335 }
336 writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
337 (pwr_val[1] << 8) | pwr_val[0];
338
339 if (rf == 0)
340 regoffset = regoffset_a[index];
341 else
342 regoffset = regoffset_b[index];
343 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
344
345 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
346 "Set 0x%x = %08x\n", regoffset, writeval);
347 }
348}
349
350void rtl88e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
351 u8 *pwrlvlofdm,
352 u8 *pwrlvlbw20,
353 u8 *pwrlvlbw40, u8 chan)
354{
355 u32 writeval[2], base0[2], base1[2];
356 u8 index;
357 u8 direction;
358 u32 pwrtrac_value;
359
360 rtl88e_phy_get_power_base(hw, pwrlvlofdm, pwrlvlbw20,
361 pwrlvlbw40, chan, &base0[0],
362 &base1[0]);
363
364 rtl88e_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
365
366 for (index = 0; index < 6; index++) {
367 get_txpwr_by_reg(hw, chan, index, &base0[0], &base1[0],
368 &writeval[0]);
369 if (direction == 1) {
370 writeval[0] += pwrtrac_value;
371 writeval[1] += pwrtrac_value;
372 } else if (direction == 2) {
373 writeval[0] -= pwrtrac_value;
374 writeval[1] -= pwrtrac_value;
375 }
376 write_ofdm_pwr(hw, index, &writeval[0]);
377 }
378}
379
/*
 * Initialize the RF6052 chip on every active RF path.
 *
 * For each path: save the current RF-environment field, switch the
 * BB/RF interface into the mode needed for register programming,
 * download the path's register table from the header file, and
 * restore the saved RF-environment field.  Returns false as soon as
 * any path fails to program.
 *
 * NOTE: the exact register/udelay sequence below matches the vendor
 * bring-up procedure; do not reorder.
 */
static bool rf6052_conf_para(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 u4val = 0;
	u8 rfpath;
	bool rtstatus = true;
	struct bb_reg_def *pphyreg;

	for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
		pphyreg = &rtlphy->phyreg_def[rfpath];

		/* Save RF_ENV: paths A/C use the low half of the field,
		 * B/D the high half (mask shifted by 16). */
		switch (rfpath) {
		case RF90_PATH_A:
		case RF90_PATH_C:
			u4val = rtl_get_bbreg(hw, pphyreg->rfintfs,
					      BRFSI_RFENV);
			break;
		case RF90_PATH_B:
		case RF90_PATH_D:
			u4val = rtl_get_bbreg(hw, pphyreg->rfintfs,
					      BRFSI_RFENV << 16);
			break;
		}

		/* Put the RF interface under software control. */
		rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
		udelay(1);

		rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
		udelay(1);

		/* Configure the 3-wire serial interface lengths. */
		rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
			      B3WIREADDREAALENGTH, 0x0);
		udelay(1);

		rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
		udelay(1);

		/* Download the per-path RF register table (C/D have none). */
		switch (rfpath) {
		case RF90_PATH_A:
			rtstatus = rtl88e_phy_config_rf_with_headerfile(hw,
					(enum radio_path)rfpath);
			break;
		case RF90_PATH_B:
			rtstatus = rtl88e_phy_config_rf_with_headerfile(hw,
					(enum radio_path)rfpath);
			break;
		case RF90_PATH_C:
			break;
		case RF90_PATH_D:
			break;
		}

		/* Restore the saved RF_ENV field. */
		switch (rfpath) {
		case RF90_PATH_A:
		case RF90_PATH_C:
			rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4val);
			break;
		case RF90_PATH_B:
		case RF90_PATH_D:
			rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
				      u4val);
			break;
		}

		if (rtstatus != true) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 "Radio[%d] Fail!!", rfpath);
			return false;
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
	return rtstatus;
}
455
456bool rtl88e_phy_rf6052_config(struct ieee80211_hw *hw)
457{
458 struct rtl_priv *rtlpriv = rtl_priv(hw);
459 struct rtl_phy *rtlphy = &(rtlpriv->phy);
460
461 if (rtlphy->rf_type == RF_1T1R)
462 rtlphy->num_total_rfpath = 1;
463 else
464 rtlphy->num_total_rfpath = 2;
465
466 return rf6052_conf_para(hw);
467}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
new file mode 100644
index 000000000000..a39a2a3dbcc9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
@@ -0,0 +1,46 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_RF_H__
31#define __RTL92C_RF_H__
32
33#define RF6052_MAX_TX_PWR 0x3F
34#define RF6052_MAX_REG 0x3F
35
36void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
37 u8 bandwidth);
38void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39 u8 *ppowerlevel);
40void rtl88e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41 u8 *ppowerlevel_ofdm,
42 u8 *ppowerlevel_bw20,
43 u8 *ppowerlevel_bw40, u8 channel);
44bool rtl88e_phy_rf6052_config(struct ieee80211_hw *hw);
45
46#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
new file mode 100644
index 000000000000..c254693a1e6a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -0,0 +1,400 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../core.h"
32#include "../pci.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "dm.h"
37#include "hw.h"
38#include "sw.h"
39#include "trx.h"
40#include "led.h"
41#include "table.h"
42
43#include <linux/vmalloc.h>
44#include <linux/module.h>
45
/* Set the PCIe ASPM (Active State Power Management) policy defaults
 * used by the common rtlwifi PCI layer. */
static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	/* Close ASPM for AMD chipsets by default. */
	rtlpci->const_amdpci_aspm = 0;

	/* ASPM PS mode.
	 * 0 - Disable ASPM,
	 * 1 - Enable ASPM without Clock Req,
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Always Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;

	/* Setting for PCI-E device */
	rtlpci->const_devicepci_aspm_setting = 0x03;

	/* Setting for PCI-E bridge */
	rtlpci->const_hostpci_aspm_setting = 0x02;

	/* In Hw/Sw Radio Off situation.
	 * 0 - Default,
	 * 1 - From ASPM setting without low Mac Pwr,
	 * 2 - From ASPM setting with low Mac Pwr,
	 * 3 - Bus D3
	 * set default to RTL8192CE:0 RTL8192SE:2
	 */
	rtlpci->const_hwsw_rfoff_d3 = 0;

	/* This setting works for those device with
	 * backdoor ASPM setting such as EPHY setting.
	 * 0 - Not support ASPM,
	 * 1 - Support ASPM,
	 * 2 - According to chipset.
	 */
	rtlpci->const_support_pciaspm = 1;
}
86
87int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
88{
89 int err = 0;
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
92 u8 tid;
93
94 rtl8188ee_bt_reg_init(hw);
95
96 rtlpriv->dm.dm_initialgain_enable = 1;
97 rtlpriv->dm.dm_flag = 0;
98 rtlpriv->dm.disable_framebursting = 0;
99 rtlpriv->dm.thermalvalue = 0;
100 rtlpci->transmit_config = CFENDFORM | BIT(15);
101
102 /* compatible 5G band 88ce just 2.4G band & smsp */
103 rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
104 rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
105 rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
106
107 rtlpci->receive_config = (RCR_APPFCS |
108 RCR_APP_MIC |
109 RCR_APP_ICV |
110 RCR_APP_PHYST_RXFF |
111 RCR_HTC_LOC_CTRL |
112 RCR_AMF |
113 RCR_ACF |
114 RCR_ADF |
115 RCR_AICV |
116 RCR_ACRC32 |
117 RCR_AB |
118 RCR_AM |
119 RCR_APM |
120 0);
121
122 rtlpci->irq_mask[0] =
123 (u32) (IMR_PSTIMEOUT |
124 IMR_HSISR_IND_ON_INT |
125 IMR_C2HCMD |
126 IMR_HIGHDOK |
127 IMR_MGNTDOK |
128 IMR_BKDOK |
129 IMR_BEDOK |
130 IMR_VIDOK |
131 IMR_VODOK |
132 IMR_RDU |
133 IMR_ROK |
134 0);
135 rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0);
136 rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN);
137
138 /* for debug level */
139 rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
140 /* for LPS & IPS */
141 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
142 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
143 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
144 if (!rtlpriv->psc.inactiveps)
145 pr_info("rtl8188ee: Power Save off (module option)\n");
146 if (!rtlpriv->psc.fwctrl_lps)
147 pr_info("rtl8188ee: FW Power Save off (module option)\n");
148 rtlpriv->psc.reg_fwctrl_lps = 3;
149 rtlpriv->psc.reg_max_lps_awakeintvl = 5;
150 /* for ASPM, you can close aspm through
151 * set const_support_pciaspm = 0
152 */
153 rtl88e_init_aspm_vars(hw);
154
155 if (rtlpriv->psc.reg_fwctrl_lps == 1)
156 rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
157 else if (rtlpriv->psc.reg_fwctrl_lps == 2)
158 rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
159 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
160 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
161
162 /* for firmware buf */
163 rtlpriv->rtlhal.pfirmware = vmalloc(0x8000);
164 if (!rtlpriv->rtlhal.pfirmware) {
165 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
166 "Can't alloc buffer for fw.\n");
167 return 1;
168 }
169
170 rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
171 rtlpriv->max_fw_size = 0x8000;
172 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
173 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
174 rtlpriv->io.dev, GFP_KERNEL, hw,
175 rtl_fw_cb);
176 if (err) {
177 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
178 "Failed to request firmware!\n");
179 return 1;
180 }
181
182 /* for early mode */
183 rtlpriv->rtlhal.earlymode_enable = false;
184 rtlpriv->rtlhal.max_earlymode_num = 10;
185 for (tid = 0; tid < 8; tid++)
186 skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
187
188 /*low power */
189 rtlpriv->psc.low_power_enable = false;
190 if (rtlpriv->psc.low_power_enable) {
191 init_timer(&rtlpriv->works.fw_clockoff_timer);
192 setup_timer(&rtlpriv->works.fw_clockoff_timer,
193 rtl88ee_fw_clk_off_timer_callback,
194 (unsigned long)hw);
195 }
196
197 init_timer(&rtlpriv->works.fast_antenna_training_timer);
198 setup_timer(&rtlpriv->works.fast_antenna_training_timer,
199 rtl88e_dm_fast_antenna_training_callback,
200 (unsigned long)hw);
201 return err;
202}
203
204void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
205{
206 struct rtl_priv *rtlpriv = rtl_priv(hw);
207
208 if (rtlpriv->rtlhal.pfirmware) {
209 vfree(rtlpriv->rtlhal.pfirmware);
210 rtlpriv->rtlhal.pfirmware = NULL;
211 }
212
213 if (rtlpriv->psc.low_power_enable)
214 del_timer_sync(&rtlpriv->works.fw_clockoff_timer);
215
216 del_timer_sync(&rtlpriv->works.fast_antenna_training_timer);
217}
218
/* HAL callback table wiring the common rtlwifi core to the 8188EE
 * chip-specific implementations (hw.c, phy.c, dm.c, trx.c, led.c). */
static struct rtl_hal_ops rtl8188ee_hal_ops = {
	.init_sw_vars = rtl88e_init_sw_vars,
	.deinit_sw_vars = rtl88e_deinit_sw_vars,
	.read_eeprom_info = rtl88ee_read_eeprom_info,
	.interrupt_recognized = rtl88ee_interrupt_recognized,/*need check*/
	.hw_init = rtl88ee_hw_init,
	.hw_disable = rtl88ee_card_disable,
	.hw_suspend = rtl88ee_suspend,
	.hw_resume = rtl88ee_resume,
	.enable_interrupt = rtl88ee_enable_interrupt,
	.disable_interrupt = rtl88ee_disable_interrupt,
	.set_network_type = rtl88ee_set_network_type,
	.set_chk_bssid = rtl88ee_set_check_bssid,
	.set_qos = rtl88ee_set_qos,
	.set_bcn_reg = rtl88ee_set_beacon_related_registers,
	.set_bcn_intv = rtl88ee_set_beacon_interval,
	.update_interrupt_mask = rtl88ee_update_interrupt_mask,
	.get_hw_reg = rtl88ee_get_hw_reg,
	.set_hw_reg = rtl88ee_set_hw_reg,
	.update_rate_tbl = rtl88ee_update_hal_rate_tbl,
	.fill_tx_desc = rtl88ee_tx_fill_desc,
	.fill_tx_cmddesc = rtl88ee_tx_fill_cmddesc,
	.query_rx_desc = rtl88ee_rx_query_desc,
	.set_channel_access = rtl88ee_update_channel_access_setting,
	.radio_onoff_checking = rtl88ee_gpio_radio_on_off_checking,
	.set_bw_mode = rtl88e_phy_set_bw_mode,
	.switch_channel = rtl88e_phy_sw_chnl,
	.dm_watchdog = rtl88e_dm_watchdog,
	.scan_operation_backup = rtl88e_phy_scan_operation_backup,
	.set_rf_power_state = rtl88e_phy_set_rf_power_state,
	.led_control = rtl88ee_led_control,
	.set_desc = rtl88ee_set_desc,
	.get_desc = rtl88ee_get_desc,
	.tx_polling = rtl88ee_tx_polling,
	.enable_hw_sec = rtl88ee_enable_hw_security_config,
	.set_key = rtl88ee_set_key,
	.init_sw_leds = rtl88ee_init_sw_leds,
	.allow_all_destaddr = rtl88ee_allow_all_destaddr,
	.get_bbreg = rtl88e_phy_query_bb_reg,
	.set_bbreg = rtl88e_phy_set_bb_reg,
	.get_rfreg = rtl88e_phy_query_rf_reg,
	.set_rfreg = rtl88e_phy_set_rf_reg,
};
262
/* Module parameter defaults; overridable via swenc/ips/swlps/fwlps/debug
 * (see module_param_named() below). */
static struct rtl_mod_params rtl88ee_mod_params = {
	.sw_crypto = false,	/* hardware crypto by default */
	.inactiveps = true,	/* inactive power save (IPS) on */
	.swctrl_lps = false,	/* software-controlled LPS off */
	.fwctrl_lps = true,	/* firmware-controlled LPS on */
	.debug = DBG_EMERG,	/* quietest debug level */
};
270
/* Chip configuration handed to the common rtlwifi PCI layer: name,
 * callback table, module params, and the register/bit map translating
 * generic rtlwifi symbols to 8188EE hardware values (from reg.h). */
static struct rtl_hal_cfg rtl88ee_hal_cfg = {
	.bar_id = 2,
	.write_readback = true,
	.name = "rtl88e_pci",
	.ops = &rtl8188ee_hal_ops,
	.mod_params = &rtl88ee_mod_params,

	/* System / MAC control registers. */
	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
	.maps[SYS_CLK] = REG_SYS_CLKR,
	.maps[MAC_RCR_AM] = AM,
	.maps[MAC_RCR_AB] = AB,
	.maps[MAC_RCR_ACRC32] = ACRC32,
	.maps[MAC_RCR_ACF] = ACF,
	.maps[MAC_RCR_AAP] = AAP,

	/* EFUSE access registers and layout constants. */
	.maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,

	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_CLK] = 0,
	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
	.maps[EFUSE_ANA8M] = ANA8M,
	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
	.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,

	/* Security CAM (key cache) registers and cipher codes. */
	.maps[RWCAM] = REG_CAMCMD,
	.maps[WCAMI] = REG_CAMWRITE,
	.maps[RCAMO] = REG_CAMREAD,
	.maps[CAMDBG] = REG_CAMDBG,
	.maps[SECR] = REG_SECCFG,
	.maps[SEC_CAM_NONE] = CAM_NONE,
	.maps[SEC_CAM_WEP40] = CAM_WEP40,
	.maps[SEC_CAM_TKIP] = CAM_TKIP,
	.maps[SEC_CAM_AES] = CAM_AES,
	.maps[SEC_CAM_WEP104] = CAM_WEP104,

	/* Interrupt mask bits. */
	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
/*	.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/
	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
/*	.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
/*	.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/

	.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
	.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
	.maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
	.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
	.maps[RTL_IMR_RDU] = IMR_RDU,
	.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
	.maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
	.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
	.maps[RTL_IMR_TBDER] = IMR_TBDER,
	.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
	.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
	.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
	.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
	.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
	.maps[RTL_IMR_VODOK] = IMR_VODOK,
	.maps[RTL_IMR_ROK] = IMR_ROK,
	.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),

	/* Rate-control descriptor rate codes. */
	.maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
	.maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
	.maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
	.maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
	.maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
	.maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
	.maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
	.maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
	.maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
	.maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
	.maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
	.maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,

	.maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
	.maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
};
364
365static DEFINE_PCI_DEVICE_TABLE(rtl88ee_pci_ids) = {
366 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)},
367 {},
368};
369
370MODULE_DEVICE_TABLE(pci, rtl88ee_pci_ids);
371
372MODULE_AUTHOR("zhiyuan_yang <zhiyuan_yang@realsil.com.cn>");
373MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
374MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
375MODULE_LICENSE("GPL");
376MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless");
377MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin");
378
379module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444);
380module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
381module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
382module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
383module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
384MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
385MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
386MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
387MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
388MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
389
390static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
391
392static struct pci_driver rtl88ee_driver = {
393 .name = KBUILD_MODNAME,
394 .id_table = rtl88ee_pci_ids,
395 .probe = rtl_pci_probe,
396 .remove = rtl_pci_disconnect,
397 .driver.pm = &rtlwifi_pm_ops,
398};
399
400module_pci_driver(rtl88ee_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h
new file mode 100644
index 000000000000..85e02b3bdff8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h
@@ -0,0 +1,36 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
/* Guard renamed from __RTL92CE_SW_H__ (copy-pasted from the rtl8192ce
 * driver) to avoid a silent no-op include if both headers ever end up
 * in the same translation unit. */
#ifndef __RTL88E_SW_H__
#define __RTL88E_SW_H__

int rtl88e_init_sw_vars(struct ieee80211_hw *hw);
void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/rtlwifi/rtl8188ee/table.c
new file mode 100644
index 000000000000..fad373f97b2c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/table.c
@@ -0,0 +1,643 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
32#include "table.h"
33
/* Baseband (PHY) initialization table for the 1T RTL8188EE:
 * flat (register offset, 32-bit value) pairs, 382 u32 entries total
 * (must match RTL8188EEPHY_REG_1TARRAYLEN in table.h).
 * Values are vendor-supplied magic; do not edit by hand.
 */
u32 RTL8188EEPHY_REG_1TARRAY[] = {
	0x800, 0x80040000,
	0x804, 0x00000003,
	0x808, 0x0000FC00,
	0x80C, 0x0000000A,
	0x810, 0x10001331,
	0x814, 0x020C3D10,
	0x818, 0x02200385,
	0x81C, 0x00000000,
	0x820, 0x01000100,
	0x824, 0x00390204,
	0x828, 0x00000000,
	0x82C, 0x00000000,
	0x830, 0x00000000,
	0x834, 0x00000000,
	0x838, 0x00000000,
	0x83C, 0x00000000,
	0x840, 0x00010000,
	0x844, 0x00000000,
	0x848, 0x00000000,
	0x84C, 0x00000000,
	0x850, 0x00000000,
	0x854, 0x00000000,
	0x858, 0x569A11A9,
	0x85C, 0x01000014,
	0x860, 0x66F60110,
	0x864, 0x061F0649,
	0x868, 0x00000000,
	0x86C, 0x27272700,
	0x870, 0x07000760,
	0x874, 0x25004000,
	0x878, 0x00000808,
	0x87C, 0x00000000,
	0x880, 0xB0000C1C,
	0x884, 0x00000001,
	0x888, 0x00000000,
	0x88C, 0xCCC000C0,
	0x890, 0x00000800,
	0x894, 0xFFFFFFFE,
	0x898, 0x40302010,
	0x89C, 0x00706050,
	0x900, 0x00000000,
	0x904, 0x00000023,
	0x908, 0x00000000,
	0x90C, 0x81121111,
	0x910, 0x00000002,
	0x914, 0x00000201,
	0xA00, 0x00D047C8,
	0xA04, 0x80FF000C,
	0xA08, 0x8C838300,
	0xA0C, 0x2E7F120F,
	0xA10, 0x9500BB78,
	0xA14, 0x1114D028,
	0xA18, 0x00881117,
	0xA1C, 0x89140F00,
	0xA20, 0x1A1B0000,
	0xA24, 0x090E1317,
	0xA28, 0x00000204,
	0xA2C, 0x00D30000,
	0xA70, 0x101FBF00,
	0xA74, 0x00000007,
	0xA78, 0x00000900,
	0xA7C, 0x225B0606,
	0xA80, 0x218075B1,
	0xB2C, 0x80000000,
	0xC00, 0x48071D40,
	0xC04, 0x03A05611,
	0xC08, 0x000000E4,
	0xC0C, 0x6C6C6C6C,
	0xC10, 0x08800000,
	0xC14, 0x40000100,
	0xC18, 0x08800000,
	0xC1C, 0x40000100,
	0xC20, 0x00000000,
	0xC24, 0x00000000,
	0xC28, 0x00000000,
	0xC2C, 0x00000000,
	0xC30, 0x69E9AC47,
	0xC34, 0x469652AF,
	0xC38, 0x49795994,
	0xC3C, 0x0A97971C,
	0xC40, 0x1F7C403F,
	0xC44, 0x000100B7,
	0xC48, 0xEC020107,
	0xC4C, 0x007F037F,
	0xC50, 0x69553420,
	0xC54, 0x43BC0094,
	0xC58, 0x00013169,
	0xC5C, 0x00250492,
	0xC60, 0x00000000,
	0xC64, 0x7112848B,
	0xC68, 0x47C00BFF,
	0xC6C, 0x00000036,
	0xC70, 0x2C7F000D,
	0xC74, 0x020610DB,
	0xC78, 0x0000001F,
	0xC7C, 0x00B91612,
	0xC80, 0x390000E4,
	0xC84, 0x20F60000,
	0xC88, 0x40000100,
	0xC8C, 0x20200000,
	0xC90, 0x00091521,
	0xC94, 0x00000000,
	0xC98, 0x00121820,
	0xC9C, 0x00007F7F,
	0xCA0, 0x00000000,
	0xCA4, 0x000300A0,
	0xCA8, 0x00000000,
	0xCAC, 0x00000000,
	0xCB0, 0x00000000,
	0xCB4, 0x00000000,
	0xCB8, 0x00000000,
	0xCBC, 0x28000000,
	0xCC0, 0x00000000,
	0xCC4, 0x00000000,
	0xCC8, 0x00000000,
	0xCCC, 0x00000000,
	0xCD0, 0x00000000,
	0xCD4, 0x00000000,
	0xCD8, 0x64B22427,
	0xCDC, 0x00766932,
	0xCE0, 0x00222222,
	0xCE4, 0x00000000,
	0xCE8, 0x37644302,
	0xCEC, 0x2F97D40C,
	0xD00, 0x00000740,
	0xD04, 0x00020401,
	0xD08, 0x0000907F,
	0xD0C, 0x20010201,
	0xD10, 0xA0633333,
	0xD14, 0x3333BC43,
	0xD18, 0x7A8F5B6F,
	0xD2C, 0xCC979975,
	0xD30, 0x00000000,
	0xD34, 0x80608000,
	0xD38, 0x00000000,
	0xD3C, 0x00127353,
	0xD40, 0x00000000,
	0xD44, 0x00000000,
	0xD48, 0x00000000,
	0xD4C, 0x00000000,
	0xD50, 0x6437140A,
	0xD54, 0x00000000,
	0xD58, 0x00000282,
	0xD5C, 0x30032064,
	0xD60, 0x4653DE68,
	0xD64, 0x04518A3C,
	0xD68, 0x00002101,
	0xD6C, 0x2A201C16,
	0xD70, 0x1812362E,
	0xD74, 0x322C2220,
	0xD78, 0x000E3C24,
	0xE00, 0x2D2D2D2D,
	0xE04, 0x2D2D2D2D,
	0xE08, 0x0390272D,
	0xE10, 0x2D2D2D2D,
	0xE14, 0x2D2D2D2D,
	0xE18, 0x2D2D2D2D,
	0xE1C, 0x2D2D2D2D,
	0xE28, 0x00000000,
	0xE30, 0x1000DC1F,
	0xE34, 0x10008C1F,
	0xE38, 0x02140102,
	0xE3C, 0x681604C2,
	0xE40, 0x01007C00,
	0xE44, 0x01004800,
	0xE48, 0xFB000000,
	0xE4C, 0x000028D1,
	0xE50, 0x1000DC1F,
	0xE54, 0x10008C1F,
	0xE58, 0x02140102,
	0xE5C, 0x28160D05,
	0xE60, 0x00000008,
	0xE68, 0x001B25A4,
	0xE6C, 0x00C00014,
	0xE70, 0x00C00014,
	0xE74, 0x01000014,
	0xE78, 0x01000014,
	0xE7C, 0x01000014,
	0xE80, 0x01000014,
	0xE84, 0x00C00014,
	0xE88, 0x01000014,
	0xE8C, 0x00C00014,
	0xED0, 0x00C00014,
	0xED4, 0x00C00014,
	0xED8, 0x00C00014,
	0xEDC, 0x00000014,
	0xEE0, 0x00000014,
	0xEEC, 0x01C00014,
	0xF14, 0x00000003,
	0xF4C, 0x00000000,
	0xF00, 0x00000300,

};
228
/* Per-group TX power-by-rate table: flat (register offset, bitmask, value)
 * triples, 264 u32 entries total (must match RTL8188EEPHY_REG_ARRAY_PGLEN
 * in table.h).  Only the bits selected by the mask are written to the
 * register.  Vendor-supplied data; do not edit by hand.
 */
u32 RTL8188EEPHY_REG_ARRAY_PG[] = {
	0xE00, 0xFFFFFFFF, 0x06070809,
	0xE04, 0xFFFFFFFF, 0x02020405,
	0xE08, 0x0000FF00, 0x00000006,
	0x86C, 0xFFFFFF00, 0x00020400,
	0xE10, 0xFFFFFFFF, 0x08090A0B,
	0xE14, 0xFFFFFFFF, 0x01030607,
	0xE18, 0xFFFFFFFF, 0x08090A0B,
	0xE1C, 0xFFFFFFFF, 0x01030607,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x02020202,
	0xE04, 0xFFFFFFFF, 0x00020202,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x04040404,
	0xE14, 0xFFFFFFFF, 0x00020404,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x02020202,
	0xE04, 0xFFFFFFFF, 0x00020202,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x04040404,
	0xE14, 0xFFFFFFFF, 0x00020404,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x02020202,
	0xE04, 0xFFFFFFFF, 0x00020202,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x04040404,
	0xE14, 0xFFFFFFFF, 0x00020404,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,
	0xE00, 0xFFFFFFFF, 0x00000000,
	0xE04, 0xFFFFFFFF, 0x00000000,
	0xE08, 0x0000FF00, 0x00000000,
	0x86C, 0xFFFFFF00, 0x00000000,
	0xE10, 0xFFFFFFFF, 0x00000000,
	0xE14, 0xFFFFFFFF, 0x00000000,
	0xE18, 0xFFFFFFFF, 0x00000000,
	0xE1C, 0xFFFFFFFF, 0x00000000,

};
320
/* RF path-A initialization table: flat (RF register offset, value) pairs,
 * 190 u32 entries total (must match RTL8188EE_RADIOA_1TARRAYLEN in table.h).
 * 0xFFE entries are delay markers consumed by the table loader — TODO
 * confirm against the rtl8188ee phy loader.  Vendor-supplied data.
 */
u32 RTL8188EE_RADIOA_1TARRAY[] = {
	0x000, 0x00030000,
	0x008, 0x00084000,
	0x018, 0x00000407,
	0x019, 0x00000012,
	0x01E, 0x00080009,
	0x01F, 0x00000880,
	0x02F, 0x0001A060,
	0x03F, 0x00000000,
	0x042, 0x000060C0,
	0x057, 0x000D0000,
	0x058, 0x000BE180,
	0x067, 0x00001552,
	0x083, 0x00000000,
	0x0B0, 0x000FF8FC,
	0x0B1, 0x00054400,
	0x0B2, 0x000CCC19,
	0x0B4, 0x00043003,
	0x0B6, 0x0004953E,
	0x0B7, 0x0001C718,
	0x0B8, 0x000060FF,
	0x0B9, 0x00080001,
	0x0BA, 0x00040000,
	0x0BB, 0x00000400,
	0x0BF, 0x000C0000,
	0x0C2, 0x00002400,
	0x0C3, 0x00000009,
	0x0C4, 0x00040C91,
	0x0C5, 0x00099999,
	0x0C6, 0x000000A3,
	0x0C7, 0x00088820,
	0x0C8, 0x00076C06,
	0x0C9, 0x00000000,
	0x0CA, 0x00080000,
	0x0DF, 0x00000180,
	0x0EF, 0x000001A0,
	0x051, 0x0006B27D,
	0x052, 0x0007E49D,
	0x053, 0x00000073,
	0x056, 0x00051FF3,
	0x035, 0x00000086,
	0x035, 0x00000186,
	0x035, 0x00000286,
	0x036, 0x00001C25,
	0x036, 0x00009C25,
	0x036, 0x00011C25,
	0x036, 0x00019C25,
	0x0B6, 0x00048538,
	0x018, 0x00000C07,
	0x05A, 0x0004BD00,
	0x019, 0x000739D0,
	0x034, 0x0000ADF3,
	0x034, 0x00009DF0,
	0x034, 0x00008DED,
	0x034, 0x00007DEA,
	0x034, 0x00006DE7,
	0x034, 0x000054EE,
	0x034, 0x000044EB,
	0x034, 0x000034E8,
	0x034, 0x0000246B,
	0x034, 0x00001468,
	0x034, 0x0000006D,
	0x000, 0x00030159,
	0x084, 0x00068200,
	0x086, 0x000000CE,
	0x087, 0x00048A00,
	0x08E, 0x00065540,
	0x08F, 0x00088000,
	0x0EF, 0x000020A0,
	0x03B, 0x000F02B0,
	0x03B, 0x000EF7B0,
	0x03B, 0x000D4FB0,
	0x03B, 0x000CF060,
	0x03B, 0x000B0090,
	0x03B, 0x000A0080,
	0x03B, 0x00090080,
	0x03B, 0x0008F780,
	0x03B, 0x000722B0,
	0x03B, 0x0006F7B0,
	0x03B, 0x00054FB0,
	0x03B, 0x0004F060,
	0x03B, 0x00030090,
	0x03B, 0x00020080,
	0x03B, 0x00010080,
	0x03B, 0x0000F780,
	0x0EF, 0x000000A0,
	0x000, 0x00010159,
	0x018, 0x0000F407,
	0xFFE, 0x00000000,
	0xFFE, 0x00000000,
	0x01F, 0x00080003,
	0xFFE, 0x00000000,
	0xFFE, 0x00000000,
	0x01E, 0x00000001,
	0x01F, 0x00080000,
	0x000, 0x00033E60,

};
419
/* MAC register initialization table: flat (register offset, byte value)
 * pairs, 180 u32 entries total (must match RTL8188EEMAC_1T_ARRAYLEN in
 * table.h).  Vendor-supplied data; do not edit by hand.
 */
u32 RTL8188EEMAC_1T_ARRAY[] = {
	0x026, 0x00000041,
	0x027, 0x00000035,
	0x428, 0x0000000A,
	0x429, 0x00000010,
	0x430, 0x00000000,
	0x431, 0x00000001,
	0x432, 0x00000002,
	0x433, 0x00000004,
	0x434, 0x00000005,
	0x435, 0x00000006,
	0x436, 0x00000007,
	0x437, 0x00000008,
	0x438, 0x00000000,
	0x439, 0x00000000,
	0x43A, 0x00000001,
	0x43B, 0x00000002,
	0x43C, 0x00000004,
	0x43D, 0x00000005,
	0x43E, 0x00000006,
	0x43F, 0x00000007,
	0x440, 0x0000005D,
	0x441, 0x00000001,
	0x442, 0x00000000,
	0x444, 0x00000015,
	0x445, 0x000000F0,
	0x446, 0x0000000F,
	0x447, 0x00000000,
	0x458, 0x00000041,
	0x459, 0x000000A8,
	0x45A, 0x00000072,
	0x45B, 0x000000B9,
	0x460, 0x00000066,
	0x461, 0x00000066,
	0x480, 0x00000008,
	0x4C8, 0x000000FF,
	0x4C9, 0x00000008,
	0x4CC, 0x000000FF,
	0x4CD, 0x000000FF,
	0x4CE, 0x00000001,
	0x4D3, 0x00000001,
	0x500, 0x00000026,
	0x501, 0x000000A2,
	0x502, 0x0000002F,
	0x503, 0x00000000,
	0x504, 0x00000028,
	0x505, 0x000000A3,
	0x506, 0x0000005E,
	0x507, 0x00000000,
	0x508, 0x0000002B,
	0x509, 0x000000A4,
	0x50A, 0x0000005E,
	0x50B, 0x00000000,
	0x50C, 0x0000004F,
	0x50D, 0x000000A4,
	0x50E, 0x00000000,
	0x50F, 0x00000000,
	0x512, 0x0000001C,
	0x514, 0x0000000A,
	0x516, 0x0000000A,
	0x525, 0x0000004F,
	0x550, 0x00000010,
	0x551, 0x00000010,
	0x559, 0x00000002,
	0x55D, 0x000000FF,
	0x605, 0x00000030,
	0x608, 0x0000000E,
	0x609, 0x0000002A,
	0x620, 0x000000FF,
	0x621, 0x000000FF,
	0x622, 0x000000FF,
	0x623, 0x000000FF,
	0x624, 0x000000FF,
	0x625, 0x000000FF,
	0x626, 0x000000FF,
	0x627, 0x000000FF,
	0x652, 0x00000020,
	0x63C, 0x0000000A,
	0x63D, 0x0000000A,
	0x63E, 0x0000000E,
	0x63F, 0x0000000E,
	0x640, 0x00000040,
	0x66E, 0x00000005,
	0x700, 0x00000021,
	0x701, 0x00000043,
	0x702, 0x00000065,
	0x703, 0x00000087,
	0x708, 0x00000021,
	0x709, 0x00000043,
	0x70A, 0x00000065,
	0x70B, 0x00000087,

};
513
/* AGC (automatic gain control) table: flat (register offset, value) pairs,
 * 256 u32 entries total (must match RTL8188EEAGCTAB_1TARRAYLEN in table.h).
 * All writes target register 0xC78; each value encodes a gain setting and
 * table index in its bytes.  Vendor-supplied data; do not edit by hand.
 */
u32 RTL8188EEAGCTAB_1TARRAY[] = {
	0xC78, 0xFB000001,
	0xC78, 0xFB010001,
	0xC78, 0xFB020001,
	0xC78, 0xFB030001,
	0xC78, 0xFB040001,
	0xC78, 0xFB050001,
	0xC78, 0xFA060001,
	0xC78, 0xF9070001,
	0xC78, 0xF8080001,
	0xC78, 0xF7090001,
	0xC78, 0xF60A0001,
	0xC78, 0xF50B0001,
	0xC78, 0xF40C0001,
	0xC78, 0xF30D0001,
	0xC78, 0xF20E0001,
	0xC78, 0xF10F0001,
	0xC78, 0xF0100001,
	0xC78, 0xEF110001,
	0xC78, 0xEE120001,
	0xC78, 0xED130001,
	0xC78, 0xEC140001,
	0xC78, 0xEB150001,
	0xC78, 0xEA160001,
	0xC78, 0xE9170001,
	0xC78, 0xE8180001,
	0xC78, 0xE7190001,
	0xC78, 0xE61A0001,
	0xC78, 0xE51B0001,
	0xC78, 0xE41C0001,
	0xC78, 0xE31D0001,
	0xC78, 0xE21E0001,
	0xC78, 0xE11F0001,
	0xC78, 0x8A200001,
	0xC78, 0x89210001,
	0xC78, 0x88220001,
	0xC78, 0x87230001,
	0xC78, 0x86240001,
	0xC78, 0x85250001,
	0xC78, 0x84260001,
	0xC78, 0x83270001,
	0xC78, 0x82280001,
	0xC78, 0x6B290001,
	0xC78, 0x6A2A0001,
	0xC78, 0x692B0001,
	0xC78, 0x682C0001,
	0xC78, 0x672D0001,
	0xC78, 0x662E0001,
	0xC78, 0x652F0001,
	0xC78, 0x64300001,
	0xC78, 0x63310001,
	0xC78, 0x62320001,
	0xC78, 0x61330001,
	0xC78, 0x46340001,
	0xC78, 0x45350001,
	0xC78, 0x44360001,
	0xC78, 0x43370001,
	0xC78, 0x42380001,
	0xC78, 0x41390001,
	0xC78, 0x403A0001,
	0xC78, 0x403B0001,
	0xC78, 0x403C0001,
	0xC78, 0x403D0001,
	0xC78, 0x403E0001,
	0xC78, 0x403F0001,
	0xC78, 0xFB400001,
	0xC78, 0xFB410001,
	0xC78, 0xFB420001,
	0xC78, 0xFB430001,
	0xC78, 0xFB440001,
	0xC78, 0xFB450001,
	0xC78, 0xFB460001,
	0xC78, 0xFB470001,
	0xC78, 0xFB480001,
	0xC78, 0xFA490001,
	0xC78, 0xF94A0001,
	0xC78, 0xF84B0001,
	0xC78, 0xF74C0001,
	0xC78, 0xF64D0001,
	0xC78, 0xF54E0001,
	0xC78, 0xF44F0001,
	0xC78, 0xF3500001,
	0xC78, 0xF2510001,
	0xC78, 0xF1520001,
	0xC78, 0xF0530001,
	0xC78, 0xEF540001,
	0xC78, 0xEE550001,
	0xC78, 0xED560001,
	0xC78, 0xEC570001,
	0xC78, 0xEB580001,
	0xC78, 0xEA590001,
	0xC78, 0xE95A0001,
	0xC78, 0xE85B0001,
	0xC78, 0xE75C0001,
	0xC78, 0xE65D0001,
	0xC78, 0xE55E0001,
	0xC78, 0xE45F0001,
	0xC78, 0xE3600001,
	0xC78, 0xE2610001,
	0xC78, 0xC3620001,
	0xC78, 0xC2630001,
	0xC78, 0xC1640001,
	0xC78, 0x8B650001,
	0xC78, 0x8A660001,
	0xC78, 0x89670001,
	0xC78, 0x88680001,
	0xC78, 0x87690001,
	0xC78, 0x866A0001,
	0xC78, 0x856B0001,
	0xC78, 0x846C0001,
	0xC78, 0x676D0001,
	0xC78, 0x666E0001,
	0xC78, 0x656F0001,
	0xC78, 0x64700001,
	0xC78, 0x63710001,
	0xC78, 0x62720001,
	0xC78, 0x61730001,
	0xC78, 0x60740001,
	0xC78, 0x46750001,
	0xC78, 0x45760001,
	0xC78, 0x44770001,
	0xC78, 0x43780001,
	0xC78, 0x42790001,
	0xC78, 0x417A0001,
	0xC78, 0x407B0001,
	0xC78, 0x407C0001,
	0xC78, 0x407D0001,
	0xC78, 0x407E0001,
	0xC78, 0x407F0001,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/rtlwifi/rtl8188ee/table.h
new file mode 100644
index 000000000000..c1218e835129
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/table.h
@@ -0,0 +1,47 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
32#ifndef __RTL92CE_TABLE__H_
33#define __RTL92CE_TABLE__H_
34
35#include <linux/types.h>
36#define RTL8188EEPHY_REG_1TARRAYLEN 382
37extern u32 RTL8188EEPHY_REG_1TARRAY[];
38#define RTL8188EEPHY_REG_ARRAY_PGLEN 264
39extern u32 RTL8188EEPHY_REG_ARRAY_PG[];
40#define RTL8188EE_RADIOA_1TARRAYLEN 190
41extern u32 RTL8188EE_RADIOA_1TARRAY[];
42#define RTL8188EEMAC_1T_ARRAYLEN 180
43extern u32 RTL8188EEMAC_1T_ARRAY[];
44#define RTL8188EEAGCTAB_1TARRAYLEN 256
45extern u32 RTL8188EEAGCTAB_1TARRAY[];
46
47#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
new file mode 100644
index 000000000000..a8871d66d56a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -0,0 +1,817 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "../stats.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "trx.h"
38#include "led.h"
39#include "dm.h"
40
41static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
42{
43 __le16 fc = rtl_get_fc(skb);
44
45 if (unlikely(ieee80211_is_beacon(fc)))
46 return QSLT_BEACON;
47 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
48 return QSLT_MGNT;
49
50 return skb->priority;
51}
52
/* Parse the PHY status report the hardware appends to a received frame and
 * fill the signal-strength/quality fields of @pstatus.  CCK and OFDM/HT
 * frames carry different report layouts, hence the two big branches.
 * Finally latches the RX antenna-selection bits for antenna diversity.
 * NOTE(review): @pdesc is unused in this function — presumably kept for
 * signature parity with other rtlwifi chips; confirm.
 */
static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
			struct rtl_stats *pstatus, u8 *pdesc,
			struct rx_fwinfo_88e *p_drvinfo,
			bool bpacket_match_bssid,
			bool bpacket_toself, bool packet_beacon)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
	struct phy_sts_cck_8192s_t *cck_buf;
	struct phy_status_rpt *phystrpt = (struct phy_status_rpt *)p_drvinfo;
	struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
	char rx_pwr_all = 0, rx_pwr[4];
	u8 rf_rx_num = 0, evm, pwdb_all;
	u8 i, max_spatial_stream;
	u32 rssi, total_rssi = 0;
	bool is_cck = pstatus->is_cck;
	/* NOTE(review): "lan_idx" presumably means LNA (low-noise amplifier)
	 * index, decoded from the CCK AGC report below — confirm.
	 */
	u8 lan_idx, vga_idx;

	/* Record it for next packet processing */
	pstatus->packet_matchbssid = bpacket_match_bssid;
	pstatus->packet_toself = bpacket_toself;
	pstatus->packet_beacon = packet_beacon;
	pstatus->rx_mimo_sig_qual[0] = -1;
	pstatus->rx_mimo_sig_qual[1] = -1;

	if (is_cck) {
		u8 cck_hipwr;
		u8 cck_agc_rpt;
		/* CCK Driver info Structure is not the same as OFDM packet. */
		cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
		cck_agc_rpt = cck_buf->cck_agc_rpt;

		/* (1)Hardware does not provide RSSI for CCK
		 * (2)PWDB, Average PWDB cacluated by
		 * hardware (for rate adaptive)
		 */
		if (ppsc->rfpwr_state == ERFON)
			cck_hipwr = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2,
						  BIT(9));
		else
			cck_hipwr = false;

		/* AGC report: bits 7:5 = amplifier index, bits 4:0 = VGA. */
		lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
		vga_idx = (cck_agc_rpt & 0x1f);
		/* Convert (amplifier, VGA) indices to dBm per vendor tables. */
		switch (lan_idx) {
		case 7:
			if (vga_idx <= 27)
				rx_pwr_all = -100 + 2 * (27 - vga_idx);
			else
				rx_pwr_all = -100;
			break;
		case 6:
			rx_pwr_all = -48 + 2 * (2 - vga_idx); /*VGA_idx = 2~0*/
			break;
		case 5:
			rx_pwr_all = -42 + 2 * (7 - vga_idx); /*VGA_idx = 7~5*/
			break;
		case 4:
			rx_pwr_all = -36 + 2 * (7 - vga_idx); /*VGA_idx = 7~4*/
			break;
		case 3:
			rx_pwr_all = -24 + 2 * (7 - vga_idx); /*VGA_idx = 7~0*/
			break;
		case 2:
			if (cck_hipwr)
				rx_pwr_all = -12 + 2 * (5 - vga_idx);
			else
				rx_pwr_all = -6 + 2 * (5 - vga_idx);
			break;
		case 1:
			rx_pwr_all = 8 - 2 * vga_idx;
			break;
		case 0:
			rx_pwr_all = 14 - 2 * vga_idx;
			break;
		default:
			break;
		}
		rx_pwr_all += 6;
		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
		/* CCK gain is smaller than OFDM/MCS gain,
		 * so we add gain diff by experiences,
		 * the val is 6
		 */
		pwdb_all += 6;
		if (pwdb_all > 100)
			pwdb_all = 100;
		/* modify the offset to make the same
		 * gain index with OFDM.
		 */
		if (pwdb_all > 34 && pwdb_all <= 42)
			pwdb_all -= 2;
		else if (pwdb_all > 26 && pwdb_all <= 34)
			pwdb_all -= 6;
		else if (pwdb_all > 14 && pwdb_all <= 26)
			pwdb_all -= 8;
		else if (pwdb_all > 4 && pwdb_all <= 14)
			pwdb_all -= 4;
		/* In low-power CCK mode stretch the upper end of the scale. */
		if (cck_hipwr == false) {
			if (pwdb_all >= 80)
				pwdb_all = ((pwdb_all - 80)<<1) +
					   ((pwdb_all - 80)>>1) + 80;
			else if ((pwdb_all <= 78) && (pwdb_all >= 20))
				pwdb_all += 3;
			if (pwdb_all > 100)
				pwdb_all = 100;
		}

		pstatus->rx_pwdb_all = pwdb_all;
		pstatus->recvsignalpower = rx_pwr_all;

		/* (3) Get Signal Quality (EVM) */
		if (bpacket_match_bssid) {
			u8 sq;

			if (pstatus->rx_pwdb_all > 40) {
				sq = 100;
			} else {
				sq = cck_buf->sq_rpt;
				if (sq > 64)
					sq = 0;
				else if (sq < 20)
					sq = 100;
				else
					sq = ((64 - sq) * 100) / 44;
			}

			pstatus->signalquality = sq;
			pstatus->rx_mimo_sig_qual[0] = sq;
			pstatus->rx_mimo_sig_qual[1] = -1;
		}
	} else {
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;

		/* (1)Get RSSI for HT rate */
		for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
			/* we will judge RF RX path now. */
			if (rtlpriv->dm.rfpath_rxenable[i])
				rf_rx_num++;

			rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)-110;

			/* Translate DBM to percentage. */
			rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
			total_rssi += rssi;

			/* Get Rx snr value in DB */
			rtlpriv->stats.rx_snr_db[i] = p_drvinfo->rxsnr[i] / 2;

			/* Record Signal Strength for next packet */
			if (bpacket_match_bssid)
				pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
		}

		/* (2)PWDB, Average PWDB cacluated by
		 * hardware (for rate adaptive)
		 */
		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;

		pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
		pstatus->rx_pwdb_all = pwdb_all;
		pstatus->rxpower = rx_pwr_all;
		pstatus->recvsignalpower = rx_pwr_all;

		/* (3)EVM of HT rate */
		if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 &&
		    pstatus->rate <= DESC92C_RATEMCS15)
			max_spatial_stream = 2;
		else
			max_spatial_stream = 1;

		for (i = 0; i < max_spatial_stream; i++) {
			evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);

			if (bpacket_match_bssid) {
				/* Fill value in RFD, Get the first
				 * spatial stream only
				 */
				if (i == 0)
					pstatus->signalquality = evm & 0xff;
				pstatus->rx_mimo_sig_qual[i] = evm & 0xff;
			}
		}
	}

	/* UI BSS List signal strength(in percentage),
	 * make it good looking, from 0~100.
	 */
	if (is_cck)
		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
								 pwdb_all));
	else if (rf_rx_num != 0)
		/* Note: the division inside the argument also stores the
		 * per-path average back into total_rssi (intentional).
		 */
		pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
						total_rssi /= rf_rx_num));
	/*HW antenna diversity*/
	rtldm->fat_table.antsel_rx_keep_0 = phystrpt->ant_sel;
	rtldm->fat_table.antsel_rx_keep_1 = phystrpt->ant_sel_b;
	rtldm->fat_table.antsel_rx_keep_2 = phystrpt->antsel_rx_keep_2;
}
253
254static void _rtl88ee_smart_antenna(struct ieee80211_hw *hw,
255 struct rtl_stats *pstatus)
256{
257 struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
258 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
259 u8 ant_mux;
260 struct fast_ant_training *pfat = &(rtldm->fat_table);
261
262 if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) {
263 if (pfat->fat_state == FAT_TRAINING_STATE) {
264 if (pstatus->packet_toself) {
265 ant_mux = (pfat->antsel_rx_keep_2 << 2) |
266 (pfat->antsel_rx_keep_1 << 1) |
267 pfat->antsel_rx_keep_0;
268 pfat->ant_sum[ant_mux] += pstatus->rx_pwdb_all;
269 pfat->ant_cnt[ant_mux]++;
270 }
271 }
272 } else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
273 (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) {
274 if (pstatus->packet_toself || pstatus->packet_matchbssid) {
275 ant_mux = (pfat->antsel_rx_keep_2 << 2) |
276 (pfat->antsel_rx_keep_1 << 1) |
277 pfat->antsel_rx_keep_0;
278 rtl88e_dm_ant_sel_statistics(hw, ant_mux, 0,
279 pstatus->rx_pwdb_all);
280 }
281 }
282}
283
284static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
285 struct sk_buff *skb, struct rtl_stats *pstatus,
286 u8 *pdesc, struct rx_fwinfo_88e *p_drvinfo)
287{
288 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
289 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
290 struct ieee80211_hdr *hdr;
291 u8 *tmp_buf;
292 u8 *praddr;
293 u8 *psaddr;
294 __le16 fc;
295 u16 type, ufc;
296 bool match_bssid, packet_toself, packet_beacon, addr;
297
298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
299
300 hdr = (struct ieee80211_hdr *)tmp_buf;
301 fc = hdr->frame_control;
302 ufc = le16_to_cpu(fc);
303 type = WLAN_FC_GET_TYPE(fc);
304 praddr = hdr->addr1;
305 psaddr = ieee80211_get_SA(hdr);
306 memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
307
308 addr = (!compare_ether_addr(mac->bssid, (ufc & IEEE80211_FCTL_TODS) ?
309 hdr->addr1 : (ufc & IEEE80211_FCTL_FROMDS) ?
310 hdr->addr2 : hdr->addr3));
311 match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) &&
312 (!pstatus->crc) && (!pstatus->icv)) && addr;
313
314 addr = (!compare_ether_addr(praddr, rtlefuse->dev_addr));
315 packet_toself = match_bssid && addr;
316
317 if (ieee80211_is_beacon(fc))
318 packet_beacon = true;
319
320 _rtl88ee_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
321 match_bssid, packet_toself, packet_beacon);
322 _rtl88ee_smart_antenna(hw, pstatus);
323 rtl_process_phyinfo(hw, tmp_buf, pstatus);
324}
325
326static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress)
327{
328 u32 dwtmp = 0;
329
330 memset(virtualaddress, 0, 8);
331
332 SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
333 if (ptcb_desc->empkt_num == 1) {
334 dwtmp = ptcb_desc->empkt_len[0];
335 } else {
336 dwtmp = ptcb_desc->empkt_len[0];
337 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
338 dwtmp += ptcb_desc->empkt_len[1];
339 }
340 SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
341
342 if (ptcb_desc->empkt_num <= 3) {
343 dwtmp = ptcb_desc->empkt_len[2];
344 } else {
345 dwtmp = ptcb_desc->empkt_len[2];
346 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
347 dwtmp += ptcb_desc->empkt_len[3];
348 }
349 SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
350 if (ptcb_desc->empkt_num <= 5) {
351 dwtmp = ptcb_desc->empkt_len[4];
352 } else {
353 dwtmp = ptcb_desc->empkt_len[4];
354 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
355 dwtmp += ptcb_desc->empkt_len[5];
356 }
357 SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
358 SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
359 if (ptcb_desc->empkt_num <= 7) {
360 dwtmp = ptcb_desc->empkt_len[6];
361 } else {
362 dwtmp = ptcb_desc->empkt_len[6];
363 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
364 dwtmp += ptcb_desc->empkt_len[7];
365 }
366 SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
367 if (ptcb_desc->empkt_num <= 9) {
368 dwtmp = ptcb_desc->empkt_len[8];
369 } else {
370 dwtmp = ptcb_desc->empkt_len[8];
371 dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
372 dwtmp += ptcb_desc->empkt_len[9];
373 }
374 SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
375}
376
377bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
378 struct rtl_stats *status,
379 struct ieee80211_rx_status *rx_status,
380 u8 *pdesc, struct sk_buff *skb)
381{
382 struct rtl_priv *rtlpriv = rtl_priv(hw);
383 struct rx_fwinfo_88e *p_drvinfo;
384 struct ieee80211_hdr *hdr;
385
386 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
387 status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
388 if (status->packet_report_type == TX_REPORT2)
389 status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc);
390 else
391 status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
392 status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
393 RX_DRV_INFO_SIZE_UNIT;
394 status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
395 status->icv = (u16) GET_RX_DESC_ICV(pdesc);
396 status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
397 status->hwerror = (status->crc | status->icv);
398 status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
399 status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
400 status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
401 status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
402 status->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) &&
403 (GET_RX_DESC_FAGGR(pdesc) == 1));
404 if (status->packet_report_type == NORMAL_RX)
405 status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
406 status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
407 status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
408
409 status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
410
411 status->macid = GET_RX_DESC_MACID(pdesc);
412 if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
413 status->wake_match = BIT(2);
414 else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
415 status->wake_match = BIT(1);
416 else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
417 status->wake_match = BIT(0);
418 else
419 status->wake_match = 0;
420 if (status->wake_match)
421 RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
422 "Get Wakeup Packet!! WakeMatch =%d\n",
423 status->wake_match);
424 rx_status->freq = hw->conf.chandef.chan->center_freq;
425 rx_status->band = hw->conf.chandef.chan->band;
426
427 if (status->crc)
428 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
429
430 if (status->rx_is40Mhzpacket)
431 rx_status->flag |= RX_FLAG_40MHZ;
432
433 if (status->is_ht)
434 rx_status->flag |= RX_FLAG_HT;
435
436 rx_status->flag |= RX_FLAG_MACTIME_START;
437
438 /* hw will set status->decrypted true, if it finds the
439 * frame is open data frame or mgmt frame.
440 * So hw will not decryption robust managment frame
441 * for IEEE80211w but still set status->decrypted
442 * true, so here we should set it back to undecrypted
443 * for IEEE80211w frame, and mac80211 sw will help
444 * to decrypt it
445 */
446 if (status->decrypted) {
447 hdr = (struct ieee80211_hdr *)(skb->data +
448 status->rx_drvinfo_size + status->rx_bufshift);
449
450 if (!hdr) {
451 /* During testing, hdr was NULL */
452 return false;
453 }
454 if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
455 (ieee80211_has_protected(hdr->frame_control)))
456 rx_status->flag &= ~RX_FLAG_DECRYPTED;
457 else
458 rx_status->flag |= RX_FLAG_DECRYPTED;
459 }
460
461 /* rate_idx: index of data rate into band's
462 * supported rates or MCS index if HT rates
463 * are use (RX_FLAG_HT)
464 * Notice: this is diff with windows define
465 */
466 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
467 status->rate, false);
468
469 rx_status->mactime = status->timestamp_low;
470 if (phystatus == true) {
471 p_drvinfo = (struct rx_fwinfo_88e *)(skb->data +
472 status->rx_bufshift);
473
474 _rtl88ee_translate_rx_signal_stuff(hw, skb, status, pdesc,
475 p_drvinfo);
476 }
477
478 /*rx_status->qual = status->signal; */
479 rx_status->signal = status->recvsignalpower + 10;
480 /*rx_status->noise = -status->noise; */
481 if (status->packet_report_type == TX_REPORT2) {
482 status->macid_valid_entry[0] =
483 GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
484 status->macid_valid_entry[1] =
485 GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
486 }
487 return true;
488}
489
490void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
491 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
492 struct ieee80211_tx_info *info,
493 struct ieee80211_sta *sta,
494 struct sk_buff *skb,
495 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
496{
497 struct rtl_priv *rtlpriv = rtl_priv(hw);
498 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
499 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
500 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
501 u8 *pdesc = (u8 *)pdesc_tx;
502 u16 seq_number;
503 __le16 fc = hdr->frame_control;
504 unsigned int buf_len = 0;
505 unsigned int skb_len = skb->len;
506 u8 fw_qsel = _rtl88ee_map_hwqueue_to_fwqueue(skb, hw_queue);
507 bool firstseg = ((hdr->seq_ctrl &
508 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
509 bool lastseg = ((hdr->frame_control &
510 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
511 dma_addr_t mapping;
512 u8 bw_40 = 0;
513 u8 short_gi = 0;
514
515 if (mac->opmode == NL80211_IFTYPE_STATION) {
516 bw_40 = mac->bw_40;
517 } else if (mac->opmode == NL80211_IFTYPE_AP ||
518 mac->opmode == NL80211_IFTYPE_ADHOC) {
519 if (sta)
520 bw_40 = sta->ht_cap.cap &
521 IEEE80211_HT_CAP_SUP_WIDTH_20_40;
522 }
523 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
524 rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
525 /* reserve 8 byte for AMPDU early mode */
526 if (rtlhal->earlymode_enable) {
527 skb_push(skb, EM_HDR_LEN);
528 memset(skb->data, 0, EM_HDR_LEN);
529 }
530 buf_len = skb->len;
531 mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
532 PCI_DMA_TODEVICE);
533 if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
534 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
535 "DMA mapping error");
536 return;
537 }
538 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e));
539 if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
540 firstseg = true;
541 lastseg = true;
542 }
543 if (firstseg) {
544 if (rtlhal->earlymode_enable) {
545 SET_TX_DESC_PKT_OFFSET(pdesc, 1);
546 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
547 EM_HDR_LEN);
548 if (ptcb_desc->empkt_num) {
549 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
550 "Insert 8 byte.pTcb->EMPktNum:%d\n",
551 ptcb_desc->empkt_num);
552 insert_em(ptcb_desc, (u8 *)(skb->data));
553 }
554 } else {
555 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
556 }
557
558 ptcb_desc->use_driver_rate = true;
559 SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
560 if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
561 short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
562 else
563 short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
564 SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
565
566 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
567 SET_TX_DESC_AGG_ENABLE(pdesc, 1);
568 SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
569 }
570 SET_TX_DESC_SEQ(pdesc, seq_number);
571 SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
572 !ptcb_desc->cts_enable) ? 1 : 0));
573 SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
574 SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
575 SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
576
577 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
578 SET_TX_DESC_RTS_BW(pdesc, 0);
579 SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
580 SET_TX_DESC_RTS_SHORT(pdesc,
581 ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
582 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
583 (ptcb_desc->rts_use_shortgi ? 1 : 0)));
584
585 if (ptcb_desc->btx_enable_sw_calc_duration)
586 SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
587
588 if (bw_40) {
589 if (ptcb_desc->packet_bw) {
590 SET_TX_DESC_DATA_BW(pdesc, 1);
591 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
592 } else {
593 SET_TX_DESC_DATA_BW(pdesc, 0);
594 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
595 mac->cur_40_prime_sc);
596 }
597 } else {
598 SET_TX_DESC_DATA_BW(pdesc, 0);
599 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
600 }
601
602 SET_TX_DESC_LINIP(pdesc, 0);
603 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
604 if (sta) {
605 u8 ampdu_density = sta->ht_cap.ampdu_density;
606 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
607 }
608 if (info->control.hw_key) {
609 struct ieee80211_key_conf *keyconf;
610 keyconf = info->control.hw_key;
611 switch (keyconf->cipher) {
612 case WLAN_CIPHER_SUITE_WEP40:
613 case WLAN_CIPHER_SUITE_WEP104:
614 case WLAN_CIPHER_SUITE_TKIP:
615 SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
616 break;
617 case WLAN_CIPHER_SUITE_CCMP:
618 SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
619 break;
620 default:
621 SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
622 break;
623 }
624 }
625
626 SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
627 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
628 SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
629 SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
630 1 : 0);
631 SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
632
633 /* Set TxRate and RTSRate in TxDesc */
634 /* This prevent Tx initial rate of new-coming packets */
635 /* from being overwritten by retried packet rate.*/
636 if (!ptcb_desc->use_driver_rate) {
637 /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
638 /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
639 }
640 if (ieee80211_is_data_qos(fc)) {
641 if (mac->rdg_en) {
642 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
643 "Enable RDG function.\n");
644 SET_TX_DESC_RDG_ENABLE(pdesc, 1);
645 SET_TX_DESC_HTC(pdesc, 1);
646 }
647 }
648 }
649
650 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
651 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
652 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
653 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
654 if (rtlpriv->dm.useramask) {
655 SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
656 SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
657 } else {
658 SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
659 SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
660 }
661 if (ieee80211_is_data_qos(fc))
662 SET_TX_DESC_QOS(pdesc, 1);
663
664 if (!ieee80211_is_data_qos(fc))
665 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
666 SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
667 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
668 is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
669 SET_TX_DESC_BMC(pdesc, 1);
670
671 rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
672 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
673}
674
675void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
676 u8 *pdesc, bool firstseg,
677 bool lastseg, struct sk_buff *skb)
678{
679 struct rtl_priv *rtlpriv = rtl_priv(hw);
680 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
681 u8 fw_queue = QSLT_BEACON;
682
683 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
684 skb->data, skb->len,
685 PCI_DMA_TODEVICE);
686
687 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
688 __le16 fc = hdr->frame_control;
689
690 if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
691 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
692 "DMA mapping error");
693 return;
694 }
695 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
696
697 if (firstseg)
698 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
699
700 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
701
702 SET_TX_DESC_SEQ(pdesc, 0);
703
704 SET_TX_DESC_LINIP(pdesc, 0);
705
706 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
707
708 SET_TX_DESC_FIRST_SEG(pdesc, 1);
709 SET_TX_DESC_LAST_SEG(pdesc, 1);
710
711 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
712
713 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
714
715 SET_TX_DESC_RATE_ID(pdesc, 7);
716 SET_TX_DESC_MACID(pdesc, 0);
717
718 SET_TX_DESC_OWN(pdesc, 1);
719
720 SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
721
722 SET_TX_DESC_FIRST_SEG(pdesc, 1);
723 SET_TX_DESC_LAST_SEG(pdesc, 1);
724
725 SET_TX_DESC_OFFSET(pdesc, 0x20);
726
727 SET_TX_DESC_USE_RATE(pdesc, 1);
728
729 if (!ieee80211_is_data_qos(fc))
730 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
731
732 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
733 "H2C Tx Cmd Content\n",
734 pdesc, TX_DESC_SIZE);
735}
736
737void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
738{
739 if (istx == true) {
740 switch (desc_name) {
741 case HW_DESC_OWN:
742 SET_TX_DESC_OWN(pdesc, 1);
743 break;
744 case HW_DESC_TX_NEXTDESC_ADDR:
745 SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
746 break;
747 default:
748 RT_ASSERT(false, "ERR txdesc :%d not processed\n",
749 desc_name);
750 break;
751 }
752 } else {
753 switch (desc_name) {
754 case HW_DESC_RXOWN:
755 SET_RX_DESC_OWN(pdesc, 1);
756 break;
757 case HW_DESC_RXBUFF_ADDR:
758 SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
759 break;
760 case HW_DESC_RXPKT_LEN:
761 SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
762 break;
763 case HW_DESC_RXERO:
764 SET_RX_DESC_EOR(pdesc, 1);
765 break;
766 default:
767 RT_ASSERT(false, "ERR rxdesc :%d not processed\n",
768 desc_name);
769 break;
770 }
771 }
772}
773
774u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
775{
776 u32 ret = 0;
777
778 if (istx == true) {
779 switch (desc_name) {
780 case HW_DESC_OWN:
781 ret = GET_TX_DESC_OWN(pdesc);
782 break;
783 case HW_DESC_TXBUFF_ADDR:
784 ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
785 break;
786 default:
787 RT_ASSERT(false, "ERR txdesc :%d not processed\n",
788 desc_name);
789 break;
790 }
791 } else {
792 switch (desc_name) {
793 case HW_DESC_OWN:
794 ret = GET_RX_DESC_OWN(pdesc);
795 break;
796 case HW_DESC_RXPKT_LEN:
797 ret = GET_RX_DESC_PKT_LEN(pdesc);
798 break;
799 default:
800 RT_ASSERT(false, "ERR rxdesc :%d not processed\n",
801 desc_name);
802 break;
803 }
804 }
805 return ret;
806}
807
808void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
809{
810 struct rtl_priv *rtlpriv = rtl_priv(hw);
811 if (hw_queue == BEACON_QUEUE) {
812 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
813 } else {
814 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
815 BIT(0) << (hw_queue));
816 }
817}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
new file mode 100644
index 000000000000..d3a02e73f53a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
@@ -0,0 +1,795 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2013 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_TRX_H__
31#define __RTL92CE_TRX_H__
32
33#define TX_DESC_SIZE 64
34#define TX_DESC_AGGR_SUBFRAME_SIZE 32
35
36#define RX_DESC_SIZE 32
37#define RX_DRV_INFO_SIZE_UNIT 8
38
39#define TX_DESC_NEXT_DESC_OFFSET 40
40#define USB_HWDESC_HEADER_LEN 32
41#define CRCLENGTH 4
42
43#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
45#define SET_TX_DESC_OFFSET(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
47#define SET_TX_DESC_BMC(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
49#define SET_TX_DESC_HTC(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
51#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
53#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
55#define SET_TX_DESC_LINIP(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
57#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
59#define SET_TX_DESC_GF(__pdesc, __val) \
60 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
61#define SET_TX_DESC_OWN(__pdesc, __val) \
62 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
63
64#define GET_TX_DESC_PKT_SIZE(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 0, 16)
66#define GET_TX_DESC_OFFSET(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 16, 8)
68#define GET_TX_DESC_BMC(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 24, 1)
70#define GET_TX_DESC_HTC(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 25, 1)
72#define GET_TX_DESC_LAST_SEG(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
74#define GET_TX_DESC_FIRST_SEG(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
76#define GET_TX_DESC_LINIP(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
78#define GET_TX_DESC_NO_ACM(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
80#define GET_TX_DESC_GF(__pdesc) \
81 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
82#define GET_TX_DESC_OWN(__pdesc) \
83 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
84
85#define SET_TX_DESC_MACID(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 6, __val)
87#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
89#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
91#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
93#define SET_TX_DESC_PIFS(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
95#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
97#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
99#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
100 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
101#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
103#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+4, 26, 5, __val)
105#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
107
108#define GET_TX_DESC_MACID(__pdesc) \
109 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
110#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
111 LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
112#define GET_TX_DESC_AGG_BREAK(__pdesc) \
113 LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
114#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
115 LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
116#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
117 LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
118#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
119 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
120#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
121 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
122#define GET_TX_DESC_PIFS(__pdesc) \
123 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
124#define GET_TX_DESC_RATE_ID(__pdesc) \
125 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
126#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
127 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
128#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
129 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
130#define GET_TX_DESC_SEC_TYPE(__pdesc) \
131 LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
132#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
133 LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
134
135#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
136 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
137#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
138 SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
139#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
141#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
143#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
145#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
147#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
149#define SET_TX_DESC_RAW(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
151#define SET_TX_DESC_CCX(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
153#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
155#define SET_TX_DESC_BT_INT(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
157#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
158 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
159#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
161#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
163#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
164 SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
165#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
166 SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
167
168#define GET_TX_DESC_RTS_RC(__pdesc) \
169 LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
170#define GET_TX_DESC_DATA_RC(__pdesc) \
171 LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
172#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
173 LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
174#define GET_TX_DESC_MORE_FRAG(__pdesc) \
175 LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
176#define GET_TX_DESC_RAW(__pdesc) \
177 LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
178#define GET_TX_DESC_CCX(__pdesc) \
179 LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
180#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
181 LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
182#define GET_TX_DESC_ANTSEL_A(__pdesc) \
183 LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
184#define GET_TX_DESC_ANTSEL_B(__pdesc) \
185 LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
186#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
187 LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
188#define GET_TX_DESC_TX_ANTL(__pdesc) \
189 LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
190#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
191 LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
192
193#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
194 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
195#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
196 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
197#define SET_TX_DESC_SEQ(__pdesc, __val) \
198 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
199#define SET_TX_DESC_CPU_HANDLE(__pdesc, __val) \
200 SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 1, __val)
201#define SET_TX_DESC_TAG1(__pdesc, __val) \
202 SET_BITS_TO_LE_4BYTE(__pdesc+12, 29, 1, __val)
203#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \
204 SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val)
205#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
206 SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val)
207
208
209#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
210 LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
211#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
212 LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
213#define GET_TX_DESC_SEQ(__pdesc) \
214 LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
215
216
217#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
218 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
219#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
220 SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
221#define SET_TX_DESC_QOS(__pdesc, __val) \
222 SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
223#define SET_TX_DESC_HWSEQ_SSN(__pdesc, __val) \
224 SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
225#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
226 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
227#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
228 SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
229#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
230 SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
231#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
232 SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
233#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
234 SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
235#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
236 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
237#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
238 SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
239#define SET_TX_DESC_PWR_STATUS(__pdesc, __val) \
240 SET_BITS_TO_LE_4BYTE(__pdesc+16, 15, 3, __val)
241#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
242 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
243#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
245#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
247#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
249#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
250 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
251#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
252 SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
253#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
254 SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
255#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
256 SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
257#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
258 SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
259#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
260 SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
261
262#define GET_TX_DESC_RTS_RATE(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
264#define GET_TX_DESC_AP_DCFE(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
266#define GET_TX_DESC_QOS(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
268#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
270#define GET_TX_DESC_USE_RATE(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
272#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
274#define GET_TX_DESC_DISABLE_FB(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
276#define GET_TX_DESC_CTS2SELF(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
278#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
280#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
282#define GET_TX_DESC_PORT_ID(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
284#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
285 LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
286#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
288#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
290#define GET_TX_DESC_TX_STBC(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
292#define GET_TX_DESC_DATA_SHORT(__pdesc) \
293 LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
294#define GET_TX_DESC_DATA_BW(__pdesc) \
295 LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
296#define GET_TX_DESC_RTS_SHORT(__pdesc) \
297 LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
298#define GET_TX_DESC_RTS_BW(__pdesc) \
299 LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
300#define GET_TX_DESC_RTS_SC(__pdesc) \
301 LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
302#define GET_TX_DESC_RTS_STBC(__pdesc) \
303 LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
304
305#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
306 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
307#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
308 SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
309#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
310 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
311#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
312 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
313#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
314 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
315#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
316 SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
317#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
318 SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
319#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
320 SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
321
322#define GET_TX_DESC_TX_RATE(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
324#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
326#define GET_TX_DESC_CCX_TAG(__pdesc) \
327 LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
328#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
329 LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
330#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
331 LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
332#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
333 LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
334#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
335 LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
336#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
337 LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
338
339#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
340 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
341#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
342 SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
343#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
344 SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
345#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
346 SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
347#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
348 SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
349#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
350 SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
351#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
352 SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
353#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
354 SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
355
356#define GET_TX_DESC_TXAGC_A(__pdesc) \
357 LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
358#define GET_TX_DESC_TXAGC_B(__pdesc) \
359 LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
360#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
361 LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
362#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
363 LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
364#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
365 LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
366#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
367 LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
368#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
369 LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
370#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
371 LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
372
373#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
374 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
375#define SET_TX_DESC_SW_OFFSET30(__pdesc, __val) \
376 SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 8, __val)
377#define SET_TX_DESC_SW_OFFSET31(__pdesc, __val) \
378 SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
379#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
380 SET_BITS_TO_LE_4BYTE(__pdesc+28, 29, 1, __val)
381#define SET_TX_DESC_NULL_0(__pdesc, __val) \
382 SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
383#define SET_TX_DESC_NULL_1(__pdesc, __val) \
384 SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
385
386#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
387 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
388
389
390#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
391 SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
392#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
393 SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
394
395#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
396 LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
397#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
398 LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
399
400#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
401 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
402#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
403 SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
404
405#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
406 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
407#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
408 LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
409
410#define GET_RX_DESC_PKT_LEN(__pdesc) \
411 LE_BITS_TO_4BYTE(__pdesc, 0, 14)
412#define GET_RX_DESC_CRC32(__pdesc) \
413 LE_BITS_TO_4BYTE(__pdesc, 14, 1)
414#define GET_RX_DESC_ICV(__pdesc) \
415 LE_BITS_TO_4BYTE(__pdesc, 15, 1)
416#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
417 LE_BITS_TO_4BYTE(__pdesc, 16, 4)
418#define GET_RX_DESC_SECURITY(__pdesc) \
419 LE_BITS_TO_4BYTE(__pdesc, 20, 3)
420#define GET_RX_DESC_QOS(__pdesc) \
421 LE_BITS_TO_4BYTE(__pdesc, 23, 1)
422#define GET_RX_DESC_SHIFT(__pdesc) \
423 LE_BITS_TO_4BYTE(__pdesc, 24, 2)
/*
 * RX descriptor field accessors.
 *
 * Each macro reads (GET_*) or writes (SET_*) a bitfield of one
 * little-endian 32-bit word of the hardware RX descriptor; the numeric
 * arguments are (bit offset, bit width).  LE_BITS_TO_4BYTE and
 * SET_BITS_TO_LE_4BYTE are provided by the common rtlwifi headers.
 */

/* descriptor dword 0 (__pdesc + 0) — frame status flags */
#define GET_RX_DESC_PHYST(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 26, 1)
#define GET_RX_DESC_SWDEC(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 27, 1)
#define GET_RX_DESC_LS(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 28, 1)
#define GET_RX_DESC_FS(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 29, 1)
#define GET_RX_DESC_EOR(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 30, 1)
#define GET_RX_DESC_OWN(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc, 31, 1)

/* dword 0 writers used when (re)arming a descriptor for the hardware */
#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
	SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
#define SET_RX_DESC_EOR(__pdesc, __val) \
	SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
#define SET_RX_DESC_OWN(__pdesc, __val) \
	SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)

/* descriptor dword 1 (__pdesc + 4) — addressing / aggregation status */
#define GET_RX_DESC_MACID(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 0, 6)
#define GET_RX_DESC_PAGGR(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
#define GET_RX_DESC_FAGGR(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
#define GET_RX_DESC_A1_FIT(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
#define GET_RX_DESC_A2_FIT(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
#define GET_RX_DESC_PAM(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
#define GET_RX_DESC_PWR(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
#define GET_RX_DESC_MD(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
#define GET_RX_DESC_MF(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
#define GET_RX_DESC_TYPE(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
#define GET_RX_DESC_MC(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
#define GET_RX_DESC_BC(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)

/* descriptor dword 2 (__pdesc + 8) — 802.11 sequence control */
#define GET_RX_DESC_SEQ(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
#define GET_RX_DESC_FRAG(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)

/* descriptor dword 3 (__pdesc + 12) — rate / PHY / match status */
#define GET_RX_DESC_RXMCS(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
#define GET_RX_DESC_RXHT(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
#define GET_RX_DESC_SPLCP(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
#define GET_RX_DESC_BW(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
#define GET_RX_DESC_HTC(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 14, 2)

/* wake-on-WLAN match flags, also in dword 3 */
#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)

/* dword 4: security IV; dword 5: TSF timestamp (low 32 bits) */
#define GET_RX_DESC_IV1(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
#define GET_RX_DESC_TSFL(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)

/* dwords 6/7: DMA buffer address (low / high 32 bits) */
#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
	LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)

#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
	SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
	SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)

/* TX report 2 format in Rx desc*/

#define GET_RX_RPT2_DESC_PKT_LEN(__status) \
	LE_BITS_TO_4BYTE(__status, 0, 9)
#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \
	LE_BITS_TO_4BYTE(__status+16, 0, 32)
#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \
	LE_BITS_TO_4BYTE(__status+20, 0, 32)

/*
 * Early-mode header writers: pack the number and lengths of the frames
 * aggregated into one early-mode burst.  LEN2 is split across the
 * dword boundary (LEN2_1 = low 4 bits, LEN2_2 = high 8 bits).
 */
#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
#define SET_EARLYMODE_LEN0(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
#define SET_EARLYMODE_LEN1(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
#define SET_EARLYMODE_LEN3(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
#define SET_EARLYMODE_LEN4(__paddr, __value) \
	SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
537
/*
 * Zero a TX descriptor before reuse, but never past
 * TX_DESC_NEXT_DESC_OFFSET so the chained next-descriptor pointer at
 * the tail of the descriptor is preserved.
 *
 * Fix: parenthesize the _size macro argument.  Without the parentheses
 * an argument such as a conditional expression binds against the
 * surrounding operators (e.g. "c ? a : b" would parse as
 * "c ? a : (b > TX_DESC_NEXT_DESC_OFFSET)").
 */
#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
do {								\
	if ((_size) > TX_DESC_NEXT_DESC_OFFSET)			\
		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
	else							\
		memset(__pdesc, 0, (_size));			\
} while (0)
545
/*
 * True when the RX rate index reported in the descriptor is one of the
 * four CCK (802.11b) rates.
 *
 * Fix: parenthesize the rxmcs macro argument.  The original expanded it
 * bare, so an argument like "val & 0x3f" would parse as
 * "val & (0x3f == DESC92C_RATE1M)" because == binds tighter than &.
 */
#define RTL8188_RX_HAL_IS_CCK_RATE(rxmcs)	\
	((rxmcs) == DESC92C_RATE1M ||		\
	 (rxmcs) == DESC92C_RATE2M ||		\
	 (rxmcs) == DESC92C_RATE5_5M ||		\
	 (rxmcs) == DESC92C_RATE11M)
551
/*
 * Per-path AGC report byte from the PHY status: 7-bit gain plus the
 * TR-switch flag.  The bitfield order is flipped between endiannesses
 * so the in-memory byte matches the hardware layout on both.
 *
 * NOTE(review): "#if __LITTLE_ENDIAN" relies on the macro being defined
 * only on little-endian builds; "#ifdef" would be the more conventional
 * kernel form — confirm against the byteorder headers this file pulls in.
 */
struct phy_rx_agc_info_t {
	#if __LITTLE_ENDIAN
	u8 gain:7, trsw:1;
	#else
	u8 trsw:1, gain:7;
	#endif
};
/*
 * PHY status report block delivered by the hardware alongside a
 * received frame.  Field names of the form "cck_*_ofdm_*" indicate the
 * same byte carries CCK data for CCK frames and OFDM data otherwise —
 * presumably selected by the received frame's modulation; confirm
 * against the HAL code that parses this struct.
 */
struct phy_status_rpt {
	struct phy_rx_agc_info_t path_agc[2];	/* per-RF-path gain/TR switch */
	u8 ch_corr[2];
	u8 cck_sig_qual_ofdm_pwdb_all;
	u8 cck_agc_rpt_ofdm_cfosho_a;
	u8 cck_rpt_b_ofdm_cfosho_b;
	u8 rsvd_1;				/* reserved */
	u8 noise_power_db_msb;
	u8 path_cfotail[2];
	u8 pcts_mask[2];
	u8 stream_rxevm[2];
	u8 path_rxsnr[2];
	u8 noise_power_db_lsb;
	u8 rsvd_2[3];				/* reserved */
	u8 stream_csi[2];
	u8 stream_target_csi[2];
	u8 sig_evm;
	u8 rsvd_3;				/* reserved */
	/* final byte: antenna/selection flags; bit order mirrors the
	 * hardware byte on either endianness */
#if __LITTLE_ENDIAN
	u8 antsel_rx_keep_2:1;	/*ex_intf_flg:1;*/
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 idle_long:1;
	u8 r_ant_train_en:1;
	u8 ant_sel_b:1;
	u8 ant_sel:1;
#else	/* _BIG_ENDIAN_ */
	u8 ant_sel:1;
	u8 ant_sel_b:1;
	u8 r_ant_train_en:1;
	u8 idle_long:1;
	u8 rxsc:2;
	u8 sgi_en:1;
	u8 antsel_rx_keep_2:1;	/*ex_intf_flg:1;*/
#endif
} __packed;
595
596struct rx_fwinfo_88e {
597 u8 gain_trsw[4];
598 u8 pwdb_all;
599 u8 cfosho[4];
600 u8 cfotail[4];
601 char rxevm[2];
602 char rxsnr[4];
603 u8 pdsnr[2];
604 u8 csi_current[2];
605 u8 csi_target[2];
606 u8 sigevm;
607 u8 max_ex_pwr;
608 u8 ex_intf_flag:1;
609 u8 sgi_en:1;
610 u8 rxsc:2;
611 u8 reserve:4;
612} __packed;
613
/*
 * Hardware TX descriptor layout (bitfield view of the little-endian
 * descriptor dwords).  Field order within each 32-bit group is
 * layout-critical: each group of bitfields below sums to exactly 32
 * bits and maps one descriptor dword.
 */
struct tx_desc_88e {
	/* dword 0: frame length/offset and ownership flags */
	u32 pktsize:16;
	u32 offset:8;
	u32 bmc:1;
	u32 htc:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 linip:1;
	u32 noacm:1;
	u32 gf:1;
	u32 own:1;		/* set when the hardware owns the descriptor */

	/* dword 1: station/queue/security selection */
	u32 macid:6;
	u32 rsvd0:2;
	u32 queuesel:5;
	u32 rd_nav_ext:1;
	u32 lsig_txop_en:1;
	u32 pifs:1;
	u32 rateid:4;
	u32 nav_usehdr:1;
	u32 en_descid:1;
	u32 sectype:2;
	u32 pktoffset:8;

	/* dword 2: retry counts, aggregation and antenna controls */
	u32 rts_rc:6;
	u32 data_rc:6;
	u32 agg_en:1;
	u32 rdg_en:1;
	u32 bar_retryht:2;
	u32 agg_break:1;
	u32 morefrag:1;
	u32 raw:1;
	u32 ccx:1;
	u32 ampdudensity:3;
	u32 bt_int:1;
	u32 ant_sela:1;
	u32 ant_selb:1;
	u32 txant_cck:2;
	u32 txant_l:2;
	u32 txant_ht:2;

	/* dword 3: buffer paging and sequence number */
	u32 nextheadpage:8;
	u32 tailpage:8;
	u32 seq:12;
	u32 cpu_handle:1;
	u32 tag1:1;
	u32 trigger_int:1;
	u32 hwseq_en:1;

	/* dword 4: RTS/CTS protection, bandwidth and STBC settings */
	u32 rtsrate:5;
	u32 apdcfe:1;
	u32 qos:1;
	u32 hwseq_ssn:1;
	u32 userrate:1;
	u32 dis_rtsfb:1;
	u32 dis_datafb:1;
	u32 cts2self:1;
	u32 rts_en:1;
	u32 hwrts_en:1;
	u32 portid:1;
	u32 pwr_status:3;
	u32 waitdcts:1;
	u32 cts2ap_en:1;
	u32 txsc:2;
	u32 stbc:2;
	u32 txshort:1;
	u32 txbw:1;
	u32 rtsshort:1;
	u32 rtsbw:1;
	u32 rtssc:2;
	u32 rtsstbc:2;

	/* dword 5: data rate and retry/fallback limits */
	u32 txrate:6;
	u32 shortgi:1;
	u32 ccxt:1;
	u32 txrate_fb_lmt:5;
	u32 rtsrate_fb_lmt:4;
	u32 retrylmt_en:1;
	u32 txretrylmt:6;
	u32 usb_txaggnum:8;

	/* dword 6: TX AGC and aggregation length limits */
	u32 txagca:5;
	u32 txagcb:5;
	u32 usemaxlen:1;
	u32 maxaggnum:5;
	u32 mcsg1maxlen:4;
	u32 mcsg2maxlen:4;
	u32 mcsg3maxlen:4;
	u32 mcs7sgimaxlen:4;

	/* dword 7: buffer size and software scratch bits */
	u32 txbuffersize:16;
	u32 sw_offset30:8;
	u32 sw_offset31:4;
	u32 rsvd1:1;
	u32 antsel_c:1;
	u32 null_0:1;
	u32 null_1:1;

	/* dwords 8-11: DMA buffer and next-descriptor addresses */
	u32 txbuffaddr;
	u32 txbufferaddr64;
	u32 nextdescaddress;
	u32 nextdescaddress64;

	/* padding so the descriptor meets the PCIe memory-map size */
	u32 reserve_pass_pcie_mm_limit[4];
} __packed;
719
/*
 * Hardware RX descriptor layout (bitfield view of the little-endian
 * descriptor dwords).  Matches the GET_RX_DESC_* accessor offsets
 * defined above; field order within each dword is layout-critical.
 */
struct rx_desc_88e {
	/* dword 0: length, error flags, segment/ownership flags */
	u32 length:14;
	u32 crc32:1;
	u32 icverror:1;
	u32 drv_infosize:4;
	u32 security:3;
	u32 qos:1;
	u32 shift:2;
	u32 phystatus:1;
	u32 swdec:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 eor:1;
	u32 own:1;	/* set while the hardware owns the descriptor */

	/* dword 1: addressing / aggregation status */
	u32 macid:6;
	u32 tid:4;
	u32 hwrsvd:5;
	u32 paggr:1;
	u32 faggr:1;
	u32 a1_fit:4;
	u32 a2_fit:4;
	u32 pam:1;
	u32 pwr:1;
	u32 moredata:1;
	u32 morefrag:1;
	u32 type:2;
	u32 mc:1;
	u32 bc:1;

	/* dword 2: 802.11 sequence control and next-packet hint */
	u32 seq:12;
	u32 frag:4;
	u32 nextpktlen:14;
	u32 nextind:1;
	u32 rsvd:1;

	/* dword 3: rate/PHY info, checksum-offload reports, IV low bits */
	u32 rxmcs:6;
	u32 rxht:1;
	u32 amsdu:1;
	u32 splcp:1;
	u32 bandwidth:1;
	u32 htc:1;
	u32 tcpchk_rpt:1;
	u32 ipcchk_rpt:1;
	u32 tcpchk_valid:1;
	u32 hwpcerr:1;
	u32 hwpcind:1;
	u32 iv0:16;

	/* dword 4: security IV (high part) */
	u32 iv1;

	/* dword 5: TSF timestamp (low 32 bits) */
	u32 tsfl;

	/* dwords 6/7: DMA buffer address (low / high 32 bits) */
	u32 bufferaddress;
	u32 bufferaddress64;

} __packed;
777
/* Build the hardware TX descriptor at pdesc_tx for an outgoing frame. */
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff *skb,
			  u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
/* Parse an RX descriptor into status/rx_status; the bool result gates
 * further handling of the frame — see the .c file for exact semantics. */
bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
			   struct rtl_stats *status,
			   struct ieee80211_rx_status *rx_status,
			   u8 *pdesc, struct sk_buff *skb);
/* Generic named-field set/get on a TX (istx) or RX descriptor. */
void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
/* Kick the hardware to poll the given TX queue. */
void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
/* Build a command/firmware-frame TX descriptor (e.g. beacon/H2C). */
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
			     bool b_firstseg, bool b_lastseg,
			     struct sk_buff *skb);
794
795#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index b793a659a465..d2d57a27a7c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -174,8 +174,8 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
177 dm_digtable->rx_gain_range_max = DM_DIG_MAX; 177 dm_digtable->rx_gain_max = DM_DIG_MAX;
178 dm_digtable->rx_gain_range_min = DM_DIG_MIN; 178 dm_digtable->rx_gain_min = DM_DIG_MIN;
179 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; 179 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
180 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; 180 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
181 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; 181 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
@@ -300,11 +300,11 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
300 } 300 }
301 301
302 if ((digtable->rssi_val_min + 10 - digtable->back_val) > 302 if ((digtable->rssi_val_min + 10 - digtable->back_val) >
303 digtable->rx_gain_range_max) 303 digtable->rx_gain_max)
304 digtable->cur_igvalue = digtable->rx_gain_range_max; 304 digtable->cur_igvalue = digtable->rx_gain_max;
305 else if ((digtable->rssi_val_min + 10 - 305 else if ((digtable->rssi_val_min + 10 -
306 digtable->back_val) < digtable->rx_gain_range_min) 306 digtable->back_val) < digtable->rx_gain_min)
307 digtable->cur_igvalue = digtable->rx_gain_range_min; 307 digtable->cur_igvalue = digtable->rx_gain_min;
308 else 308 else
309 digtable->cur_igvalue = digtable->rssi_val_min + 10 - 309 digtable->cur_igvalue = digtable->rssi_val_min + 10 -
310 digtable->back_val; 310 digtable->back_val;
@@ -669,7 +669,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
669 u8 thermalvalue, delta, delta_lck, delta_iqk; 669 u8 thermalvalue, delta, delta_lck, delta_iqk;
670 long ele_a, ele_d, temp_cck, val_x, value32; 670 long ele_a, ele_d, temp_cck, val_x, value32;
671 long val_y, ele_c = 0; 671 long val_y, ele_c = 0;
672 u8 ofdm_index[2], ofdm_index_old[2], cck_index_old = 0; 672 u8 ofdm_index[2], ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
673 s8 cck_index = 0; 673 s8 cck_index = 0;
674 int i; 674 int i;
675 bool is2t = IS_92C_SERIAL(rtlhal->version); 675 bool is2t = IS_92C_SERIAL(rtlhal->version);
@@ -717,7 +717,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
717 for (i = 0; i < OFDM_TABLE_LENGTH; i++) { 717 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
718 if (ele_d == (ofdmswing_table[i] & 718 if (ele_d == (ofdmswing_table[i] &
719 MASKOFDM_D)) { 719 MASKOFDM_D)) {
720 720 ofdm_index_old[1] = (u8) i;
721 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, 721 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
722 DBG_LOUD, 722 DBG_LOUD,
723 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", 723 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
@@ -1147,75 +1147,6 @@ void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1147} 1147}
1148EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask); 1148EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
1149 1149
1150static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1151{
1152 struct rtl_priv *rtlpriv = rtl_priv(hw);
1153 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1154 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1155 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1156 u32 low_rssi_thresh, high_rssi_thresh;
1157 struct ieee80211_sta *sta = NULL;
1158
1159 if (is_hal_stop(rtlhal)) {
1160 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1161 "<---- driver is going to unload\n");
1162 return;
1163 }
1164
1165 if (!rtlpriv->dm.useramask) {
1166 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1167 "<---- driver does not control rate adaptive mask\n");
1168 return;
1169 }
1170
1171 if (mac->link_state == MAC80211_LINKED &&
1172 mac->opmode == NL80211_IFTYPE_STATION) {
1173 switch (p_ra->pre_ratr_state) {
1174 case DM_RATR_STA_HIGH:
1175 high_rssi_thresh = 50;
1176 low_rssi_thresh = 20;
1177 break;
1178 case DM_RATR_STA_MIDDLE:
1179 high_rssi_thresh = 55;
1180 low_rssi_thresh = 20;
1181 break;
1182 case DM_RATR_STA_LOW:
1183 high_rssi_thresh = 50;
1184 low_rssi_thresh = 25;
1185 break;
1186 default:
1187 high_rssi_thresh = 50;
1188 low_rssi_thresh = 20;
1189 break;
1190 }
1191
1192 if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh)
1193 p_ra->ratr_state = DM_RATR_STA_HIGH;
1194 else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi_thresh)
1195 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1196 else
1197 p_ra->ratr_state = DM_RATR_STA_LOW;
1198
1199 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1200 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
1201 rtlpriv->dm.undec_sm_pwdb);
1202 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1203 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
1204 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1205 "PreState = %d, CurState = %d\n",
1206 p_ra->pre_ratr_state, p_ra->ratr_state);
1207
1208 rcu_read_lock();
1209 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1210 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
1211 p_ra->ratr_state);
1212
1213 p_ra->pre_ratr_state = p_ra->ratr_state;
1214 rcu_read_unlock();
1215 }
1216 }
1217}
1218
1219static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1150static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1220{ 1151{
1221 struct rtl_priv *rtlpriv = rtl_priv(hw); 1152 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1437,6 +1368,9 @@ void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1437 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, 1368 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1438 (u8 *) (&fw_ps_awake)); 1369 (u8 *) (&fw_ps_awake));
1439 1370
1371 if (ppsc->p2p_ps_info.p2p_ps_mode)
1372 fw_ps_awake = false;
1373
1440 if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && 1374 if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
1441 fw_ps_awake) 1375 fw_ps_awake)
1442 && (!ppsc->rfchange_inprogress)) { 1376 && (!ppsc->rfchange_inprogress)) {
@@ -1446,7 +1380,7 @@ void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1446 rtl92c_dm_dynamic_bb_powersaving(hw); 1380 rtl92c_dm_dynamic_bb_powersaving(hw);
1447 rtl92c_dm_dynamic_txpower(hw); 1381 rtl92c_dm_dynamic_txpower(hw);
1448 rtl92c_dm_check_txpower_tracking(hw); 1382 rtl92c_dm_check_txpower_tracking(hw);
1449 rtl92c_dm_refresh_rate_adaptive_mask(hw); 1383 /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
1450 rtl92c_dm_bt_coexist(hw); 1384 rtl92c_dm_bt_coexist(hw);
1451 rtl92c_dm_check_edca_turbo(hw); 1385 rtl92c_dm_check_edca_turbo(hw);
1452 } 1386 }
@@ -1651,7 +1585,7 @@ static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
1651 } 1585 }
1652} 1586}
1653 1587
1654static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw) 1588static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
1655{ 1589{
1656 struct rtl_priv *rtlpriv = rtl_priv(hw); 1590 struct rtl_priv *rtlpriv = rtl_priv(hw);
1657 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); 1591 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
@@ -1673,9 +1607,9 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw)
1673 BT_RSSI_STATE_SPECIAL_LOW)) { 1607 BT_RSSI_STATE_SPECIAL_LOW)) {
1674 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); 1608 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
1675 } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) { 1609 } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
1676 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00); 1610 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
1677 } else { 1611 } else {
1678 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00); 1612 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
1679 } 1613 }
1680 } 1614 }
1681 1615
@@ -1726,12 +1660,17 @@ static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
1726{ 1660{
1727 struct rtl_priv *rtlpriv = rtl_priv(hw); 1661 struct rtl_priv *rtlpriv = rtl_priv(hw);
1728 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); 1662 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1663 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1664 u8 tmp1byte = 0;
1729 1665
1666 if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version) &&
1667 rtlpcipriv->bt_coexist.bt_coexistence)
1668 tmp1byte |= BIT(5);
1730 if (rtlpcipriv->bt_coexist.bt_cur_state) { 1669 if (rtlpcipriv->bt_coexist.bt_cur_state) {
1731 if (rtlpcipriv->bt_coexist.bt_ant_isolation) 1670 if (rtlpcipriv->bt_coexist.bt_ant_isolation)
1732 rtl92c_bt_ant_isolation(hw); 1671 rtl92c_bt_ant_isolation(hw, tmp1byte);
1733 } else { 1672 } else {
1734 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00); 1673 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
1735 rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, 1674 rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
1736 rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); 1675 rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
1737 1676
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 883f23ae9519..04a41628ceed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -552,7 +552,9 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
552 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); 552 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
553 553
554 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); 554 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
555 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1); 555 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
556 (rtlpriv->mac80211.p2p) ?
557 ppsc->smart_ps : 1);
556 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, 558 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
557 ppsc->reg_max_lps_awakeintvl); 559 ppsc->reg_max_lps_awakeintvl);
558 560
@@ -808,3 +810,98 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
808 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); 810 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
809} 811}
810EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd); 812EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
813
814static void rtl92c_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow)
815{
816 u8 u1_ctwindow_period[1] = {ctwindow};
817
818 rtl92c_fill_h2c_cmd(hw, H2C_P2P_PS_CTW_CMD, 1, u1_ctwindow_period);
819}
820
821void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
822{
823 struct rtl_priv *rtlpriv = rtl_priv(hw);
824 struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
825 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
826 struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
827 struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
828 u8 i;
829 u16 ctwindow;
830 u32 start_time, tsf_low;
831
832 switch (p2p_ps_state) {
833 case P2P_PS_DISABLE:
834 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
835 memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
836 break;
837 case P2P_PS_ENABLE:
838 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
839 /* update CTWindow value. */
840 if (p2pinfo->ctwindow > 0) {
841 p2p_ps_offload->ctwindow_en = 1;
842 ctwindow = p2pinfo->ctwindow;
843 rtl92c_set_p2p_ctw_period_cmd(hw, ctwindow);
844 }
845 /* hw only support 2 set of NoA */
846 for (i = 0; i < p2pinfo->noa_num; i++) {
847 /* To control the register setting for which NOA*/
848 rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
849 if (i == 0)
850 p2p_ps_offload->noa0_en = 1;
851 else
852 p2p_ps_offload->noa1_en = 1;
853
854 /* config P2P NoA Descriptor Register */
855 rtl_write_dword(rtlpriv, 0x5E0,
856 p2pinfo->noa_duration[i]);
857 rtl_write_dword(rtlpriv, 0x5E4,
858 p2pinfo->noa_interval[i]);
859
860 /*Get Current TSF value */
861 tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
862
863 start_time = p2pinfo->noa_start_time[i];
864 if (p2pinfo->noa_count_type[i] != 1) {
865 while (start_time <= (tsf_low+(50*1024))) {
866 start_time += p2pinfo->noa_interval[i];
867 if (p2pinfo->noa_count_type[i] != 255)
868 p2pinfo->noa_count_type[i]--;
869 }
870 }
871 rtl_write_dword(rtlpriv, 0x5E8, start_time);
872 rtl_write_dword(rtlpriv, 0x5EC,
873 p2pinfo->noa_count_type[i]);
874 }
875
876 if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
877 /* rst p2p circuit */
878 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
879
880 p2p_ps_offload->offload_en = 1;
881
882 if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
883 p2p_ps_offload->role = 1;
884 p2p_ps_offload->allstasleep = 0;
885 } else {
886 p2p_ps_offload->role = 0;
887 }
888
889 p2p_ps_offload->discovery = 0;
890 }
891 break;
892 case P2P_PS_SCAN:
893 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
894 p2p_ps_offload->discovery = 1;
895 break;
896 case P2P_PS_SCAN_DONE:
897 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
898 p2p_ps_offload->discovery = 0;
899 p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
900 break;
901 default:
902 break;
903 }
904
905 rtl92c_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
906}
907EXPORT_SYMBOL_GPL(rtl92c_set_p2p_ps_offload_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 780ea5b1e24c..15b2055e6212 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -67,6 +67,9 @@ enum rtl8192c_h2c_cmd {
67 H2C_RSVDPAGE = 3, 67 H2C_RSVDPAGE = 3,
68 H2C_RSSI_REPORT = 5, 68 H2C_RSSI_REPORT = 5,
69 H2C_RA_MASK = 6, 69 H2C_RA_MASK = 6,
70 H2C_MACID_PS_MODE = 7,
71 H2C_P2P_PS_OFFLOAD = 8,
72 H2C_P2P_PS_CTW_CMD = 32,
70 MAX_H2CCMD 73 MAX_H2CCMD
71}; 74};
72 75
@@ -95,5 +98,6 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 98void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 99void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
97void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); 100void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
101void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
98 102
99#endif 103#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 1b65db7fd651..a82b30a1996c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -475,6 +475,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
475 475
476 break; 476 break;
477 } 477 }
478 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
479 rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
480 break;
478 case HW_VAR_AID:{ 481 case HW_VAR_AID:{
479 u16 u2btmp; 482 u16 u2btmp;
480 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); 483 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
@@ -505,6 +508,40 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
505 break; 508 break;
506 509
507 } 510 }
511 case HW_VAR_FW_LPS_ACTION: {
512 bool enter_fwlps = *((bool *)val);
513 u8 rpwm_val, fw_pwrmode;
514 bool fw_current_inps;
515
516 if (enter_fwlps) {
517 rpwm_val = 0x02; /* RF off */
518 fw_current_inps = true;
519 rtlpriv->cfg->ops->set_hw_reg(hw,
520 HW_VAR_FW_PSMODE_STATUS,
521 (u8 *)(&fw_current_inps));
522 rtlpriv->cfg->ops->set_hw_reg(hw,
523 HW_VAR_H2C_FW_PWRMODE,
524 (u8 *)(&ppsc->fwctrl_psmode));
525
526 rtlpriv->cfg->ops->set_hw_reg(hw,
527 HW_VAR_SET_RPWM,
528 (u8 *)(&rpwm_val));
529 } else {
530 rpwm_val = 0x0C; /* RF on */
531 fw_pwrmode = FW_PS_ACTIVE_MODE;
532 fw_current_inps = false;
533 rtlpriv->cfg->ops->set_hw_reg(hw,
534 HW_VAR_SET_RPWM,
535 (u8 *)(&rpwm_val));
536 rtlpriv->cfg->ops->set_hw_reg(hw,
537 HW_VAR_H2C_FW_PWRMODE,
538 (u8 *)(&fw_pwrmode));
539
540 rtlpriv->cfg->ops->set_hw_reg(hw,
541 HW_VAR_FW_PSMODE_STATUS,
542 (u8 *)(&fw_current_inps));
543 }
544 break; }
508 default: 545 default:
509 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 546 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
510 "switch case not processed\n"); 547 "switch case not processed\n");
@@ -1105,7 +1142,8 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1105 type == NL80211_IFTYPE_STATION) { 1142 type == NL80211_IFTYPE_STATION) {
1106 _rtl92ce_stop_tx_beacon(hw); 1143 _rtl92ce_stop_tx_beacon(hw);
1107 _rtl92ce_enable_bcn_sub_func(hw); 1144 _rtl92ce_enable_bcn_sub_func(hw);
1108 } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { 1145 } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
1146 type == NL80211_IFTYPE_MESH_POINT) {
1109 _rtl92ce_resume_tx_beacon(hw); 1147 _rtl92ce_resume_tx_beacon(hw);
1110 _rtl92ce_disable_bcn_sub_func(hw); 1148 _rtl92ce_disable_bcn_sub_func(hw);
1111 } else { 1149 } else {
@@ -1137,6 +1175,11 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1137 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1175 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1138 "Set Network type to AP!\n"); 1176 "Set Network type to AP!\n");
1139 break; 1177 break;
1178 case NL80211_IFTYPE_MESH_POINT:
1179 bt_msr |= MSR_ADHOC;
1180 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1181 "Set Network type to Mesh Point!\n");
1182 break;
1140 default: 1183 default:
1141 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1184 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1142 "Network type %d not supported!\n", type); 1185 "Network type %d not supported!\n", type);
@@ -1184,7 +1227,8 @@ int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1184 return -EOPNOTSUPP; 1227 return -EOPNOTSUPP;
1185 1228
1186 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { 1229 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1187 if (type != NL80211_IFTYPE_AP) 1230 if (type != NL80211_IFTYPE_AP &&
1231 type != NL80211_IFTYPE_MESH_POINT)
1188 rtl92ce_set_check_bssid(hw, true); 1232 rtl92ce_set_check_bssid(hw, true);
1189 } else { 1233 } else {
1190 rtl92ce_set_check_bssid(hw, false); 1234 rtl92ce_set_check_bssid(hw, false);
@@ -1459,7 +1503,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1459 } 1503 }
1460 1504
1461 for (i = 0; i < 14; i++) { 1505 for (i = 0; i < 14; i++) {
1462 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1506 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1463 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", 1507 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
1464 rf_path, i, 1508 rf_path, i,
1465 rtlefuse->txpwrlevel_cck[rf_path][i], 1509 rtlefuse->txpwrlevel_cck[rf_path][i],
@@ -1500,11 +1544,11 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1500 & 0xf0) >> 4); 1544 & 0xf0) >> 4);
1501 } 1545 }
1502 1546
1503 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1547 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1504 "RF-%d pwrgroup_ht20[%d] = 0x%x\n", 1548 "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
1505 rf_path, i, 1549 rf_path, i,
1506 rtlefuse->pwrgroup_ht20[rf_path][i]); 1550 rtlefuse->pwrgroup_ht20[rf_path][i]);
1507 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1551 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1508 "RF-%d pwrgroup_ht40[%d] = 0x%x\n", 1552 "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
1509 rf_path, i, 1553 rf_path, i,
1510 rtlefuse->pwrgroup_ht40[rf_path][i]); 1554 rtlefuse->pwrgroup_ht40[rf_path][i]);
@@ -1545,19 +1589,19 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1545 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7]; 1589 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
1546 1590
1547 for (i = 0; i < 14; i++) 1591 for (i = 0; i < 14; i++)
1548 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1592 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1549 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", 1593 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
1550 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); 1594 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
1551 for (i = 0; i < 14; i++) 1595 for (i = 0; i < 14; i++)
1552 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1596 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1553 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", 1597 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
1554 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); 1598 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
1555 for (i = 0; i < 14; i++) 1599 for (i = 0; i < 14; i++)
1556 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1600 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1557 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", 1601 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
1558 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); 1602 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
1559 for (i = 0; i < 14; i++) 1603 for (i = 0; i < 14; i++)
1560 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1604 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1561 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", 1605 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
1562 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); 1606 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
1563 1607
@@ -1565,7 +1609,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1565 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7); 1609 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
1566 else 1610 else
1567 rtlefuse->eeprom_regulatory = 0; 1611 rtlefuse->eeprom_regulatory = 0;
1568 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1612 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1569 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); 1613 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
1570 1614
1571 if (!autoload_fail) { 1615 if (!autoload_fail) {
@@ -1575,7 +1619,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1575 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI; 1619 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
1576 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI; 1620 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
1577 } 1621 }
1578 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 1622 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
1579 rtlefuse->eeprom_tssi[RF90_PATH_A], 1623 rtlefuse->eeprom_tssi[RF90_PATH_A],
1580 rtlefuse->eeprom_tssi[RF90_PATH_B]); 1624 rtlefuse->eeprom_tssi[RF90_PATH_B]);
1581 1625
@@ -1589,7 +1633,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1589 rtlefuse->apk_thermalmeterignore = true; 1633 rtlefuse->apk_thermalmeterignore = true;
1590 1634
1591 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; 1635 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
1592 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1636 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1593 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 1637 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
1594} 1638}
1595 1639
@@ -1629,6 +1673,21 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1629 if (rtlefuse->autoload_failflag) 1673 if (rtlefuse->autoload_failflag)
1630 return; 1674 return;
1631 1675
1676 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
1677 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
1678 rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
1679 rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
1680 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1681 "EEPROMId = 0x%4x\n", eeprom_id);
1682 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1683 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
1684 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1685 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
1686 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1687 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
1688 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1689 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
1690
1632 for (i = 0; i < 6; i += 2) { 1691 for (i = 0; i < 6; i += 2) {
1633 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; 1692 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1634 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; 1693 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
@@ -1766,6 +1825,9 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
1766 ratr_value = sta->supp_rates[1] << 4; 1825 ratr_value = sta->supp_rates[1] << 4;
1767 else 1826 else
1768 ratr_value = sta->supp_rates[0]; 1827 ratr_value = sta->supp_rates[0];
1828 if (mac->opmode == NL80211_IFTYPE_ADHOC)
1829 ratr_value = 0xfff;
1830
1769 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | 1831 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
1770 sta->ht_cap.mcs.rx_mask[0] << 12); 1832 sta->ht_cap.mcs.rx_mask[0] << 12);
1771 switch (wirelessmode) { 1833 switch (wirelessmode) {
@@ -1860,7 +1922,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1860 1922
1861 sta_entry = (struct rtl_sta_info *) sta->drv_priv; 1923 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
1862 wirelessmode = sta_entry->wireless_mode; 1924 wirelessmode = sta_entry->wireless_mode;
1863 if (mac->opmode == NL80211_IFTYPE_STATION) 1925 if (mac->opmode == NL80211_IFTYPE_STATION ||
1926 mac->opmode == NL80211_IFTYPE_MESH_POINT)
1864 curtxbw_40mhz = mac->bw_40; 1927 curtxbw_40mhz = mac->bw_40;
1865 else if (mac->opmode == NL80211_IFTYPE_AP || 1928 else if (mac->opmode == NL80211_IFTYPE_AP ||
1866 mac->opmode == NL80211_IFTYPE_ADHOC) 1929 mac->opmode == NL80211_IFTYPE_ADHOC)
@@ -1870,6 +1933,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1870 ratr_bitmap = sta->supp_rates[1] << 4; 1933 ratr_bitmap = sta->supp_rates[1] << 4;
1871 else 1934 else
1872 ratr_bitmap = sta->supp_rates[0]; 1935 ratr_bitmap = sta->supp_rates[0];
1936 if (mac->opmode == NL80211_IFTYPE_ADHOC)
1937 ratr_bitmap = 0xfff;
1873 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | 1938 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
1874 sta->ht_cap.mcs.rx_mask[0] << 12); 1939 sta->ht_cap.mcs.rx_mask[0] << 12);
1875 switch (wirelessmode) { 1940 switch (wirelessmode) {
@@ -2135,7 +2200,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
2135 macaddr = cam_const_broad; 2200 macaddr = cam_const_broad;
2136 entry_id = key_index; 2201 entry_id = key_index;
2137 } else { 2202 } else {
2138 if (mac->opmode == NL80211_IFTYPE_AP) { 2203 if (mac->opmode == NL80211_IFTYPE_AP ||
2204 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
2139 entry_id = rtl_cam_get_free_entry(hw, 2205 entry_id = rtl_cam_get_free_entry(hw,
2140 p_macaddr); 2206 p_macaddr);
2141 if (entry_id >= TOTAL_CAM_ENTRY) { 2207 if (entry_id >= TOTAL_CAM_ENTRY) {
@@ -2157,7 +2223,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
2157 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 2223 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2158 "delete one entry, entry_id is %d\n", 2224 "delete one entry, entry_id is %d\n",
2159 entry_id); 2225 entry_id);
2160 if (mac->opmode == NL80211_IFTYPE_AP) 2226 if (mac->opmode == NL80211_IFTYPE_AP ||
2227 mac->opmode == NL80211_IFTYPE_MESH_POINT)
2161 rtl_cam_del_entry(hw, p_macaddr); 2228 rtl_cam_del_entry(hw, p_macaddr);
2162 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 2229 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
2163 } else { 2230 } else {
@@ -2338,3 +2405,24 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
2338void rtl92ce_resume(struct ieee80211_hw *hw) 2405void rtl92ce_resume(struct ieee80211_hw *hw)
2339{ 2406{
2340} 2407}
2408
2409/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2410void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
2411 bool allow_all_da, bool write_into_reg)
2412{
2413 struct rtl_priv *rtlpriv = rtl_priv(hw);
2414 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2415
2416 if (allow_all_da) {/* Set BIT0 */
2417 rtlpci->receive_config |= RCR_AAP;
2418 } else {/* Clear BIT0 */
2419 rtlpci->receive_config &= ~RCR_AAP;
2420 }
2421
2422 if (write_into_reg)
2423 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
2424
2425 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2426 "receive_config=0x%08X, write_into_reg=%d\n",
2427 rtlpci->receive_config, write_into_reg);
2428}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 52a3aea9b3de..2d063b0c7760 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -61,6 +61,8 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
61void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 61void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
62void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw, 62void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
63 struct ieee80211_sta *sta, u8 rssi_level); 63 struct ieee80211_sta *sta, u8 rssi_level);
64void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
65 struct ieee80211_sta *sta, u8 rssi_level);
64void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw); 66void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw);
65bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); 67bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
66void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw); 68void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
@@ -74,5 +76,7 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
74void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw); 76void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
75void rtl92ce_suspend(struct ieee80211_hw *hw); 77void rtl92ce_suspend(struct ieee80211_hw *hw);
76void rtl92ce_resume(struct ieee80211_hw *hw); 78void rtl92ce_resume(struct ieee80211_hw *hw);
79void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
80 bool allow_all_da, bool write_into_reg);
77 81
78#endif 82#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index e4d738f6166d..bd4aef74c056 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -544,6 +544,7 @@
544#define IMR_WLANOFF BIT(0) 544#define IMR_WLANOFF BIT(0)
545 545
546#define EFUSE_REAL_CONTENT_LEN 512 546#define EFUSE_REAL_CONTENT_LEN 512
547#define EFUSE_OOB_PROTECT_BYTES 15
547 548
548#define EEPROM_DEFAULT_TSSI 0x0 549#define EEPROM_DEFAULT_TSSI 0x0
549#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 550#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 49f663bd93ff..14203561b6ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -228,6 +228,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
228 .enable_hw_sec = rtl92ce_enable_hw_security_config, 228 .enable_hw_sec = rtl92ce_enable_hw_security_config,
229 .set_key = rtl92ce_set_key, 229 .set_key = rtl92ce_set_key,
230 .init_sw_leds = rtl92ce_init_sw_leds, 230 .init_sw_leds = rtl92ce_init_sw_leds,
231 .allow_all_destaddr = rtl92ce_allow_all_destaddr,
231 .get_bbreg = rtl92c_phy_query_bb_reg, 232 .get_bbreg = rtl92c_phy_query_bb_reg,
232 .set_bbreg = rtl92c_phy_set_bb_reg, 233 .set_bbreg = rtl92c_phy_set_bb_reg,
233 .set_rfreg = rtl92ce_phy_set_rf_reg, 234 .set_rfreg = rtl92ce_phy_set_rf_reg,
@@ -278,6 +279,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
278 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, 279 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
279 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, 280 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
280 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, 281 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
282 .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
281 283
282 .maps[RWCAM] = REG_CAMCMD, 284 .maps[RWCAM] = REG_CAMCMD,
283 .maps[WCAMI] = REG_CAMWRITE, 285 .maps[WCAMI] = REG_CAMWRITE,
@@ -309,7 +311,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
309 311
310 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, 312 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
311 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, 313 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
312 .maps[RTL_IMR_BcnInt] = IMR_BCNINT, 314 .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
313 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, 315 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
314 .maps[RTL_IMR_RDU] = IMR_RDU, 316 .maps[RTL_IMR_RDU] = IMR_RDU,
315 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, 317 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index b9b1a6e0b16e..6ad23b413eb3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../base.h" 32#include "../base.h"
33#include "../stats.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -42,7 +43,7 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
42 43
43 if (unlikely(ieee80211_is_beacon(fc))) 44 if (unlikely(ieee80211_is_beacon(fc)))
44 return QSLT_BEACON; 45 return QSLT_BEACON;
45 if (ieee80211_is_mgmt(fc)) 46 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
46 return QSLT_MGNT; 47 return QSLT_MGNT;
47 48
48 return skb->priority; 49 return skb->priority;
@@ -78,16 +79,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
78 return ret_val; 79 return ret_val;
79} 80}
80 81
81static long _rtl92ce_translate_todbm(struct ieee80211_hw *hw,
82 u8 signal_strength_index)
83{
84 long signal_power;
85
86 signal_power = (long)((signal_strength_index + 1) >> 1);
87 signal_power -= 95;
88 return signal_power;
89}
90
91static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw, 82static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
92 long currsig) 83 long currsig)
93{ 84{
@@ -139,7 +130,6 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
139 pstats->packet_toself = packet_toself; 130 pstats->packet_toself = packet_toself;
140 pstats->is_cck = is_cck_rate; 131 pstats->is_cck = is_cck_rate;
141 pstats->packet_beacon = packet_beacon; 132 pstats->packet_beacon = packet_beacon;
142 pstats->is_cck = is_cck_rate;
143 pstats->rx_mimo_sig_qual[0] = -1; 133 pstats->rx_mimo_sig_qual[0] = -1;
144 pstats->rx_mimo_sig_qual[1] = -1; 134 pstats->rx_mimo_sig_qual[1] = -1;
145 135
@@ -192,10 +182,30 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
192 } 182 }
193 } 183 }
194 184
195 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all); 185 pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
186 /* CCK gain is smaller than OFDM/MCS gain,
187 * so we add gain diff by experiences,
188 * the val is 6
189 */
190 pwdb_all += 6;
191 if (pwdb_all > 100)
192 pwdb_all = 100;
193 /* modify the offset to make the same
194 * gain index with OFDM.
195 */
196 if (pwdb_all > 34 && pwdb_all <= 42)
197 pwdb_all -= 2;
198 else if (pwdb_all > 26 && pwdb_all <= 34)
199 pwdb_all -= 6;
200 else if (pwdb_all > 14 && pwdb_all <= 26)
201 pwdb_all -= 8;
202 else if (pwdb_all > 4 && pwdb_all <= 14)
203 pwdb_all -= 4;
204
196 pstats->rx_pwdb_all = pwdb_all; 205 pstats->rx_pwdb_all = pwdb_all;
197 pstats->recvsignalpower = rx_pwr_all; 206 pstats->recvsignalpower = rx_pwr_all;
198 207
208 /* (3) Get Signal Quality (EVM) */
199 if (packet_match_bssid) { 209 if (packet_match_bssid) {
200 u8 sq; 210 u8 sq;
201 if (pstats->rx_pwdb_all > 40) 211 if (pstats->rx_pwdb_all > 40)
@@ -217,29 +227,38 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
217 } else { 227 } else {
218 rtlpriv->dm.rfpath_rxenable[0] = 228 rtlpriv->dm.rfpath_rxenable[0] =
219 rtlpriv->dm.rfpath_rxenable[1] = true; 229 rtlpriv->dm.rfpath_rxenable[1] = true;
230 /* (1)Get RSSI for HT rate */
220 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { 231 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
232 /* we will judge RF RX path now. */
221 if (rtlpriv->dm.rfpath_rxenable[i]) 233 if (rtlpriv->dm.rfpath_rxenable[i])
222 rf_rx_num++; 234 rf_rx_num++;
223 235
224 rx_pwr[i] = 236 rx_pwr[i] =
225 ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110; 237 ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
238 /* Translate DBM to percentage. */
226 rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]); 239 rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
227 total_rssi += rssi; 240 total_rssi += rssi;
241 /* Get Rx snr value in DB */
228 rtlpriv->stats.rx_snr_db[i] = 242 rtlpriv->stats.rx_snr_db[i] =
229 (long)(p_drvinfo->rxsnr[i] / 2); 243 (long)(p_drvinfo->rxsnr[i] / 2);
230 244
245 /* Record Signal Strength for next packet */
231 if (packet_match_bssid) 246 if (packet_match_bssid)
232 pstats->rx_mimo_signalstrength[i] = (u8) rssi; 247 pstats->rx_mimo_signalstrength[i] = (u8) rssi;
233 } 248 }
234 249
250 /* (2)PWDB, Average PWDB cacluated by
251 * hardware (for rate adaptive)
252 */
235 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; 253 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
236 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all); 254 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
237 pstats->rx_pwdb_all = pwdb_all; 255 pstats->rx_pwdb_all = pwdb_all;
238 pstats->rxpower = rx_pwr_all; 256 pstats->rxpower = rx_pwr_all;
239 pstats->recvsignalpower = rx_pwr_all; 257 pstats->recvsignalpower = rx_pwr_all;
240 258
241 if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 && 259 /* (3)EVM of HT rate */
242 pdesc->rxmcs <= DESC92_RATEMCS15) 260 if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
261 pstats->rate <= DESC92_RATEMCS15)
243 max_spatial_stream = 2; 262 max_spatial_stream = 2;
244 else 263 else
245 max_spatial_stream = 1; 264 max_spatial_stream = 1;
@@ -248,6 +267,9 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
248 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); 267 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
249 268
250 if (packet_match_bssid) { 269 if (packet_match_bssid) {
270 /* Fill value in RFD, Get the first
271 * spatial stream only
272 */
251 if (i == 0) 273 if (i == 0)
252 pstats->signalquality = 274 pstats->signalquality =
253 (u8) (evm & 0xff); 275 (u8) (evm & 0xff);
@@ -256,6 +278,9 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
256 } 278 }
257 } 279 }
258 280
281 /* UI BSS List signal strength(in percentage),
282 * make it good looking, from 0~100.
283 */
259 if (is_cck_rate) 284 if (is_cck_rate)
260 pstats->signalstrength = 285 pstats->signalstrength =
261 (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all)); 286 (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all));
@@ -265,215 +290,6 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
265 (hw, total_rssi /= rf_rx_num)); 290 (hw, total_rssi /= rf_rx_num));
266} 291}
267 292
268static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
269 struct rtl_stats *pstats)
270{
271 struct rtl_priv *rtlpriv = rtl_priv(hw);
272 struct rtl_phy *rtlphy = &(rtlpriv->phy);
273 u8 rfpath;
274 u32 last_rssi, tmpval;
275
276 if (pstats->packet_toself || pstats->packet_beacon) {
277 rtlpriv->stats.rssi_calculate_cnt++;
278
279 if (rtlpriv->stats.ui_rssi.total_num++ >=
280 PHY_RSSI_SLID_WIN_MAX) {
281
282 rtlpriv->stats.ui_rssi.total_num =
283 PHY_RSSI_SLID_WIN_MAX;
284 last_rssi =
285 rtlpriv->stats.ui_rssi.elements[rtlpriv->
286 stats.ui_rssi.index];
287 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
288 }
289
290 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
291 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
292 index++] =
293 pstats->signalstrength;
294
295 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
296 rtlpriv->stats.ui_rssi.index = 0;
297
298 tmpval = rtlpriv->stats.ui_rssi.total_val /
299 rtlpriv->stats.ui_rssi.total_num;
300 rtlpriv->stats.signal_strength =
301 _rtl92ce_translate_todbm(hw, (u8) tmpval);
302 pstats->rssi = rtlpriv->stats.signal_strength;
303 }
304
305 if (!pstats->is_cck && pstats->packet_toself) {
306 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
307 rfpath++) {
308 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
309 rtlpriv->stats.rx_rssi_percentage[rfpath] =
310 pstats->rx_mimo_signalstrength[rfpath];
311
312 }
313
314 if (pstats->rx_mimo_signalstrength[rfpath] >
315 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
316 rtlpriv->stats.rx_rssi_percentage[rfpath] =
317 ((rtlpriv->stats.
318 rx_rssi_percentage[rfpath] *
319 (RX_SMOOTH_FACTOR - 1)) +
320 (pstats->rx_mimo_signalstrength[rfpath])) /
321 (RX_SMOOTH_FACTOR);
322
323 rtlpriv->stats.rx_rssi_percentage[rfpath] =
324 rtlpriv->stats.rx_rssi_percentage[rfpath] +
325 1;
326 } else {
327 rtlpriv->stats.rx_rssi_percentage[rfpath] =
328 ((rtlpriv->stats.
329 rx_rssi_percentage[rfpath] *
330 (RX_SMOOTH_FACTOR - 1)) +
331 (pstats->rx_mimo_signalstrength[rfpath])) /
332 (RX_SMOOTH_FACTOR);
333 }
334
335 }
336 }
337}
338
339static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
340 struct rtl_stats *pstats)
341{
342 struct rtl_priv *rtlpriv = rtl_priv(hw);
343 int weighting = 0;
344
345 if (rtlpriv->stats.recv_signal_power == 0)
346 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
347
348 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
349 weighting = 5;
350
351 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
352 weighting = (-5);
353
354 rtlpriv->stats.recv_signal_power =
355 (rtlpriv->stats.recv_signal_power * 5 +
356 pstats->recvsignalpower + weighting) / 6;
357}
358
359static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
360 struct rtl_stats *pstats)
361{
362 struct rtl_priv *rtlpriv = rtl_priv(hw);
363 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
364 long undec_sm_pwdb;
365
366 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
367 return;
368 } else {
369 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
370 }
371
372 if (pstats->packet_toself || pstats->packet_beacon) {
373 if (undec_sm_pwdb < 0)
374 undec_sm_pwdb = pstats->rx_pwdb_all;
375
376 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
377 undec_sm_pwdb = (((undec_sm_pwdb) *
378 (RX_SMOOTH_FACTOR - 1)) +
379 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
380
381 undec_sm_pwdb += 1;
382 } else {
383 undec_sm_pwdb = (((undec_sm_pwdb) *
384 (RX_SMOOTH_FACTOR - 1)) +
385 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
386 }
387
388 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
389 _rtl92ce_update_rxsignalstatistics(hw, pstats);
390 }
391}
392
393static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
394 struct rtl_stats *pstats)
395{
396 struct rtl_priv *rtlpriv = rtl_priv(hw);
397 u32 last_evm, n_spatialstream, tmpval;
398
399 if (pstats->signalquality != 0) {
400 if (pstats->packet_toself || pstats->packet_beacon) {
401
402 if (rtlpriv->stats.ui_link_quality.total_num++ >=
403 PHY_LINKQUALITY_SLID_WIN_MAX) {
404 rtlpriv->stats.ui_link_quality.total_num =
405 PHY_LINKQUALITY_SLID_WIN_MAX;
406 last_evm =
407 rtlpriv->stats.
408 ui_link_quality.elements[rtlpriv->
409 stats.ui_link_quality.
410 index];
411 rtlpriv->stats.ui_link_quality.total_val -=
412 last_evm;
413 }
414
415 rtlpriv->stats.ui_link_quality.total_val +=
416 pstats->signalquality;
417 rtlpriv->stats.ui_link_quality.elements[rtlpriv->stats.
418 ui_link_quality.
419 index++] =
420 pstats->signalquality;
421
422 if (rtlpriv->stats.ui_link_quality.index >=
423 PHY_LINKQUALITY_SLID_WIN_MAX)
424 rtlpriv->stats.ui_link_quality.index = 0;
425
426 tmpval = rtlpriv->stats.ui_link_quality.total_val /
427 rtlpriv->stats.ui_link_quality.total_num;
428 rtlpriv->stats.signal_quality = tmpval;
429
430 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
431
432 for (n_spatialstream = 0; n_spatialstream < 2;
433 n_spatialstream++) {
434 if (pstats->
435 rx_mimo_sig_qual[n_spatialstream] != -1) {
436 if (rtlpriv->stats.
437 rx_evm_percentage[n_spatialstream]
438 == 0) {
439 rtlpriv->stats.
440 rx_evm_percentage
441 [n_spatialstream] =
442 pstats->rx_mimo_sig_qual
443 [n_spatialstream];
444 }
445
446 rtlpriv->stats.
447 rx_evm_percentage[n_spatialstream] =
448 ((rtlpriv->
449 stats.rx_evm_percentage
450 [n_spatialstream] *
451 (RX_SMOOTH_FACTOR - 1)) +
452 (pstats->rx_mimo_sig_qual
453 [n_spatialstream] * 1)) /
454 (RX_SMOOTH_FACTOR);
455 }
456 }
457 }
458 } else {
459 ;
460 }
461}
462
463static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
464 u8 *buffer,
465 struct rtl_stats *pcurrent_stats)
466{
467
468 if (!pcurrent_stats->packet_matchbssid &&
469 !pcurrent_stats->packet_beacon)
470 return;
471
472 _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
473 _rtl92ce_process_pwdb(hw, pcurrent_stats);
474 _rtl92ce_process_ui_link_quality(hw, pcurrent_stats);
475}
476
477static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw, 293static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
478 struct sk_buff *skb, 294 struct sk_buff *skb,
479 struct rtl_stats *pstats, 295 struct rtl_stats *pstats,
@@ -516,7 +332,7 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
516 packet_matchbssid, packet_toself, 332 packet_matchbssid, packet_toself,
517 packet_beacon); 333 packet_beacon);
518 334
519 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats); 335 rtl_process_phyinfo(hw, tmp_buf, pstats);
520} 336}
521 337
522bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, 338bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
@@ -526,7 +342,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
526{ 342{
527 struct rx_fwinfo_92c *p_drvinfo; 343 struct rx_fwinfo_92c *p_drvinfo;
528 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; 344 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
529 345 struct ieee80211_hdr *hdr;
530 u32 phystatus = GET_RX_DESC_PHYST(pdesc); 346 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
531 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); 347 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
532 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * 348 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
@@ -539,37 +355,60 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
539 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc); 355 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
540 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); 356 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
541 stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); 357 stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
542 stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) 358 stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
543 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 359 && (GET_RX_DESC_FAGGR(pdesc) == 1));
544 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 360 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
545 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 361 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
362 stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
546 363
547 rx_status->freq = hw->conf.channel->center_freq; 364 stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc);
548 rx_status->band = hw->conf.channel->band;
549 365
550 if (GET_RX_DESC_CRC32(pdesc)) 366 rx_status->freq = hw->conf.chandef.chan->center_freq;
551 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 367 rx_status->band = hw->conf.chandef.chan->band;
552 368
553 if (!GET_RX_DESC_SWDEC(pdesc)) 369 hdr = (struct ieee80211_hdr *)(skb->data + stats->rx_drvinfo_size
554 rx_status->flag |= RX_FLAG_DECRYPTED; 370 + stats->rx_bufshift);
371
372 if (stats->crc)
373 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
555 374
556 if (GET_RX_DESC_BW(pdesc)) 375 if (stats->rx_is40Mhzpacket)
557 rx_status->flag |= RX_FLAG_40MHZ; 376 rx_status->flag |= RX_FLAG_40MHZ;
558 377
559 if (GET_RX_DESC_RXHT(pdesc)) 378 if (stats->is_ht)
560 rx_status->flag |= RX_FLAG_HT; 379 rx_status->flag |= RX_FLAG_HT;
561 380
562 rx_status->flag |= RX_FLAG_MACTIME_START; 381 rx_status->flag |= RX_FLAG_MACTIME_START;
563 382
564 if (stats->decrypted) 383 /* hw will set stats->decrypted true, if it finds the
565 rx_status->flag |= RX_FLAG_DECRYPTED; 384 * frame is open data frame or mgmt frame.
566 385 * So hw will not decryption robust managment frame
386 * for IEEE80211w but still set status->decrypted
387 * true, so here we should set it back to undecrypted
388 * for IEEE80211w frame, and mac80211 sw will help
389 * to decrypt it
390 */
391 if (stats->decrypted) {
392 if (!hdr) {
393 /* In testing, hdr was NULL here */
394 return false;
395 }
396 if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
397 (ieee80211_has_protected(hdr->frame_control)))
398 rx_status->flag &= ~RX_FLAG_DECRYPTED;
399 else
400 rx_status->flag |= RX_FLAG_DECRYPTED;
401 }
402 /* rate_idx: index of data rate into band's
403 * supported rates or MCS index if HT rates
404 * are use (RX_FLAG_HT)
405 * Notice: this is diff with windows define
406 */
567 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 407 rx_status->rate_idx = rtlwifi_rate_mapping(hw,
568 (bool)GET_RX_DESC_RXHT(pdesc), 408 stats->is_ht, stats->rate,
569 (u8)GET_RX_DESC_RXMCS(pdesc), 409 stats->isfirst_ampdu);
570 (bool)GET_RX_DESC_PAGGR(pdesc));
571 410
572 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 411 rx_status->mactime = stats->timestamp_low;
573 if (phystatus) { 412 if (phystatus) {
574 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + 413 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
575 stats->rx_bufshift); 414 stats->rx_bufshift);
@@ -580,7 +419,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
580 } 419 }
581 420
582 /*rx_status->qual = stats->signal; */ 421 /*rx_status->qual = stats->signal; */
583 rx_status->signal = stats->rssi + 10; 422 rx_status->signal = stats->recvsignalpower + 10;
584 /*rx_status->noise = -stats->noise; */ 423 /*rx_status->noise = -stats->noise; */
585 424
586 return true; 425 return true;
@@ -624,7 +463,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
624 if (mac->opmode == NL80211_IFTYPE_STATION) { 463 if (mac->opmode == NL80211_IFTYPE_STATION) {
625 bw_40 = mac->bw_40; 464 bw_40 = mac->bw_40;
626 } else if (mac->opmode == NL80211_IFTYPE_AP || 465 } else if (mac->opmode == NL80211_IFTYPE_AP ||
627 mac->opmode == NL80211_IFTYPE_ADHOC) { 466 mac->opmode == NL80211_IFTYPE_ADHOC ||
467 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
628 if (sta) 468 if (sta)
629 bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; 469 bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
630 } 470 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index c08d0f4c5f3d..3d0498e69c8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -202,7 +202,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
202 } 202 }
203 } 203 }
204 for (i = 0; i < 14; i++) { 204 for (i = 0; i < 14; i++) {
205 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 205 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
206 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", rf_path, i, 206 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", rf_path, i,
207 rtlefuse->txpwrlevel_cck[rf_path][i], 207 rtlefuse->txpwrlevel_cck[rf_path][i],
208 rtlefuse->txpwrlevel_ht40_1s[rf_path][i], 208 rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
@@ -238,11 +238,11 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
238 ((rtlefuse->eeprom_pwrlimit_ht40[index] 238 ((rtlefuse->eeprom_pwrlimit_ht40[index]
239 & 0xf0) >> 4); 239 & 0xf0) >> 4);
240 } 240 }
241 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 241 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
242 "RF-%d pwrgroup_ht20[%d] = 0x%x\n", 242 "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
243 rf_path, i, 243 rf_path, i,
244 rtlefuse->pwrgroup_ht20[rf_path][i]); 244 rtlefuse->pwrgroup_ht20[rf_path][i]);
245 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 245 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
246 "RF-%d pwrgroup_ht40[%d] = 0x%x\n", 246 "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
247 rf_path, i, 247 rf_path, i,
248 rtlefuse->pwrgroup_ht40[rf_path][i]); 248 rtlefuse->pwrgroup_ht40[rf_path][i]);
@@ -273,26 +273,26 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
273 rtlefuse->legacy_ht_txpowerdiff = 273 rtlefuse->legacy_ht_txpowerdiff =
274 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7]; 274 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
275 for (i = 0; i < 14; i++) 275 for (i = 0; i < 14; i++)
276 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 276 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
277 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", 277 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
278 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); 278 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
279 for (i = 0; i < 14; i++) 279 for (i = 0; i < 14; i++)
280 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 280 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
281 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", 281 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
282 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); 282 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
283 for (i = 0; i < 14; i++) 283 for (i = 0; i < 14; i++)
284 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 284 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
285 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", 285 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
286 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); 286 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
287 for (i = 0; i < 14; i++) 287 for (i = 0; i < 14; i++)
288 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 288 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
289 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", 289 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
290 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); 290 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
291 if (!autoload_fail) 291 if (!autoload_fail)
292 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7); 292 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
293 else 293 else
294 rtlefuse->eeprom_regulatory = 0; 294 rtlefuse->eeprom_regulatory = 0;
295 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 295 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
296 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); 296 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
297 if (!autoload_fail) { 297 if (!autoload_fail) {
298 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A]; 298 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
@@ -301,7 +301,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
301 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI; 301 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
302 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI; 302 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
303 } 303 }
304 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 304 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
305 "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 305 "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
306 rtlefuse->eeprom_tssi[RF90_PATH_A], 306 rtlefuse->eeprom_tssi[RF90_PATH_A],
307 rtlefuse->eeprom_tssi[RF90_PATH_B]); 307 rtlefuse->eeprom_tssi[RF90_PATH_B]);
@@ -316,7 +316,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
316 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail) 316 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
317 rtlefuse->apk_thermalmeterignore = true; 317 rtlefuse->apk_thermalmeterignore = true;
318 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; 318 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
319 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 319 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
320 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 320 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
321} 321}
322 322
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index a73a17bc56dd..23d640a4debd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -223,7 +223,7 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
223 223
224 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, 224 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
225 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, 225 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
226 .maps[RTL_IMR_BcnInt] = IMR_BCNINT, 226 .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
227 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, 227 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
228 .maps[RTL_IMR_RDU] = IMR_RDU, 228 .maps[RTL_IMR_RDU] = IMR_RDU,
229 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, 229 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index b6222eedb835..763cf1defab5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -324,8 +324,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
324 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 324 && (GET_RX_DESC_FAGGR(pdesc) == 1));
325 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 325 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
326 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 326 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
327 rx_status->freq = hw->conf.channel->center_freq; 327 rx_status->freq = hw->conf.chandef.chan->center_freq;
328 rx_status->band = hw->conf.channel->band; 328 rx_status->band = hw->conf.chandef.chan->band;
329 if (GET_RX_DESC_CRC32(pdesc)) 329 if (GET_RX_DESC_CRC32(pdesc))
330 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 330 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
331 if (!GET_RX_DESC_SWDEC(pdesc)) 331 if (!GET_RX_DESC_SWDEC(pdesc))
@@ -395,8 +395,8 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
395 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc); 395 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
396 /* TODO: is center_freq changed when doing scan? */ 396 /* TODO: is center_freq changed when doing scan? */
397 /* TODO: Shall we add protection or just skip those two step? */ 397 /* TODO: Shall we add protection or just skip those two step? */
398 rx_status->freq = hw->conf.channel->center_freq; 398 rx_status->freq = hw->conf.chandef.chan->center_freq;
399 rx_status->band = hw->conf.channel->band; 399 rx_status->band = hw->conf.chandef.chan->band;
400 if (GET_RX_DESC_CRC32(rxdesc)) 400 if (GET_RX_DESC_CRC32(rxdesc))
401 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 401 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
402 if (!GET_RX_DESC_SWDEC(rxdesc)) 402 if (!GET_RX_DESC_SWDEC(rxdesc))
@@ -434,7 +434,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
434 (u32)hdr->addr1[2], (u32)hdr->addr1[3], 434 (u32)hdr->addr1[2], (u32)hdr->addr1[3],
435 (u32)hdr->addr1[4], (u32)hdr->addr1[5]); 435 (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
436 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); 436 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
437 ieee80211_rx_irqsafe(hw, skb); 437 ieee80211_rx(hw, skb);
438} 438}
439 439
440void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) 440void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 5251fb8a111e..19a765532603 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -171,8 +171,8 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
174 de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER; 174 de_digtable->rx_gain_max = DM_DIG_FA_UPPER;
175 de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER; 175 de_digtable->rx_gain_min = DM_DIG_FA_LOWER;
176 de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; 176 de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
177 de_digtable->back_range_max = DM_DIG_BACKOFF_MAX; 177 de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
178 de_digtable->back_range_min = DM_DIG_BACKOFF_MIN; 178 de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
@@ -444,8 +444,8 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
444 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", 444 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
445 de_digtable->large_fa_hit, de_digtable->forbidden_igi); 445 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
446 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 446 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
447 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n", 447 "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
448 de_digtable->recover_cnt, de_digtable->rx_gain_range_min); 448 de_digtable->recover_cnt, de_digtable->rx_gain_min);
449 449
450 /* deal with abnorally large false alarm */ 450 /* deal with abnorally large false alarm */
451 if (falsealm_cnt->cnt_all > 10000) { 451 if (falsealm_cnt->cnt_all > 10000) {
@@ -459,9 +459,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
459 } 459 }
460 if (de_digtable->large_fa_hit >= 3) { 460 if (de_digtable->large_fa_hit >= 3) {
461 if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX) 461 if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
462 de_digtable->rx_gain_range_min = DM_DIG_MAX; 462 de_digtable->rx_gain_min = DM_DIG_MAX;
463 else 463 else
464 de_digtable->rx_gain_range_min = 464 de_digtable->rx_gain_min =
465 (de_digtable->forbidden_igi + 1); 465 (de_digtable->forbidden_igi + 1);
466 de_digtable->recover_cnt = 3600; /* 3600=2hr */ 466 de_digtable->recover_cnt = 3600; /* 3600=2hr */
467 } 467 }
@@ -475,12 +475,12 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
475 DM_DIG_FA_LOWER) { 475 DM_DIG_FA_LOWER) {
476 de_digtable->forbidden_igi = 476 de_digtable->forbidden_igi =
477 DM_DIG_FA_LOWER; 477 DM_DIG_FA_LOWER;
478 de_digtable->rx_gain_range_min = 478 de_digtable->rx_gain_min =
479 DM_DIG_FA_LOWER; 479 DM_DIG_FA_LOWER;
480 480
481 } else { 481 } else {
482 de_digtable->forbidden_igi--; 482 de_digtable->forbidden_igi--;
483 de_digtable->rx_gain_range_min = 483 de_digtable->rx_gain_min =
484 (de_digtable->forbidden_igi + 1); 484 (de_digtable->forbidden_igi + 1);
485 } 485 }
486 } else if (de_digtable->large_fa_hit == 3) { 486 } else if (de_digtable->large_fa_hit == 3) {
@@ -492,13 +492,13 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
492 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", 492 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
493 de_digtable->large_fa_hit, de_digtable->forbidden_igi); 493 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
494 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 494 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
495 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n", 495 "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
496 de_digtable->recover_cnt, de_digtable->rx_gain_range_min); 496 de_digtable->recover_cnt, de_digtable->rx_gain_min);
497 497
498 if (value_igi > DM_DIG_MAX) 498 if (value_igi > DM_DIG_MAX)
499 value_igi = DM_DIG_MAX; 499 value_igi = DM_DIG_MAX;
500 else if (value_igi < de_digtable->rx_gain_range_min) 500 else if (value_igi < de_digtable->rx_gain_min)
501 value_igi = de_digtable->rx_gain_range_min; 501 value_igi = de_digtable->rx_gain_min;
502 de_digtable->cur_igvalue = value_igi; 502 de_digtable->cur_igvalue = value_igi;
503 rtl92d_dm_write_dig(hw); 503 rtl92d_dm_write_dig(hw);
504 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) 504 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
@@ -1071,9 +1071,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1071 } 1071 }
1072 ele_d = (ofdmswing_table[(u8) ofdm_index[0]] & 1072 ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
1073 0xFFC00000) >> 22; 1073 0xFFC00000) >> 22;
1074 val_x = rtlphy->iqk_matrix_regsetting 1074 val_x = rtlphy->iqk_matrix
1075 [indexforchannel].value[0][0]; 1075 [indexforchannel].value[0][0];
1076 val_y = rtlphy->iqk_matrix_regsetting 1076 val_y = rtlphy->iqk_matrix
1077 [indexforchannel].value[0][1]; 1077 [indexforchannel].value[0][1];
1078 if (val_x != 0) { 1078 if (val_x != 0) {
1079 if ((val_x & 0x00000200) != 0) 1079 if ((val_x & 0x00000200) != 0)
@@ -1175,9 +1175,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
1175 if (is2t) { 1175 if (is2t) {
1176 ele_d = (ofdmswing_table[(u8) ofdm_index[1]] & 1176 ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
1177 0xFFC00000) >> 22; 1177 0xFFC00000) >> 22;
1178 val_x = rtlphy->iqk_matrix_regsetting 1178 val_x = rtlphy->iqk_matrix
1179 [indexforchannel].value[0][4]; 1179 [indexforchannel].value[0][4];
1180 val_y = rtlphy->iqk_matrix_regsetting 1180 val_y = rtlphy->iqk_matrix
1181 [indexforchannel].value[0][5]; 1181 [indexforchannel].value[0][5];
1182 if (val_x != 0) { 1182 if (val_x != 0) {
1183 if ((val_x & 0x00000200) != 0) 1183 if ((val_x & 0x00000200) != 0)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index aa5b42521bb4..7dd8f6de0550 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1183,7 +1183,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
1183 u8 channel = rtlphy->current_channel; 1183 u8 channel = rtlphy->current_channel;
1184 1184
1185 indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel); 1185 indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
1186 if (!rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done) { 1186 if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
1187 RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG, 1187 RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
1188 "Do IQK for channel:%d\n", channel); 1188 "Do IQK for channel:%d\n", channel);
1189 rtl92d_phy_iq_calibrate(hw); 1189 rtl92d_phy_iq_calibrate(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 33041bd4da81..840bac5fa2f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -2479,9 +2479,9 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
2479 rtlphy->current_channel); 2479 rtlphy->current_channel);
2480 2480
2481 for (i = 0; i < IQK_MATRIX_REG_NUM; i++) 2481 for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
2482 rtlphy->iqk_matrix_regsetting[indexforchannel]. 2482 rtlphy->iqk_matrix[indexforchannel].
2483 value[0][i] = result[final_candidate][i]; 2483 value[0][i] = result[final_candidate][i];
2484 rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done = 2484 rtlphy->iqk_matrix[indexforchannel].iqk_done =
2485 true; 2485 true;
2486 2486
2487 RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD, 2487 RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
@@ -2501,8 +2501,8 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
2501 indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel); 2501 indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
2502 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n", 2502 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
2503 indexforchannel, 2503 indexforchannel,
2504 rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done); 2504 rtlphy->iqk_matrix[indexforchannel].iqk_done);
2505 if (0 && !rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done && 2505 if (0 && !rtlphy->iqk_matrix[indexforchannel].iqk_done &&
2506 rtlphy->need_iqk) { 2506 rtlphy->need_iqk) {
2507 /* Re Do IQK. */ 2507 /* Re Do IQK. */
2508 RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD, 2508 RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
@@ -2516,23 +2516,23 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
2516 RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, 2516 RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
2517 "Just Read IQK Matrix reg for channel:%d....\n", 2517 "Just Read IQK Matrix reg for channel:%d....\n",
2518 channel); 2518 channel);
2519 if ((rtlphy->iqk_matrix_regsetting[indexforchannel]. 2519 if ((rtlphy->iqk_matrix[indexforchannel].
2520 value[0] != NULL) 2520 value[0] != NULL)
2521 /*&&(regea4 != 0) */) 2521 /*&&(regea4 != 0) */)
2522 _rtl92d_phy_patha_fill_iqk_matrix(hw, true, 2522 _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
2523 rtlphy->iqk_matrix_regsetting[ 2523 rtlphy->iqk_matrix[
2524 indexforchannel].value, 0, 2524 indexforchannel].value, 0,
2525 (rtlphy->iqk_matrix_regsetting[ 2525 (rtlphy->iqk_matrix[
2526 indexforchannel].value[0][2] == 0)); 2526 indexforchannel].value[0][2] == 0));
2527 if (IS_92D_SINGLEPHY(rtlhal->version)) { 2527 if (IS_92D_SINGLEPHY(rtlhal->version)) {
2528 if ((rtlphy->iqk_matrix_regsetting[ 2528 if ((rtlphy->iqk_matrix[
2529 indexforchannel].value[0][4] != 0) 2529 indexforchannel].value[0][4] != 0)
2530 /*&&(regec4 != 0) */) 2530 /*&&(regec4 != 0) */)
2531 _rtl92d_phy_pathb_fill_iqk_matrix(hw, 2531 _rtl92d_phy_pathb_fill_iqk_matrix(hw,
2532 true, 2532 true,
2533 rtlphy->iqk_matrix_regsetting[ 2533 rtlphy->iqk_matrix[
2534 indexforchannel].value, 0, 2534 indexforchannel].value, 0,
2535 (rtlphy->iqk_matrix_regsetting[ 2535 (rtlphy->iqk_matrix[
2536 indexforchannel].value[0][6] 2536 indexforchannel].value[0][6]
2537 == 0)); 2537 == 0));
2538 } 2538 }
@@ -2830,20 +2830,20 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
2830 2830
2831 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 2831 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
2832 "settings regs %d default regs %d\n", 2832 "settings regs %d default regs %d\n",
2833 (int)(sizeof(rtlphy->iqk_matrix_regsetting) / 2833 (int)(sizeof(rtlphy->iqk_matrix) /
2834 sizeof(struct iqk_matrix_regs)), 2834 sizeof(struct iqk_matrix_regs)),
2835 IQK_MATRIX_REG_NUM); 2835 IQK_MATRIX_REG_NUM);
2836 /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */ 2836 /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
2837 for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { 2837 for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
2838 rtlphy->iqk_matrix_regsetting[i].value[0][0] = 0x100; 2838 rtlphy->iqk_matrix[i].value[0][0] = 0x100;
2839 rtlphy->iqk_matrix_regsetting[i].value[0][2] = 0x100; 2839 rtlphy->iqk_matrix[i].value[0][2] = 0x100;
2840 rtlphy->iqk_matrix_regsetting[i].value[0][4] = 0x100; 2840 rtlphy->iqk_matrix[i].value[0][4] = 0x100;
2841 rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100; 2841 rtlphy->iqk_matrix[i].value[0][6] = 0x100;
2842 rtlphy->iqk_matrix_regsetting[i].value[0][1] = 0x0; 2842 rtlphy->iqk_matrix[i].value[0][1] = 0x0;
2843 rtlphy->iqk_matrix_regsetting[i].value[0][3] = 0x0; 2843 rtlphy->iqk_matrix[i].value[0][3] = 0x0;
2844 rtlphy->iqk_matrix_regsetting[i].value[0][5] = 0x0; 2844 rtlphy->iqk_matrix[i].value[0][5] = 0x0;
2845 rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0; 2845 rtlphy->iqk_matrix[i].value[0][7] = 0x0;
2846 rtlphy->iqk_matrix_regsetting[i].iqk_done = false; 2846 rtlphy->iqk_matrix[i].iqk_done = false;
2847 } 2847 }
2848} 2848}
2849 2849
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
index ebb1d5f5e7b5..b7498c5bafc5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -543,7 +543,7 @@
543#define IMR_TIMEOUT1 BIT(16) 543#define IMR_TIMEOUT1 BIT(16)
544#define IMR_TXFOVW BIT(15) 544#define IMR_TXFOVW BIT(15)
545#define IMR_PSTIMEOUT BIT(14) 545#define IMR_PSTIMEOUT BIT(14)
546#define IMR_BcnInt BIT(13) 546#define IMR_BCNINT BIT(13)
547#define IMR_RXFOVW BIT(12) 547#define IMR_RXFOVW BIT(12)
548#define IMR_RDU BIT(11) 548#define IMR_RDU BIT(11)
549#define IMR_ATIMEND BIT(10) 549#define IMR_ATIMEND BIT(10)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 03c6d18b2e07..c18c04bf0c13 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -166,7 +166,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
166 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; 166 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
167 167
168 /* for early mode */ 168 /* for early mode */
169 rtlpriv->rtlhal.earlymode_enable = true; 169 rtlpriv->rtlhal.earlymode_enable = false;
170 for (tid = 0; tid < 8; tid++) 170 for (tid = 0; tid < 8; tid++)
171 skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]); 171 skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
172 172
@@ -319,7 +319,7 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
319 319
320 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, 320 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
321 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, 321 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
322 .maps[RTL_IMR_BcnInt] = IMR_BcnInt, 322 .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
323 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, 323 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
324 .maps[RTL_IMR_RDU] = IMR_RDU, 324 .maps[RTL_IMR_RDU] = IMR_RDU,
325 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, 325 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
@@ -333,7 +333,7 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
333 .maps[RTL_IMR_VIDOK] = IMR_VIDOK, 333 .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
334 .maps[RTL_IMR_VODOK] = IMR_VODOK, 334 .maps[RTL_IMR_VODOK] = IMR_VODOK,
335 .maps[RTL_IMR_ROK] = IMR_ROK, 335 .maps[RTL_IMR_ROK] = IMR_ROK,
336 .maps[RTL_IBSS_INT_MASKS] = (IMR_BcnInt | IMR_TBDOK | IMR_TBDER), 336 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
337 337
338 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, 338 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
339 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, 339 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 941080e03c06..b8ec718a0fab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -499,8 +499,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
499 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 499 && (GET_RX_DESC_FAGGR(pdesc) == 1));
500 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 500 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
501 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 501 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
502 rx_status->freq = hw->conf.channel->center_freq; 502 rx_status->freq = hw->conf.chandef.chan->center_freq;
503 rx_status->band = hw->conf.channel->band; 503 rx_status->band = hw->conf.chandef.chan->band;
504 if (GET_RX_DESC_CRC32(pdesc)) 504 if (GET_RX_DESC_CRC32(pdesc))
505 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 505 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
506 if (!GET_RX_DESC_SWDEC(pdesc)) 506 if (!GET_RX_DESC_SWDEC(pdesc))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 2d255e02d795..83c98674bfd3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -36,9 +36,6 @@
36#define SHORT_SLOT_TIME 9 36#define SHORT_SLOT_TIME 9
37#define NON_SHORT_SLOT_TIME 20 37#define NON_SHORT_SLOT_TIME 20
38 38
39/* Rx smooth factor */
40#define RX_SMOOTH_FACTOR 20
41
42/* Queue Select Value in TxDesc */ 39/* Queue Select Value in TxDesc */
43#define QSLT_BK 0x2 40#define QSLT_BK 0x2
44#define QSLT_BE 0x0 41#define QSLT_BE 0x0
@@ -49,10 +46,6 @@
49#define QSLT_MGNT 0x12 46#define QSLT_MGNT 0x12
50#define QSLT_CMD 0x13 47#define QSLT_CMD 0x13
51 48
52#define PHY_RSSI_SLID_WIN_MAX 100
53#define PHY_LINKQUALITY_SLID_WIN_MAX 20
54#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
55
56/* Tx Desc */ 49/* Tx Desc */
57#define TX_DESC_SIZE_RTL8192S (16 * 4) 50#define TX_DESC_SIZE_RTL8192S (16 * 4)
58#define TX_CMDDESC_SIZE_RTL8192S (16 * 4) 51#define TX_CMDDESC_SIZE_RTL8192S (16 * 4)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index e551fe5f9ccd..b3a2d5ec59e6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -163,6 +163,7 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
163 struct rtl_priv *rtlpriv = rtl_priv(hw); 163 struct rtl_priv *rtlpriv = rtl_priv(hw);
164 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 164 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
165 u8 thermalvalue = 0; 165 u8 thermalvalue = 0;
166 u32 fw_cmd = 0;
166 167
167 rtlpriv->dm.txpower_trackinginit = true; 168 rtlpriv->dm.txpower_trackinginit = true;
168 169
@@ -175,7 +176,19 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
175 176
176 if (thermalvalue) { 177 if (thermalvalue) {
177 rtlpriv->dm.thermalvalue = thermalvalue; 178 rtlpriv->dm.thermalvalue = thermalvalue;
178 rtl92s_phy_set_fw_cmd(hw, FW_CMD_TXPWR_TRACK_THERMAL); 179 if (hal_get_firmwareversion(rtlpriv) >= 0x35) {
180 rtl92s_phy_set_fw_cmd(hw, FW_CMD_TXPWR_TRACK_THERMAL);
181 } else {
182 fw_cmd = (FW_TXPWR_TRACK_THERMAL |
183 (rtlpriv->efuse.thermalmeter[0] << 8) |
184 (thermalvalue << 16));
185
186 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
187 "Write to FW Thermal Val = 0x%x\n", fw_cmd);
188
189 rtl_write_dword(rtlpriv, WFM5, fw_cmd);
190 rtl92s_phy_chk_fwcmd_iodone(hw);
191 }
179 } 192 }
180 193
181 rtlpriv->dm.txpowercount = 0; 194 rtlpriv->dm.txpowercount = 0;
@@ -217,11 +230,10 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
217 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 230 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
218 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 231 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
219 struct rate_adaptive *ra = &(rtlpriv->ra); 232 struct rate_adaptive *ra = &(rtlpriv->ra);
220 233 struct ieee80211_sta *sta = NULL;
221 u32 low_rssi_thresh = 0; 234 u32 low_rssi_thresh = 0;
222 u32 middle_rssi_thresh = 0; 235 u32 middle_rssi_thresh = 0;
223 u32 high_rssi_thresh = 0; 236 u32 high_rssi_thresh = 0;
224 struct ieee80211_sta *sta = NULL;
225 237
226 if (is_hal_stop(rtlhal)) 238 if (is_hal_stop(rtlhal))
227 return; 239 return;
@@ -229,14 +241,12 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
229 if (!rtlpriv->dm.useramask) 241 if (!rtlpriv->dm.useramask)
230 return; 242 return;
231 243
232 if (!rtlpriv->dm.inform_fw_driverctrldm) { 244 if (hal_get_firmwareversion(rtlpriv) >= 61 &&
245 !rtlpriv->dm.inform_fw_driverctrldm) {
233 rtl92s_phy_set_fw_cmd(hw, FW_CMD_CTRL_DM_BY_DRIVER); 246 rtl92s_phy_set_fw_cmd(hw, FW_CMD_CTRL_DM_BY_DRIVER);
234 rtlpriv->dm.inform_fw_driverctrldm = true; 247 rtlpriv->dm.inform_fw_driverctrldm = true;
235 } 248 }
236 249
237 rcu_read_lock();
238 if (mac->opmode == NL80211_IFTYPE_STATION)
239 sta = get_sta(hw, mac->vif, mac->bssid);
240 if ((mac->link_state == MAC80211_LINKED) && 250 if ((mac->link_state == MAC80211_LINKED) &&
241 (mac->opmode == NL80211_IFTYPE_STATION)) { 251 (mac->opmode == NL80211_IFTYPE_STATION)) {
242 switch (ra->pre_ratr_state) { 252 switch (ra->pre_ratr_state) {
@@ -285,12 +295,16 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
285 rtlpriv->dm.undec_sm_pwdb, ra->ratr_state, 295 rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
286 ra->pre_ratr_state, ra->ratr_state); 296 ra->pre_ratr_state, ra->ratr_state);
287 297
288 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 298 rcu_read_lock();
299 sta = rtl_find_sta(hw, mac->bssid);
300 if (sta)
301 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
289 ra->ratr_state); 302 ra->ratr_state);
303 rcu_read_unlock();
304
290 ra->pre_ratr_state = ra->ratr_state; 305 ra->pre_ratr_state = ra->ratr_state;
291 } 306 }
292 } 307 }
293 rcu_read_unlock();
294} 308}
295 309
296static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw) 310static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw)
@@ -370,7 +384,8 @@ static void _rtl92s_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
370 ra->ratr_state = DM_RATR_STA_MAX; 384 ra->ratr_state = DM_RATR_STA_MAX;
371 ra->pre_ratr_state = DM_RATR_STA_MAX; 385 ra->pre_ratr_state = DM_RATR_STA_MAX;
372 386
373 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) 387 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER &&
388 hal_get_firmwareversion(rtlpriv) >= 60)
374 rtlpriv->dm.useramask = true; 389 rtlpriv->dm.useramask = true;
375 else 390 else
376 rtlpriv->dm.useramask = false; 391 rtlpriv->dm.useramask = false;
@@ -457,13 +472,13 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
457 digtable->back_val = DM_DIG_BACKOFF; 472 digtable->back_val = DM_DIG_BACKOFF;
458 473
459 if ((digtable->rssi_val + 10 - digtable->back_val) > 474 if ((digtable->rssi_val + 10 - digtable->back_val) >
460 digtable->rx_gain_range_max) 475 digtable->rx_gain_max)
461 digtable->cur_igvalue = 476 digtable->cur_igvalue =
462 digtable->rx_gain_range_max; 477 digtable->rx_gain_max;
463 else if ((digtable->rssi_val + 10 - digtable->back_val) 478 else if ((digtable->rssi_val + 10 - digtable->back_val)
464 < digtable->rx_gain_range_min) 479 < digtable->rx_gain_min)
465 digtable->cur_igvalue = 480 digtable->cur_igvalue =
466 digtable->rx_gain_range_min; 481 digtable->rx_gain_min;
467 else 482 else
468 digtable->cur_igvalue = digtable->rssi_val + 10 483 digtable->cur_igvalue = digtable->rssi_val + 10
469 - digtable->back_val; 484 - digtable->back_val;
@@ -475,7 +490,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
475 490
476 if (falsealm_cnt->cnt_all > 16000) 491 if (falsealm_cnt->cnt_all > 16000)
477 digtable->cur_igvalue = 492 digtable->cur_igvalue =
478 digtable->rx_gain_range_max; 493 digtable->rx_gain_max;
479 /* connected -> connected or disconnected -> disconnected */ 494 /* connected -> connected or disconnected -> disconnected */
480 } else { 495 } else {
481 /* Firmware control DIG, do nothing in driver dm */ 496 /* Firmware control DIG, do nothing in driver dm */
@@ -677,9 +692,9 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
677 /* for dig debug rssi value */ 692 /* for dig debug rssi value */
678 digtable->rssi_val = 50; 693 digtable->rssi_val = 50;
679 digtable->back_val = DM_DIG_BACKOFF; 694 digtable->back_val = DM_DIG_BACKOFF;
680 digtable->rx_gain_range_max = DM_DIG_MAX; 695 digtable->rx_gain_max = DM_DIG_MAX;
681 696
682 digtable->rx_gain_range_min = DM_DIG_MIN; 697 digtable->rx_gain_min = DM_DIG_MIN;
683 698
684 digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX; 699 digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX;
685 digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN; 700 digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 084e7773bce2..4f461786a7eb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -400,6 +400,39 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
400 400
401 break; 401 break;
402 } 402 }
403 case HW_VAR_FW_LPS_ACTION: {
404 bool enter_fwlps = *((bool *)val);
405 u8 rpwm_val, fw_pwrmode;
406 bool fw_current_inps;
407
408 if (enter_fwlps) {
409 rpwm_val = 0x02; /* RF off */
410 fw_current_inps = true;
411 rtlpriv->cfg->ops->set_hw_reg(hw,
412 HW_VAR_FW_PSMODE_STATUS,
413 (u8 *)(&fw_current_inps));
414 rtlpriv->cfg->ops->set_hw_reg(hw,
415 HW_VAR_H2C_FW_PWRMODE,
416 (u8 *)(&ppsc->fwctrl_psmode));
417
418 rtlpriv->cfg->ops->set_hw_reg(hw,
419 HW_VAR_SET_RPWM,
420 (u8 *)(&rpwm_val));
421 } else {
422 rpwm_val = 0x0C; /* RF on */
423 fw_pwrmode = FW_PS_ACTIVE_MODE;
424 fw_current_inps = false;
425 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
426 (u8 *)(&rpwm_val));
427 rtlpriv->cfg->ops->set_hw_reg(hw,
428 HW_VAR_H2C_FW_PWRMODE,
429 (u8 *)(&fw_pwrmode));
430
431 rtlpriv->cfg->ops->set_hw_reg(hw,
432 HW_VAR_FW_PSMODE_STATUS,
433 (u8 *)(&fw_current_inps));
434 }
435 break; }
403 default: 436 default:
404 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 437 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
405 "switch case not processed\n"); 438 "switch case not processed\n");
@@ -438,7 +471,7 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
438 471
439} 472}
440 473
441static u8 _rtl92ce_halset_sysclk(struct ieee80211_hw *hw, u8 data) 474static u8 _rtl92se_halset_sysclk(struct ieee80211_hw *hw, u8 data)
442{ 475{
443 struct rtl_priv *rtlpriv = rtl_priv(hw); 476 struct rtl_priv *rtlpriv = rtl_priv(hw);
444 u8 waitcount = 100; 477 u8 waitcount = 100;
@@ -547,7 +580,7 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
547 tmpu1b &= ~(BIT(6) | BIT(7)); 580 tmpu1b &= ~(BIT(6) | BIT(7));
548 581
549 /* Set failed, return to prevent hang. */ 582 /* Set failed, return to prevent hang. */
550 if (!_rtl92ce_halset_sysclk(hw, tmpu1b)) 583 if (!_rtl92se_halset_sysclk(hw, tmpu1b))
551 return; 584 return;
552 } 585 }
553 586
@@ -650,7 +683,7 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
650 683
651 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1)); 684 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
652 tmpu1b = ((tmpu1b | BIT(7)) & (~BIT(6))); 685 tmpu1b = ((tmpu1b | BIT(7)) & (~BIT(6)));
653 if (!_rtl92ce_halset_sysclk(hw, tmpu1b)) 686 if (!_rtl92se_halset_sysclk(hw, tmpu1b))
654 return; /* Set failed, return to prevent hang. */ 687 return; /* Set failed, return to prevent hang. */
655 688
656 rtl_write_word(rtlpriv, CMDR, 0x07FC); 689 rtl_write_word(rtlpriv, CMDR, 0x07FC);
@@ -967,6 +1000,15 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
967 return rtstatus; 1000 return rtstatus;
968 } 1001 }
969 1002
1003 /* because last function modify RCR, so we update
1004 * rcr var here, or TP will unstable for receive_config
1005 * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
1006 * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252
1007 */
1008 rtlpci->receive_config = rtl_read_dword(rtlpriv, RCR);
1009 rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
1010 rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
1011
970 /* Make sure BB/RF write OK. We should prevent enter IPS. radio off. */ 1012 /* Make sure BB/RF write OK. We should prevent enter IPS. radio off. */
971 /* We must set flag avoid BB/RF config period later!! */ 1013 /* We must set flag avoid BB/RF config period later!! */
972 rtl_write_dword(rtlpriv, CMDR, 0x37FC); 1014 rtl_write_dword(rtlpriv, CMDR, 0x37FC);
@@ -982,25 +1024,6 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
982 1024
983 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 1025 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
984 1026
985 /* RF Power Save */
986#if 0
987 /* H/W or S/W RF OFF before sleep. */
988 if (rtlpriv->psc.rfoff_reason > RF_CHANGE_BY_PS) {
989 u32 rfoffreason = rtlpriv->psc.rfoff_reason;
990
991 rtlpriv->psc.rfoff_reason = RF_CHANGE_BY_INIT;
992 rtlpriv->psc.rfpwr_state = ERFON;
993 /* FIXME: check spinlocks if this block is uncommented */
994 rtl_ps_set_rf_state(hw, ERFOFF, rfoffreason);
995 } else {
996 /* gpio radio on/off is out of adapter start */
997 if (rtlpriv->psc.hwradiooff == false) {
998 rtlpriv->psc.rfpwr_state = ERFON;
999 rtlpriv->psc.rfoff_reason = 0;
1000 }
1001 }
1002#endif
1003
1004 /* Before RF-R/W we must execute the IO from Scott's suggestion. */ 1027 /* Before RF-R/W we must execute the IO from Scott's suggestion. */
1005 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL + 1, 0xDB); 1028 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL + 1, 0xDB);
1006 if (rtlhal->version == VERSION_8192S_ACUT) 1029 if (rtlhal->version == VERSION_8192S_ACUT)
@@ -1058,7 +1081,22 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
1058 1081
1059 /* We enable high power and RA related mechanism after NIC 1082 /* We enable high power and RA related mechanism after NIC
1060 * initialized. */ 1083 * initialized. */
1061 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_INIT); 1084 if (hal_get_firmwareversion(rtlpriv) >= 0x35) {
1085 /* Fw v.53 and later. */
1086 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_INIT);
1087 } else if (hal_get_firmwareversion(rtlpriv) == 0x34) {
1088 /* Fw v.52. */
1089 rtl_write_dword(rtlpriv, WFM5, FW_RA_INIT);
1090 rtl92s_phy_chk_fwcmd_iodone(hw);
1091 } else {
1092 /* Compatible earlier FW version. */
1093 rtl_write_dword(rtlpriv, WFM5, FW_RA_RESET);
1094 rtl92s_phy_chk_fwcmd_iodone(hw);
1095 rtl_write_dword(rtlpriv, WFM5, FW_RA_ACTIVE);
1096 rtl92s_phy_chk_fwcmd_iodone(hw);
1097 rtl_write_dword(rtlpriv, WFM5, FW_RA_REFRESH);
1098 rtl92s_phy_chk_fwcmd_iodone(hw);
1099 }
1062 1100
1063 /* Add to prevent ASPM bug. */ 1101 /* Add to prevent ASPM bug. */
1064 /* Always enable hst and NIC clock request. */ 1102 /* Always enable hst and NIC clock request. */
@@ -1229,7 +1267,6 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
1229 synchronize_irq(rtlpci->pdev->irq); 1267 synchronize_irq(rtlpci->pdev->irq);
1230} 1268}
1231 1269
1232
1233static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data) 1270static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
1234{ 1271{
1235 struct rtl_priv *rtlpriv = rtl_priv(hw); 1272 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1754,7 +1791,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1754 } 1791 }
1755 1792
1756 for (i = 0; i < 14; i++) { 1793 for (i = 0; i < 14; i++) {
1757 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1794 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1758 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", 1795 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
1759 rf_path, i, 1796 rf_path, i,
1760 rtlefuse->txpwrlevel_cck[rf_path][i], 1797 rtlefuse->txpwrlevel_cck[rf_path][i],
@@ -1791,11 +1828,11 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1791 ((rtlefuse->eeprom_pwrgroup[rf_path][index] & 1828 ((rtlefuse->eeprom_pwrgroup[rf_path][index] &
1792 0xf0) >> 4); 1829 0xf0) >> 4);
1793 1830
1794 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1831 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1795 "RF-%d pwrgroup_ht20[%d] = 0x%x\n", 1832 "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
1796 rf_path, i, 1833 rf_path, i,
1797 rtlefuse->pwrgroup_ht20[rf_path][i]); 1834 rtlefuse->pwrgroup_ht20[rf_path][i]);
1798 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1835 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1799 "RF-%d pwrgroup_ht40[%d] = 0x%x\n", 1836 "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
1800 rf_path, i, 1837 rf_path, i,
1801 rtlefuse->pwrgroup_ht40[rf_path][i]); 1838 rtlefuse->pwrgroup_ht40[rf_path][i]);
@@ -1850,27 +1887,27 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1850 rtlefuse->eeprom_regulatory = 1887 rtlefuse->eeprom_regulatory =
1851 (hwinfo[EEPROM_REGULATORY] & 0x1); 1888 (hwinfo[EEPROM_REGULATORY] & 0x1);
1852 } 1889 }
1853 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1890 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1854 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); 1891 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
1855 1892
1856 for (i = 0; i < 14; i++) 1893 for (i = 0; i < 14; i++)
1857 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1894 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1858 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", 1895 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
1859 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); 1896 i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
1860 for (i = 0; i < 14; i++) 1897 for (i = 0; i < 14; i++)
1861 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1898 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1862 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", 1899 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
1863 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); 1900 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
1864 for (i = 0; i < 14; i++) 1901 for (i = 0; i < 14; i++)
1865 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1902 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1866 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", 1903 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
1867 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); 1904 i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
1868 for (i = 0; i < 14; i++) 1905 for (i = 0; i < 14; i++)
1869 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1906 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1870 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", 1907 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
1871 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); 1908 i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
1872 1909
1873 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1910 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1874 "TxPwrSafetyFlag = %d\n", rtlefuse->txpwr_safetyflag); 1911 "TxPwrSafetyFlag = %d\n", rtlefuse->txpwr_safetyflag);
1875 1912
1876 /* Read RF-indication and Tx Power gain 1913 /* Read RF-indication and Tx Power gain
@@ -1880,7 +1917,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1880 rtlefuse->legacy_httxpowerdiff = 1917 rtlefuse->legacy_httxpowerdiff =
1881 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0]; 1918 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
1882 1919
1883 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1920 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1884 "TxPowerDiff = %#x\n", rtlefuse->eeprom_txpowerdiff); 1921 "TxPowerDiff = %#x\n", rtlefuse->eeprom_txpowerdiff);
1885 1922
1886 /* Get TSSI value for each path. */ 1923 /* Get TSSI value for each path. */
@@ -1889,7 +1926,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1889 usvalue = hwinfo[EEPROM_TSSI_B]; 1926 usvalue = hwinfo[EEPROM_TSSI_B];
1890 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff); 1927 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
1891 1928
1892 RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 1929 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
1893 rtlefuse->eeprom_tssi[RF90_PATH_A], 1930 rtlefuse->eeprom_tssi[RF90_PATH_A],
1894 rtlefuse->eeprom_tssi[RF90_PATH_B]); 1931 rtlefuse->eeprom_tssi[RF90_PATH_B]);
1895 1932
@@ -1897,7 +1934,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1897 /* and read ThermalMeter from EEPROM */ 1934 /* and read ThermalMeter from EEPROM */
1898 tempval = hwinfo[EEPROM_THERMALMETER]; 1935 tempval = hwinfo[EEPROM_THERMALMETER];
1899 rtlefuse->eeprom_thermalmeter = tempval; 1936 rtlefuse->eeprom_thermalmeter = tempval;
1900 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1937 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1901 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 1938 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
1902 1939
1903 /* ThermalMeter, BIT(0)~3 for RFIC1, BIT(4)~7 for RFIC2 */ 1940 /* ThermalMeter, BIT(0)~3 for RFIC1, BIT(4)~7 for RFIC2 */
@@ -1914,7 +1951,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1914 /* Version ID, Channel plan */ 1951 /* Version ID, Channel plan */
1915 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; 1952 rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
1916 rtlefuse->txpwr_fromeprom = true; 1953 rtlefuse->txpwr_fromeprom = true;
1917 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1954 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1918 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan); 1955 "EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
1919 1956
1920 /* Read Customer ID or Board Type!!! */ 1957 /* Read Customer ID or Board Type!!! */
@@ -1999,6 +2036,8 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
1999 ratr_value = sta->supp_rates[1] << 4; 2036 ratr_value = sta->supp_rates[1] << 4;
2000 else 2037 else
2001 ratr_value = sta->supp_rates[0]; 2038 ratr_value = sta->supp_rates[0];
2039 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2040 ratr_value = 0xfff;
2002 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | 2041 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2003 sta->ht_cap.mcs.rx_mask[0] << 12); 2042 sta->ht_cap.mcs.rx_mask[0] << 12);
2004 switch (wirelessmode) { 2043 switch (wirelessmode) {
@@ -2112,6 +2151,8 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
2112 ratr_bitmap = sta->supp_rates[1] << 4; 2151 ratr_bitmap = sta->supp_rates[1] << 4;
2113 else 2152 else
2114 ratr_bitmap = sta->supp_rates[0]; 2153 ratr_bitmap = sta->supp_rates[0];
2154 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2155 ratr_bitmap = 0xfff;
2115 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | 2156 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2116 sta->ht_cap.mcs.rx_mask[0] << 12); 2157 sta->ht_cap.mcs.rx_mask[0] << 12);
2117 switch (wirelessmode) { 2158 switch (wirelessmode) {
@@ -2200,6 +2241,7 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
2200 ratr_bitmap &= 0x0f8ff0ff; 2241 ratr_bitmap &= 0x0f8ff0ff;
2201 break; 2242 break;
2202 } 2243 }
2244 sta_entry->ratr_index = ratr_index;
2203 2245
2204 if (rtlpriv->rtlhal.version >= VERSION_8192S_BCUT) 2246 if (rtlpriv->rtlhal.version >= VERSION_8192S_BCUT)
2205 ratr_bitmap &= 0x0FFFFFFF; 2247 ratr_bitmap &= 0x0FFFFFFF;
@@ -2438,23 +2480,9 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
2438 rtl_cam_del_entry(hw, p_macaddr); 2480 rtl_cam_del_entry(hw, p_macaddr);
2439 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 2481 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
2440 } else { 2482 } else {
2441 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2442 "The insert KEY length is %d\n",
2443 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
2444 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2445 "The insert KEY is %x %x\n",
2446 rtlpriv->sec.key_buf[0][0],
2447 rtlpriv->sec.key_buf[0][1]);
2448
2449 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 2483 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2450 "add one entry\n"); 2484 "add one entry\n");
2451 if (is_pairwise) { 2485 if (is_pairwise) {
2452 RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
2453 "Pairwise Key content",
2454 rtlpriv->sec.pairwise_key,
2455 rtlpriv->sec.
2456 key_len[PAIRWISE_KEYIDX]);
2457
2458 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 2486 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2459 "set Pairwise key\n"); 2487 "set Pairwise key\n");
2460 2488
@@ -2502,3 +2530,23 @@ void rtl92se_resume(struct ieee80211_hw *hw)
2502 pci_write_config_dword(rtlpci->pdev, 0x40, 2530 pci_write_config_dword(rtlpci->pdev, 0x40,
2503 val & 0xffff00ff); 2531 val & 0xffff00ff);
2504} 2532}
2533
2534/* Turn on AAP (RCR:bit 0) for promicuous mode. */
2535void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
2536 bool allow_all_da, bool write_into_reg)
2537{
2538 struct rtl_priv *rtlpriv = rtl_priv(hw);
2539 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2540
2541 if (allow_all_da) /* Set BIT0 */
2542 rtlpci->receive_config |= RCR_AAP;
2543 else /* Clear BIT0 */
2544 rtlpci->receive_config &= ~RCR_AAP;
2545
2546 if (write_into_reg)
2547 rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
2548
2549 RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
2550 "receive_config=0x%08X, write_into_reg=%d\n",
2551 rtlpci->receive_config, write_into_reg);
2552}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index a8e068c76e47..da48aa8cbe6f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -74,6 +74,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
74 u8 enc_algo, bool is_wepkey, bool clear_all); 74 u8 enc_algo, bool is_wepkey, bool clear_all);
75void rtl92se_suspend(struct ieee80211_hw *hw); 75void rtl92se_suspend(struct ieee80211_hw *hw);
76void rtl92se_resume(struct ieee80211_hw *hw); 76void rtl92se_resume(struct ieee80211_hw *hw);
77void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
78 bool allow_all_da, bool write_into_reg);
77 79
78#endif 80#endif
79
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 67404975e00b..9c092e6eb3fe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1307,6 +1307,8 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
1307 if (is_hal_stop(rtlhal)) 1307 if (is_hal_stop(rtlhal))
1308 return; 1308 return;
1309 1309
1310 if (hal_get_firmwareversion(rtlpriv) < 0x34)
1311 goto skip;
1310 /* We re-map RA related CMD IO to combinational ones */ 1312 /* We re-map RA related CMD IO to combinational ones */
1311 /* if FW version is v.52 or later. */ 1313 /* if FW version is v.52 or later. */
1312 switch (rtlhal->current_fwcmd_io) { 1314 switch (rtlhal->current_fwcmd_io) {
@@ -1320,6 +1322,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
1320 break; 1322 break;
1321 } 1323 }
1322 1324
1325skip:
1323 switch (rtlhal->current_fwcmd_io) { 1326 switch (rtlhal->current_fwcmd_io) {
1324 case FW_CMD_RA_RESET: 1327 case FW_CMD_RA_RESET:
1325 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n"); 1328 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
@@ -1440,7 +1443,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1440 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1443 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1441 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv); 1444 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv);
1442 u16 fw_cmdmap = FW_CMD_IO_QUERY(rtlpriv); 1445 u16 fw_cmdmap = FW_CMD_IO_QUERY(rtlpriv);
1443 bool bPostProcessing = false; 1446 bool postprocessing = false;
1444 1447
1445 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, 1448 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1446 "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n", 1449 "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
@@ -1449,15 +1452,24 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1449 do { 1452 do {
1450 /* We re-map to combined FW CMD ones if firmware version */ 1453 /* We re-map to combined FW CMD ones if firmware version */
1451 /* is v.53 or later. */ 1454 /* is v.53 or later. */
1452 switch (fw_cmdio) { 1455 if (hal_get_firmwareversion(rtlpriv) >= 0x35) {
1453 case FW_CMD_RA_REFRESH_N: 1456 switch (fw_cmdio) {
1454 fw_cmdio = FW_CMD_RA_REFRESH_N_COMB; 1457 case FW_CMD_RA_REFRESH_N:
1455 break; 1458 fw_cmdio = FW_CMD_RA_REFRESH_N_COMB;
1456 case FW_CMD_RA_REFRESH_BG: 1459 break;
1457 fw_cmdio = FW_CMD_RA_REFRESH_BG_COMB; 1460 case FW_CMD_RA_REFRESH_BG:
1458 break; 1461 fw_cmdio = FW_CMD_RA_REFRESH_BG_COMB;
1459 default: 1462 break;
1460 break; 1463 default:
1464 break;
1465 }
1466 } else {
1467 if ((fw_cmdio == FW_CMD_IQK_ENABLE) ||
1468 (fw_cmdio == FW_CMD_RA_REFRESH_N) ||
1469 (fw_cmdio == FW_CMD_RA_REFRESH_BG)) {
1470 postprocessing = true;
1471 break;
1472 }
1461 } 1473 }
1462 1474
1463 /* If firmware version is v.62 or later, 1475 /* If firmware version is v.62 or later,
@@ -1588,19 +1600,19 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1588 fw_cmdmap &= ~FW_DIG_ENABLE_CTL; 1600 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1589 1601
1590 FW_CMD_IO_SET(rtlpriv, fw_cmdmap); 1602 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1591 bPostProcessing = true; 1603 postprocessing = true;
1592 break; 1604 break;
1593 case FW_CMD_PAUSE_DM_BY_SCAN: 1605 case FW_CMD_PAUSE_DM_BY_SCAN:
1594 fw_cmdmap &= ~(FW_DIG_ENABLE_CTL | 1606 fw_cmdmap &= ~(FW_DIG_ENABLE_CTL |
1595 FW_HIGH_PWR_ENABLE_CTL | 1607 FW_HIGH_PWR_ENABLE_CTL |
1596 FW_SS_CTL); 1608 FW_SS_CTL);
1597 FW_CMD_IO_SET(rtlpriv, fw_cmdmap); 1609 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1598 bPostProcessing = true; 1610 postprocessing = true;
1599 break; 1611 break;
1600 case FW_CMD_HIGH_PWR_DISABLE: 1612 case FW_CMD_HIGH_PWR_DISABLE:
1601 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL; 1613 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
1602 FW_CMD_IO_SET(rtlpriv, fw_cmdmap); 1614 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1603 bPostProcessing = true; 1615 postprocessing = true;
1604 break; 1616 break;
1605 case FW_CMD_HIGH_PWR_ENABLE: 1617 case FW_CMD_HIGH_PWR_ENABLE:
1606 if (!(rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) && 1618 if (!(rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) &&
@@ -1608,7 +1620,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1608 fw_cmdmap |= (FW_HIGH_PWR_ENABLE_CTL | 1620 fw_cmdmap |= (FW_HIGH_PWR_ENABLE_CTL |
1609 FW_SS_CTL); 1621 FW_SS_CTL);
1610 FW_CMD_IO_SET(rtlpriv, fw_cmdmap); 1622 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1611 bPostProcessing = true; 1623 postprocessing = true;
1612 } 1624 }
1613 break; 1625 break;
1614 case FW_CMD_DIG_MODE_FA: 1626 case FW_CMD_DIG_MODE_FA:
@@ -1629,14 +1641,15 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1629 default: 1641 default:
1630 /* Pass to original FW CMD processing callback 1642 /* Pass to original FW CMD processing callback
1631 * routine. */ 1643 * routine. */
1632 bPostProcessing = true; 1644 postprocessing = true;
1633 break; 1645 break;
1634 } 1646 }
1635 } while (false); 1647 } while (false);
1636 1648
1637 /* We shall post processing these FW CMD if 1649 /* We shall post processing these FW CMD if
1638 * variable bPostProcessing is set. */ 1650 * variable postprocessing is set.
1639 if (bPostProcessing && !rtlhal->set_fwcmd_inprogress) { 1651 */
1652 if (postprocessing && !rtlhal->set_fwcmd_inprogress) {
1640 rtlhal->set_fwcmd_inprogress = true; 1653 rtlhal->set_fwcmd_inprogress = true;
1641 /* Update current FW Cmd for callback use. */ 1654 /* Update current FW Cmd for callback use. */
1642 rtlhal->current_fwcmd_io = fw_cmdio; 1655 rtlhal->current_fwcmd_io = fw_cmdio;
@@ -1697,8 +1710,18 @@ void rtl92s_phy_switch_ephy_parameter(struct ieee80211_hw *hw)
1697 1710
1698} 1711}
1699 1712
1700void rtl92s_phy_set_beacon_hwreg(struct ieee80211_hw *hw, u16 BeaconInterval) 1713void rtl92s_phy_set_beacon_hwreg(struct ieee80211_hw *hw, u16 beaconinterval)
1701{ 1714{
1702 struct rtl_priv *rtlpriv = rtl_priv(hw); 1715 struct rtl_priv *rtlpriv = rtl_priv(hw);
1703 rtl_write_dword(rtlpriv, WFM5, 0xF1000000 | (BeaconInterval << 8)); 1716 u32 new_bcn_num = 0;
1717
1718 if (hal_get_firmwareversion(rtlpriv) >= 0x33) {
1719 /* Fw v.51 and later. */
1720 rtl_write_dword(rtlpriv, WFM5, 0xF1000000 |
1721 (beaconinterval << 8));
1722 } else {
1723 new_bcn_num = beaconinterval * 32 - 64;
1724 rtl_write_dword(rtlpriv, WFM3 + 4, new_bcn_num);
1725 rtl_write_dword(rtlpriv, WFM3, 0xB026007C);
1726 }
1704} 1727}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
index ac0387770630..8acf4765a7a6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
@@ -39,6 +39,7 @@
39#define MAX_POSTCMD_CNT 16 39#define MAX_POSTCMD_CNT 16
40 40
41#define RF90_PATH_MAX 4 41#define RF90_PATH_MAX 4
42#define RF6052_MAX_PATH 2
42 43
43enum version_8192s { 44enum version_8192s {
44 VERSION_8192S_ACUT, 45 VERSION_8192S_ACUT,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index cecc377e9e61..2e8e6f8d2d51 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -290,6 +290,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
290 .enable_hw_sec = rtl92se_enable_hw_security_config, 290 .enable_hw_sec = rtl92se_enable_hw_security_config,
291 .set_key = rtl92se_set_key, 291 .set_key = rtl92se_set_key,
292 .init_sw_leds = rtl92se_init_sw_leds, 292 .init_sw_leds = rtl92se_init_sw_leds,
293 .allow_all_destaddr = rtl92se_allow_all_destaddr,
293 .get_bbreg = rtl92s_phy_query_bb_reg, 294 .get_bbreg = rtl92s_phy_query_bb_reg,
294 .set_bbreg = rtl92s_phy_set_bb_reg, 295 .set_bbreg = rtl92s_phy_set_bb_reg,
295 .get_rfreg = rtl92s_phy_query_rf_reg, 296 .get_rfreg = rtl92s_phy_query_rf_reg,
@@ -366,7 +367,7 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
366 367
367 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, 368 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
368 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, 369 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
369 .maps[RTL_IMR_BcnInt] = IMR_BCNINT, 370 .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
370 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, 371 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
371 .maps[RTL_IMR_RDU] = IMR_RDU, 372 .maps[RTL_IMR_RDU] = IMR_RDU,
372 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, 373 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 7b0a2e75b8b8..c7095118de6e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../pci.h" 31#include "../pci.h"
32#include "../base.h" 32#include "../base.h"
33#include "../stats.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -43,7 +44,7 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
43 44
44 if (unlikely(ieee80211_is_beacon(fc))) 45 if (unlikely(ieee80211_is_beacon(fc)))
45 return QSLT_BEACON; 46 return QSLT_BEACON;
46 if (ieee80211_is_mgmt(fc)) 47 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
47 return QSLT_MGNT; 48 return QSLT_MGNT;
48 if (ieee80211_is_nullfunc(fc)) 49 if (ieee80211_is_nullfunc(fc))
49 return QSLT_HIGH; 50 return QSLT_HIGH;
@@ -51,65 +52,6 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
51 return skb->priority; 52 return skb->priority;
52} 53}
53 54
54static u8 _rtl92s_query_rxpwrpercentage(char antpower)
55{
56 if ((antpower <= -100) || (antpower >= 20))
57 return 0;
58 else if (antpower >= 0)
59 return 100;
60 else
61 return 100 + antpower;
62}
63
64static u8 _rtl92s_evm_db_to_percentage(char value)
65{
66 char ret_val;
67 ret_val = value;
68
69 if (ret_val >= 0)
70 ret_val = 0;
71
72 if (ret_val <= -33)
73 ret_val = -33;
74
75 ret_val = 0 - ret_val;
76 ret_val *= 3;
77
78 if (ret_val == 99)
79 ret_val = 100;
80
81 return ret_val;
82}
83
84static long _rtl92se_translate_todbm(struct ieee80211_hw *hw,
85 u8 signal_strength_index)
86{
87 long signal_power;
88
89 signal_power = (long)((signal_strength_index + 1) >> 1);
90 signal_power -= 95;
91 return signal_power;
92}
93
94static long _rtl92se_signal_scale_mapping(struct ieee80211_hw *hw,
95 long currsig)
96{
97 long retsig = 0;
98
99 /* Step 1. Scale mapping. */
100 if (currsig > 47)
101 retsig = 100;
102 else if (currsig > 14 && currsig <= 47)
103 retsig = 100 - ((47 - currsig) * 3) / 2;
104 else if (currsig > 2 && currsig <= 14)
105 retsig = 48 - ((14 - currsig) * 15) / 7;
106 else if (currsig >= 0)
107 retsig = currsig * 9 + 1;
108
109 return retsig;
110}
111
112
113static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw, 55static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
114 struct rtl_stats *pstats, u8 *pdesc, 56 struct rtl_stats *pstats, u8 *pdesc,
115 struct rx_fwinfo *p_drvinfo, 57 struct rx_fwinfo *p_drvinfo,
@@ -119,11 +61,11 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
119{ 61{
120 struct rtl_priv *rtlpriv = rtl_priv(hw); 62 struct rtl_priv *rtlpriv = rtl_priv(hw);
121 struct phy_sts_cck_8192s_t *cck_buf; 63 struct phy_sts_cck_8192s_t *cck_buf;
64 struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
122 s8 rx_pwr_all = 0, rx_pwr[4]; 65 s8 rx_pwr_all = 0, rx_pwr[4];
123 u8 rf_rx_num = 0, evm, pwdb_all; 66 u8 rf_rx_num = 0, evm, pwdb_all;
124 u8 i, max_spatial_stream; 67 u8 i, max_spatial_stream;
125 u32 rssi, total_rssi = 0; 68 u32 rssi, total_rssi = 0;
126 bool in_powersavemode = false;
127 bool is_cck = pstats->is_cck; 69 bool is_cck = pstats->is_cck;
128 70
129 pstats->packet_matchbssid = packet_match_bssid; 71 pstats->packet_matchbssid = packet_match_bssid;
@@ -136,7 +78,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
136 u8 report, cck_highpwr; 78 u8 report, cck_highpwr;
137 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; 79 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
138 80
139 if (!in_powersavemode) 81 if (ppsc->rfpwr_state == ERFON)
140 cck_highpwr = (u8) rtl_get_bbreg(hw, 82 cck_highpwr = (u8) rtl_get_bbreg(hw,
141 RFPGA0_XA_HSSIPARAMETER2, 83 RFPGA0_XA_HSSIPARAMETER2,
142 0x200); 84 0x200);
@@ -181,7 +123,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
181 } 123 }
182 } 124 }
183 125
184 pwdb_all = _rtl92s_query_rxpwrpercentage(rx_pwr_all); 126 pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
185 127
186 /* CCK gain is smaller than OFDM/MCS gain, */ 128 /* CCK gain is smaller than OFDM/MCS gain, */
187 /* so we add gain diff by experiences, the val is 6 */ 129 /* so we add gain diff by experiences, the val is 6 */
@@ -222,13 +164,13 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
222 } else { 164 } else {
223 rtlpriv->dm.rfpath_rxenable[0] = 165 rtlpriv->dm.rfpath_rxenable[0] =
224 rtlpriv->dm.rfpath_rxenable[1] = true; 166 rtlpriv->dm.rfpath_rxenable[1] = true;
225 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { 167 for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
226 if (rtlpriv->dm.rfpath_rxenable[i]) 168 if (rtlpriv->dm.rfpath_rxenable[i])
227 rf_rx_num++; 169 rf_rx_num++;
228 170
229 rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 171 rx_pwr[i] = ((p_drvinfo->gain_trsw[i] &
230 0x3f) * 2) - 110; 172 0x3f) * 2) - 110;
231 rssi = _rtl92s_query_rxpwrpercentage(rx_pwr[i]); 173 rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
232 total_rssi += rssi; 174 total_rssi += rssi;
233 rtlpriv->stats.rx_snr_db[i] = 175 rtlpriv->stats.rx_snr_db[i] =
234 (long)(p_drvinfo->rxsnr[i] / 2); 176 (long)(p_drvinfo->rxsnr[i] / 2);
@@ -238,7 +180,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
238 } 180 }
239 181
240 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; 182 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
241 pwdb_all = _rtl92s_query_rxpwrpercentage(rx_pwr_all); 183 pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
242 pstats->rx_pwdb_all = pwdb_all; 184 pstats->rx_pwdb_all = pwdb_all;
243 pstats->rxpower = rx_pwr_all; 185 pstats->rxpower = rx_pwr_all;
244 pstats->recvsignalpower = rx_pwr_all; 186 pstats->recvsignalpower = rx_pwr_all;
@@ -250,7 +192,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
250 max_spatial_stream = 1; 192 max_spatial_stream = 1;
251 193
252 for (i = 0; i < max_spatial_stream; i++) { 194 for (i = 0; i < max_spatial_stream; i++) {
253 evm = _rtl92s_evm_db_to_percentage(p_drvinfo->rxevm[i]); 195 evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
254 196
255 if (packet_match_bssid) { 197 if (packet_match_bssid) {
256 if (i == 0) 198 if (i == 0)
@@ -262,212 +204,13 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
262 } 204 }
263 205
264 if (is_cck) 206 if (is_cck)
265 pstats->signalstrength = (u8)(_rtl92se_signal_scale_mapping(hw, 207 pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
266 pwdb_all)); 208 pwdb_all));
267 else if (rf_rx_num != 0) 209 else if (rf_rx_num != 0)
268 pstats->signalstrength = (u8) (_rtl92se_signal_scale_mapping(hw, 210 pstats->signalstrength = (u8) (rtl_signal_scale_mapping(hw,
269 total_rssi /= rf_rx_num)); 211 total_rssi /= rf_rx_num));
270} 212}
271 213
272static void _rtl92se_process_ui_rssi(struct ieee80211_hw *hw,
273 struct rtl_stats *pstats)
274{
275 struct rtl_priv *rtlpriv = rtl_priv(hw);
276 struct rtl_phy *rtlphy = &(rtlpriv->phy);
277 u8 rfpath;
278 u32 last_rssi, tmpval;
279
280 if (pstats->packet_toself || pstats->packet_beacon) {
281 rtlpriv->stats.rssi_calculate_cnt++;
282
283 if (rtlpriv->stats.ui_rssi.total_num++ >=
284 PHY_RSSI_SLID_WIN_MAX) {
285 rtlpriv->stats.ui_rssi.total_num =
286 PHY_RSSI_SLID_WIN_MAX;
287 last_rssi = rtlpriv->stats.ui_rssi.elements[
288 rtlpriv->stats.ui_rssi.index];
289 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
290 }
291
292 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
293 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++]
294 = pstats->signalstrength;
295
296 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
297 rtlpriv->stats.ui_rssi.index = 0;
298
299 tmpval = rtlpriv->stats.ui_rssi.total_val /
300 rtlpriv->stats.ui_rssi.total_num;
301 rtlpriv->stats.signal_strength = _rtl92se_translate_todbm(hw,
302 (u8) tmpval);
303 pstats->rssi = rtlpriv->stats.signal_strength;
304 }
305
306 if (!pstats->is_cck && pstats->packet_toself) {
307 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
308 rfpath++) {
309 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
310 rtlpriv->stats.rx_rssi_percentage[rfpath] =
311 pstats->rx_mimo_signalstrength[rfpath];
312
313 }
314
315 if (pstats->rx_mimo_signalstrength[rfpath] >
316 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
317 rtlpriv->stats.rx_rssi_percentage[rfpath] =
318 ((rtlpriv->stats.rx_rssi_percentage[rfpath]
319 * (RX_SMOOTH_FACTOR - 1)) +
320 (pstats->rx_mimo_signalstrength[rfpath])) /
321 (RX_SMOOTH_FACTOR);
322
323 rtlpriv->stats.rx_rssi_percentage[rfpath] =
324 rtlpriv->stats.rx_rssi_percentage[rfpath]
325 + 1;
326 } else {
327 rtlpriv->stats.rx_rssi_percentage[rfpath] =
328 ((rtlpriv->stats.rx_rssi_percentage[rfpath]
329 * (RX_SMOOTH_FACTOR - 1)) +
330 (pstats->rx_mimo_signalstrength[rfpath])) /
331 (RX_SMOOTH_FACTOR);
332 }
333
334 }
335 }
336}
337
338static void _rtl92se_update_rxsignalstatistics(struct ieee80211_hw *hw,
339 struct rtl_stats *pstats)
340{
341 struct rtl_priv *rtlpriv = rtl_priv(hw);
342 int weighting = 0;
343
344 if (rtlpriv->stats.recv_signal_power == 0)
345 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
346
347 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
348 weighting = 5;
349 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
350 weighting = (-5);
351
352 rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power * 5
353 + pstats->recvsignalpower +
354 weighting) / 6;
355}
356
357static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
358 struct rtl_stats *pstats)
359{
360 struct rtl_priv *rtlpriv = rtl_priv(hw);
361 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
362 long undec_sm_pwdb = 0;
363
364 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
365 return;
366 } else {
367 undec_sm_pwdb =
368 rtlpriv->dm.undec_sm_pwdb;
369 }
370
371 if (pstats->packet_toself || pstats->packet_beacon) {
372 if (undec_sm_pwdb < 0)
373 undec_sm_pwdb = pstats->rx_pwdb_all;
374
375 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
376 undec_sm_pwdb =
377 (((undec_sm_pwdb) *
378 (RX_SMOOTH_FACTOR - 1)) +
379 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
380
381 undec_sm_pwdb = undec_sm_pwdb + 1;
382 } else {
383 undec_sm_pwdb = (((undec_sm_pwdb) *
384 (RX_SMOOTH_FACTOR - 1)) + (pstats->rx_pwdb_all)) /
385 (RX_SMOOTH_FACTOR);
386 }
387
388 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
389 _rtl92se_update_rxsignalstatistics(hw, pstats);
390 }
391}
392
393static void rtl_92s_process_streams(struct ieee80211_hw *hw,
394 struct rtl_stats *pstats)
395{
396 struct rtl_priv *rtlpriv = rtl_priv(hw);
397 u32 stream;
398
399 for (stream = 0; stream < 2; stream++) {
400 if (pstats->rx_mimo_sig_qual[stream] != -1) {
401 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
402 rtlpriv->stats.rx_evm_percentage[stream] =
403 pstats->rx_mimo_sig_qual[stream];
404 }
405
406 rtlpriv->stats.rx_evm_percentage[stream] =
407 ((rtlpriv->stats.rx_evm_percentage[stream] *
408 (RX_SMOOTH_FACTOR - 1)) +
409 (pstats->rx_mimo_sig_qual[stream] *
410 1)) / (RX_SMOOTH_FACTOR);
411 }
412 }
413}
414
415static void _rtl92se_process_ui_link_quality(struct ieee80211_hw *hw,
416 struct rtl_stats *pstats)
417{
418 struct rtl_priv *rtlpriv = rtl_priv(hw);
419 u32 last_evm = 0, tmpval;
420
421 if (pstats->signalquality != 0) {
422 if (pstats->packet_toself || pstats->packet_beacon) {
423
424 if (rtlpriv->stats.ui_link_quality.total_num++ >=
425 PHY_LINKQUALITY_SLID_WIN_MAX) {
426 rtlpriv->stats.ui_link_quality.total_num =
427 PHY_LINKQUALITY_SLID_WIN_MAX;
428 last_evm =
429 rtlpriv->stats.ui_link_quality.elements[
430 rtlpriv->stats.ui_link_quality.index];
431 rtlpriv->stats.ui_link_quality.total_val -=
432 last_evm;
433 }
434
435 rtlpriv->stats.ui_link_quality.total_val +=
436 pstats->signalquality;
437 rtlpriv->stats.ui_link_quality.elements[
438 rtlpriv->stats.ui_link_quality.index++] =
439 pstats->signalquality;
440
441 if (rtlpriv->stats.ui_link_quality.index >=
442 PHY_LINKQUALITY_SLID_WIN_MAX)
443 rtlpriv->stats.ui_link_quality.index = 0;
444
445 tmpval = rtlpriv->stats.ui_link_quality.total_val /
446 rtlpriv->stats.ui_link_quality.total_num;
447 rtlpriv->stats.signal_quality = tmpval;
448
449 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
450
451 rtl_92s_process_streams(hw, pstats);
452
453 }
454 }
455}
456
457static void _rtl92se_process_phyinfo(struct ieee80211_hw *hw,
458 u8 *buffer,
459 struct rtl_stats *pcurrent_stats)
460{
461
462 if (!pcurrent_stats->packet_matchbssid &&
463 !pcurrent_stats->packet_beacon)
464 return;
465
466 _rtl92se_process_ui_rssi(hw, pcurrent_stats);
467 _rtl92se_process_pwdb(hw, pcurrent_stats);
468 _rtl92se_process_ui_link_quality(hw, pcurrent_stats);
469}
470
471static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw, 214static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
472 struct sk_buff *skb, struct rtl_stats *pstats, 215 struct sk_buff *skb, struct rtl_stats *pstats,
473 u8 *pdesc, struct rx_fwinfo *p_drvinfo) 216 u8 *pdesc, struct rx_fwinfo *p_drvinfo)
@@ -505,7 +248,7 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
505 248
506 _rtl92se_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 249 _rtl92se_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
507 packet_matchbssid, packet_toself, packet_beacon); 250 packet_matchbssid, packet_toself, packet_beacon);
508 _rtl92se_process_phyinfo(hw, tmp_buf, pstats); 251 rtl_process_phyinfo(hw, tmp_buf, pstats);
509} 252}
510 253
511bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, 254bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
@@ -538,11 +281,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
538 if (stats->hwerror) 281 if (stats->hwerror)
539 return false; 282 return false;
540 283
541 rx_status->freq = hw->conf.channel->center_freq; 284 rx_status->freq = hw->conf.chandef.chan->center_freq;
542 rx_status->band = hw->conf.channel->band; 285 rx_status->band = hw->conf.chandef.chan->band;
543
544 hdr = (struct ieee80211_hdr *)(skb->data + stats->rx_drvinfo_size
545 + stats->rx_bufshift);
546 286
547 if (stats->crc) 287 if (stats->crc)
548 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 288 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -563,6 +303,13 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
563 * for IEEE80211w frame, and mac80211 sw will help 303 * for IEEE80211w frame, and mac80211 sw will help
564 * to decrypt it */ 304 * to decrypt it */
565 if (stats->decrypted) { 305 if (stats->decrypted) {
306 hdr = (struct ieee80211_hdr *)(skb->data +
307 stats->rx_drvinfo_size + stats->rx_bufshift);
308
309 if (!hdr) {
310 /* during testing, hdr was NULL here */
311 return false;
312 }
566 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 313 if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
567 (ieee80211_has_protected(hdr->frame_control))) 314 (ieee80211_has_protected(hdr->frame_control)))
568 rx_status->flag &= ~RX_FLAG_DECRYPTED; 315 rx_status->flag &= ~RX_FLAG_DECRYPTED;
@@ -630,6 +377,11 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
630 377
631 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE_RTL8192S); 378 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE_RTL8192S);
632 379
380 if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
381 firstseg = true;
382 lastseg = true;
383 }
384
633 if (firstseg) { 385 if (firstseg) {
634 if (rtlpriv->dm.useramask) { 386 if (rtlpriv->dm.useramask) {
635 /* set txdesc macId */ 387 /* set txdesc macId */
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index 12e2a3cb0701..a36eee28f9e7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -166,8 +166,8 @@ static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw)
166 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 166 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
167 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 167 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
168 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 168 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
169 dm_digtable->rx_gain_range_max = DM_DIG_MAX; 169 dm_digtable->rx_gain_max = DM_DIG_MAX;
170 dm_digtable->rx_gain_range_min = DM_DIG_MIN; 170 dm_digtable->rx_gain_min = DM_DIG_MIN;
171 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; 171 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
172 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; 172 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
173 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; 173 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
@@ -291,11 +291,11 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
291 } 291 }
292 292
293 if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) > 293 if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) >
294 dgtbl->rx_gain_range_max) 294 dgtbl->rx_gain_max)
295 dgtbl->cur_igvalue = dgtbl->rx_gain_range_max; 295 dgtbl->cur_igvalue = dgtbl->rx_gain_max;
296 else if ((dgtbl->rssi_val_min + 10 - 296 else if ((dgtbl->rssi_val_min + 10 -
297 dgtbl->back_val) < dgtbl->rx_gain_range_min) 297 dgtbl->back_val) < dgtbl->rx_gain_min)
298 dgtbl->cur_igvalue = dgtbl->rx_gain_range_min; 298 dgtbl->cur_igvalue = dgtbl->rx_gain_min;
299 else 299 else
300 dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val; 300 dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val;
301 301
@@ -707,6 +707,77 @@ void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
707 rtlpriv->dm.useramask = false; 707 rtlpriv->dm.useramask = false;
708} 708}
709 709
710static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
711{
712 struct rtl_priv *rtlpriv = rtl_priv(hw);
713 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
714 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
715 struct rate_adaptive *p_ra = &(rtlpriv->ra);
716 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
717 struct ieee80211_sta *sta = NULL;
718
719 if (is_hal_stop(rtlhal)) {
720 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
721 " driver is going to unload\n");
722 return;
723 }
724
725 if (!rtlpriv->dm.useramask) {
726 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
727 " driver does not control rate adaptive mask\n");
728 return;
729 }
730
731 if (mac->link_state == MAC80211_LINKED &&
732 mac->opmode == NL80211_IFTYPE_STATION) {
733 switch (p_ra->pre_ratr_state) {
734 case DM_RATR_STA_HIGH:
735 high_rssithresh_for_ra = 50;
736 low_rssithresh_for_ra = 20;
737 break;
738 case DM_RATR_STA_MIDDLE:
739 high_rssithresh_for_ra = 55;
740 low_rssithresh_for_ra = 20;
741 break;
742 case DM_RATR_STA_LOW:
743 high_rssithresh_for_ra = 50;
744 low_rssithresh_for_ra = 25;
745 break;
746 default:
747 high_rssithresh_for_ra = 50;
748 low_rssithresh_for_ra = 20;
749 break;
750 }
751
752 if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra)
753 p_ra->ratr_state = DM_RATR_STA_HIGH;
754 else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra)
755 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
756 else
757 p_ra->ratr_state = DM_RATR_STA_LOW;
758
759 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
760 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
761 "RSSI = %ld\n",
762 rtlpriv->dm.undec_sm_pwdb);
763 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
764 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
765 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
766 "PreState = %d, CurState = %d\n",
767 p_ra->pre_ratr_state, p_ra->ratr_state);
768
769 rcu_read_lock();
770 sta = rtl_find_sta(hw, mac->bssid);
771 if (sta)
772 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
773 p_ra->ratr_state);
774 rcu_read_unlock();
775
776 p_ra->pre_ratr_state = p_ra->ratr_state;
777 }
778 }
779}
780
710static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw) 781static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
711{ 782{
712 struct rtl_priv *rtlpriv = rtl_priv(hw); 783 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -853,6 +924,9 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
853 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, 924 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
854 (u8 *) (&fw_ps_awake)); 925 (u8 *) (&fw_ps_awake));
855 926
927 if (ppsc->p2p_ps_info.p2p_ps_mode)
928 fw_ps_awake = false;
929
856 if ((ppsc->rfpwr_state == ERFON) && 930 if ((ppsc->rfpwr_state == ERFON) &&
857 ((!fw_current_inpsmode) && fw_ps_awake) && 931 ((!fw_current_inpsmode) && fw_ps_awake) &&
858 (!ppsc->rfchange_inprogress)) { 932 (!ppsc->rfchange_inprogress)) {
@@ -861,7 +935,7 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
861 rtl8723ae_dm_false_alarm_counter_statistics(hw); 935 rtl8723ae_dm_false_alarm_counter_statistics(hw);
862 rtl8723ae_dm_dynamic_bpowersaving(hw); 936 rtl8723ae_dm_dynamic_bpowersaving(hw);
863 rtl8723ae_dm_dynamic_txpower(hw); 937 rtl8723ae_dm_dynamic_txpower(hw);
864 /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */ 938 rtl8723ae_dm_refresh_rate_adaptive_mask(hw);
865 rtl8723ae_dm_bt_coexist(hw); 939 rtl8723ae_dm_bt_coexist(hw);
866 rtl8723ae_dm_check_edca_turbo(hw); 940 rtl8723ae_dm_check_edca_turbo(hw);
867 } 941 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index 39d246196247..a372b0204456 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -55,7 +55,13 @@
55#define DM_DIG_BACKOFF_MIN -4 55#define DM_DIG_BACKOFF_MIN -4
56#define DM_DIG_BACKOFF_DEFAULT 10 56#define DM_DIG_BACKOFF_DEFAULT 10
57 57
58#define RXPATHSELECTION_SS_TH_LOW 30
59#define RXPATHSELECTION_DIFF_TH 18
60
58#define DM_RATR_STA_INIT 0 61#define DM_RATR_STA_INIT 0
62#define DM_RATR_STA_HIGH 1
63#define DM_RATR_STA_MIDDLE 2
64#define DM_RATR_STA_LOW 3
59 65
60#define TXHIGHPWRLEVEL_NORMAL 0 66#define TXHIGHPWRLEVEL_NORMAL 0
61#define TXHIGHPWRLEVEL_LEVEL1 1 67#define TXHIGHPWRLEVEL_LEVEL1 1
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index 35cb8f83eed4..dedfa1ed3e02 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -494,7 +494,9 @@ void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
494 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); 494 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
495 495
496 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); 496 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
497 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1); 497 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
498 (rtlpriv->mac80211.p2p) ?
499 ppsc->smart_ps : 1);
498 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, 500 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
499 ppsc->reg_max_lps_awakeintvl); 501 ppsc->reg_max_lps_awakeintvl);
500 502
@@ -741,3 +743,96 @@ void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
741 743
742 rtl8723ae_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); 744 rtl8723ae_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
743} 745}
746
747static void rtl8723e_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
748 u8 ctwindow)
749{
750 u8 u1_ctwindow_period[1] = {ctwindow};
751
752 rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_CTW_CMD, 1, u1_ctwindow_period);
753}
754
755void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
756{
757 struct rtl_priv *rtlpriv = rtl_priv(hw);
758 struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
759 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
760 struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
761 struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
762 u8 i;
763 u16 ctwindow;
764 u32 start_time, tsf_low;
765
766 switch (p2p_ps_state) {
767 case P2P_PS_DISABLE:
768 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
769 memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
770 break;
771 case P2P_PS_ENABLE:
772 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
773 /* update CTWindow value. */
774 if (p2pinfo->ctwindow > 0) {
775 p2p_ps_offload->ctwindow_en = 1;
776 ctwindow = p2pinfo->ctwindow;
777 rtl8723e_set_p2p_ctw_period_cmd(hw, ctwindow);
778 }
779
780 /* hw only support 2 set of NoA */
781 for (i = 0; i < p2pinfo->noa_num; i++) {
782 /* To control the register setting for which NOA*/
783 rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
784 if (i == 0)
785 p2p_ps_offload->noa0_en = 1;
786 else
787 p2p_ps_offload->noa1_en = 1;
788
789 /* config P2P NoA Descriptor Register */
790 rtl_write_dword(rtlpriv, 0x5E0,
791 p2pinfo->noa_duration[i]);
792 rtl_write_dword(rtlpriv, 0x5E4,
793 p2pinfo->noa_interval[i]);
794
795 /*Get Current TSF value */
796 tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
797
798 start_time = p2pinfo->noa_start_time[i];
799 if (p2pinfo->noa_count_type[i] != 1) {
800 while (start_time <= (tsf_low+(50*1024))) {
801 start_time += p2pinfo->noa_interval[i];
802 if (p2pinfo->noa_count_type[i] != 255)
803 p2pinfo->noa_count_type[i]--;
804 }
805 }
806 rtl_write_dword(rtlpriv, 0x5E8, start_time);
807 rtl_write_dword(rtlpriv, 0x5EC,
808 p2pinfo->noa_count_type[i]);
809 }
810 if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
811 /* rst p2p circuit */
812 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
813
814 p2p_ps_offload->offload_en = 1;
815
816 if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
817 p2p_ps_offload->role = 1;
818 p2p_ps_offload->allstasleep = 0;
819 } else {
820 p2p_ps_offload->role = 0;
821 }
822 p2p_ps_offload->discovery = 0;
823 }
824 break;
825 case P2P_PS_SCAN:
826 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
827 p2p_ps_offload->discovery = 1;
828 break;
829 case P2P_PS_SCAN_DONE:
830 RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
831 p2p_ps_offload->discovery = 0;
832 p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
833 break;
834 default:
835 break;
836 }
837 rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
838}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
index 89994e16dc83..ed3b795e6980 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -70,8 +70,10 @@ enum rtl8192c_h2c_cmd {
70 H2C_SETPWRMODE = 1, 70 H2C_SETPWRMODE = 1,
71 H2C_JOINBSSRPT = 2, 71 H2C_JOINBSSRPT = 2,
72 H2C_RSVDPAGE = 3, 72 H2C_RSVDPAGE = 3,
73 H2C_RSSI_REPORT = 5, 73 H2C_RSSI_REPORT = 4,
74 H2C_RA_MASK = 6, 74 H2C_P2P_PS_CTW_CMD = 5,
75 H2C_P2P_PS_OFFLOAD = 6,
76 H2C_RA_MASK = 7,
75 MAX_H2CCMD 77 MAX_H2CCMD
76}; 78};
77 79
@@ -97,5 +99,6 @@ void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
97void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 99void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
98void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 100void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
99void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 101void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
102void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
100 103
101#endif 104#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 9a0c71c2e15e..c333dfd116b8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -449,6 +449,9 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
449 rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); 449 rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
450 450
451 break; } 451 break; }
452 case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
453 rtl8723ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
454 break;
452 case HW_VAR_AID:{ 455 case HW_VAR_AID:{
453 u16 u2btmp; 456 u16 u2btmp;
454 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); 457 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
@@ -474,6 +477,39 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
474 if (btype_ibss == true) 477 if (btype_ibss == true)
475 _rtl8723ae_resume_tx_beacon(hw); 478 _rtl8723ae_resume_tx_beacon(hw);
476 break; } 479 break; }
480 case HW_VAR_FW_LPS_ACTION: {
481 bool enter_fwlps = *((bool *)val);
482 u8 rpwm_val, fw_pwrmode;
483 bool fw_current_inps;
484
485 if (enter_fwlps) {
486 rpwm_val = 0x02; /* RF off */
487 fw_current_inps = true;
488 rtlpriv->cfg->ops->set_hw_reg(hw,
489 HW_VAR_FW_PSMODE_STATUS,
490 (u8 *)(&fw_current_inps));
491 rtlpriv->cfg->ops->set_hw_reg(hw,
492 HW_VAR_H2C_FW_PWRMODE,
493 (u8 *)(&ppsc->fwctrl_psmode));
494
495 rtlpriv->cfg->ops->set_hw_reg(hw,
496 HW_VAR_SET_RPWM,
497 (u8 *)(&rpwm_val));
498 } else {
499 rpwm_val = 0x0C; /* RF on */
500 fw_pwrmode = FW_PS_ACTIVE_MODE;
501 fw_current_inps = false;
502 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
503 (u8 *)(&rpwm_val));
504 rtlpriv->cfg->ops->set_hw_reg(hw,
505 HW_VAR_H2C_FW_PWRMODE,
506 (u8 *)(&fw_pwrmode));
507
508 rtlpriv->cfg->ops->set_hw_reg(hw,
509 HW_VAR_FW_PSMODE_STATUS,
510 (u8 *)(&fw_current_inps));
511 }
512 break; }
477 default: 513 default:
478 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 514 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
479 "switch case not processed\n"); 515 "switch case not processed\n");
@@ -1379,7 +1415,7 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1379 } 1415 }
1380 1416
1381 for (i = 0; i < 14; i++) { 1417 for (i = 0; i < 14; i++) {
1382 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1418 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1383 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = " 1419 "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
1384 "[0x%x / 0x%x / 0x%x]\n", rf_path, i, 1420 "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
1385 rtlefuse->txpwrlevel_cck[rf_path][i], 1421 rtlefuse->txpwrlevel_cck[rf_path][i],
@@ -1420,10 +1456,10 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1420 0xf0) >> 4); 1456 0xf0) >> 4);
1421 } 1457 }
1422 1458
1423 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1459 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1424 "RF-%d pwrgroup_ht20[%d] = 0x%x\n", rf_path, i, 1460 "RF-%d pwrgroup_ht20[%d] = 0x%x\n", rf_path, i,
1425 rtlefuse->pwrgroup_ht20[rf_path][i]); 1461 rtlefuse->pwrgroup_ht20[rf_path][i]);
1426 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1462 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1427 "RF-%d pwrgroup_ht40[%d] = 0x%x\n", rf_path, i, 1463 "RF-%d pwrgroup_ht40[%d] = 0x%x\n", rf_path, i,
1428 rtlefuse->pwrgroup_ht40[rf_path][i]); 1464 rtlefuse->pwrgroup_ht40[rf_path][i]);
1429 } 1465 }
@@ -1463,19 +1499,19 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1463 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7]; 1499 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
1464 1500
1465 for (i = 0; i < 14; i++) 1501 for (i = 0; i < 14; i++)
1466 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1502 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1467 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i, 1503 "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1468 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); 1504 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
1469 for (i = 0; i < 14; i++) 1505 for (i = 0; i < 14; i++)
1470 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1506 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1471 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i, 1507 "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
1472 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); 1508 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
1473 for (i = 0; i < 14; i++) 1509 for (i = 0; i < 14; i++)
1474 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1510 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1475 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i, 1511 "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1476 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); 1512 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
1477 for (i = 0; i < 14; i++) 1513 for (i = 0; i < 14; i++)
1478 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1514 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1479 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i, 1515 "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
1480 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); 1516 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);
1481 1517
@@ -1483,14 +1519,14 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1483 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7); 1519 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
1484 else 1520 else
1485 rtlefuse->eeprom_regulatory = 0; 1521 rtlefuse->eeprom_regulatory = 0;
1486 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1522 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1487 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); 1523 "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
1488 1524
1489 if (!autoload_fail) 1525 if (!autoload_fail)
1490 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A]; 1526 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
1491 else 1527 else
1492 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI; 1528 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
1493 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1529 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1494 "TSSI_A = 0x%x, TSSI_B = 0x%x\n", 1530 "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
1495 rtlefuse->eeprom_tssi[RF90_PATH_A], 1531 rtlefuse->eeprom_tssi[RF90_PATH_A],
1496 rtlefuse->eeprom_tssi[RF90_PATH_B]); 1532 rtlefuse->eeprom_tssi[RF90_PATH_B]);
@@ -1505,7 +1541,7 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1505 rtlefuse->apk_thermalmeterignore = true; 1541 rtlefuse->apk_thermalmeterignore = true;
1506 1542
1507 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; 1543 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
1508 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1544 RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
1509 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); 1545 "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
1510} 1546}
1511 1547
@@ -1713,19 +1749,7 @@ static void _rtl8723ae_hal_customized_behavior(struct ieee80211_hw *hw)
1713 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 1749 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1714 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1750 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1715 1751
1716 switch (rtlhal->oem_id) { 1752 pcipriv->ledctl.led_opendrain = true;
1717 case RT_CID_819x_HP:
1718 pcipriv->ledctl.led_opendrain = true;
1719 break;
1720 case RT_CID_819x_Lenovo:
1721 case RT_CID_DEFAULT:
1722 case RT_CID_TOSHIBA:
1723 case RT_CID_CCX:
1724 case RT_CID_819x_Acer:
1725 case RT_CID_WHQL:
1726 default:
1727 break;
1728 }
1729 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1753 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1730 "RT Customized ID: 0x%02X\n", rtlhal->oem_id); 1754 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
1731} 1755}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
index 9c4e1d811187..061526fe6e2d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
@@ -54,8 +54,9 @@ void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
54 case LED_PIN_GPIO0: 54 case LED_PIN_GPIO0:
55 break; 55 break;
56 case LED_PIN_LED0: 56 case LED_PIN_LED0:
57 ledcfg &= ~BIT(6);
57 rtl_write_byte(rtlpriv, 58 rtl_write_byte(rtlpriv,
58 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6)); 59 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
59 break; 60 break;
60 case LED_PIN_LED1: 61 case LED_PIN_LED1:
61 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); 62 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
@@ -84,16 +85,21 @@ void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
84 break; 85 break;
85 case LED_PIN_LED0: 86 case LED_PIN_LED0:
86 ledcfg &= 0xf0; 87 ledcfg &= 0xf0;
87 if (pcipriv->ledctl.led_opendrain) 88 if (pcipriv->ledctl.led_opendrain) {
89 ledcfg &= 0x90;
90 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
91 ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
92 ledcfg &= 0xFE;
93 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
94 } else {
95 ledcfg &= ~BIT(6);
88 rtl_write_byte(rtlpriv, REG_LEDCFG2, 96 rtl_write_byte(rtlpriv, REG_LEDCFG2,
89 (ledcfg | BIT(1) | BIT(5) | BIT(6))); 97 (ledcfg | BIT(3) | BIT(5)));
90 else 98 }
91 rtl_write_byte(rtlpriv, REG_LEDCFG2,
92 (ledcfg | BIT(3) | BIT(5) | BIT(6)));
93 break; 99 break;
94 case LED_PIN_LED1: 100 case LED_PIN_LED1:
95 ledcfg &= 0x0f; 101 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1) & 0x10;
96 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); 102 rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3)));
97 break; 103 break;
98 default: 104 default:
99 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 105 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index bb7cc90bafb2..e4c4cdc3eb67 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -305,7 +305,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
305 305
306 .maps[RTL_IMR_TXFOVW] = PHIMR_TXFOVW, 306 .maps[RTL_IMR_TXFOVW] = PHIMR_TXFOVW,
307 .maps[RTL_IMR_PSTIMEOUT] = PHIMR_PSTIMEOUT, 307 .maps[RTL_IMR_PSTIMEOUT] = PHIMR_PSTIMEOUT,
308 .maps[RTL_IMR_BcnInt] = PHIMR_BCNDMAINT0, 308 .maps[RTL_IMR_BCNINT] = PHIMR_BCNDMAINT0,
309 .maps[RTL_IMR_RXFOVW] = PHIMR_RXFOVW, 309 .maps[RTL_IMR_RXFOVW] = PHIMR_RXFOVW,
310 .maps[RTL_IMR_RDU] = PHIMR_RDU, 310 .maps[RTL_IMR_RDU] = PHIMR_RDU,
311 .maps[RTL_IMR_ATIMEND] = PHIMR_ATIMEND_E, 311 .maps[RTL_IMR_ATIMEND] = PHIMR_ATIMEND_E,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index ac081297db50..c72758d8f4ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -304,11 +304,8 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
304 304
305 status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate); 305 status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
306 306
307 rx_status->freq = hw->conf.channel->center_freq; 307 rx_status->freq = hw->conf.chandef.chan->center_freq;
308 rx_status->band = hw->conf.channel->band; 308 rx_status->band = hw->conf.chandef.chan->band;
309
310 hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
311 + status->rx_bufshift);
312 309
313 if (status->crc) 310 if (status->crc)
314 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 311 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -330,6 +327,13 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
330 * to decrypt it 327 * to decrypt it
331 */ 328 */
332 if (status->decrypted) { 329 if (status->decrypted) {
330 hdr = (struct ieee80211_hdr *)(skb->data +
331 status->rx_drvinfo_size + status->rx_bufshift);
332
333 if (!hdr) {
334 /* during testing, hdr could be NULL here */
335 return false;
336 }
333 if ((ieee80211_is_robust_mgmt_frame(hdr)) && 337 if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
334 (ieee80211_has_protected(hdr->frame_control))) 338 (ieee80211_has_protected(hdr->frame_control)))
335 rx_status->flag &= ~RX_FLAG_DECRYPTED; 339 rx_status->flag &= ~RX_FLAG_DECRYPTED;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 5847d6d0881e..76732b0cd221 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -224,10 +224,9 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
224 u8 *buffer; 224 u8 *buffer;
225 225
226 wvalue = (u16)(addr & 0x0000ffff); 226 wvalue = (u16)(addr & 0x0000ffff);
227 buffer = kmalloc(len, GFP_ATOMIC); 227 buffer = kmemdup(data, len, GFP_ATOMIC);
228 if (!buffer) 228 if (!buffer)
229 return; 229 return;
230 memcpy(buffer, data, len);
231 usb_control_msg(udev, pipe, request, reqtype, wvalue, 230 usb_control_msg(udev, pipe, request, reqtype, wvalue,
232 index, buffer, len, 50); 231 index, buffer, len, 50);
233 232
@@ -309,6 +308,8 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
309 return 0; 308 return 0;
310} 309}
311 310
311static void _rtl_rx_work(unsigned long param);
312
312static int _rtl_usb_init_rx(struct ieee80211_hw *hw) 313static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
313{ 314{
314 struct rtl_priv *rtlpriv = rtl_priv(hw); 315 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -325,6 +326,12 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
325 pr_info("rx_max_size %d, rx_urb_num %d, in_ep %d\n", 326 pr_info("rx_max_size %d, rx_urb_num %d, in_ep %d\n",
326 rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep); 327 rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
327 init_usb_anchor(&rtlusb->rx_submitted); 328 init_usb_anchor(&rtlusb->rx_submitted);
329 init_usb_anchor(&rtlusb->rx_cleanup_urbs);
330
331 skb_queue_head_init(&rtlusb->rx_queue);
332 rtlusb->rx_work_tasklet.func = _rtl_rx_work;
333 rtlusb->rx_work_tasklet.data = (unsigned long)rtlusb;
334
328 return 0; 335 return 0;
329} 336}
330 337
@@ -406,40 +413,30 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
406 rtlusb->disableHWSM = true; 413 rtlusb->disableHWSM = true;
407} 414}
408 415
409#define __RADIO_TAP_SIZE_RSV 32
410
411static void _rtl_rx_completed(struct urb *urb); 416static void _rtl_rx_completed(struct urb *urb);
412 417
413static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw, 418static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb,
414 struct rtl_usb *rtlusb, 419 struct urb *urb, gfp_t gfp_mask)
415 struct urb *urb,
416 gfp_t gfp_mask)
417{ 420{
418 struct sk_buff *skb;
419 struct rtl_priv *rtlpriv = rtl_priv(hw); 421 struct rtl_priv *rtlpriv = rtl_priv(hw);
422 void *buf;
420 423
421 skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV), 424 buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask,
422 gfp_mask); 425 &urb->transfer_dma);
423 if (!skb) { 426 if (!buf) {
424 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, 427 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
425 "Failed to __dev_alloc_skb!!\n"); 428 "Failed to usb_alloc_coherent!!\n");
426 return ERR_PTR(-ENOMEM); 429 return -ENOMEM;
427 } 430 }
428 431
429 /* reserve some space for mac80211's radiotap */
430 skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
431 usb_fill_bulk_urb(urb, rtlusb->udev, 432 usb_fill_bulk_urb(urb, rtlusb->udev,
432 usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep), 433 usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
433 skb->data, min(skb_tailroom(skb), 434 buf, rtlusb->rx_max_size, _rtl_rx_completed, rtlusb);
434 (int)rtlusb->rx_max_size), 435 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
435 _rtl_rx_completed, skb);
436 436
437 _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep); 437 return 0;
438 return skb;
439} 438}
440 439
441#undef __RADIO_TAP_SIZE_RSV
442
443static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw, 440static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
444 struct sk_buff *skb) 441 struct sk_buff *skb)
445{ 442{
@@ -523,22 +520,14 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
523 if (unicast) 520 if (unicast)
524 rtlpriv->link_info.num_rx_inperiod++; 521 rtlpriv->link_info.num_rx_inperiod++;
525 } 522 }
526 if (likely(rtl_action_proc(hw, skb, false))) { 523
527 struct sk_buff *uskb = NULL; 524 /* static bcn for roaming */
528 u8 *pdata; 525 rtl_beacon_statistic(hw, skb);
529 526
530 uskb = dev_alloc_skb(skb->len + 128); 527 if (likely(rtl_action_proc(hw, skb, false)))
531 if (uskb) { /* drop packet on allocation failure */ 528 ieee80211_rx(hw, skb);
532 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, 529 else
533 sizeof(rx_status));
534 pdata = (u8 *)skb_put(uskb, skb->len);
535 memcpy(pdata, skb->data, skb->len);
536 ieee80211_rx_irqsafe(hw, uskb);
537 }
538 dev_kfree_skb_any(skb);
539 } else {
540 dev_kfree_skb_any(skb); 530 dev_kfree_skb_any(skb);
541 }
542 } 531 }
543} 532}
544 533
@@ -555,15 +544,70 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
555 while (!skb_queue_empty(&rx_queue)) { 544 while (!skb_queue_empty(&rx_queue)) {
556 _skb = skb_dequeue(&rx_queue); 545 _skb = skb_dequeue(&rx_queue);
557 _rtl_usb_rx_process_agg(hw, _skb); 546 _rtl_usb_rx_process_agg(hw, _skb);
558 ieee80211_rx_irqsafe(hw, _skb); 547 ieee80211_rx(hw, _skb);
548 }
549}
550
551#define __RX_SKB_MAX_QUEUED 32
552
553static void _rtl_rx_work(unsigned long param)
554{
555 struct rtl_usb *rtlusb = (struct rtl_usb *)param;
556 struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
557 struct sk_buff *skb;
558
559 while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
560 if (unlikely(IS_USB_STOP(rtlusb))) {
561 dev_kfree_skb_any(skb);
562 continue;
563 }
564
565 if (likely(!rtlusb->usb_rx_segregate_hdl)) {
566 _rtl_usb_rx_process_noagg(hw, skb);
567 } else {
568 /* TO DO */
569 _rtl_rx_pre_process(hw, skb);
570 pr_err("rx agg not supported\n");
571 }
572 }
573}
574
575static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
576 unsigned int len)
577{
578 unsigned int padding = 0;
579
580 /* make function no-op when possible */
581 if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
582 return 0;
583
584 /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
585 /* TODO: deduplicate common code, define helper function instead? */
586
587 if (ieee80211_is_data_qos(hdr->frame_control)) {
588 u8 *qc = ieee80211_get_qos_ctl(hdr);
589
590 padding ^= NET_IP_ALIGN;
591
592 /* Input might be invalid, avoid accessing memory outside
593 * the buffer.
594 */
595 if ((unsigned long)qc - (unsigned long)hdr < len &&
596 *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
597 padding ^= NET_IP_ALIGN;
559 } 598 }
599
600 if (ieee80211_has_a4(hdr->frame_control))
601 padding ^= NET_IP_ALIGN;
602
603 return padding;
560} 604}
561 605
606#define __RADIO_TAP_SIZE_RSV 32
607
562static void _rtl_rx_completed(struct urb *_urb) 608static void _rtl_rx_completed(struct urb *_urb)
563{ 609{
564 struct sk_buff *skb = (struct sk_buff *)_urb->context; 610 struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
565 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
566 struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
567 struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); 611 struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
568 struct rtl_priv *rtlpriv = rtl_priv(hw); 612 struct rtl_priv *rtlpriv = rtl_priv(hw);
569 int err = 0; 613 int err = 0;
@@ -572,28 +616,50 @@ static void _rtl_rx_completed(struct urb *_urb)
572 goto free; 616 goto free;
573 617
574 if (likely(0 == _urb->status)) { 618 if (likely(0 == _urb->status)) {
575 /* If this code were moved to work queue, would CPU 619 unsigned int padding;
576 * utilization be improved? NOTE: We shall allocate another skb 620 struct sk_buff *skb;
577 * and reuse the original one. 621 unsigned int qlen;
578 */ 622 unsigned int size = _urb->actual_length;
579 skb_put(skb, _urb->actual_length); 623 struct ieee80211_hdr *hdr;
580 624
581 if (likely(!rtlusb->usb_rx_segregate_hdl)) { 625 if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
582 struct sk_buff *_skb; 626 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
583 _rtl_usb_rx_process_noagg(hw, skb); 627 "Too short packet from bulk IN! (len: %d)\n",
584 _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC); 628 size);
585 if (IS_ERR(_skb)) { 629 goto resubmit;
586 err = PTR_ERR(_skb); 630 }
587 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, 631
588 "Can't allocate skb for bulk IN!\n"); 632 qlen = skb_queue_len(&rtlusb->rx_queue);
589 return; 633 if (qlen >= __RX_SKB_MAX_QUEUED) {
590 } 634 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
591 skb = _skb; 635 "Pending RX skbuff queue full! (qlen: %d)\n",
592 } else{ 636 qlen);
593 /* TO DO */ 637 goto resubmit;
594 _rtl_rx_pre_process(hw, skb);
595 pr_err("rx agg not supported\n");
596 } 638 }
639
640 hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
641 padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);
642
643 skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
644 if (!skb) {
645 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
646 "Can't allocate skb for bulk IN!\n");
647 goto resubmit;
648 }
649
650 _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
651
652 /* Make sure the payload data is 4 byte aligned. */
653 skb_reserve(skb, padding);
654
655 /* reserve some space for mac80211's radiotap */
656 skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
657
658 memcpy(skb_put(skb, size), _urb->transfer_buffer, size);
659
660 skb_queue_tail(&rtlusb->rx_queue, skb);
661 tasklet_schedule(&rtlusb->rx_work_tasklet);
662
597 goto resubmit; 663 goto resubmit;
598 } 664 }
599 665
@@ -609,9 +675,6 @@ static void _rtl_rx_completed(struct urb *_urb)
609 } 675 }
610 676
611resubmit: 677resubmit:
612 skb_reset_tail_pointer(skb);
613 skb_trim(skb, 0);
614
615 usb_anchor_urb(_urb, &rtlusb->rx_submitted); 678 usb_anchor_urb(_urb, &rtlusb->rx_submitted);
616 err = usb_submit_urb(_urb, GFP_ATOMIC); 679 err = usb_submit_urb(_urb, GFP_ATOMIC);
617 if (unlikely(err)) { 680 if (unlikely(err)) {
@@ -621,13 +684,34 @@ resubmit:
621 return; 684 return;
622 685
623free: 686free:
624 dev_kfree_skb_irq(skb); 687 /* On some architectures, usb_free_coherent must not be called from
688 * hardirq context. Queue urb to cleanup list.
689 */
690 usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
691}
692
693#undef __RADIO_TAP_SIZE_RSV
694
695static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
696{
697 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
698 struct urb *urb;
699
700 usb_kill_anchored_urbs(&rtlusb->rx_submitted);
701
702 tasklet_kill(&rtlusb->rx_work_tasklet);
703 skb_queue_purge(&rtlusb->rx_queue);
704
705 while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
706 usb_free_coherent(urb->dev, urb->transfer_buffer_length,
707 urb->transfer_buffer, urb->transfer_dma);
708 usb_free_urb(urb);
709 }
625} 710}
626 711
627static int _rtl_usb_receive(struct ieee80211_hw *hw) 712static int _rtl_usb_receive(struct ieee80211_hw *hw)
628{ 713{
629 struct urb *urb; 714 struct urb *urb;
630 struct sk_buff *skb;
631 int err; 715 int err;
632 int i; 716 int i;
633 struct rtl_priv *rtlpriv = rtl_priv(hw); 717 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -646,11 +730,10 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
646 goto err_out; 730 goto err_out;
647 } 731 }
648 732
649 skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); 733 err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
650 if (IS_ERR(skb)) { 734 if (err < 0) {
651 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, 735 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
652 "Failed to prep_rx_urb!!\n"); 736 "Failed to prep_rx_urb!!\n");
653 err = PTR_ERR(skb);
654 usb_free_urb(urb); 737 usb_free_urb(urb);
655 goto err_out; 738 goto err_out;
656 } 739 }
@@ -665,6 +748,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
665 748
666err_out: 749err_out:
667 usb_kill_anchored_urbs(&rtlusb->rx_submitted); 750 usb_kill_anchored_urbs(&rtlusb->rx_submitted);
751 _rtl_usb_cleanup_rx(hw);
668 return err; 752 return err;
669} 753}
670 754
@@ -706,7 +790,7 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
706 SET_USB_STOP(rtlusb); 790 SET_USB_STOP(rtlusb);
707 791
708 /* clean up rx stuff. */ 792 /* clean up rx stuff. */
709 usb_kill_anchored_urbs(&rtlusb->rx_submitted); 793 _rtl_usb_cleanup_rx(hw);
710 794
711 /* clean up tx stuff */ 795 /* clean up tx stuff */
712 for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { 796 for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index fb986f98d1df..685273ca9561 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -136,11 +136,14 @@ struct rtl_usb {
136 void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *); 136 void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
137 137
138 /* Rx */ 138 /* Rx */
139 u8 in_ep_nums ; 139 u8 in_ep_nums;
140 u32 in_ep; /* Bulk IN endpoint number */ 140 u32 in_ep; /* Bulk IN endpoint number */
141 u32 rx_max_size; /* Bulk IN max buffer size */ 141 u32 rx_max_size; /* Bulk IN max buffer size */
142 u32 rx_urb_num; /* How many Bulk INs are submitted to host. */ 142 u32 rx_urb_num; /* How many Bulk INs are submitted to host. */
143 struct usb_anchor rx_submitted; 143 struct usb_anchor rx_submitted;
144 struct usb_anchor rx_cleanup_urbs;
145 struct tasklet_struct rx_work_tasklet;
146 struct sk_buff_head rx_queue;
144 void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *, 147 void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
145 struct sk_buff_head *); 148 struct sk_buff_head *);
146 void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *); 149 void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f13258a8d995..44328baa6389 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -99,11 +99,36 @@
99#define CHANNEL_GROUP_MAX_5G 9 99#define CHANNEL_GROUP_MAX_5G 9
100#define CHANNEL_MAX_NUMBER_2G 14 100#define CHANNEL_MAX_NUMBER_2G 14
101#define AVG_THERMAL_NUM 8 101#define AVG_THERMAL_NUM 8
102#define AVG_THERMAL_NUM_88E 4
102#define MAX_TID_COUNT 9 103#define MAX_TID_COUNT 9
103 104
104/* for early mode */ 105/* for early mode */
105#define FCS_LEN 4 106#define FCS_LEN 4
106#define EM_HDR_LEN 8 107#define EM_HDR_LEN 8
108
109#define MAX_TX_COUNT 4
110#define MAX_RF_PATH 4
111#define MAX_CHNL_GROUP_24G 6
112#define MAX_CHNL_GROUP_5G 14
113
114struct txpower_info_2g {
115 u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
116 u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
117 /*If only one tx, only BW20 and OFDM are used.*/
118 u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT];
119 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
120 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
121 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
122};
123
124struct txpower_info_5g {
125 u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G];
126 /*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/
127 u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
128 u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
129 u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
130};
131
107enum intf_type { 132enum intf_type {
108 INTF_PCI = 0, 133 INTF_PCI = 0,
109 INTF_USB = 1, 134 INTF_USB = 1,
@@ -137,6 +162,7 @@ enum hardware_type {
137 HARDWARE_TYPE_RTL8192DU, 162 HARDWARE_TYPE_RTL8192DU,
138 HARDWARE_TYPE_RTL8723AE, 163 HARDWARE_TYPE_RTL8723AE,
139 HARDWARE_TYPE_RTL8723U, 164 HARDWARE_TYPE_RTL8723U,
165 HARDWARE_TYPE_RTL8188EE,
140 166
141 /* keep it last */ 167 /* keep it last */
142 HARDWARE_TYPE_NUM 168 HARDWARE_TYPE_NUM
@@ -263,7 +289,7 @@ enum hw_variables {
263 HW_VAR_RATR_0, 289 HW_VAR_RATR_0,
264 HW_VAR_RRSR, 290 HW_VAR_RRSR,
265 HW_VAR_CPU_RST, 291 HW_VAR_CPU_RST,
266 HW_VAR_CECHK_BSSID, 292 HW_VAR_CHECK_BSSID,
267 HW_VAR_LBK_MODE, 293 HW_VAR_LBK_MODE,
268 HW_VAR_AES_11N_FIX, 294 HW_VAR_AES_11N_FIX,
269 HW_VAR_USB_RX_AGGR, 295 HW_VAR_USB_RX_AGGR,
@@ -278,7 +304,10 @@ enum hw_variables {
278 HW_VAR_SET_RPWM, 304 HW_VAR_SET_RPWM,
279 HW_VAR_H2C_FW_PWRMODE, 305 HW_VAR_H2C_FW_PWRMODE,
280 HW_VAR_H2C_FW_JOINBSSRPT, 306 HW_VAR_H2C_FW_JOINBSSRPT,
307 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
281 HW_VAR_FW_PSMODE_STATUS, 308 HW_VAR_FW_PSMODE_STATUS,
309 HW_VAR_RESUME_CLK_ON,
310 HW_VAR_FW_LPS_ACTION,
282 HW_VAR_1X1_RECV_COMBINE, 311 HW_VAR_1X1_RECV_COMBINE,
283 HW_VAR_STOP_SEND_BEACON, 312 HW_VAR_STOP_SEND_BEACON,
284 HW_VAR_TSF_TIMER, 313 HW_VAR_TSF_TIMER,
@@ -305,6 +334,7 @@ enum hw_variables {
305 HW_VAR_INT_AC, 334 HW_VAR_INT_AC,
306 HW_VAR_RF_TIMING, 335 HW_VAR_RF_TIMING,
307 336
337 HAL_DEF_WOWLAN,
308 HW_VAR_MRC, 338 HW_VAR_MRC,
309 339
310 HW_VAR_MGT_FILTER, 340 HW_VAR_MGT_FILTER,
@@ -461,6 +491,7 @@ enum rtl_var_map {
461 EFUSE_MAX_SECTION_MAP, 491 EFUSE_MAX_SECTION_MAP,
462 EFUSE_REAL_CONTENT_SIZE, 492 EFUSE_REAL_CONTENT_SIZE,
463 EFUSE_OOB_PROTECT_BYTES_LEN, 493 EFUSE_OOB_PROTECT_BYTES_LEN,
494 EFUSE_ACCESS,
464 495
465 /*CAM map */ 496 /*CAM map */
466 RWCAM, 497 RWCAM,
@@ -493,7 +524,7 @@ enum rtl_var_map {
493 RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */ 524 RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
494 RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */ 525 RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
495 RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */ 526 RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
496 RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */ 527 RTL_IMR_BCNINT, /*Beacon DMA Interrupt 0 */
497 RTL_IMR_RXFOVW, /*Receive FIFO Overflow */ 528 RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
498 RTL_IMR_RDU, /*Receive Descriptor Unavailable */ 529 RTL_IMR_RDU, /*Receive Descriptor Unavailable */
499 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */ 530 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
@@ -508,7 +539,7 @@ enum rtl_var_map {
508 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ 539 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
509 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ 540 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
510 RTL_IMR_ROK, /*Receive DMA OK Interrupt */ 541 RTL_IMR_ROK, /*Receive DMA OK Interrupt */
511 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK | 542 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK |
512 * RTL_IMR_TBDER) */ 543 * RTL_IMR_TBDER) */
513 RTL_IMR_C2HCMD, /*fw interrupt*/ 544 RTL_IMR_C2HCMD, /*fw interrupt*/
514 545
@@ -742,6 +773,11 @@ struct false_alarm_statistics {
742 u32 cnt_ofdm_fail; 773 u32 cnt_ofdm_fail;
743 u32 cnt_cck_fail; 774 u32 cnt_cck_fail;
744 u32 cnt_all; 775 u32 cnt_all;
776 u32 cnt_ofdm_cca;
777 u32 cnt_cck_cca;
778 u32 cnt_cca_all;
779 u32 cnt_bw_usc;
780 u32 cnt_bw_lsc;
745}; 781};
746 782
747struct init_gain { 783struct init_gain {
@@ -826,8 +862,67 @@ struct rtl_rfkill {
826 bool rfkill_state; /*0 is off, 1 is on */ 862 bool rfkill_state; /*0 is off, 1 is on */
827}; 863};
828 864
865/*for P2P PS**/
866#define P2P_MAX_NOA_NUM 2
867
868enum p2p_role {
869 P2P_ROLE_DISABLE = 0,
870 P2P_ROLE_DEVICE = 1,
871 P2P_ROLE_CLIENT = 2,
872 P2P_ROLE_GO = 3
873};
874
875enum p2p_ps_state {
876 P2P_PS_DISABLE = 0,
877 P2P_PS_ENABLE = 1,
878 P2P_PS_SCAN = 2,
879 P2P_PS_SCAN_DONE = 3,
880 P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
881};
882
883enum p2p_ps_mode {
884 P2P_PS_NONE = 0,
885 P2P_PS_CTWINDOW = 1,
886 P2P_PS_NOA = 2,
887 P2P_PS_MIX = 3, /* CTWindow and NoA */
888};
889
890struct rtl_p2p_ps_info {
891 enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */
892 enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */
893 u8 noa_index; /* Identifies instance of Notice of Absence timing. */
894 /* Client traffic window. A period of time in TU after TBTT. */
895 u8 ctwindow;
896 u8 opp_ps; /* opportunistic power save. */
897 u8 noa_num; /* number of NoA descriptor in P2P IE. */
898 /* Count for owner, Type of client. */
899 u8 noa_count_type[P2P_MAX_NOA_NUM];
900 /* Max duration for owner, preferred or min acceptable duration
901 * for client.
902 */
903 u32 noa_duration[P2P_MAX_NOA_NUM];
904 /* Length of interval for owner, preferred or max acceptable intervali
905 * of client.
906 */
907 u32 noa_interval[P2P_MAX_NOA_NUM];
908 /* schedule in terms of the lower 4 bytes of the TSF timer. */
909 u32 noa_start_time[P2P_MAX_NOA_NUM];
910};
911
912struct p2p_ps_offload_t {
913 u8 offload_en:1;
914 u8 role:1; /* 1: Owner, 0: Client */
915 u8 ctwindow_en:1;
916 u8 noa0_en:1;
917 u8 noa1_en:1;
918 u8 allstasleep:1;
919 u8 discovery:1;
920 u8 reserved:1;
921};
922
829#define IQK_MATRIX_REG_NUM 8 923#define IQK_MATRIX_REG_NUM 8
830#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21) 924#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
925
831struct iqk_matrix_regs { 926struct iqk_matrix_regs {
832 bool iqk_done; 927 bool iqk_done;
833 long value[1][IQK_MATRIX_REG_NUM]; 928 long value[1][IQK_MATRIX_REG_NUM];
@@ -889,7 +984,7 @@ struct rtl_phy {
889 984
890 /* Dual mac */ 985 /* Dual mac */
891 bool need_iqk; 986 bool need_iqk;
892 struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM]; 987 struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
893 988
894 bool rfpi_enable; 989 bool rfpi_enable;
895 990
@@ -902,6 +997,8 @@ struct rtl_phy {
902 /* the current Tx power level */ 997 /* the current Tx power level */
903 u8 cur_cck_txpwridx; 998 u8 cur_cck_txpwridx;
904 u8 cur_ofdm24g_txpwridx; 999 u8 cur_ofdm24g_txpwridx;
1000 u8 cur_bw20_txpwridx;
1001 u8 cur_bw40_txpwridx;
905 1002
906 u32 rfreg_chnlval[2]; 1003 u32 rfreg_chnlval[2];
907 bool apk_done; 1004 bool apk_done;
@@ -940,20 +1037,21 @@ struct rtl_ht_agg {
940 u8 rx_agg_state; 1037 u8 rx_agg_state;
941}; 1038};
942 1039
1040struct rssi_sta {
1041 long undec_sm_pwdb;
1042};
1043
943struct rtl_tid_data { 1044struct rtl_tid_data {
944 u16 seq_number; 1045 u16 seq_number;
945 struct rtl_ht_agg agg; 1046 struct rtl_ht_agg agg;
946}; 1047};
947 1048
948struct rssi_sta {
949 long undec_sm_pwdb;
950};
951
952struct rtl_sta_info { 1049struct rtl_sta_info {
953 struct list_head list; 1050 struct list_head list;
954 u8 ratr_index; 1051 u8 ratr_index;
955 u8 wireless_mode; 1052 u8 wireless_mode;
956 u8 mimo_ps; 1053 u8 mimo_ps;
1054 u8 mac_addr[ETH_ALEN];
957 struct rtl_tid_data tids[MAX_TID_COUNT]; 1055 struct rtl_tid_data tids[MAX_TID_COUNT];
958 1056
959 /* just used for ap adhoc or mesh*/ 1057 /* just used for ap adhoc or mesh*/
@@ -1005,6 +1103,8 @@ struct rtl_mac {
1005 int n_bitrates; 1103 int n_bitrates;
1006 1104
1007 bool offchan_delay; 1105 bool offchan_delay;
1106 u8 p2p; /*using p2p role*/
1107 bool p2p_in_use;
1008 1108
1009 /*filters */ 1109 /*filters */
1010 u32 rx_conf; 1110 u32 rx_conf;
@@ -1014,11 +1114,11 @@ struct rtl_mac {
1014 1114
1015 bool act_scanning; 1115 bool act_scanning;
1016 u8 cnt_after_linked; 1116 u8 cnt_after_linked;
1117 bool skip_scan;
1017 1118
1018 /* early mode */ 1119 /* early mode */
1019 /* skb wait queue */ 1120 /* skb wait queue */
1020 struct sk_buff_head skb_waitq[MAX_TID_COUNT]; 1121 struct sk_buff_head skb_waitq[MAX_TID_COUNT];
1021 u8 earlymode_threshold;
1022 1122
1023 /*RDG*/ 1123 /*RDG*/
1024 bool rdg_en; 1124 bool rdg_en;
@@ -1042,6 +1142,7 @@ struct rtl_mac {
1042 u8 retry_short; 1142 u8 retry_short;
1043 u8 retry_long; 1143 u8 retry_long;
1044 u16 assoc_id; 1144 u16 assoc_id;
1145 bool hiddenssid;
1045 1146
1046 /*IBSS*/ 1147 /*IBSS*/
1047 int beacon_interval; 1148 int beacon_interval;
@@ -1111,10 +1212,13 @@ struct bt_coexist_8723 {
1111 1212
1112struct rtl_hal { 1213struct rtl_hal {
1113 struct ieee80211_hw *hw; 1214 struct ieee80211_hw *hw;
1114 struct bt_coexist_8723 hal_coex_8723; 1215 bool driver_is_goingto_unload;
1115 bool up_first_time; 1216 bool up_first_time;
1217 bool first_init;
1116 bool being_init_adapter; 1218 bool being_init_adapter;
1117 bool bbrf_ready; 1219 bool bbrf_ready;
1220 bool mac_func_enable;
1221 struct bt_coexist_8723 hal_coex_8723;
1118 1222
1119 enum intf_type interface; 1223 enum intf_type interface;
1120 u16 hw_type; /*92c or 92d or 92s and so on */ 1224 u16 hw_type; /*92c or 92d or 92s and so on */
@@ -1122,6 +1226,7 @@ struct rtl_hal {
1122 u8 oem_id; 1226 u8 oem_id;
1123 u32 version; /*version of chip */ 1227 u32 version; /*version of chip */
1124 u8 state; /*stop 0, start 1 */ 1228 u8 state; /*stop 0, start 1 */
1229 u8 board_type;
1125 1230
1126 /*firmware */ 1231 /*firmware */
1127 u32 fwsize; 1232 u32 fwsize;
@@ -1141,6 +1246,10 @@ struct rtl_hal {
1141 bool set_fwcmd_inprogress; 1246 bool set_fwcmd_inprogress;
1142 u8 current_fwcmd_io; 1247 u8 current_fwcmd_io;
1143 1248
1249 struct p2p_ps_offload_t p2p_ps_offload;
1250 bool fw_clk_change_in_progress;
1251 bool allow_sw_to_change_hwclc;
1252 u8 fw_ps_state;
1144 /**/ 1253 /**/
1145 bool driver_going2unload; 1254 bool driver_going2unload;
1146 1255
@@ -1157,6 +1266,7 @@ struct rtl_hal {
1157 /* just for DualMac S3S4 */ 1266 /* just for DualMac S3S4 */
1158 u8 macphyctl_reg; 1267 u8 macphyctl_reg;
1159 bool earlymode_enable; 1268 bool earlymode_enable;
1269 u8 max_earlymode_num;
1160 /* Dual mac*/ 1270 /* Dual mac*/
1161 bool during_mac0init_radiob; 1271 bool during_mac0init_radiob;
1162 bool during_mac1init_radioa; 1272 bool during_mac1init_radioa;
@@ -1193,6 +1303,29 @@ struct rtl_security {
1193 u8 *pairwise_key; 1303 u8 *pairwise_key;
1194}; 1304};
1195 1305
1306#define ASSOCIATE_ENTRY_NUM 33
1307
1308struct fast_ant_training {
1309 u8 bssid[6];
1310 u8 antsel_rx_keep_0;
1311 u8 antsel_rx_keep_1;
1312 u8 antsel_rx_keep_2;
1313 u32 ant_sum[7];
1314 u32 ant_cnt[7];
1315 u32 ant_ave[7];
1316 u8 fat_state;
1317 u32 train_idx;
1318 u8 antsel_a[ASSOCIATE_ENTRY_NUM];
1319 u8 antsel_b[ASSOCIATE_ENTRY_NUM];
1320 u8 antsel_c[ASSOCIATE_ENTRY_NUM];
1321 u32 main_ant_sum[ASSOCIATE_ENTRY_NUM];
1322 u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM];
1323 u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM];
1324 u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM];
1325 u8 rx_idle_ant;
1326 bool becomelinked;
1327};
1328
1196struct rtl_dm { 1329struct rtl_dm {
1197 /*PHY status for Dynamic Management */ 1330 /*PHY status for Dynamic Management */
1198 long entry_min_undec_sm_pwdb; 1331 long entry_min_undec_sm_pwdb;
@@ -1229,9 +1362,24 @@ struct rtl_dm {
1229 bool disable_tx_int; 1362 bool disable_tx_int;
1230 char ofdm_index[2]; 1363 char ofdm_index[2];
1231 char cck_index; 1364 char cck_index;
1365 char delta_power_index;
1366 char delta_power_index_last;
1367 char power_index_offset;
1368
1369 /*88e tx power tracking*/
1370 u8 swing_idx_ofdm[2];
1371 u8 swing_idx_ofdm_cur;
1372 u8 swing_idx_ofdm_base;
1373 bool swing_flag_ofdm;
1374 u8 swing_idx_cck;
1375 u8 swing_idx_cck_cur;
1376 u8 swing_idx_cck_base;
1377 bool swing_flag_cck;
1232 1378
1233 /* DMSP */ 1379 /* DMSP */
1234 bool supp_phymode_switch; 1380 bool supp_phymode_switch;
1381
1382 struct fast_ant_training fat_table;
1235}; 1383};
1236 1384
1237#define EFUSE_MAX_LOGICAL_SIZE 256 1385#define EFUSE_MAX_LOGICAL_SIZE 256
@@ -1264,6 +1412,9 @@ struct rtl_efuse {
1264 u8 external_pa; 1412 u8 external_pa;
1265 1413
1266 u8 dev_addr[6]; 1414 u8 dev_addr[6];
1415 u8 wowlan_enable;
1416 u8 antenna_div_cfg;
1417 u8 antenna_div_type;
1267 1418
1268 bool txpwr_fromeprom; 1419 bool txpwr_fromeprom;
1269 u8 eeprom_crystalcap; 1420 u8 eeprom_crystalcap;
@@ -1319,14 +1470,12 @@ struct rtl_ps_ctl {
1319 bool rfchange_inprogress; 1470 bool rfchange_inprogress;
1320 bool swrf_processing; 1471 bool swrf_processing;
1321 bool hwradiooff; 1472 bool hwradiooff;
1322
1323 /* 1473 /*
1324 * just for PCIE ASPM 1474 * just for PCIE ASPM
1325 * If it supports ASPM, Offset[560h] = 0x40, 1475 * If it supports ASPM, Offset[560h] = 0x40,
1326 * otherwise Offset[560h] = 0x00. 1476 * otherwise Offset[560h] = 0x00.
1327 * */ 1477 * */
1328 bool support_aspm; 1478 bool support_aspm;
1329
1330 bool support_backdoor; 1479 bool support_backdoor;
1331 1480
1332 /*for LPS */ 1481 /*for LPS */
@@ -1341,6 +1490,7 @@ struct rtl_ps_ctl {
1341 bool fw_current_inpsmode; 1490 bool fw_current_inpsmode;
1342 u8 reg_max_lps_awakeintvl; 1491 u8 reg_max_lps_awakeintvl;
1343 bool report_linked; 1492 bool report_linked;
1493 bool low_power_enable;/*for 32k*/
1344 1494
1345 /*for IPS */ 1495 /*for IPS */
1346 bool inactiveps; 1496 bool inactiveps;
@@ -1373,6 +1523,11 @@ struct rtl_ps_ctl {
1373 unsigned long last_beacon; 1523 unsigned long last_beacon;
1374 unsigned long last_action; 1524 unsigned long last_action;
1375 unsigned long last_slept; 1525 unsigned long last_slept;
1526
1527 /*For P2P PS */
1528 struct rtl_p2p_ps_info p2p_ps_info;
1529 u8 pwr_mode;
1530 u8 smart_ps;
1376}; 1531};
1377 1532
1378struct rtl_stats { 1533struct rtl_stats {
@@ -1381,7 +1536,7 @@ struct rtl_stats {
1381 s8 rssi; 1536 s8 rssi;
1382 u8 signal; 1537 u8 signal;
1383 u8 noise; 1538 u8 noise;
1384 u16 rate; /*in 100 kbps */ 1539 u8 rate; /* hw desc rate */
1385 u8 received_channel; 1540 u8 received_channel;
1386 u8 control; 1541 u8 control;
1387 u8 mask; 1542 u8 mask;
@@ -1423,8 +1578,16 @@ struct rtl_stats {
1423 bool packet_toself; 1578 bool packet_toself;
1424 bool packet_beacon; /*for rssi */ 1579 bool packet_beacon; /*for rssi */
1425 char cck_adc_pwdb[4]; /*for rx path selection */ 1580 char cck_adc_pwdb[4]; /*for rx path selection */
1581
1582 u8 packet_report_type;
1583
1584 u32 macid;
1585 u8 wake_match;
1586 u32 bt_rx_rssi_percentage;
1587 u32 macid_valid_entry[2];
1426}; 1588};
1427 1589
1590
1428struct rt_link_detect { 1591struct rt_link_detect {
1429 /* count for roaming */ 1592 /* count for roaming */
1430 u32 bcn_rx_inperiod; 1593 u32 bcn_rx_inperiod;
@@ -1477,7 +1640,8 @@ struct rtl_tcb_desc {
1477 /* early mode */ 1640 /* early mode */
1478 u8 empkt_num; 1641 u8 empkt_num;
1479 /* The max value by HW */ 1642 /* The max value by HW */
1480 u32 empkt_len[5]; 1643 u32 empkt_len[10];
1644 bool btx_enable_sw_calc_duration;
1481}; 1645};
1482 1646
1483struct rtl_hal_ops { 1647struct rtl_hal_ops {
@@ -1553,7 +1717,7 @@ struct rtl_hal_ops {
1553 void (*allow_all_destaddr)(struct ieee80211_hw *hw, 1717 void (*allow_all_destaddr)(struct ieee80211_hw *hw,
1554 bool allow_all_da, bool write_into_reg); 1718 bool allow_all_da, bool write_into_reg);
1555 void (*linked_set_reg) (struct ieee80211_hw *hw); 1719 void (*linked_set_reg) (struct ieee80211_hw *hw);
1556 void (*check_switch_to_dmdp) (struct ieee80211_hw *hw); 1720 void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
1557 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw); 1721 void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
1558 void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw); 1722 void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
1559 bool (*phy_rf6052_config) (struct ieee80211_hw *hw); 1723 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
@@ -1662,6 +1826,8 @@ struct rtl_locks {
1662 /*spin lock */ 1826 /*spin lock */
1663 spinlock_t ips_lock; 1827 spinlock_t ips_lock;
1664 spinlock_t irq_th_lock; 1828 spinlock_t irq_th_lock;
1829 spinlock_t irq_pci_lock;
1830 spinlock_t tx_lock;
1665 spinlock_t h2c_lock; 1831 spinlock_t h2c_lock;
1666 spinlock_t rf_ps_lock; 1832 spinlock_t rf_ps_lock;
1667 spinlock_t rf_lock; 1833 spinlock_t rf_lock;
@@ -1670,6 +1836,9 @@ struct rtl_locks {
1670 spinlock_t entry_list_lock; 1836 spinlock_t entry_list_lock;
1671 spinlock_t usb_lock; 1837 spinlock_t usb_lock;
1672 1838
1839 /*FW clock change */
1840 spinlock_t fw_ps_lock;
1841
1673 /*Dual mac*/ 1842 /*Dual mac*/
1674 spinlock_t cck_and_rw_pagea_lock; 1843 spinlock_t cck_and_rw_pagea_lock;
1675 1844
@@ -1683,7 +1852,8 @@ struct rtl_works {
1683 /*timer */ 1852 /*timer */
1684 struct timer_list watchdog_timer; 1853 struct timer_list watchdog_timer;
1685 struct timer_list dualmac_easyconcurrent_retrytimer; 1854 struct timer_list dualmac_easyconcurrent_retrytimer;
1686 1855 struct timer_list fw_clockoff_timer;
1856 struct timer_list fast_antenna_training_timer;
1687 /*task */ 1857 /*task */
1688 struct tasklet_struct irq_tasklet; 1858 struct tasklet_struct irq_tasklet;
1689 struct tasklet_struct irq_prepare_bcn_tasklet; 1859 struct tasklet_struct irq_prepare_bcn_tasklet;
@@ -1696,8 +1866,9 @@ struct rtl_works {
1696 /* For SW LPS */ 1866 /* For SW LPS */
1697 struct delayed_work ps_work; 1867 struct delayed_work ps_work;
1698 struct delayed_work ps_rfon_wq; 1868 struct delayed_work ps_rfon_wq;
1869 struct delayed_work fwevt_wq;
1699 1870
1700 struct work_struct lps_leave_work; 1871 struct work_struct lps_change_work;
1701}; 1872};
1702 1873
1703struct rtl_debug { 1874struct rtl_debug {
@@ -1767,10 +1938,12 @@ struct dig_t {
1767 char back_val; 1938 char back_val;
1768 char back_range_max; 1939 char back_range_max;
1769 char back_range_min; 1940 char back_range_min;
1770 u8 rx_gain_range_max; 1941 u8 rx_gain_max;
1771 u8 rx_gain_range_min; 1942 u8 rx_gain_min;
1772 u8 min_undec_pwdb_for_dm; 1943 u8 min_undec_pwdb_for_dm;
1773 u8 rssi_val_min; 1944 u8 rssi_val_min;
1945 u8 pre_cck_cca_thres;
1946 u8 cur_cck_cca_thres;
1774 u8 pre_cck_pd_state; 1947 u8 pre_cck_pd_state;
1775 u8 cur_cck_pd_state; 1948 u8 cur_cck_pd_state;
1776 u8 pre_cck_fa_state; 1949 u8 pre_cck_fa_state;
@@ -1792,6 +1965,13 @@ struct dig_t {
1792 u8 backoff_enable_flag; 1965 u8 backoff_enable_flag;
1793 char backoffval_range_max; 1966 char backoffval_range_max;
1794 char backoffval_range_min; 1967 char backoffval_range_min;
1968 u8 dig_min_0;
1969 u8 dig_min_1;
1970 bool media_connect_0;
1971 bool media_connect_1;
1972
1973 u32 antdiv_rssi_max;
1974 u32 rssi_max;
1795}; 1975};
1796 1976
1797struct rtl_global_var { 1977struct rtl_global_var {
@@ -1802,6 +1982,7 @@ struct rtl_global_var {
1802}; 1982};
1803 1983
1804struct rtl_priv { 1984struct rtl_priv {
1985 struct ieee80211_hw *hw;
1805 struct completion firmware_loading_complete; 1986 struct completion firmware_loading_complete;
1806 struct list_head list; 1987 struct list_head list;
1807 struct rtl_priv *buddy_priv; 1988 struct rtl_priv *buddy_priv;
@@ -1866,6 +2047,7 @@ struct rtl_priv {
1866 bool bt_operation_on; 2047 bool bt_operation_on;
1867 }; 2048 };
1868 }; 2049 };
2050 bool enter_ps; /* true when entering PS */
1869 2051
1870 /*This must be the last item so 2052 /*This must be the last item so
1871 that it points to the data allocated 2053 that it points to the data allocated
@@ -2127,9 +2309,7 @@ value to host byte ordering.*/
2127#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) 2309#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
2128#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) 2310#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
2129#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA) 2311#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
2130#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 2312#define rtl_dm(rtlpriv) (&((rtlpriv)->dm))
2131#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
2132#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
2133 2313
2134#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */ 2314#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
2135#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */ 2315#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index bbbf68cf50a7..3291ffa95273 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -572,7 +572,8 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
572 struct ieee80211_conf *conf = &hw->conf; 572 struct ieee80211_conf *conf = &hw->conf;
573 int channel, ret = 0; 573 int channel, ret = 0;
574 574
575 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 575 channel = ieee80211_frequency_to_channel(
576 conf->chandef.chan->center_freq);
576 577
577 wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", 578 wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
578 channel, 579 channel,
@@ -1223,7 +1224,7 @@ static int wl1251_op_get_survey(struct ieee80211_hw *hw, int idx,
1223 if (idx != 0) 1224 if (idx != 0)
1224 return -ENOENT; 1225 return -ENOENT;
1225 1226
1226 survey->channel = conf->channel; 1227 survey->channel = conf->chandef.chan;
1227 survey->filled = SURVEY_INFO_NOISE_DBM; 1228 survey->filled = SURVEY_INFO_NOISE_DBM;
1228 survey->noise = wl->noise; 1229 survey->noise = wl->noise;
1229 1230
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index e57ee48edff6..e2b3d9c541e8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -186,8 +186,10 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
186 wl->set_power(true); 186 wl->set_power(true);
187 187
188 ret = pm_runtime_get_sync(&func->dev); 188 ret = pm_runtime_get_sync(&func->dev);
189 if (ret < 0) 189 if (ret < 0) {
190 pm_runtime_put_sync(&func->dev);
190 goto out; 191 goto out;
192 }
191 193
192 sdio_claim_host(func); 194 sdio_claim_host(func);
193 sdio_enable_func(func); 195 sdio_enable_func(func);
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 3b266d3231a3..4c67c2f9ea71 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -257,7 +257,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
257 wl = hw->priv; 257 wl = hw->priv;
258 258
259 SET_IEEE80211_DEV(hw, &spi->dev); 259 SET_IEEE80211_DEV(hw, &spi->dev);
260 dev_set_drvdata(&spi->dev, wl); 260 spi_set_drvdata(spi, wl);
261 wl->if_priv = spi; 261 wl->if_priv = spi;
262 wl->if_ops = &wl1251_spi_ops; 262 wl->if_ops = &wl1251_spi_ops;
263 263
@@ -311,7 +311,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
311 311
312static int wl1251_spi_remove(struct spi_device *spi) 312static int wl1251_spi_remove(struct spi_device *spi)
313{ 313{
314 struct wl1251 *wl = dev_get_drvdata(&spi->dev); 314 struct wl1251 *wl = spi_get_drvdata(spi);
315 315
316 free_irq(wl->irq, wl); 316 free_irq(wl->irq, wl);
317 wl1251_free_hw(wl); 317 wl1251_free_hw(wl);
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 7dc9f965037d..7485dbae8c4b 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -301,7 +301,7 @@ int wl12xx_cmd_channel_switch(struct wl1271 *wl,
301 } 301 }
302 302
303 cmd->role_id = wlvif->role_id; 303 cmd->role_id = wlvif->role_id;
304 cmd->channel = ch_switch->channel->hw_value; 304 cmd->channel = ch_switch->chandef.chan->hw_value;
305 cmd->switch_time = ch_switch->count; 305 cmd->switch_time = ch_switch->count;
306 cmd->stop_tx = ch_switch->block_tx; 306 cmd->stop_tx = ch_switch->block_tx;
307 307
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 09694e39bb14..1c627da85083 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -723,6 +723,7 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
723 wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4; 723 wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
724 wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5; 724 wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
725 wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ; 725 wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ;
726 wl->ba_rx_session_count_max = WL12XX_RX_BA_MAX_SESSIONS;
726out: 727out:
727 return ret; 728 return ret;
728} 729}
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index d4552857480c..222d03540200 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -63,6 +63,8 @@
63 63
64#define WL12XX_NUM_MAC_ADDRESSES 2 64#define WL12XX_NUM_MAC_ADDRESSES 2
65 65
66#define WL12XX_RX_BA_MAX_SESSIONS 3
67
66struct wl127x_rx_mem_pool_addr { 68struct wl127x_rx_mem_pool_addr {
67 u32 addr; 69 u32 addr;
68 u32 addr_extra; 70 u32 addr_extra;
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index 1d1f6cc7a50a..7649c75cd68d 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -42,11 +42,11 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
42 } 42 }
43 43
44 cmd->role_id = wlvif->role_id; 44 cmd->role_id = wlvif->role_id;
45 cmd->channel = ch_switch->channel->hw_value; 45 cmd->channel = ch_switch->chandef.chan->hw_value;
46 cmd->switch_time = ch_switch->count; 46 cmd->switch_time = ch_switch->count;
47 cmd->stop_tx = ch_switch->block_tx; 47 cmd->stop_tx = ch_switch->block_tx;
48 48
49 switch (ch_switch->channel->band) { 49 switch (ch_switch->chandef.chan->band) {
50 case IEEE80211_BAND_2GHZ: 50 case IEEE80211_BAND_2GHZ:
51 cmd->band = WLCORE_BAND_2_4GHZ; 51 cmd->band = WLCORE_BAND_2_4GHZ;
52 break; 52 break;
@@ -55,7 +55,7 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
55 break; 55 break;
56 default: 56 default:
57 wl1271_error("invalid channel switch band: %d", 57 wl1271_error("invalid channel switch band: %d",
58 ch_switch->channel->band); 58 ch_switch->chandef.chan->band);
59 ret = -EINVAL; 59 ret = -EINVAL;
60 goto out_free; 60 goto out_free;
61 } 61 }
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index da3ef1b10a9c..9fa692d11025 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -678,6 +678,7 @@ static int wl18xx_identify_chip(struct wl1271 *wl)
678 wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC; 678 wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
679 wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC; 679 wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC;
680 wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ; 680 wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
681 wl->ba_rx_session_count_max = WL18XX_RX_BA_MAX_SESSIONS;
681out: 682out:
682 return ret; 683 return ret;
683} 684}
@@ -1144,6 +1145,7 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
1144static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver) 1145static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1145{ 1146{
1146 u32 fuse; 1147 u32 fuse;
1148 s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
1147 int ret; 1149 int ret;
1148 1150
1149 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]); 1151 ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
@@ -1154,8 +1156,29 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
1154 if (ret < 0) 1156 if (ret < 0)
1155 goto out; 1157 goto out;
1156 1158
1159 pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
1160 rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
1161
1162 if (rom <= 0xE)
1163 metal = (fuse & WL18XX_METAL_VER_MASK) >>
1164 WL18XX_METAL_VER_OFFSET;
1165 else
1166 metal = (fuse & WL18XX_NEW_METAL_VER_MASK) >>
1167 WL18XX_NEW_METAL_VER_OFFSET;
1168
1169 ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
1170 if (ret < 0)
1171 goto out;
1172
1173 rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
1174 if (rdl_ver > RDL_MAX)
1175 rdl_ver = RDL_NONE;
1176
1177 wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
1178 rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
1179
1157 if (ver) 1180 if (ver)
1158 *ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET; 1181 *ver = pg_ver;
1159 1182
1160 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 1183 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1161 1184
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index 937b71d8783f..6306e04cd258 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -131,6 +131,16 @@
131#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C 131#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
132#define WL18XX_PG_VER_MASK 0x70 132#define WL18XX_PG_VER_MASK 0x70
133#define WL18XX_PG_VER_OFFSET 4 133#define WL18XX_PG_VER_OFFSET 4
134#define WL18XX_ROM_VER_MASK 0x3
135#define WL18XX_ROM_VER_OFFSET 0
136#define WL18XX_METAL_VER_MASK 0xC
137#define WL18XX_METAL_VER_OFFSET 2
138#define WL18XX_NEW_METAL_VER_MASK 0x180
139#define WL18XX_NEW_METAL_VER_OFFSET 7
140
141#define WL18XX_REG_FUSE_DATA_2_3 0xA02614
142#define WL18XX_RDL_VER_MASK 0x1f00
143#define WL18XX_RDL_VER_OFFSET 8
134 144
135#define WL18XX_REG_FUSE_BD_ADDR_1 0xA02602 145#define WL18XX_REG_FUSE_BD_ADDR_1 0xA02602
136#define WL18XX_REG_FUSE_BD_ADDR_2 0xA02606 146#define WL18XX_REG_FUSE_BD_ADDR_2 0xA02606
@@ -188,4 +198,23 @@ enum {
188 NUM_BOARD_TYPES, 198 NUM_BOARD_TYPES,
189}; 199};
190 200
201enum {
202 RDL_NONE = 0,
203 RDL_1_HP = 1,
204 RDL_2_SP = 2,
205 RDL_3_HP = 3,
206 RDL_4_SP = 4,
207
208 _RDL_LAST,
209 RDL_MAX = _RDL_LAST - 1,
210};
211
212static const char * const rdl_names[] = {
213 [RDL_NONE] = "",
214 [RDL_1_HP] = "1853 SISO",
215 [RDL_2_SP] = "1857 MIMO",
216 [RDL_3_HP] = "1893 SISO",
217 [RDL_4_SP] = "1897 MIMO",
218};
219
191#endif /* __REG_H__ */ 220#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index b6739e79efcf..9204e07ee432 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -29,7 +29,7 @@
29#define WL18XX_IFTYPE_VER 5 29#define WL18XX_IFTYPE_VER 5
30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE 30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE 31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
32#define WL18XX_MINOR_VER 28 32#define WL18XX_MINOR_VER 39
33 33
34#define WL18XX_CMD_MAX_SIZE 740 34#define WL18XX_CMD_MAX_SIZE 740
35 35
@@ -40,6 +40,8 @@
40 40
41#define WL18XX_NUM_MAC_ADDRESSES 3 41#define WL18XX_NUM_MAC_ADDRESSES 3
42 42
43#define WL18XX_RX_BA_MAX_SESSIONS 5
44
43struct wl18xx_priv { 45struct wl18xx_priv {
44 /* buffer for sending commands to FW */ 46 /* buffer for sending commands to FW */
45 u8 cmd_buf[WL18XX_CMD_MAX_SIZE]; 47 u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index c79654323396..7a970cd9c555 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1736,6 +1736,35 @@ out:
1736 1736
1737} 1737}
1738 1738
1739int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1740 s8 *avg_rssi)
1741{
1742 struct acx_roaming_stats *acx;
1743 int ret = 0;
1744
1745 wl1271_debug(DEBUG_ACX, "acx roaming statistics");
1746
1747 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1748 if (!acx) {
1749 ret = -ENOMEM;
1750 goto out;
1751 }
1752
1753 acx->role_id = wlvif->role_id;
1754 ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL,
1755 acx, sizeof(*acx));
1756 if (ret < 0) {
1757 wl1271_warning("acx roaming statistics failed: %d", ret);
1758 ret = -ENOMEM;
1759 goto out;
1760 }
1761
1762 *avg_rssi = acx->rssi_beacon;
1763out:
1764 kfree(acx);
1765 return ret;
1766}
1767
1739#ifdef CONFIG_PM 1768#ifdef CONFIG_PM
1740/* Set the global behaviour of RX filters - On/Off + default action */ 1769/* Set the global behaviour of RX filters - On/Off + default action */
1741int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1770int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 126536c6a393..6dcfad9b0472 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -728,8 +728,6 @@ struct wl1271_acx_ht_information {
728 u8 padding[2]; 728 u8 padding[2];
729} __packed; 729} __packed;
730 730
731#define RX_BA_MAX_SESSIONS 3
732
733struct wl1271_acx_ba_initiator_policy { 731struct wl1271_acx_ba_initiator_policy {
734 struct acx_header header; 732 struct acx_header header;
735 733
@@ -955,6 +953,18 @@ struct acx_rx_filter_cfg {
955 u8 fields[0]; 953 u8 fields[0];
956} __packed; 954} __packed;
957 955
956struct acx_roaming_stats {
957 struct acx_header header;
958
959 u8 role_id;
960 u8 pad[3];
961 u32 missed_beacons;
962 u8 snr_data;
963 u8 snr_bacon;
964 s8 rssi_data;
965 s8 rssi_beacon;
966} __packed;
967
958enum { 968enum {
959 ACX_WAKE_UP_CONDITIONS = 0x0000, 969 ACX_WAKE_UP_CONDITIONS = 0x0000,
960 ACX_MEM_CFG = 0x0001, 970 ACX_MEM_CFG = 0x0001,
@@ -1112,6 +1122,8 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1112int wl1271_acx_fm_coex(struct wl1271 *wl); 1122int wl1271_acx_fm_coex(struct wl1271 *wl);
1113int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); 1123int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
1114int wl12xx_acx_config_hangover(struct wl1271 *wl); 1124int wl12xx_acx_config_hangover(struct wl1271 *wl);
1125int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1126 s8 *avg_rssi);
1115 1127
1116#ifdef CONFIG_PM 1128#ifdef CONFIG_PM
1117int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1129int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 6331f9e1cb39..c9e060795d13 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -327,6 +327,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
327 wl->links[link].prev_freed_pkts = 327 wl->links[link].prev_freed_pkts =
328 wl->fw_status_2->counters.tx_lnk_free_pkts[link]; 328 wl->fw_status_2->counters.tx_lnk_free_pkts[link];
329 wl->links[link].wlvif = wlvif; 329 wl->links[link].wlvif = wlvif;
330
331 /*
332 * Take saved value for total freed packets from wlvif, in case this is
333 * recovery/resume
334 */
335 if (wlvif->bss_type != BSS_TYPE_AP_BSS)
336 wl->links[link].total_freed_pkts = wlvif->total_freed_pkts;
337
330 *hlid = link; 338 *hlid = link;
331 339
332 wl->active_link_count++; 340 wl->active_link_count++;
@@ -358,6 +366,26 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
358 wl1271_tx_reset_link_queues(wl, *hlid); 366 wl1271_tx_reset_link_queues(wl, *hlid);
359 wl->links[*hlid].wlvif = NULL; 367 wl->links[*hlid].wlvif = NULL;
360 368
369 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
370 (wlvif->bss_type == BSS_TYPE_AP_BSS &&
371 *hlid == wlvif->ap.bcast_hlid)) {
372 /*
373 * save the total freed packets in the wlvif, in case this is
374 * recovery or suspend
375 */
376 wlvif->total_freed_pkts = wl->links[*hlid].total_freed_pkts;
377
378 /*
379 * increment the initial seq number on recovery to account for
380 * transmitted packets that we haven't yet got in the FW status
381 */
382 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
383 wlvif->total_freed_pkts +=
384 WL1271_TX_SQN_POST_RECOVERY_PADDING;
385 }
386
387 wl->links[*hlid].total_freed_pkts = 0;
388
361 *hlid = WL12XX_INVALID_LINK_ID; 389 *hlid = WL12XX_INVALID_LINK_ID;
362 wl->active_link_count--; 390 wl->active_link_count--;
363 WARN_ON_ONCE(wl->active_link_count < 0); 391 WARN_ON_ONCE(wl->active_link_count < 0);
@@ -609,6 +637,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
609 if (ret < 0) 637 if (ret < 0)
610 goto out_free_global; 638 goto out_free_global;
611 639
640 /* use the previous security seq, if this is a recovery/resume */
641 wl->links[wlvif->ap.bcast_hlid].total_freed_pkts =
642 wlvif->total_freed_pkts;
643
612 cmd->role_id = wlvif->role_id; 644 cmd->role_id = wlvif->role_id;
613 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); 645 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
614 cmd->ap.bss_index = WL1271_AP_BSS_INDEX; 646 cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index db4bf5a68ce2..0420bd45e4ee 100644
--- a/drivers/net/wireless/ti/wlcore/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -89,25 +89,24 @@ extern u32 wl12xx_debug_level;
89 } while (0) 89 } while (0)
90#endif 90#endif
91 91
92/* TODO: use pr_debug_hex_dump when it becomes available */ 92#define wl1271_dump(level, prefix, buf, len) \
93#define wl1271_dump(level, prefix, buf, len) \ 93 do { \
94 do { \ 94 if (level & wl12xx_debug_level) \
95 if (level & wl12xx_debug_level) \ 95 print_hex_dump_debug(DRIVER_PREFIX prefix, \
96 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ 96 DUMP_PREFIX_OFFSET, 16, 1, \
97 DUMP_PREFIX_OFFSET, 16, 1, \ 97 buf, \
98 buf, \ 98 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
99 min_t(size_t, len, DEBUG_DUMP_LIMIT), \ 99 0); \
100 0); \
101 } while (0) 100 } while (0)
102 101
103#define wl1271_dump_ascii(level, prefix, buf, len) \ 102#define wl1271_dump_ascii(level, prefix, buf, len) \
104 do { \ 103 do { \
105 if (level & wl12xx_debug_level) \ 104 if (level & wl12xx_debug_level) \
106 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ 105 print_hex_dump_debug(DRIVER_PREFIX prefix, \
107 DUMP_PREFIX_OFFSET, 16, 1, \ 106 DUMP_PREFIX_OFFSET, 16, 1, \
108 buf, \ 107 buf, \
109 min_t(size_t, len, DEBUG_DUMP_LIMIT), \ 108 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
110 true); \ 109 true); \
111 } while (0) 110 } while (0)
112 111
113#endif /* __DEBUG_H__ */ 112#endif /* __DEBUG_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index e70a7c864865..c3e1f79c7856 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -598,8 +598,7 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
598 VIF_STATE_PRINT_INT(last_rssi_event); 598 VIF_STATE_PRINT_INT(last_rssi_event);
599 VIF_STATE_PRINT_INT(ba_support); 599 VIF_STATE_PRINT_INT(ba_support);
600 VIF_STATE_PRINT_INT(ba_allowed); 600 VIF_STATE_PRINT_INT(ba_allowed);
601 VIF_STATE_PRINT_LLHEX(tx_security_seq); 601 VIF_STATE_PRINT_LLHEX(total_freed_pkts);
602 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
603 } 602 }
604 603
605#undef VIF_STATE_PRINT_INT 604#undef VIF_STATE_PRINT_INT
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 70f289aa1bc6..67f61689b49e 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -237,6 +237,14 @@ void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
237 !test_bit(wlvif->role_id , &roles_bitmap)) 237 !test_bit(wlvif->role_id , &roles_bitmap))
238 continue; 238 continue;
239 239
240 vif = wl12xx_wlvif_to_vif(wlvif);
241
242 /* don't attempt roaming in case of p2p */
243 if (wlvif->p2p) {
244 ieee80211_connection_loss(vif);
245 continue;
246 }
247
240 /* 248 /*
241 * if the work is already queued, it should take place. 249 * if the work is already queued, it should take place.
242 * We don't want to delay the connection loss 250 * We don't want to delay the connection loss
@@ -246,7 +254,6 @@ void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
246 &wlvif->connection_loss_work, 254 &wlvif->connection_loss_work,
247 msecs_to_jiffies(delay)); 255 msecs_to_jiffies(delay));
248 256
249 vif = wl12xx_wlvif_to_vif(wlvif);
250 ieee80211_cqm_rssi_notify( 257 ieee80211_cqm_rssi_notify(
251 vif, 258 vif,
252 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, 259 NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2c2ff3e1f849..953111a502ee 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -108,8 +108,7 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
108 108
109 } 109 }
110 110
111 if (likely(wl->state == WLCORE_STATE_ON)) 111 wlcore_regdomain_config(wl);
112 wlcore_regdomain_config(wl);
113} 112}
114 113
115static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, 114static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
@@ -332,10 +331,9 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
332 struct wl12xx_vif *wlvif, 331 struct wl12xx_vif *wlvif,
333 u8 hlid, u8 tx_pkts) 332 u8 hlid, u8 tx_pkts)
334{ 333{
335 bool fw_ps, single_link; 334 bool fw_ps;
336 335
337 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 336 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 single_link = (wl->active_link_count == 1);
339 337
340 /* 338 /*
341 * Wake up from high level PS if the STA is asleep with too little 339 * Wake up from high level PS if the STA is asleep with too little
@@ -348,8 +346,13 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
348 * Start high-level PS if the STA is asleep with enough blocks in FW. 346 * Start high-level PS if the STA is asleep with enough blocks in FW.
349 * Make an exception if this is the only connected link. In this 347 * Make an exception if this is the only connected link. In this
350 * case FW-memory congestion is less of a problem. 348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
351 */ 353 */
352 else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 354 else if (wl->active_link_count > 3 && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
353 wl12xx_ps_link_start(wl, wlvif, hlid, true); 356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
354} 357}
355 358
@@ -414,13 +417,21 @@ static int wlcore_fw_status(struct wl1271 *wl,
414 417
415 418
416 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) { 419 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
420 u8 diff;
417 lnk = &wl->links[i]; 421 lnk = &wl->links[i];
422
418 /* prevent wrap-around in freed-packets counter */ 423 /* prevent wrap-around in freed-packets counter */
419 lnk->allocated_pkts -= 424 diff = (status_2->counters.tx_lnk_free_pkts[i] -
420 (status_2->counters.tx_lnk_free_pkts[i] - 425 lnk->prev_freed_pkts) & 0xff;
421 lnk->prev_freed_pkts) & 0xff; 426
427 if (diff == 0)
428 continue;
422 429
430 lnk->allocated_pkts -= diff;
423 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i]; 431 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
432
433 /* accumulate the prev_freed_pkts counter */
434 lnk->total_freed_pkts += diff;
424 } 435 }
425 436
426 /* prevent wrap-around in total blocks counter */ 437 /* prevent wrap-around in total blocks counter */
@@ -640,6 +651,25 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
640 unsigned long flags; 651 unsigned long flags;
641 struct wl1271 *wl = cookie; 652 struct wl1271 *wl = cookie;
642 653
654 /* complete the ELP completion */
655 spin_lock_irqsave(&wl->wl_lock, flags);
656 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657 if (wl->elp_compl) {
658 complete(wl->elp_compl);
659 wl->elp_compl = NULL;
660 }
661
662 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
663 /* don't enqueue a work right now. mark it as pending */
664 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
665 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
666 disable_irq_nosync(wl->irq);
667 pm_wakeup_event(wl->dev, 0);
668 spin_unlock_irqrestore(&wl->wl_lock, flags);
669 return IRQ_HANDLED;
670 }
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
672
643 /* TX might be handled here, avoid redundant work */ 673 /* TX might be handled here, avoid redundant work */
644 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 674 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 cancel_work_sync(&wl->tx_work); 675 cancel_work_sync(&wl->tx_work);
@@ -919,18 +949,6 @@ static void wl1271_recovery_work(struct work_struct *work)
919 goto out_unlock; 949 goto out_unlock;
920 } 950 }
921 951
922 /*
923 * Advance security sequence number to overcome potential progress
924 * in the firmware during recovery. This doens't hurt if the network is
925 * not encrypted.
926 */
927 wl12xx_for_each_wlvif(wl, wlvif) {
928 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
929 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
930 wlvif->tx_security_seq +=
931 WL1271_TX_SQN_POST_RECOVERY_PADDING;
932 }
933
934 /* Prevent spurious TX during FW restart */ 952 /* Prevent spurious TX during FW restart */
935 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); 953 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
936 954
@@ -2523,6 +2541,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2523 wl1271_ps_elp_sleep(wl); 2541 wl1271_ps_elp_sleep(wl);
2524 } 2542 }
2525deinit: 2543deinit:
2544 wl12xx_tx_reset_wlvif(wl, wlvif);
2545
2526 /* clear all hlids (except system_hlid) */ 2546 /* clear all hlids (except system_hlid) */
2527 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; 2547 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2528 2548
@@ -2546,7 +2566,6 @@ deinit:
2546 2566
2547 dev_kfree_skb(wlvif->probereq); 2567 dev_kfree_skb(wlvif->probereq);
2548 wlvif->probereq = NULL; 2568 wlvif->probereq = NULL;
2549 wl12xx_tx_reset_wlvif(wl, wlvif);
2550 if (wl->last_wlvif == wlvif) 2569 if (wl->last_wlvif == wlvif)
2551 wl->last_wlvif = NULL; 2570 wl->last_wlvif = NULL;
2552 list_del(&wlvif->list); 2571 list_del(&wlvif->list);
@@ -2860,10 +2879,6 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2860 wlvif->sta.klv_template_id, 2879 wlvif->sta.klv_template_id,
2861 ACX_KEEP_ALIVE_TPL_INVALID); 2880 ACX_KEEP_ALIVE_TPL_INVALID);
2862 2881
2863 /* reset TX security counters on a clean disconnect */
2864 wlvif->tx_security_last_seq_lsb = 0;
2865 wlvif->tx_security_seq = 0;
2866
2867 return 0; 2882 return 0;
2868} 2883}
2869 2884
@@ -3262,6 +3277,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3262 u32 tx_seq_32 = 0; 3277 u32 tx_seq_32 = 0;
3263 u16 tx_seq_16 = 0; 3278 u16 tx_seq_16 = 0;
3264 u8 key_type; 3279 u8 key_type;
3280 u8 hlid;
3265 3281
3266 wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); 3282 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3267 3283
@@ -3271,6 +3287,22 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3271 key_conf->keylen, key_conf->flags); 3287 key_conf->keylen, key_conf->flags);
3272 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 3288 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3273 3289
3290 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3291 if (sta) {
3292 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3293 hlid = wl_sta->hlid;
3294 } else {
3295 hlid = wlvif->ap.bcast_hlid;
3296 }
3297 else
3298 hlid = wlvif->sta.hlid;
3299
3300 if (hlid != WL12XX_INVALID_LINK_ID) {
3301 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3302 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3303 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3304 }
3305
3274 switch (key_conf->cipher) { 3306 switch (key_conf->cipher) {
3275 case WLAN_CIPHER_SUITE_WEP40: 3307 case WLAN_CIPHER_SUITE_WEP40:
3276 case WLAN_CIPHER_SUITE_WEP104: 3308 case WLAN_CIPHER_SUITE_WEP104:
@@ -3280,22 +3312,14 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3280 break; 3312 break;
3281 case WLAN_CIPHER_SUITE_TKIP: 3313 case WLAN_CIPHER_SUITE_TKIP:
3282 key_type = KEY_TKIP; 3314 key_type = KEY_TKIP;
3283
3284 key_conf->hw_key_idx = key_conf->keyidx; 3315 key_conf->hw_key_idx = key_conf->keyidx;
3285 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3286 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3287 break; 3316 break;
3288 case WLAN_CIPHER_SUITE_CCMP: 3317 case WLAN_CIPHER_SUITE_CCMP:
3289 key_type = KEY_AES; 3318 key_type = KEY_AES;
3290
3291 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3319 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3292 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3293 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3294 break; 3320 break;
3295 case WL1271_CIPHER_SUITE_GEM: 3321 case WL1271_CIPHER_SUITE_GEM:
3296 key_type = KEY_GEM; 3322 key_type = KEY_GEM;
3297 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3298 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3299 break; 3323 break;
3300 default: 3324 default:
3301 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 3325 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -3358,6 +3382,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
3358 return; 3382 return;
3359 3383
3360 mutex_lock(&wl->mutex); 3384 mutex_lock(&wl->mutex);
3385
3386 if (unlikely(wl->state != WLCORE_STATE_ON))
3387 goto out;
3388
3361 ret = wl1271_ps_elp_wakeup(wl); 3389 ret = wl1271_ps_elp_wakeup(wl);
3362 if (ret < 0) 3390 if (ret < 0)
3363 goto out; 3391 goto out;
@@ -4474,7 +4502,7 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4474 if (idx != 0) 4502 if (idx != 0)
4475 return -ENOENT; 4503 return -ENOENT;
4476 4504
4477 survey->channel = conf->channel; 4505 survey->channel = conf->chandef.chan;
4478 survey->filled = 0; 4506 survey->filled = 0;
4479 return 0; 4507 return 0;
4480} 4508}
@@ -4499,6 +4527,9 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
4499 return -EBUSY; 4527 return -EBUSY;
4500 } 4528 }
4501 4529
4530 /* use the previous security seq, if this is a recovery/resume */
4531 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4532
4502 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map); 4533 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4503 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); 4534 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4504 wl->active_sta_count++; 4535 wl->active_sta_count++;
@@ -4507,12 +4538,37 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
4507 4538
4508void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) 4539void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4509{ 4540{
4541 struct wl1271_station *wl_sta;
4542 struct ieee80211_sta *sta;
4543 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4544
4510 if (!test_bit(hlid, wlvif->ap.sta_hlid_map)) 4545 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4511 return; 4546 return;
4512 4547
4513 clear_bit(hlid, wlvif->ap.sta_hlid_map); 4548 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4514 __clear_bit(hlid, &wl->ap_ps_map); 4549 __clear_bit(hlid, &wl->ap_ps_map);
4515 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 4550 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4551
4552 /*
4553 * save the last used PN in the private part of iee80211_sta,
4554 * in case of recovery/suspend
4555 */
4556 rcu_read_lock();
4557 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4558 if (sta) {
4559 wl_sta = (void *)sta->drv_priv;
4560 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4561
4562 /*
4563 * increment the initial seq number on recovery to account for
4564 * transmitted packets that we haven't yet got in the FW status
4565 */
4566 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4567 wl_sta->total_freed_pkts +=
4568 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4569 }
4570 rcu_read_unlock();
4571
4516 wl12xx_free_link(wl, wlvif, &hlid); 4572 wl12xx_free_link(wl, wlvif, &hlid);
4517 wl->active_sta_count--; 4573 wl->active_sta_count--;
4518 4574
@@ -4616,13 +4672,11 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
4616 enum ieee80211_sta_state new_state) 4672 enum ieee80211_sta_state new_state)
4617{ 4673{
4618 struct wl1271_station *wl_sta; 4674 struct wl1271_station *wl_sta;
4619 u8 hlid;
4620 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 4675 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4621 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 4676 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4622 int ret; 4677 int ret;
4623 4678
4624 wl_sta = (struct wl1271_station *)sta->drv_priv; 4679 wl_sta = (struct wl1271_station *)sta->drv_priv;
4625 hlid = wl_sta->hlid;
4626 4680
4627 /* Add station (AP mode) */ 4681 /* Add station (AP mode) */
4628 if (is_ap && 4682 if (is_ap &&
@@ -4648,12 +4702,12 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
4648 /* Authorize station (AP mode) */ 4702 /* Authorize station (AP mode) */
4649 if (is_ap && 4703 if (is_ap &&
4650 new_state == IEEE80211_STA_AUTHORIZED) { 4704 new_state == IEEE80211_STA_AUTHORIZED) {
4651 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid); 4705 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4652 if (ret < 0) 4706 if (ret < 0)
4653 return ret; 4707 return ret;
4654 4708
4655 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, 4709 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4656 hlid); 4710 wl_sta->hlid);
4657 if (ret) 4711 if (ret)
4658 return ret; 4712 return ret;
4659 4713
@@ -4784,7 +4838,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4784 break; 4838 break;
4785 } 4839 }
4786 4840
4787 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) { 4841 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4788 ret = -EBUSY; 4842 ret = -EBUSY;
4789 wl1271_error("exceeded max RX BA sessions"); 4843 wl1271_error("exceeded max RX BA sessions");
4790 break; 4844 break;
@@ -4946,7 +5000,7 @@ out:
4946 mutex_unlock(&wl->mutex); 5000 mutex_unlock(&wl->mutex);
4947} 5001}
4948 5002
4949static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop) 5003static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
4950{ 5004{
4951 struct wl1271 *wl = hw->priv; 5005 struct wl1271 *wl = hw->priv;
4952 5006
@@ -4956,7 +5010,8 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4956static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, 5010static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4957 struct ieee80211_vif *vif, 5011 struct ieee80211_vif *vif,
4958 struct ieee80211_channel *chan, 5012 struct ieee80211_channel *chan,
4959 int duration) 5013 int duration,
5014 enum ieee80211_roc_type type)
4960{ 5015{
4961 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5016 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4962 struct wl1271 *wl = hw->priv; 5017 struct wl1271 *wl = hw->priv;
@@ -5091,6 +5146,39 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5091 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed); 5146 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5092} 5147}
5093 5148
5149static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5150 struct ieee80211_vif *vif,
5151 struct ieee80211_sta *sta,
5152 s8 *rssi_dbm)
5153{
5154 struct wl1271 *wl = hw->priv;
5155 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5156 int ret = 0;
5157
5158 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5159
5160 mutex_lock(&wl->mutex);
5161
5162 if (unlikely(wl->state != WLCORE_STATE_ON))
5163 goto out;
5164
5165 ret = wl1271_ps_elp_wakeup(wl);
5166 if (ret < 0)
5167 goto out_sleep;
5168
5169 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5170 if (ret < 0)
5171 goto out_sleep;
5172
5173out_sleep:
5174 wl1271_ps_elp_sleep(wl);
5175
5176out:
5177 mutex_unlock(&wl->mutex);
5178
5179 return ret;
5180}
5181
5094static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) 5182static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5095{ 5183{
5096 struct wl1271 *wl = hw->priv; 5184 struct wl1271 *wl = hw->priv;
@@ -5290,6 +5378,7 @@ static const struct ieee80211_ops wl1271_ops = {
5290 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx, 5378 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5291 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx, 5379 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5292 .sta_rc_update = wlcore_op_sta_rc_update, 5380 .sta_rc_update = wlcore_op_sta_rc_update,
5381 .get_rssi = wlcore_op_get_rssi,
5293 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 5382 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5294}; 5383};
5295 5384
@@ -5929,35 +6018,6 @@ int wlcore_free_hw(struct wl1271 *wl)
5929} 6018}
5930EXPORT_SYMBOL_GPL(wlcore_free_hw); 6019EXPORT_SYMBOL_GPL(wlcore_free_hw);
5931 6020
5932static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5933{
5934 struct wl1271 *wl = cookie;
5935 unsigned long flags;
5936
5937 wl1271_debug(DEBUG_IRQ, "IRQ");
5938
5939 /* complete the ELP completion */
5940 spin_lock_irqsave(&wl->wl_lock, flags);
5941 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5942 if (wl->elp_compl) {
5943 complete(wl->elp_compl);
5944 wl->elp_compl = NULL;
5945 }
5946
5947 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5948 /* don't enqueue a work right now. mark it as pending */
5949 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5950 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5951 disable_irq_nosync(wl->irq);
5952 pm_wakeup_event(wl->dev, 0);
5953 spin_unlock_irqrestore(&wl->wl_lock, flags);
5954 return IRQ_HANDLED;
5955 }
5956 spin_unlock_irqrestore(&wl->wl_lock, flags);
5957
5958 return IRQ_WAKE_THREAD;
5959}
5960
5961static void wlcore_nvs_cb(const struct firmware *fw, void *context) 6021static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5962{ 6022{
5963 struct wl1271 *wl = context; 6023 struct wl1271 *wl = context;
@@ -5999,9 +6059,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5999 else 6059 else
6000 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; 6060 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6001 6061
6002 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq, 6062 ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
6003 irqflags, 6063 irqflags, pdev->name, wl);
6004 pdev->name, wl);
6005 if (ret < 0) { 6064 if (ret < 0) {
6006 wl1271_error("request_irq() failed: %d", ret); 6065 wl1271_error("request_irq() failed: %d", ret);
6007 goto out_free_nvs; 6066 goto out_free_nvs;
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 9b7b6e2e4fbc..9654577efd01 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -29,6 +29,7 @@
29#define WL1271_WAKEUP_TIMEOUT 500 29#define WL1271_WAKEUP_TIMEOUT 500
30 30
31#define ELP_ENTRY_DELAY 30 31#define ELP_ENTRY_DELAY 30
32#define ELP_ENTRY_DELAY_FORCE_PS 5
32 33
33void wl1271_elp_work(struct work_struct *work) 34void wl1271_elp_work(struct work_struct *work)
34{ 35{
@@ -98,7 +99,8 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
98 return; 99 return;
99 } 100 }
100 101
101 timeout = ELP_ENTRY_DELAY; 102 timeout = wl->conf.conn.forced_ps ?
103 ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
102 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 104 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
103 msecs_to_jiffies(timeout)); 105 msecs_to_jiffies(timeout));
104} 106}
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index ece392c54d9c..004d02e71f01 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27#include <linux/spinlock.h>
27 28
28#include "wlcore.h" 29#include "wlcore.h"
29#include "debug.h" 30#include "debug.h"
@@ -104,7 +105,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
104 struct wl12xx_vif *wlvif, 105 struct wl12xx_vif *wlvif,
105 u8 hlid) 106 u8 hlid)
106{ 107{
107 bool fw_ps, single_link; 108 bool fw_ps;
108 u8 tx_pkts; 109 u8 tx_pkts;
109 110
110 if (WARN_ON(!test_bit(hlid, wlvif->links_map))) 111 if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
@@ -112,15 +113,19 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
112 113
113 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 114 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
114 tx_pkts = wl->links[hlid].allocated_pkts; 115 tx_pkts = wl->links[hlid].allocated_pkts;
115 single_link = (wl->active_link_count == 1);
116 116
117 /* 117 /*
118 * if in FW PS and there is enough data in FW we can put the link 118 * if in FW PS and there is enough data in FW we can put the link
119 * into high-level PS and clean out its TX queues. 119 * into high-level PS and clean out its TX queues.
120 * Make an exception if this is the only connected link. In this 120 * Make an exception if this is the only connected link. In this
121 * case FW-memory congestion is less of a problem. 121 * case FW-memory congestion is less of a problem.
122 * Note that a single connected STA means 3 active links, since we must
123 * account for the global and broadcast AP links. The "fw_ps" check
124 * assures us the third link is a STA connected to the AP. Otherwise
125 * the FW would not set the PSM bit.
122 */ 126 */
123 if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 127 if (wl->active_link_count > 3 && fw_ps &&
128 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
124 wl12xx_ps_link_start(wl, wlvif, hlid, true); 129 wl12xx_ps_link_start(wl, wlvif, hlid, true);
125} 130}
126 131
@@ -639,6 +644,7 @@ next:
639 644
640 } 645 }
641 646
647out:
642 if (!skb && 648 if (!skb &&
643 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { 649 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
644 int q; 650 int q;
@@ -652,7 +658,6 @@ next:
652 spin_unlock_irqrestore(&wl->wl_lock, flags); 658 spin_unlock_irqrestore(&wl->wl_lock, flags);
653 } 659 }
654 660
655out:
656 return skb; 661 return skb;
657} 662}
658 663
@@ -928,25 +933,6 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
928 933
929 wl->stats.retry_count += result->ack_failures; 934 wl->stats.retry_count += result->ack_failures;
930 935
931 /*
932 * update sequence number only when relevant, i.e. only in
933 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
934 */
935 if (info->control.hw_key &&
936 (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
937 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
938 info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
939 u8 fw_lsb = result->tx_security_sequence_number_lsb;
940 u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
941
942 /*
943 * update security sequence number, taking care of potential
944 * wrap-around
945 */
946 wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
947 wlvif->tx_security_last_seq_lsb = fw_lsb;
948 }
949
950 /* remove private header from packet */ 936 /* remove private header from packet */
951 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 937 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
952 938
@@ -1061,7 +1047,8 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1061 1047
1062 /* TX failure */ 1048 /* TX failure */
1063 for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { 1049 for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
1064 if (wlvif->bss_type == BSS_TYPE_AP_BSS) { 1050 if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
1051 i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
1065 /* this calls wl12xx_free_link */ 1052 /* this calls wl12xx_free_link */
1066 wl1271_free_sta(wl, wlvif, i); 1053 wl1271_free_sta(wl, wlvif, i);
1067 } else { 1054 } else {
@@ -1304,7 +1291,7 @@ bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
1304{ 1291{
1305 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1292 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1306 1293
1307 WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock)); 1294 assert_spin_locked(&wl->wl_lock);
1308 return test_bit(reason, &wl->queue_stop_reasons[hwq]); 1295 return test_bit(reason, &wl->queue_stop_reasons[hwq]);
1309} 1296}
1310 1297
@@ -1313,6 +1300,6 @@ bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1313{ 1300{
1314 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1301 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1315 1302
1316 WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock)); 1303 assert_spin_locked(&wl->wl_lock);
1317 return !!wl->queue_stop_reasons[hwq]; 1304 return !!wl->queue_stop_reasons[hwq];
1318} 1305}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index af9fecaefc30..0034979e97cb 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -390,6 +390,9 @@ struct wl1271 {
390 /* number of currently active RX BA sessions */ 390 /* number of currently active RX BA sessions */
391 int ba_rx_session_count; 391 int ba_rx_session_count;
392 392
393 /* Maximum number of supported RX BA sessions */
394 int ba_rx_session_count_max;
395
393 /* AP-mode - number of currently connected stations */ 396 /* AP-mode - number of currently connected stations */
394 int active_sta_count; 397 int active_sta_count;
395 398
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 508f5b0f8a70..e5e146435fe7 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -274,6 +274,13 @@ struct wl1271_link {
274 274
275 /* The wlvif this link belongs to. Might be null for global links */ 275 /* The wlvif this link belongs to. Might be null for global links */
276 struct wl12xx_vif *wlvif; 276 struct wl12xx_vif *wlvif;
277
278 /*
279 * total freed FW packets on the link - used for tracking the
280 * AES/TKIP PN across recoveries. Re-initialized each time
281 * from the wl1271_station structure.
282 */
283 u64 total_freed_pkts;
277}; 284};
278 285
279#define WL1271_MAX_RX_FILTERS 5 286#define WL1271_MAX_RX_FILTERS 5
@@ -318,6 +325,13 @@ struct wl12xx_rx_filter {
318struct wl1271_station { 325struct wl1271_station {
319 u8 hlid; 326 u8 hlid;
320 bool in_connection; 327 bool in_connection;
328
329 /*
330 * total freed FW packets on the link to the STA - used for tracking the
331 * AES/TKIP PN across recoveries. Re-initialized each time from the
332 * wl1271_station structure.
333 */
334 u64 total_freed_pkts;
321}; 335};
322 336
323struct wl12xx_vif { 337struct wl12xx_vif {
@@ -449,16 +463,15 @@ struct wl12xx_vif {
449 */ 463 */
450 struct { 464 struct {
451 u8 persistent[0]; 465 u8 persistent[0];
466
452 /* 467 /*
453 * Security sequence number 468 * total freed FW packets on the link - used for
454 * bits 0-15: lower 16 bits part of sequence number 469 * storing the AES/TKIP PN during recovery, as this
455 * bits 16-47: higher 32 bits part of sequence number 470 * structure is not zeroed out.
456 * bits 48-63: not in use 471 * For STA this holds the PN of the link to the AP.
472 * For AP this holds the PN of the broadcast link.
457 */ 473 */
458 u64 tx_security_seq; 474 u64 total_freed_pkts;
459
460 /* 8 bits of the last sequence number in use */
461 u8 tx_security_last_seq_lsb;
462 }; 475 };
463}; 476};
464 477
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 114364b5d466..c6208a7988e4 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1156,10 +1156,10 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
1156 struct ieee80211_conf *conf = &hw->conf; 1156 struct ieee80211_conf *conf = &hw->conf;
1157 1157
1158 spin_lock_irq(&mac->lock); 1158 spin_lock_irq(&mac->lock);
1159 mac->channel = conf->channel->hw_value; 1159 mac->channel = conf->chandef.chan->hw_value;
1160 spin_unlock_irq(&mac->lock); 1160 spin_unlock_irq(&mac->lock);
1161 1161
1162 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); 1162 return zd_chip_set_channel(&mac->chip, conf->chandef.chan->hw_value);
1163} 1163}
1164 1164
1165static void zd_beacon_done(struct zd_mac *mac) 1165static void zd_beacon_done(struct zd_mac *mac)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cd49ba949636..a2865f17c667 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -47,11 +47,25 @@
47#include <asm/xen/hypercall.h> 47#include <asm/xen/hypercall.h>
48#include <asm/xen/page.h> 48#include <asm/xen/page.h>
49 49
50/*
51 * This is the maximum slots a skb can have. If a guest sends a skb
52 * which exceeds this limit it is considered malicious.
53 */
54#define MAX_SKB_SLOTS_DEFAULT 20
55static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
56module_param(max_skb_slots, uint, 0444);
57
58typedef unsigned int pending_ring_idx_t;
59#define INVALID_PENDING_RING_IDX (~0U)
60
50struct pending_tx_info { 61struct pending_tx_info {
51 struct xen_netif_tx_request req; 62 struct xen_netif_tx_request req; /* coalesced tx request */
52 struct xenvif *vif; 63 struct xenvif *vif;
64 pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
65 * if it is head of one or more tx
66 * reqs
67 */
53}; 68};
54typedef unsigned int pending_ring_idx_t;
55 69
56struct netbk_rx_meta { 70struct netbk_rx_meta {
57 int id; 71 int id;
@@ -102,7 +116,11 @@ struct xen_netbk {
102 atomic_t netfront_count; 116 atomic_t netfront_count;
103 117
104 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; 118 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
105 struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; 119 /* Coalescing tx requests before copying makes number of grant
120 * copy ops greater or equal to number of slots required. In
121 * worst case a tx request consumes 2 gnttab_copy.
122 */
123 struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
106 124
107 u16 pending_ring[MAX_PENDING_REQS]; 125 u16 pending_ring[MAX_PENDING_REQS];
108 126
@@ -118,6 +136,16 @@ struct xen_netbk {
118static struct xen_netbk *xen_netbk; 136static struct xen_netbk *xen_netbk;
119static int xen_netbk_group_nr; 137static int xen_netbk_group_nr;
120 138
139/*
140 * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
141 * one or more merged tx requests, otherwise it is the continuation of
142 * previous tx request.
143 */
144static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
145{
146 return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
147}
148
121void xen_netbk_add_xenvif(struct xenvif *vif) 149void xen_netbk_add_xenvif(struct xenvif *vif)
122{ 150{
123 int i; 151 int i;
@@ -250,6 +278,7 @@ static int max_required_rx_slots(struct xenvif *vif)
250{ 278{
251 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); 279 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
252 280
281 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
253 if (vif->can_sg || vif->gso || vif->gso_prefix) 282 if (vif->can_sg || vif->gso || vif->gso_prefix)
254 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ 283 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
255 284
@@ -657,6 +686,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
657 __skb_queue_tail(&rxq, skb); 686 __skb_queue_tail(&rxq, skb);
658 687
659 /* Filled the batch queue? */ 688 /* Filled the batch queue? */
689 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
660 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) 690 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
661 break; 691 break;
662 } 692 }
@@ -898,51 +928,91 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
898 928
899static int netbk_count_requests(struct xenvif *vif, 929static int netbk_count_requests(struct xenvif *vif,
900 struct xen_netif_tx_request *first, 930 struct xen_netif_tx_request *first,
931 RING_IDX first_idx,
901 struct xen_netif_tx_request *txp, 932 struct xen_netif_tx_request *txp,
902 int work_to_do) 933 int work_to_do)
903{ 934{
904 RING_IDX cons = vif->tx.req_cons; 935 RING_IDX cons = vif->tx.req_cons;
905 int frags = 0; 936 int slots = 0;
937 int drop_err = 0;
906 938
907 if (!(first->flags & XEN_NETTXF_more_data)) 939 if (!(first->flags & XEN_NETTXF_more_data))
908 return 0; 940 return 0;
909 941
910 do { 942 do {
911 if (frags >= work_to_do) { 943 if (slots >= work_to_do) {
912 netdev_err(vif->dev, "Need more frags\n"); 944 netdev_err(vif->dev,
945 "Asked for %d slots but exceeds this limit\n",
946 work_to_do);
913 netbk_fatal_tx_err(vif); 947 netbk_fatal_tx_err(vif);
914 return -ENODATA; 948 return -ENODATA;
915 } 949 }
916 950
917 if (unlikely(frags >= MAX_SKB_FRAGS)) { 951 /* This guest is really using too many slots and
918 netdev_err(vif->dev, "Too many frags\n"); 952 * considered malicious.
953 */
954 if (unlikely(slots >= max_skb_slots)) {
955 netdev_err(vif->dev,
956 "Malicious frontend using %d slots, threshold %u\n",
957 slots, max_skb_slots);
919 netbk_fatal_tx_err(vif); 958 netbk_fatal_tx_err(vif);
920 return -E2BIG; 959 return -E2BIG;
921 } 960 }
922 961
923 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), 962 /* Xen network protocol had implicit dependency on
963 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
964 * historical MAX_SKB_FRAGS value 18 to honor the same
965 * behavior as before. Any packet using more than 18
966 * slots but less than max_skb_slots slots is dropped
967 */
968 if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
969 if (net_ratelimit())
970 netdev_dbg(vif->dev,
971 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
972 slots, XEN_NETIF_NR_SLOTS_MIN);
973 drop_err = -E2BIG;
974 }
975
976 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
924 sizeof(*txp)); 977 sizeof(*txp));
925 if (txp->size > first->size) { 978
926 netdev_err(vif->dev, "Frag is bigger than frame.\n"); 979 /* If the guest submitted a frame >= 64 KiB then
927 netbk_fatal_tx_err(vif); 980 * first->size overflowed and following slots will
928 return -EIO; 981 * appear to be larger than the frame.
982 *
983 * This cannot be fatal error as there are buggy
984 * frontends that do this.
985 *
986 * Consume all slots and drop the packet.
987 */
988 if (!drop_err && txp->size > first->size) {
989 if (net_ratelimit())
990 netdev_dbg(vif->dev,
991 "Invalid tx request, slot size %u > remaining size %u\n",
992 txp->size, first->size);
993 drop_err = -EIO;
929 } 994 }
930 995
931 first->size -= txp->size; 996 first->size -= txp->size;
932 frags++; 997 slots++;
933 998
934 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 999 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
935 netdev_err(vif->dev, "txp->offset: %x, size: %u\n", 1000 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
936 txp->offset, txp->size); 1001 txp->offset, txp->size);
937 netbk_fatal_tx_err(vif); 1002 netbk_fatal_tx_err(vif);
938 return -EINVAL; 1003 return -EINVAL;
939 } 1004 }
940 } while ((txp++)->flags & XEN_NETTXF_more_data); 1005 } while ((txp++)->flags & XEN_NETTXF_more_data);
941 return frags; 1006
1007 if (drop_err) {
1008 netbk_tx_err(vif, first, first_idx + slots);
1009 return drop_err;
1010 }
1011
1012 return slots;
942} 1013}
943 1014
944static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, 1015static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
945 struct sk_buff *skb,
946 u16 pending_idx) 1016 u16 pending_idx)
947{ 1017{
948 struct page *page; 1018 struct page *page;
@@ -963,48 +1033,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
963 struct skb_shared_info *shinfo = skb_shinfo(skb); 1033 struct skb_shared_info *shinfo = skb_shinfo(skb);
964 skb_frag_t *frags = shinfo->frags; 1034 skb_frag_t *frags = shinfo->frags;
965 u16 pending_idx = *((u16 *)skb->data); 1035 u16 pending_idx = *((u16 *)skb->data);
966 int i, start; 1036 u16 head_idx = 0;
1037 int slot, start;
1038 struct page *page;
1039 pending_ring_idx_t index, start_idx = 0;
1040 uint16_t dst_offset;
1041 unsigned int nr_slots;
1042 struct pending_tx_info *first = NULL;
1043
1044 /* At this point shinfo->nr_frags is in fact the number of
1045 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
1046 */
1047 nr_slots = shinfo->nr_frags;
967 1048
968 /* Skip first skb fragment if it is on same page as header fragment. */ 1049 /* Skip first skb fragment if it is on same page as header fragment. */
969 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 1050 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
970 1051
971 for (i = start; i < shinfo->nr_frags; i++, txp++) { 1052 /* Coalesce tx requests, at this point the packet passed in
972 struct page *page; 1053 * should be <= 64K. Any packets larger than 64K have been
973 pending_ring_idx_t index; 1054 * handled in netbk_count_requests().
1055 */
1056 for (shinfo->nr_frags = slot = start; slot < nr_slots;
1057 shinfo->nr_frags++) {
974 struct pending_tx_info *pending_tx_info = 1058 struct pending_tx_info *pending_tx_info =
975 netbk->pending_tx_info; 1059 netbk->pending_tx_info;
976 1060
977 index = pending_index(netbk->pending_cons++); 1061 page = alloc_page(GFP_KERNEL|__GFP_COLD);
978 pending_idx = netbk->pending_ring[index];
979 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
980 if (!page) 1062 if (!page)
981 goto err; 1063 goto err;
982 1064
983 gop->source.u.ref = txp->gref; 1065 dst_offset = 0;
984 gop->source.domid = vif->domid; 1066 first = NULL;
985 gop->source.offset = txp->offset; 1067 while (dst_offset < PAGE_SIZE && slot < nr_slots) {
986 1068 gop->flags = GNTCOPY_source_gref;
987 gop->dest.u.gmfn = virt_to_mfn(page_address(page)); 1069
988 gop->dest.domid = DOMID_SELF; 1070 gop->source.u.ref = txp->gref;
989 gop->dest.offset = txp->offset; 1071 gop->source.domid = vif->domid;
990 1072 gop->source.offset = txp->offset;
991 gop->len = txp->size; 1073
992 gop->flags = GNTCOPY_source_gref; 1074 gop->dest.domid = DOMID_SELF;
1075
1076 gop->dest.offset = dst_offset;
1077 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1078
1079 if (dst_offset + txp->size > PAGE_SIZE) {
1080 /* This page can only merge a portion
1081 * of tx request. Do not increment any
1082 * pointer / counter here. The txp
1083 * will be dealt with in future
1084 * rounds, eventually hitting the
1085 * `else` branch.
1086 */
1087 gop->len = PAGE_SIZE - dst_offset;
1088 txp->offset += gop->len;
1089 txp->size -= gop->len;
1090 dst_offset += gop->len; /* quit loop */
1091 } else {
1092 /* This tx request can be merged in the page */
1093 gop->len = txp->size;
1094 dst_offset += gop->len;
1095
1096 index = pending_index(netbk->pending_cons++);
1097
1098 pending_idx = netbk->pending_ring[index];
1099
1100 memcpy(&pending_tx_info[pending_idx].req, txp,
1101 sizeof(*txp));
1102 xenvif_get(vif);
1103
1104 pending_tx_info[pending_idx].vif = vif;
1105
1106 /* Poison these fields, corresponding
1107 * fields for head tx req will be set
1108 * to correct values after the loop.
1109 */
1110 netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1111 pending_tx_info[pending_idx].head =
1112 INVALID_PENDING_RING_IDX;
1113
1114 if (!first) {
1115 first = &pending_tx_info[pending_idx];
1116 start_idx = index;
1117 head_idx = pending_idx;
1118 }
1119
1120 txp++;
1121 slot++;
1122 }
993 1123
994 gop++; 1124 gop++;
1125 }
995 1126
996 memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); 1127 first->req.offset = 0;
997 xenvif_get(vif); 1128 first->req.size = dst_offset;
998 pending_tx_info[pending_idx].vif = vif; 1129 first->head = start_idx;
999 frag_set_pending_idx(&frags[i], pending_idx); 1130 set_page_ext(page, netbk, head_idx);
1131 netbk->mmap_pages[head_idx] = page;
1132 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1000 } 1133 }
1001 1134
1135 BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1136
1002 return gop; 1137 return gop;
1003err: 1138err:
1004 /* Unwind, freeing all pages and sending error responses. */ 1139 /* Unwind, freeing all pages and sending error responses. */
1005 while (i-- > start) { 1140 while (shinfo->nr_frags-- > start) {
1006 xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), 1141 xen_netbk_idx_release(netbk,
1007 XEN_NETIF_RSP_ERROR); 1142 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1143 XEN_NETIF_RSP_ERROR);
1008 } 1144 }
1009 /* The head too, if necessary. */ 1145 /* The head too, if necessary. */
1010 if (start) 1146 if (start)
@@ -1020,8 +1156,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1020 struct gnttab_copy *gop = *gopp; 1156 struct gnttab_copy *gop = *gopp;
1021 u16 pending_idx = *((u16 *)skb->data); 1157 u16 pending_idx = *((u16 *)skb->data);
1022 struct skb_shared_info *shinfo = skb_shinfo(skb); 1158 struct skb_shared_info *shinfo = skb_shinfo(skb);
1159 struct pending_tx_info *tx_info;
1023 int nr_frags = shinfo->nr_frags; 1160 int nr_frags = shinfo->nr_frags;
1024 int i, err, start; 1161 int i, err, start;
1162 u16 peek; /* peek into next tx request */
1025 1163
1026 /* Check status of header. */ 1164 /* Check status of header. */
1027 err = gop->status; 1165 err = gop->status;
@@ -1033,11 +1171,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1033 1171
1034 for (i = start; i < nr_frags; i++) { 1172 for (i = start; i < nr_frags; i++) {
1035 int j, newerr; 1173 int j, newerr;
1174 pending_ring_idx_t head;
1036 1175
1037 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1176 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1177 tx_info = &netbk->pending_tx_info[pending_idx];
1178 head = tx_info->head;
1038 1179
1039 /* Check error status: if okay then remember grant handle. */ 1180 /* Check error status: if okay then remember grant handle. */
1040 newerr = (++gop)->status; 1181 do {
1182 newerr = (++gop)->status;
1183 if (newerr)
1184 break;
1185 peek = netbk->pending_ring[pending_index(++head)];
1186 } while (!pending_tx_is_head(netbk, peek));
1187
1041 if (likely(!newerr)) { 1188 if (likely(!newerr)) {
1042 /* Had a previous error? Invalidate this fragment. */ 1189 /* Had a previous error? Invalidate this fragment. */
1043 if (unlikely(err)) 1190 if (unlikely(err))
@@ -1157,7 +1304,6 @@ static int netbk_set_skb_gso(struct xenvif *vif,
1157static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) 1304static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1158{ 1305{
1159 struct iphdr *iph; 1306 struct iphdr *iph;
1160 unsigned char *th;
1161 int err = -EPROTO; 1307 int err = -EPROTO;
1162 int recalculate_partial_csum = 0; 1308 int recalculate_partial_csum = 0;
1163 1309
@@ -1181,27 +1327,26 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1181 goto out; 1327 goto out;
1182 1328
1183 iph = (void *)skb->data; 1329 iph = (void *)skb->data;
1184 th = skb->data + 4 * iph->ihl;
1185 if (th >= skb_tail_pointer(skb))
1186 goto out;
1187
1188 skb->csum_start = th - skb->head;
1189 switch (iph->protocol) { 1330 switch (iph->protocol) {
1190 case IPPROTO_TCP: 1331 case IPPROTO_TCP:
1191 skb->csum_offset = offsetof(struct tcphdr, check); 1332 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1333 offsetof(struct tcphdr, check)))
1334 goto out;
1192 1335
1193 if (recalculate_partial_csum) { 1336 if (recalculate_partial_csum) {
1194 struct tcphdr *tcph = (struct tcphdr *)th; 1337 struct tcphdr *tcph = tcp_hdr(skb);
1195 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1338 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1196 skb->len - iph->ihl*4, 1339 skb->len - iph->ihl*4,
1197 IPPROTO_TCP, 0); 1340 IPPROTO_TCP, 0);
1198 } 1341 }
1199 break; 1342 break;
1200 case IPPROTO_UDP: 1343 case IPPROTO_UDP:
1201 skb->csum_offset = offsetof(struct udphdr, check); 1344 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1345 offsetof(struct udphdr, check)))
1346 goto out;
1202 1347
1203 if (recalculate_partial_csum) { 1348 if (recalculate_partial_csum) {
1204 struct udphdr *udph = (struct udphdr *)th; 1349 struct udphdr *udph = udp_hdr(skb);
1205 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1350 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1206 skb->len - iph->ihl*4, 1351 skb->len - iph->ihl*4,
1207 IPPROTO_UDP, 0); 1352 IPPROTO_UDP, 0);
@@ -1215,9 +1360,6 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1215 goto out; 1360 goto out;
1216 } 1361 }
1217 1362
1218 if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
1219 goto out;
1220
1221 err = 0; 1363 err = 0;
1222 1364
1223out: 1365out:
@@ -1262,11 +1404,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1262 struct sk_buff *skb; 1404 struct sk_buff *skb;
1263 int ret; 1405 int ret;
1264 1406
1265 while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && 1407 while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1408 < MAX_PENDING_REQS) &&
1266 !list_empty(&netbk->net_schedule_list)) { 1409 !list_empty(&netbk->net_schedule_list)) {
1267 struct xenvif *vif; 1410 struct xenvif *vif;
1268 struct xen_netif_tx_request txreq; 1411 struct xen_netif_tx_request txreq;
1269 struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; 1412 struct xen_netif_tx_request txfrags[max_skb_slots];
1270 struct page *page; 1413 struct page *page;
1271 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; 1414 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1272 u16 pending_idx; 1415 u16 pending_idx;
@@ -1327,7 +1470,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1327 continue; 1470 continue;
1328 } 1471 }
1329 1472
1330 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1473 ret = netbk_count_requests(vif, &txreq, idx,
1474 txfrags, work_to_do);
1331 if (unlikely(ret < 0)) 1475 if (unlikely(ret < 0))
1332 continue; 1476 continue;
1333 1477
@@ -1354,7 +1498,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1354 pending_idx = netbk->pending_ring[index]; 1498 pending_idx = netbk->pending_ring[index];
1355 1499
1356 data_len = (txreq.size > PKT_PROT_LEN && 1500 data_len = (txreq.size > PKT_PROT_LEN &&
1357 ret < MAX_SKB_FRAGS) ? 1501 ret < XEN_NETIF_NR_SLOTS_MIN) ?
1358 PKT_PROT_LEN : txreq.size; 1502 PKT_PROT_LEN : txreq.size;
1359 1503
1360 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, 1504 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1381,7 +1525,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1381 } 1525 }
1382 1526
1383 /* XXX could copy straight to head */ 1527 /* XXX could copy straight to head */
1384 page = xen_netbk_alloc_page(netbk, skb, pending_idx); 1528 page = xen_netbk_alloc_page(netbk, pending_idx);
1385 if (!page) { 1529 if (!page) {
1386 kfree_skb(skb); 1530 kfree_skb(skb);
1387 netbk_tx_err(vif, &txreq, idx); 1531 netbk_tx_err(vif, &txreq, idx);
@@ -1404,6 +1548,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1404 memcpy(&netbk->pending_tx_info[pending_idx].req, 1548 memcpy(&netbk->pending_tx_info[pending_idx].req,
1405 &txreq, sizeof(txreq)); 1549 &txreq, sizeof(txreq));
1406 netbk->pending_tx_info[pending_idx].vif = vif; 1550 netbk->pending_tx_info[pending_idx].vif = vif;
1551 netbk->pending_tx_info[pending_idx].head = index;
1407 *((u16 *)skb->data) = pending_idx; 1552 *((u16 *)skb->data) = pending_idx;
1408 1553
1409 __skb_put(skb, data_len); 1554 __skb_put(skb, data_len);
@@ -1496,6 +1641,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1496 1641
1497 skb->dev = vif->dev; 1642 skb->dev = vif->dev;
1498 skb->protocol = eth_type_trans(skb, skb->dev); 1643 skb->protocol = eth_type_trans(skb, skb->dev);
1644 skb_reset_network_header(skb);
1499 1645
1500 if (checksum_setup(vif, skb)) { 1646 if (checksum_setup(vif, skb)) {
1501 netdev_dbg(vif->dev, 1647 netdev_dbg(vif->dev,
@@ -1504,6 +1650,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1504 continue; 1650 continue;
1505 } 1651 }
1506 1652
1653 skb_probe_transport_header(skb, 0);
1654
1507 vif->dev->stats.rx_bytes += skb->len; 1655 vif->dev->stats.rx_bytes += skb->len;
1508 vif->dev->stats.rx_packets++; 1656 vif->dev->stats.rx_packets++;
1509 1657
@@ -1531,7 +1679,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1531{ 1679{
1532 struct xenvif *vif; 1680 struct xenvif *vif;
1533 struct pending_tx_info *pending_tx_info; 1681 struct pending_tx_info *pending_tx_info;
1534 pending_ring_idx_t index; 1682 pending_ring_idx_t head;
1683 u16 peek; /* peek into next tx request */
1684
1685 BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
1535 1686
1536 /* Already complete? */ 1687 /* Already complete? */
1537 if (netbk->mmap_pages[pending_idx] == NULL) 1688 if (netbk->mmap_pages[pending_idx] == NULL)
@@ -1540,19 +1691,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1540 pending_tx_info = &netbk->pending_tx_info[pending_idx]; 1691 pending_tx_info = &netbk->pending_tx_info[pending_idx];
1541 1692
1542 vif = pending_tx_info->vif; 1693 vif = pending_tx_info->vif;
1694 head = pending_tx_info->head;
1695
1696 BUG_ON(!pending_tx_is_head(netbk, head));
1697 BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
1698
1699 do {
1700 pending_ring_idx_t index;
1701 pending_ring_idx_t idx = pending_index(head);
1702 u16 info_idx = netbk->pending_ring[idx];
1543 1703
1544 make_tx_response(vif, &pending_tx_info->req, status); 1704 pending_tx_info = &netbk->pending_tx_info[info_idx];
1705 make_tx_response(vif, &pending_tx_info->req, status);
1545 1706
1546 index = pending_index(netbk->pending_prod++); 1707 /* Setting any number other than
1547 netbk->pending_ring[index] = pending_idx; 1708 * INVALID_PENDING_RING_IDX indicates this slot is
1709 * starting a new packet / ending a previous packet.
1710 */
1711 pending_tx_info->head = 0;
1548 1712
1549 xenvif_put(vif); 1713 index = pending_index(netbk->pending_prod++);
1714 netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1715
1716 xenvif_put(vif);
1717
1718 peek = netbk->pending_ring[pending_index(++head)];
1719
1720 } while (!pending_tx_is_head(netbk, peek));
1550 1721
1551 netbk->mmap_pages[pending_idx]->mapping = 0; 1722 netbk->mmap_pages[pending_idx]->mapping = 0;
1552 put_page(netbk->mmap_pages[pending_idx]); 1723 put_page(netbk->mmap_pages[pending_idx]);
1553 netbk->mmap_pages[pending_idx] = NULL; 1724 netbk->mmap_pages[pending_idx] = NULL;
1554} 1725}
1555 1726
1727
1556static void make_tx_response(struct xenvif *vif, 1728static void make_tx_response(struct xenvif *vif,
1557 struct xen_netif_tx_request *txp, 1729 struct xen_netif_tx_request *txp,
1558 s8 st) 1730 s8 st)
@@ -1605,8 +1777,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
1605static inline int tx_work_todo(struct xen_netbk *netbk) 1777static inline int tx_work_todo(struct xen_netbk *netbk)
1606{ 1778{
1607 1779
1608 if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && 1780 if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1609 !list_empty(&netbk->net_schedule_list)) 1781 < MAX_PENDING_REQS) &&
1782 !list_empty(&netbk->net_schedule_list))
1610 return 1; 1783 return 1;
1611 1784
1612 return 0; 1785 return 0;
@@ -1689,6 +1862,13 @@ static int __init netback_init(void)
1689 if (!xen_domain()) 1862 if (!xen_domain())
1690 return -ENODEV; 1863 return -ENODEV;
1691 1864
1865 if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
1866 printk(KERN_INFO
1867 "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
1868 max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
1869 max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
1870 }
1871
1692 xen_netbk_group_nr = num_online_cpus(); 1872 xen_netbk_group_nr = num_online_cpus();
1693 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); 1873 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1694 if (!xen_netbk) 1874 if (!xen_netbk)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7ffa43bd7cf9..1db101415069 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -36,7 +36,7 @@
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/ethtool.h> 37#include <linux/ethtool.h>
38#include <linux/if_ether.h> 38#include <linux/if_ether.h>
39#include <linux/tcp.h> 39#include <net/tcp.h>
40#include <linux/udp.h> 40#include <linux/udp.h>
41#include <linux/moduleparam.h> 41#include <linux/moduleparam.h>
42#include <linux/mm.h> 42#include <linux/mm.h>
@@ -537,7 +537,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
537 struct netfront_info *np = netdev_priv(dev); 537 struct netfront_info *np = netdev_priv(dev);
538 struct netfront_stats *stats = this_cpu_ptr(np->stats); 538 struct netfront_stats *stats = this_cpu_ptr(np->stats);
539 struct xen_netif_tx_request *tx; 539 struct xen_netif_tx_request *tx;
540 struct xen_netif_extra_info *extra;
541 char *data = skb->data; 540 char *data = skb->data;
542 RING_IDX i; 541 RING_IDX i;
543 grant_ref_t ref; 542 grant_ref_t ref;
@@ -548,6 +547,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
548 unsigned int len = skb_headlen(skb); 547 unsigned int len = skb_headlen(skb);
549 unsigned long flags; 548 unsigned long flags;
550 549
550 /* If skb->len is too big for wire format, drop skb and alert
551 * user about misconfiguration.
552 */
553 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
554 net_alert_ratelimited(
555 "xennet: skb->len = %u, too big for wire format\n",
556 skb->len);
557 goto drop;
558 }
559
551 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + 560 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
552 xennet_count_skb_frag_slots(skb); 561 xennet_count_skb_frag_slots(skb);
553 if (unlikely(slots > MAX_SKB_FRAGS + 1)) { 562 if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
@@ -581,7 +590,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
581 tx->gref = np->grant_tx_ref[id] = ref; 590 tx->gref = np->grant_tx_ref[id] = ref;
582 tx->offset = offset; 591 tx->offset = offset;
583 tx->size = len; 592 tx->size = len;
584 extra = NULL;
585 593
586 tx->flags = 0; 594 tx->flags = 0;
587 if (skb->ip_summed == CHECKSUM_PARTIAL) 595 if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -597,10 +605,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
597 gso = (struct xen_netif_extra_info *) 605 gso = (struct xen_netif_extra_info *)
598 RING_GET_REQUEST(&np->tx, ++i); 606 RING_GET_REQUEST(&np->tx, ++i);
599 607
600 if (extra) 608 tx->flags |= XEN_NETTXF_extra_info;
601 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
602 else
603 tx->flags |= XEN_NETTXF_extra_info;
604 609
605 gso->u.gso.size = skb_shinfo(skb)->gso_size; 610 gso->u.gso.size = skb_shinfo(skb)->gso_size;
606 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; 611 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -609,7 +614,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
609 614
610 gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 615 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
611 gso->flags = 0; 616 gso->flags = 0;
612 extra = gso;
613 } 617 }
614 618
615 np->tx.req_prod_pvt = i + 1; 619 np->tx.req_prod_pvt = i + 1;
@@ -718,7 +722,7 @@ static int xennet_get_responses(struct netfront_info *np,
718 struct sk_buff *skb = xennet_get_rx_skb(np, cons); 722 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
719 grant_ref_t ref = xennet_get_rx_ref(np, cons); 723 grant_ref_t ref = xennet_get_rx_ref(np, cons);
720 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); 724 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
721 int frags = 1; 725 int slots = 1;
722 int err = 0; 726 int err = 0;
723 unsigned long ret; 727 unsigned long ret;
724 728
@@ -741,7 +745,7 @@ static int xennet_get_responses(struct netfront_info *np,
741 /* 745 /*
742 * This definitely indicates a bug, either in this driver or in 746 * This definitely indicates a bug, either in this driver or in
743 * the backend driver. In future this should flag the bad 747 * the backend driver. In future this should flag the bad
744 * situation to the system controller to reboot the backed. 748 * situation to the system controller to reboot the backend.
745 */ 749 */
746 if (ref == GRANT_INVALID_REF) { 750 if (ref == GRANT_INVALID_REF) {
747 if (net_ratelimit()) 751 if (net_ratelimit())
@@ -762,27 +766,27 @@ next:
762 if (!(rx->flags & XEN_NETRXF_more_data)) 766 if (!(rx->flags & XEN_NETRXF_more_data))
763 break; 767 break;
764 768
765 if (cons + frags == rp) { 769 if (cons + slots == rp) {
766 if (net_ratelimit()) 770 if (net_ratelimit())
767 dev_warn(dev, "Need more frags\n"); 771 dev_warn(dev, "Need more slots\n");
768 err = -ENOENT; 772 err = -ENOENT;
769 break; 773 break;
770 } 774 }
771 775
772 rx = RING_GET_RESPONSE(&np->rx, cons + frags); 776 rx = RING_GET_RESPONSE(&np->rx, cons + slots);
773 skb = xennet_get_rx_skb(np, cons + frags); 777 skb = xennet_get_rx_skb(np, cons + slots);
774 ref = xennet_get_rx_ref(np, cons + frags); 778 ref = xennet_get_rx_ref(np, cons + slots);
775 frags++; 779 slots++;
776 } 780 }
777 781
778 if (unlikely(frags > max)) { 782 if (unlikely(slots > max)) {
779 if (net_ratelimit()) 783 if (net_ratelimit())
780 dev_warn(dev, "Too many frags\n"); 784 dev_warn(dev, "Too many slots\n");
781 err = -E2BIG; 785 err = -E2BIG;
782 } 786 }
783 787
784 if (unlikely(err)) 788 if (unlikely(err))
785 np->rx.rsp_cons = cons + frags; 789 np->rx.rsp_cons = cons + slots;
786 790
787 return err; 791 return err;
788} 792}
@@ -1064,7 +1068,8 @@ err:
1064 1068
1065static int xennet_change_mtu(struct net_device *dev, int mtu) 1069static int xennet_change_mtu(struct net_device *dev, int mtu)
1066{ 1070{
1067 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; 1071 int max = xennet_can_sg(dev) ?
1072 XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1068 1073
1069 if (mtu > max) 1074 if (mtu > max)
1070 return -EINVAL; 1075 return -EINVAL;
@@ -1368,6 +1373,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1368 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); 1373 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1369 SET_NETDEV_DEV(netdev, &dev->dev); 1374 SET_NETDEV_DEV(netdev, &dev->dev);
1370 1375
1376 netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1377
1371 np->netdev = netdev; 1378 np->netdev = netdev;
1372 1379
1373 netif_carrier_off(netdev); 1380 netif_carrier_off(netdev);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index e57034971ccc..4775d4e61b88 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -26,6 +26,16 @@ config NFC_WILINK
26 Say Y here to compile support for Texas Instrument's NFC WiLink driver 26 Say Y here to compile support for Texas Instrument's NFC WiLink driver
27 into the kernel or say M to compile it as module. 27 into the kernel or say M to compile it as module.
28 28
29config NFC_MEI_PHY
30 tristate "MEI bus NFC device support"
31 depends on INTEL_MEI_BUS_NFC && NFC_HCI
32 help
33 This adds support to use an mei bus nfc device. Select this if you
34 will use an HCI NFC driver for an NFC chip connected behind an
35 Intel's Management Engine chip.
36
37 If unsure, say N.
38
29source "drivers/nfc/pn544/Kconfig" 39source "drivers/nfc/pn544/Kconfig"
30source "drivers/nfc/microread/Kconfig" 40source "drivers/nfc/microread/Kconfig"
31 41
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index a189ada0926a..aa6bd657ef40 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_NFC_PN544) += pn544/
6obj-$(CONFIG_NFC_MICROREAD) += microread/ 6obj-$(CONFIG_NFC_MICROREAD) += microread/
7obj-$(CONFIG_NFC_PN533) += pn533.o 7obj-$(CONFIG_NFC_PN533) += pn533.o
8obj-$(CONFIG_NFC_WILINK) += nfcwilink.o 8obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
9obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
9 10
10ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG 11ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
new file mode 100644
index 000000000000..b8f8abc422f0
--- /dev/null
+++ b/drivers/nfc/mei_phy.c
@@ -0,0 +1,164 @@
1/*
2 * MEI Library for mei bus nfc device access
3 *
4 * Copyright (C) 2013 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/nfc.h>
24
25#include "mei_phy.h"
26
27struct mei_nfc_hdr {
28 u8 cmd;
29 u8 status;
30 u16 req_id;
31 u32 reserved;
32 u16 data_size;
33} __attribute__((packed));
34
35#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
36
37#define MEI_DUMP_SKB_IN(info, skb) \
38do { \
39 pr_debug("%s:\n", info); \
40 print_hex_dump_debug("mei in : ", DUMP_PREFIX_OFFSET, \
41 16, 1, (skb)->data, (skb)->len, false); \
42} while (0)
43
44#define MEI_DUMP_SKB_OUT(info, skb) \
45do { \
46 pr_debug("%s:\n", info); \
47 print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \
48 16, 1, (skb)->data, (skb)->len, false); \
49} while (0)
50
51int nfc_mei_phy_enable(void *phy_id)
52{
53 int r;
54 struct nfc_mei_phy *phy = phy_id;
55
56 pr_info("%s\n", __func__);
57
58 if (phy->powered == 1)
59 return 0;
60
61 r = mei_cl_enable_device(phy->device);
62 if (r < 0) {
63 pr_err("MEI_PHY: Could not enable device\n");
64 return r;
65 }
66
67 phy->powered = 1;
68
69 return 0;
70}
71EXPORT_SYMBOL_GPL(nfc_mei_phy_enable);
72
73void nfc_mei_phy_disable(void *phy_id)
74{
75 struct nfc_mei_phy *phy = phy_id;
76
77 pr_info("%s\n", __func__);
78
79 mei_cl_disable_device(phy->device);
80
81 phy->powered = 0;
82}
83EXPORT_SYMBOL_GPL(nfc_mei_phy_disable);
84
85/*
86 * Writing a frame must not return the number of written bytes.
87 * It must return either zero for success, or <0 for error.
88 * In addition, it must not alter the skb
89 */
90static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb)
91{
92 struct nfc_mei_phy *phy = phy_id;
93 int r;
94
95 MEI_DUMP_SKB_OUT("mei frame sent", skb);
96
97 r = mei_cl_send(phy->device, skb->data, skb->len);
98 if (r > 0)
99 r = 0;
100
101 return r;
102}
103
104void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
105{
106 struct nfc_mei_phy *phy = context;
107
108 if (phy->hard_fault != 0)
109 return;
110
111 if (events & BIT(MEI_CL_EVENT_RX)) {
112 struct sk_buff *skb;
113 int reply_size;
114
115 skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
116 if (!skb)
117 return;
118
119 reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
120 if (reply_size < MEI_NFC_HEADER_SIZE) {
121 kfree(skb);
122 return;
123 }
124
125 skb_put(skb, reply_size);
126 skb_pull(skb, MEI_NFC_HEADER_SIZE);
127
128 MEI_DUMP_SKB_IN("mei frame read", skb);
129
130 nfc_hci_recv_frame(phy->hdev, skb);
131 }
132}
133EXPORT_SYMBOL_GPL(nfc_mei_event_cb);
134
135struct nfc_phy_ops mei_phy_ops = {
136 .write = nfc_mei_phy_write,
137 .enable = nfc_mei_phy_enable,
138 .disable = nfc_mei_phy_disable,
139};
140EXPORT_SYMBOL_GPL(mei_phy_ops);
141
142struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device)
143{
144 struct nfc_mei_phy *phy;
145
146 phy = kzalloc(sizeof(struct nfc_mei_phy), GFP_KERNEL);
147 if (!phy)
148 return NULL;
149
150 phy->device = device;
151 mei_cl_set_drvdata(device, phy);
152
153 return phy;
154}
155EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc);
156
157void nfc_mei_phy_free(struct nfc_mei_phy *phy)
158{
159 kfree(phy);
160}
161EXPORT_SYMBOL_GPL(nfc_mei_phy_free);
162
163MODULE_LICENSE("GPL");
164MODULE_DESCRIPTION("mei bus NFC device interface");
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
new file mode 100644
index 000000000000..d669900f8278
--- /dev/null
+++ b/drivers/nfc/mei_phy.h
@@ -0,0 +1,30 @@
1#ifndef __LOCAL_MEI_PHY_H_
2#define __LOCAL_MEI_PHY_H_
3
4#include <linux/mei_cl_bus.h>
5#include <net/nfc/hci.h>
6
7#define MEI_NFC_HEADER_SIZE 10
8#define MEI_NFC_MAX_HCI_PAYLOAD 300
9
10struct nfc_mei_phy {
11 struct mei_cl_device *device;
12 struct nfc_hci_dev *hdev;
13
14 int powered;
15
16 int hard_fault; /*
17 * < 0 if hardware error occured
18 * and prevents normal operation.
19 */
20};
21
22extern struct nfc_phy_ops mei_phy_ops;
23
24int nfc_mei_phy_enable(void *phy_id);
25void nfc_mei_phy_disable(void *phy_id);
26void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context);
27struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device);
28void nfc_mei_phy_free(struct nfc_mei_phy *phy);
29
30#endif /* __LOCAL_MEI_PHY_H_ */
diff --git a/drivers/nfc/microread/Kconfig b/drivers/nfc/microread/Kconfig
index 572305be6e37..951d5542f6bc 100644
--- a/drivers/nfc/microread/Kconfig
+++ b/drivers/nfc/microread/Kconfig
@@ -25,7 +25,7 @@ config NFC_MICROREAD_I2C
25 25
26config NFC_MICROREAD_MEI 26config NFC_MICROREAD_MEI
27 tristate "NFC Microread MEI support" 27 tristate "NFC Microread MEI support"
28 depends on NFC_MICROREAD && INTEL_MEI_BUS_NFC 28 depends on NFC_MICROREAD && NFC_MEI_PHY
29 ---help--- 29 ---help---
30 This module adds support for the mei interface of adapters using 30 This module adds support for the mei interface of adapters using
31 Inside microread chipsets. Select this if your microread chipset 31 Inside microread chipsets. Select this if your microread chipset
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index ca33ae193935..1ad044dce7b6 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -19,151 +19,31 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h> 22#include <linux/mod_devicetable.h>
23#include <linux/interrupt.h>
24#include <linux/gpio.h>
25#include <linux/mei_cl_bus.h>
26
27#include <linux/nfc.h> 23#include <linux/nfc.h>
28#include <net/nfc/hci.h> 24#include <net/nfc/hci.h>
29#include <net/nfc/llc.h> 25#include <net/nfc/llc.h>
30 26
27#include "../mei_phy.h"
31#include "microread.h" 28#include "microread.h"
32 29
33#define MICROREAD_DRIVER_NAME "microread" 30#define MICROREAD_DRIVER_NAME "microread"
34 31
35struct mei_nfc_hdr {
36 u8 cmd;
37 u8 status;
38 u16 req_id;
39 u32 reserved;
40 u16 data_size;
41} __attribute__((packed));
42
43#define MEI_NFC_HEADER_SIZE 10
44#define MEI_NFC_MAX_HCI_PAYLOAD 300
45#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
46
47struct microread_mei_phy {
48 struct mei_cl_device *device;
49 struct nfc_hci_dev *hdev;
50
51 int powered;
52
53 int hard_fault; /*
54 * < 0 if hardware error occured (e.g. i2c err)
55 * and prevents normal operation.
56 */
57};
58
59#define MEI_DUMP_SKB_IN(info, skb) \
60do { \
61 pr_debug("%s:\n", info); \
62 print_hex_dump(KERN_DEBUG, "mei in : ", DUMP_PREFIX_OFFSET, \
63 16, 1, (skb)->data, (skb)->len, 0); \
64} while (0)
65
66#define MEI_DUMP_SKB_OUT(info, skb) \
67do { \
68 pr_debug("%s:\n", info); \
69 print_hex_dump(KERN_DEBUG, "mei out: ", DUMP_PREFIX_OFFSET, \
70 16, 1, (skb)->data, (skb)->len, 0); \
71} while (0)
72
73static int microread_mei_enable(void *phy_id)
74{
75 struct microread_mei_phy *phy = phy_id;
76
77 pr_info(DRIVER_DESC ": %s\n", __func__);
78
79 phy->powered = 1;
80
81 return 0;
82}
83
84static void microread_mei_disable(void *phy_id)
85{
86 struct microread_mei_phy *phy = phy_id;
87
88 pr_info(DRIVER_DESC ": %s\n", __func__);
89
90 phy->powered = 0;
91}
92
93/*
94 * Writing a frame must not return the number of written bytes.
95 * It must return either zero for success, or <0 for error.
96 * In addition, it must not alter the skb
97 */
98static int microread_mei_write(void *phy_id, struct sk_buff *skb)
99{
100 struct microread_mei_phy *phy = phy_id;
101 int r;
102
103 MEI_DUMP_SKB_OUT("mei frame sent", skb);
104
105 r = mei_cl_send(phy->device, skb->data, skb->len);
106 if (r > 0)
107 r = 0;
108
109 return r;
110}
111
112static void microread_event_cb(struct mei_cl_device *device, u32 events,
113 void *context)
114{
115 struct microread_mei_phy *phy = context;
116
117 if (phy->hard_fault != 0)
118 return;
119
120 if (events & BIT(MEI_CL_EVENT_RX)) {
121 struct sk_buff *skb;
122 int reply_size;
123
124 skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
125 if (!skb)
126 return;
127
128 reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
129 if (reply_size < MEI_NFC_HEADER_SIZE) {
130 kfree(skb);
131 return;
132 }
133
134 skb_put(skb, reply_size);
135 skb_pull(skb, MEI_NFC_HEADER_SIZE);
136
137 MEI_DUMP_SKB_IN("mei frame read", skb);
138
139 nfc_hci_recv_frame(phy->hdev, skb);
140 }
141}
142
143static struct nfc_phy_ops mei_phy_ops = {
144 .write = microread_mei_write,
145 .enable = microread_mei_enable,
146 .disable = microread_mei_disable,
147};
148
149static int microread_mei_probe(struct mei_cl_device *device, 32static int microread_mei_probe(struct mei_cl_device *device,
150 const struct mei_cl_device_id *id) 33 const struct mei_cl_device_id *id)
151{ 34{
152 struct microread_mei_phy *phy; 35 struct nfc_mei_phy *phy;
153 int r; 36 int r;
154 37
155 pr_info("Probing NFC microread\n"); 38 pr_info("Probing NFC microread\n");
156 39
157 phy = kzalloc(sizeof(struct microread_mei_phy), GFP_KERNEL); 40 phy = nfc_mei_phy_alloc(device);
158 if (!phy) { 41 if (!phy) {
159 pr_err("Cannot allocate memory for microread mei phy.\n"); 42 pr_err("Cannot allocate memory for microread mei phy.\n");
160 return -ENOMEM; 43 return -ENOMEM;
161 } 44 }
162 45
163 phy->device = device; 46 r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
164 mei_cl_set_drvdata(device, phy);
165
166 r = mei_cl_register_event_cb(device, microread_event_cb, phy);
167 if (r) { 47 if (r) {
168 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); 48 pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
169 goto err_out; 49 goto err_out;
@@ -178,23 +58,22 @@ static int microread_mei_probe(struct mei_cl_device *device,
178 return 0; 58 return 0;
179 59
180err_out: 60err_out:
181 kfree(phy); 61 nfc_mei_phy_free(phy);
182 62
183 return r; 63 return r;
184} 64}
185 65
186static int microread_mei_remove(struct mei_cl_device *device) 66static int microread_mei_remove(struct mei_cl_device *device)
187{ 67{
188 struct microread_mei_phy *phy = mei_cl_get_drvdata(device); 68 struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
189 69
190 pr_info("Removing microread\n"); 70 pr_info("Removing microread\n");
191 71
192 microread_remove(phy->hdev); 72 microread_remove(phy->hdev);
193 73
194 if (phy->powered) 74 nfc_mei_phy_disable(phy);
195 microread_mei_disable(phy);
196 75
197 kfree(phy); 76 nfc_mei_phy_free(phy);
198 77
199 return 0; 78 return 0;
200} 79}
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index f0f6763d67ae..8f6f2baa930d 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia 2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 * 3 * Copyright (C) 2012-2013 Tieto Poland
4 * Authors:
5 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
6 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
7 * 4 *
8 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -30,7 +27,7 @@
30#include <linux/netdevice.h> 27#include <linux/netdevice.h>
31#include <net/nfc/nfc.h> 28#include <net/nfc/nfc.h>
32 29
33#define VERSION "0.1" 30#define VERSION "0.2"
34 31
35#define PN533_VENDOR_ID 0x4CC 32#define PN533_VENDOR_ID 0x4CC
36#define PN533_PRODUCT_ID 0x2533 33#define PN533_PRODUCT_ID 0x2533
@@ -41,8 +38,12 @@
41#define SONY_VENDOR_ID 0x054c 38#define SONY_VENDOR_ID 0x054c
42#define PASORI_PRODUCT_ID 0x02e1 39#define PASORI_PRODUCT_ID 0x02e1
43 40
44#define PN533_DEVICE_STD 0x1 41#define ACS_VENDOR_ID 0x072f
45#define PN533_DEVICE_PASORI 0x2 42#define ACR122U_PRODUCT_ID 0x2200
43
44#define PN533_DEVICE_STD 0x1
45#define PN533_DEVICE_PASORI 0x2
46#define PN533_DEVICE_ACR122U 0x3
46 47
47#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\ 48#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
48 NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\ 49 NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
@@ -71,6 +72,11 @@ static const struct usb_device_id pn533_table[] = {
71 .idProduct = PASORI_PRODUCT_ID, 72 .idProduct = PASORI_PRODUCT_ID,
72 .driver_info = PN533_DEVICE_PASORI, 73 .driver_info = PN533_DEVICE_PASORI,
73 }, 74 },
75 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
76 .idVendor = ACS_VENDOR_ID,
77 .idProduct = ACR122U_PRODUCT_ID,
78 .driver_info = PN533_DEVICE_ACR122U,
79 },
74 { } 80 { }
75}; 81};
76MODULE_DEVICE_TABLE(usb, pn533_table); 82MODULE_DEVICE_TABLE(usb, pn533_table);
@@ -78,32 +84,47 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
78/* How much time we spend listening for initiators */ 84/* How much time we spend listening for initiators */
79#define PN533_LISTEN_TIME 2 85#define PN533_LISTEN_TIME 2
80 86
81/* frame definitions */ 87/* Standard pn533 frame definitions */
82#define PN533_FRAME_HEADER_LEN (sizeof(struct pn533_frame) \ 88#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
83 + 2) /* data[0] TFI, data[1] CC */ 89 + 2) /* data[0] TFI, data[1] CC */
84#define PN533_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/ 90#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
85 91
86/* 92/*
87 * Max extended frame payload len, excluding TFI and CC 93 * Max extended frame payload len, excluding TFI and CC
88 * which are already in PN533_FRAME_HEADER_LEN. 94 * which are already in PN533_FRAME_HEADER_LEN.
89 */ 95 */
90#define PN533_FRAME_MAX_PAYLOAD_LEN 263 96#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263
91 97
92#define PN533_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2), 98#define PN533_STD_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
93 Postamble (1) */ 99 Postamble (1) */
94#define PN533_FRAME_CHECKSUM(f) (f->data[f->datalen]) 100#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
95#define PN533_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1]) 101#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
96 102
97/* start of frame */ 103/* start of frame */
98#define PN533_SOF 0x00FF 104#define PN533_STD_FRAME_SOF 0x00FF
105
106/* standard frame identifier: in/out/error */
107#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */
108#define PN533_STD_FRAME_DIR_OUT 0xD4
109#define PN533_STD_FRAME_DIR_IN 0xD5
110
111/* ACS ACR122 pn533 frame definitions */
112#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
113 + 2)
114#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
115#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
116 + 2)
117#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
118#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
119
120/* CCID messages types */
121#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
122#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
99 123
100/* frame identifier: in/out/error */ 124#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
101#define PN533_FRAME_IDENTIFIER(f) (f->data[0])
102#define PN533_DIR_OUT 0xD4
103#define PN533_DIR_IN 0xD5
104 125
105/* PN533 Commands */ 126/* PN533 Commands */
106#define PN533_FRAME_CMD(f) (f->data[1]) 127#define PN533_STD_FRAME_CMD(f) (f->data[1])
107 128
108#define PN533_CMD_GET_FIRMWARE_VERSION 0x02 129#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
109#define PN533_CMD_RF_CONFIGURATION 0x32 130#define PN533_CMD_RF_CONFIGURATION 0x32
@@ -128,8 +149,6 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
128 149
129struct pn533; 150struct pn533;
130 151
131typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg, int status);
132
133typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg, 152typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
134 struct sk_buff *resp); 153 struct sk_buff *resp);
135 154
@@ -144,9 +163,13 @@ struct pn533_fw_version {
144}; 163};
145 164
146/* PN533_CMD_RF_CONFIGURATION */ 165/* PN533_CMD_RF_CONFIGURATION */
147#define PN533_CFGITEM_TIMING 0x02 166#define PN533_CFGITEM_RF_FIELD 0x01
167#define PN533_CFGITEM_TIMING 0x02
148#define PN533_CFGITEM_MAX_RETRIES 0x05 168#define PN533_CFGITEM_MAX_RETRIES 0x05
149#define PN533_CFGITEM_PASORI 0x82 169#define PN533_CFGITEM_PASORI 0x82
170
171#define PN533_CFGITEM_RF_FIELD_ON 0x1
172#define PN533_CFGITEM_RF_FIELD_OFF 0x0
150 173
151#define PN533_CONFIG_TIMING_102 0xb 174#define PN533_CONFIG_TIMING_102 0xb
152#define PN533_CONFIG_TIMING_204 0xc 175#define PN533_CONFIG_TIMING_204 0xc
@@ -313,10 +336,17 @@ struct pn533_cmd_jump_dep_response {
313#define PN533_INIT_TARGET_RESP_ACTIVE 0x1 336#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
314#define PN533_INIT_TARGET_RESP_DEP 0x4 337#define PN533_INIT_TARGET_RESP_DEP 0x4
315 338
339enum pn533_protocol_type {
340 PN533_PROTO_REQ_ACK_RESP = 0,
341 PN533_PROTO_REQ_RESP
342};
343
316struct pn533 { 344struct pn533 {
317 struct usb_device *udev; 345 struct usb_device *udev;
318 struct usb_interface *interface; 346 struct usb_interface *interface;
319 struct nfc_dev *nfc_dev; 347 struct nfc_dev *nfc_dev;
348 u32 device_type;
349 enum pn533_protocol_type protocol_type;
320 350
321 struct urb *out_urb; 351 struct urb *out_urb;
322 struct urb *in_urb; 352 struct urb *in_urb;
@@ -329,21 +359,21 @@ struct pn533 {
329 struct work_struct poll_work; 359 struct work_struct poll_work;
330 struct work_struct mi_work; 360 struct work_struct mi_work;
331 struct work_struct tg_work; 361 struct work_struct tg_work;
332 struct timer_list listen_timer;
333 int wq_in_error;
334 int cancel_listen;
335 362
336 pn533_cmd_complete_t cmd_complete; 363 struct list_head cmd_queue;
337 void *cmd_complete_arg; 364 struct pn533_cmd *cmd;
365 u8 cmd_pending;
366 struct mutex cmd_lock; /* protects cmd queue */
367
338 void *cmd_complete_mi_arg; 368 void *cmd_complete_mi_arg;
339 struct mutex cmd_lock;
340 u8 cmd;
341 369
342 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1]; 370 struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
343 u8 poll_mod_count; 371 u8 poll_mod_count;
344 u8 poll_mod_curr; 372 u8 poll_mod_curr;
345 u32 poll_protocols; 373 u32 poll_protocols;
346 u32 listen_protocols; 374 u32 listen_protocols;
375 struct timer_list listen_timer;
376 int cancel_listen;
347 377
348 u8 *gb; 378 u8 *gb;
349 size_t gb_len; 379 size_t gb_len;
@@ -352,24 +382,21 @@ struct pn533 {
352 u8 tgt_active_prot; 382 u8 tgt_active_prot;
353 u8 tgt_mode; 383 u8 tgt_mode;
354 384
355 u32 device_type;
356
357 struct list_head cmd_queue;
358 u8 cmd_pending;
359
360 struct pn533_frame_ops *ops; 385 struct pn533_frame_ops *ops;
361}; 386};
362 387
363struct pn533_cmd { 388struct pn533_cmd {
364 struct list_head queue; 389 struct list_head queue;
365 u8 cmd_code; 390 u8 code;
391 int status;
366 struct sk_buff *req; 392 struct sk_buff *req;
367 struct sk_buff *resp; 393 struct sk_buff *resp;
368 int resp_len; 394 int resp_len;
369 void *arg; 395 pn533_send_async_complete_t complete_cb;
396 void *complete_cb_context;
370}; 397};
371 398
372struct pn533_frame { 399struct pn533_std_frame {
373 u8 preamble; 400 u8 preamble;
374 __be16 start_frame; 401 __be16 start_frame;
375 u8 datalen; 402 u8 datalen;
@@ -393,14 +420,124 @@ struct pn533_frame_ops {
393 u8 (*get_cmd_code)(void *frame); 420 u8 (*get_cmd_code)(void *frame);
394}; 421};
395 422
423struct pn533_acr122_ccid_hdr {
424 u8 type;
425 u32 datalen;
426 u8 slot;
427 u8 seq;
428 u8 params[3]; /* 3 msg specific bytes or status, error and 1 specific
429 byte for reposnse msg */
430 u8 data[]; /* payload */
431} __packed;
432
433struct pn533_acr122_apdu_hdr {
434 u8 class;
435 u8 ins;
436 u8 p1;
437 u8 p2;
438} __packed;
439
440struct pn533_acr122_tx_frame {
441 struct pn533_acr122_ccid_hdr ccid;
442 struct pn533_acr122_apdu_hdr apdu;
443 u8 datalen;
444 u8 data[]; /* pn533 frame: TFI ... */
445} __packed;
446
447struct pn533_acr122_rx_frame {
448 struct pn533_acr122_ccid_hdr ccid;
449 u8 data[]; /* pn533 frame : TFI ... */
450} __packed;
451
452static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
453{
454 struct pn533_acr122_tx_frame *frame = _frame;
455
456 frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
457 frame->ccid.datalen = sizeof(frame->apdu) + 1; /* sizeof(apdu_hdr) +
458 sizeof(datalen) */
459 frame->ccid.slot = 0;
460 frame->ccid.seq = 0;
461 frame->ccid.params[0] = 0;
462 frame->ccid.params[1] = 0;
463 frame->ccid.params[2] = 0;
464
465 frame->data[0] = PN533_STD_FRAME_DIR_OUT;
466 frame->data[1] = cmd_code;
467 frame->datalen = 2; /* data[0] + data[1] */
468
469 frame->apdu.class = 0xFF;
470 frame->apdu.ins = 0;
471 frame->apdu.p1 = 0;
472 frame->apdu.p2 = 0;
473}
474
475static void pn533_acr122_tx_frame_finish(void *_frame)
476{
477 struct pn533_acr122_tx_frame *frame = _frame;
478
479 frame->ccid.datalen += frame->datalen;
480}
481
482static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
483{
484 struct pn533_acr122_tx_frame *frame = _frame;
485
486 frame->datalen += len;
487}
488
489static bool pn533_acr122_is_rx_frame_valid(void *_frame)
490{
491 struct pn533_acr122_rx_frame *frame = _frame;
492
493 if (frame->ccid.type != 0x83)
494 return false;
495
496 if (frame->data[frame->ccid.datalen - 2] == 0x63)
497 return false;
498
499 return true;
500}
501
502static int pn533_acr122_rx_frame_size(void *frame)
503{
504 struct pn533_acr122_rx_frame *f = frame;
505
506 /* f->ccid.datalen already includes tail length */
507 return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
508}
509
510static u8 pn533_acr122_get_cmd_code(void *frame)
511{
512 struct pn533_acr122_rx_frame *f = frame;
513
514 return PN533_STD_FRAME_CMD(f);
515}
516
517static struct pn533_frame_ops pn533_acr122_frame_ops = {
518 .tx_frame_init = pn533_acr122_tx_frame_init,
519 .tx_frame_finish = pn533_acr122_tx_frame_finish,
520 .tx_update_payload_len = pn533_acr122_tx_update_payload_len,
521 .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
522 .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
523
524 .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
525 .rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
526 .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
527 .rx_frame_size = pn533_acr122_rx_frame_size,
528
529 .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
530 .get_cmd_code = pn533_acr122_get_cmd_code,
531};
532
396/* The rule: value + checksum = 0 */ 533/* The rule: value + checksum = 0 */
397static inline u8 pn533_checksum(u8 value) 534static inline u8 pn533_std_checksum(u8 value)
398{ 535{
399 return ~value + 1; 536 return ~value + 1;
400} 537}
401 538
402/* The rule: sum(data elements) + checksum = 0 */ 539/* The rule: sum(data elements) + checksum = 0 */
403static u8 pn533_data_checksum(u8 *data, int datalen) 540static u8 pn533_std_data_checksum(u8 *data, int datalen)
404{ 541{
405 u8 sum = 0; 542 u8 sum = 0;
406 int i; 543 int i;
@@ -408,61 +545,61 @@ static u8 pn533_data_checksum(u8 *data, int datalen)
408 for (i = 0; i < datalen; i++) 545 for (i = 0; i < datalen; i++)
409 sum += data[i]; 546 sum += data[i];
410 547
411 return pn533_checksum(sum); 548 return pn533_std_checksum(sum);
412} 549}
413 550
414static void pn533_tx_frame_init(void *_frame, u8 cmd_code) 551static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
415{ 552{
416 struct pn533_frame *frame = _frame; 553 struct pn533_std_frame *frame = _frame;
417 554
418 frame->preamble = 0; 555 frame->preamble = 0;
419 frame->start_frame = cpu_to_be16(PN533_SOF); 556 frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
420 PN533_FRAME_IDENTIFIER(frame) = PN533_DIR_OUT; 557 PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
421 PN533_FRAME_CMD(frame) = cmd_code; 558 PN533_STD_FRAME_CMD(frame) = cmd_code;
422 frame->datalen = 2; 559 frame->datalen = 2;
423} 560}
424 561
425static void pn533_tx_frame_finish(void *_frame) 562static void pn533_std_tx_frame_finish(void *_frame)
426{ 563{
427 struct pn533_frame *frame = _frame; 564 struct pn533_std_frame *frame = _frame;
428 565
429 frame->datalen_checksum = pn533_checksum(frame->datalen); 566 frame->datalen_checksum = pn533_std_checksum(frame->datalen);
430 567
431 PN533_FRAME_CHECKSUM(frame) = 568 PN533_STD_FRAME_CHECKSUM(frame) =
432 pn533_data_checksum(frame->data, frame->datalen); 569 pn533_std_data_checksum(frame->data, frame->datalen);
433 570
434 PN533_FRAME_POSTAMBLE(frame) = 0; 571 PN533_STD_FRAME_POSTAMBLE(frame) = 0;
435} 572}
436 573
437static void pn533_tx_update_payload_len(void *_frame, int len) 574static void pn533_std_tx_update_payload_len(void *_frame, int len)
438{ 575{
439 struct pn533_frame *frame = _frame; 576 struct pn533_std_frame *frame = _frame;
440 577
441 frame->datalen += len; 578 frame->datalen += len;
442} 579}
443 580
444static bool pn533_rx_frame_is_valid(void *_frame) 581static bool pn533_std_rx_frame_is_valid(void *_frame)
445{ 582{
446 u8 checksum; 583 u8 checksum;
447 struct pn533_frame *frame = _frame; 584 struct pn533_std_frame *frame = _frame;
448 585
449 if (frame->start_frame != cpu_to_be16(PN533_SOF)) 586 if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
450 return false; 587 return false;
451 588
452 checksum = pn533_checksum(frame->datalen); 589 checksum = pn533_std_checksum(frame->datalen);
453 if (checksum != frame->datalen_checksum) 590 if (checksum != frame->datalen_checksum)
454 return false; 591 return false;
455 592
456 checksum = pn533_data_checksum(frame->data, frame->datalen); 593 checksum = pn533_std_data_checksum(frame->data, frame->datalen);
457 if (checksum != PN533_FRAME_CHECKSUM(frame)) 594 if (checksum != PN533_STD_FRAME_CHECKSUM(frame))
458 return false; 595 return false;
459 596
460 return true; 597 return true;
461} 598}
462 599
463static bool pn533_rx_frame_is_ack(struct pn533_frame *frame) 600static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame)
464{ 601{
465 if (frame->start_frame != cpu_to_be16(PN533_SOF)) 602 if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
466 return false; 603 return false;
467 604
468 if (frame->datalen != 0 || frame->datalen_checksum != 0xFF) 605 if (frame->datalen != 0 || frame->datalen_checksum != 0xFF)
@@ -471,57 +608,51 @@ static bool pn533_rx_frame_is_ack(struct pn533_frame *frame)
471 return true; 608 return true;
472} 609}
473 610
474static inline int pn533_rx_frame_size(void *frame) 611static inline int pn533_std_rx_frame_size(void *frame)
475{ 612{
476 struct pn533_frame *f = frame; 613 struct pn533_std_frame *f = frame;
477 614
478 return sizeof(struct pn533_frame) + f->datalen + PN533_FRAME_TAIL_LEN; 615 return sizeof(struct pn533_std_frame) + f->datalen +
616 PN533_STD_FRAME_TAIL_LEN;
479} 617}
480 618
481static u8 pn533_get_cmd_code(void *frame) 619static u8 pn533_std_get_cmd_code(void *frame)
482{ 620{
483 struct pn533_frame *f = frame; 621 struct pn533_std_frame *f = frame;
484 622
485 return PN533_FRAME_CMD(f); 623 return PN533_STD_FRAME_CMD(f);
486} 624}
487 625
488static struct pn533_frame_ops pn533_std_frame_ops = { 626static struct pn533_frame_ops pn533_std_frame_ops = {
489 .tx_frame_init = pn533_tx_frame_init, 627 .tx_frame_init = pn533_std_tx_frame_init,
490 .tx_frame_finish = pn533_tx_frame_finish, 628 .tx_frame_finish = pn533_std_tx_frame_finish,
491 .tx_update_payload_len = pn533_tx_update_payload_len, 629 .tx_update_payload_len = pn533_std_tx_update_payload_len,
492 .tx_header_len = PN533_FRAME_HEADER_LEN, 630 .tx_header_len = PN533_STD_FRAME_HEADER_LEN,
493 .tx_tail_len = PN533_FRAME_TAIL_LEN, 631 .tx_tail_len = PN533_STD_FRAME_TAIL_LEN,
494 632
495 .rx_is_frame_valid = pn533_rx_frame_is_valid, 633 .rx_is_frame_valid = pn533_std_rx_frame_is_valid,
496 .rx_frame_size = pn533_rx_frame_size, 634 .rx_frame_size = pn533_std_rx_frame_size,
497 .rx_header_len = PN533_FRAME_HEADER_LEN, 635 .rx_header_len = PN533_STD_FRAME_HEADER_LEN,
498 .rx_tail_len = PN533_FRAME_TAIL_LEN, 636 .rx_tail_len = PN533_STD_FRAME_TAIL_LEN,
499 637
500 .max_payload_len = PN533_FRAME_MAX_PAYLOAD_LEN, 638 .max_payload_len = PN533_STD_FRAME_MAX_PAYLOAD_LEN,
501 .get_cmd_code = pn533_get_cmd_code, 639 .get_cmd_code = pn533_std_get_cmd_code,
502}; 640};
503 641
504static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame) 642static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
505{ 643{
506 return (dev->ops->get_cmd_code(frame) == PN533_CMD_RESPONSE(dev->cmd)); 644 return (dev->ops->get_cmd_code(frame) ==
507} 645 PN533_CMD_RESPONSE(dev->cmd->code));
508
509
510static void pn533_wq_cmd_complete(struct work_struct *work)
511{
512 struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
513 int rc;
514
515 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, dev->wq_in_error);
516 if (rc != -EINPROGRESS)
517 queue_work(dev->wq, &dev->cmd_work);
518} 646}
519 647
520static void pn533_recv_response(struct urb *urb) 648static void pn533_recv_response(struct urb *urb)
521{ 649{
522 struct pn533 *dev = urb->context; 650 struct pn533 *dev = urb->context;
651 struct pn533_cmd *cmd = dev->cmd;
523 u8 *in_frame; 652 u8 *in_frame;
524 653
654 cmd->status = urb->status;
655
525 switch (urb->status) { 656 switch (urb->status) {
526 case 0: 657 case 0:
527 break; /* success */ 658 break; /* success */
@@ -530,37 +661,33 @@ static void pn533_recv_response(struct urb *urb)
530 nfc_dev_dbg(&dev->interface->dev, 661 nfc_dev_dbg(&dev->interface->dev,
531 "The urb has been canceled (status %d)", 662 "The urb has been canceled (status %d)",
532 urb->status); 663 urb->status);
533 dev->wq_in_error = urb->status;
534 goto sched_wq; 664 goto sched_wq;
535 case -ESHUTDOWN: 665 case -ESHUTDOWN:
536 default: 666 default:
537 nfc_dev_err(&dev->interface->dev, 667 nfc_dev_err(&dev->interface->dev,
538 "Urb failure (status %d)", urb->status); 668 "Urb failure (status %d)", urb->status);
539 dev->wq_in_error = urb->status;
540 goto sched_wq; 669 goto sched_wq;
541 } 670 }
542 671
543 in_frame = dev->in_urb->transfer_buffer; 672 in_frame = dev->in_urb->transfer_buffer;
544 673
545 nfc_dev_dbg(&dev->interface->dev, "Received a frame."); 674 nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
546 print_hex_dump(KERN_DEBUG, "PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, 675 print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
547 in_frame, dev->ops->rx_frame_size(in_frame), false); 676 dev->ops->rx_frame_size(in_frame), false);
548 677
549 if (!dev->ops->rx_is_frame_valid(in_frame)) { 678 if (!dev->ops->rx_is_frame_valid(in_frame)) {
550 nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); 679 nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
551 dev->wq_in_error = -EIO; 680 cmd->status = -EIO;
552 goto sched_wq; 681 goto sched_wq;
553 } 682 }
554 683
555 if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) { 684 if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
556 nfc_dev_err(&dev->interface->dev, 685 nfc_dev_err(&dev->interface->dev,
557 "It it not the response to the last command"); 686 "It it not the response to the last command");
558 dev->wq_in_error = -EIO; 687 cmd->status = -EIO;
559 goto sched_wq; 688 goto sched_wq;
560 } 689 }
561 690
562 dev->wq_in_error = 0;
563
564sched_wq: 691sched_wq:
565 queue_work(dev->wq, &dev->cmd_complete_work); 692 queue_work(dev->wq, &dev->cmd_complete_work);
566} 693}
@@ -575,9 +702,12 @@ static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
575static void pn533_recv_ack(struct urb *urb) 702static void pn533_recv_ack(struct urb *urb)
576{ 703{
577 struct pn533 *dev = urb->context; 704 struct pn533 *dev = urb->context;
578 struct pn533_frame *in_frame; 705 struct pn533_cmd *cmd = dev->cmd;
706 struct pn533_std_frame *in_frame;
579 int rc; 707 int rc;
580 708
709 cmd->status = urb->status;
710
581 switch (urb->status) { 711 switch (urb->status) {
582 case 0: 712 case 0:
583 break; /* success */ 713 break; /* success */
@@ -586,21 +716,19 @@ static void pn533_recv_ack(struct urb *urb)
586 nfc_dev_dbg(&dev->interface->dev, 716 nfc_dev_dbg(&dev->interface->dev,
587 "The urb has been stopped (status %d)", 717 "The urb has been stopped (status %d)",
588 urb->status); 718 urb->status);
589 dev->wq_in_error = urb->status;
590 goto sched_wq; 719 goto sched_wq;
591 case -ESHUTDOWN: 720 case -ESHUTDOWN:
592 default: 721 default:
593 nfc_dev_err(&dev->interface->dev, 722 nfc_dev_err(&dev->interface->dev,
594 "Urb failure (status %d)", urb->status); 723 "Urb failure (status %d)", urb->status);
595 dev->wq_in_error = urb->status;
596 goto sched_wq; 724 goto sched_wq;
597 } 725 }
598 726
599 in_frame = dev->in_urb->transfer_buffer; 727 in_frame = dev->in_urb->transfer_buffer;
600 728
601 if (!pn533_rx_frame_is_ack(in_frame)) { 729 if (!pn533_std_rx_frame_is_ack(in_frame)) {
602 nfc_dev_err(&dev->interface->dev, "Received an invalid ack"); 730 nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
603 dev->wq_in_error = -EIO; 731 cmd->status = -EIO;
604 goto sched_wq; 732 goto sched_wq;
605 } 733 }
606 734
@@ -608,7 +736,7 @@ static void pn533_recv_ack(struct urb *urb)
608 if (rc) { 736 if (rc) {
609 nfc_dev_err(&dev->interface->dev, 737 nfc_dev_err(&dev->interface->dev,
610 "usb_submit_urb failed with result %d", rc); 738 "usb_submit_urb failed with result %d", rc);
611 dev->wq_in_error = rc; 739 cmd->status = rc;
612 goto sched_wq; 740 goto sched_wq;
613 } 741 }
614 742
@@ -627,7 +755,7 @@ static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
627 755
628static int pn533_send_ack(struct pn533 *dev, gfp_t flags) 756static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
629{ 757{
630 u8 ack[PN533_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; 758 u8 ack[PN533_STD_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
631 /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ 759 /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
632 int rc; 760 int rc;
633 761
@@ -643,32 +771,34 @@ static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
643static int __pn533_send_frame_async(struct pn533 *dev, 771static int __pn533_send_frame_async(struct pn533 *dev,
644 struct sk_buff *out, 772 struct sk_buff *out,
645 struct sk_buff *in, 773 struct sk_buff *in,
646 int in_len, 774 int in_len)
647 pn533_cmd_complete_t cmd_complete,
648 void *arg)
649{ 775{
650 int rc; 776 int rc;
651 777
652 dev->cmd = dev->ops->get_cmd_code(out->data);
653 dev->cmd_complete = cmd_complete;
654 dev->cmd_complete_arg = arg;
655
656 dev->out_urb->transfer_buffer = out->data; 778 dev->out_urb->transfer_buffer = out->data;
657 dev->out_urb->transfer_buffer_length = out->len; 779 dev->out_urb->transfer_buffer_length = out->len;
658 780
659 dev->in_urb->transfer_buffer = in->data; 781 dev->in_urb->transfer_buffer = in->data;
660 dev->in_urb->transfer_buffer_length = in_len; 782 dev->in_urb->transfer_buffer_length = in_len;
661 783
662 print_hex_dump(KERN_DEBUG, "PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, 784 print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
663 out->data, out->len, false); 785 out->data, out->len, false);
664 786
665 rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); 787 rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
666 if (rc) 788 if (rc)
667 return rc; 789 return rc;
668 790
669 rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL); 791 if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
670 if (rc) 792 /* request for response for sent packet directly */
671 goto error; 793 rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
794 if (rc)
795 goto error;
796 } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
797 /* request for ACK if that's the case */
798 rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL);
799 if (rc)
800 goto error;
801 }
672 802
673 return 0; 803 return 0;
674 804
@@ -693,39 +823,34 @@ static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code,
693 ops->tx_frame_finish(skb->data); 823 ops->tx_frame_finish(skb->data);
694} 824}
695 825
696struct pn533_send_async_complete_arg { 826static int pn533_send_async_complete(struct pn533 *dev)
697 pn533_send_async_complete_t complete_cb;
698 void *complete_cb_context;
699 struct sk_buff *resp;
700 struct sk_buff *req;
701};
702
703static int pn533_send_async_complete(struct pn533 *dev, void *_arg, int status)
704{ 827{
705 struct pn533_send_async_complete_arg *arg = _arg; 828 struct pn533_cmd *cmd = dev->cmd;
829 int status = cmd->status;
706 830
707 struct sk_buff *req = arg->req; 831 struct sk_buff *req = cmd->req;
708 struct sk_buff *resp = arg->resp; 832 struct sk_buff *resp = cmd->resp;
709 833
710 int rc; 834 int rc;
711 835
712 dev_kfree_skb(req); 836 dev_kfree_skb(req);
713 837
714 if (status < 0) { 838 if (status < 0) {
715 arg->complete_cb(dev, arg->complete_cb_context, 839 rc = cmd->complete_cb(dev, cmd->complete_cb_context,
716 ERR_PTR(status)); 840 ERR_PTR(status));
717 dev_kfree_skb(resp); 841 dev_kfree_skb(resp);
718 kfree(arg); 842 goto done;
719 return status;
720 } 843 }
721 844
722 skb_put(resp, dev->ops->rx_frame_size(resp->data)); 845 skb_put(resp, dev->ops->rx_frame_size(resp->data));
723 skb_pull(resp, dev->ops->rx_header_len); 846 skb_pull(resp, dev->ops->rx_header_len);
724 skb_trim(resp, resp->len - dev->ops->rx_tail_len); 847 skb_trim(resp, resp->len - dev->ops->rx_tail_len);
725 848
726 rc = arg->complete_cb(dev, arg->complete_cb_context, resp); 849 rc = cmd->complete_cb(dev, cmd->complete_cb_context, resp);
727 850
728 kfree(arg); 851done:
852 kfree(cmd);
853 dev->cmd = NULL;
729 return rc; 854 return rc;
730} 855}
731 856
@@ -736,56 +861,45 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
736 void *complete_cb_context) 861 void *complete_cb_context)
737{ 862{
738 struct pn533_cmd *cmd; 863 struct pn533_cmd *cmd;
739 struct pn533_send_async_complete_arg *arg;
740 int rc = 0; 864 int rc = 0;
741 865
742 nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code); 866 nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
743 867
744 arg = kzalloc(sizeof(*arg), GFP_KERNEL); 868 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
745 if (!arg) 869 if (!cmd)
746 return -ENOMEM; 870 return -ENOMEM;
747 871
748 arg->complete_cb = complete_cb; 872 cmd->code = cmd_code;
749 arg->complete_cb_context = complete_cb_context; 873 cmd->req = req;
750 arg->resp = resp; 874 cmd->resp = resp;
751 arg->req = req; 875 cmd->resp_len = resp_len;
876 cmd->complete_cb = complete_cb;
877 cmd->complete_cb_context = complete_cb_context;
752 878
753 pn533_build_cmd_frame(dev, cmd_code, req); 879 pn533_build_cmd_frame(dev, cmd_code, req);
754 880
755 mutex_lock(&dev->cmd_lock); 881 mutex_lock(&dev->cmd_lock);
756 882
757 if (!dev->cmd_pending) { 883 if (!dev->cmd_pending) {
758 rc = __pn533_send_frame_async(dev, req, resp, resp_len, 884 rc = __pn533_send_frame_async(dev, req, resp, resp_len);
759 pn533_send_async_complete, arg);
760 if (rc) 885 if (rc)
761 goto error; 886 goto error;
762 887
763 dev->cmd_pending = 1; 888 dev->cmd_pending = 1;
889 dev->cmd = cmd;
764 goto unlock; 890 goto unlock;
765 } 891 }
766 892
767 nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__, 893 nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
768 cmd_code); 894 cmd_code);
769 895
770 cmd = kzalloc(sizeof(struct pn533_cmd), GFP_KERNEL);
771 if (!cmd) {
772 rc = -ENOMEM;
773 goto error;
774 }
775
776 INIT_LIST_HEAD(&cmd->queue); 896 INIT_LIST_HEAD(&cmd->queue);
777 cmd->cmd_code = cmd_code;
778 cmd->req = req;
779 cmd->resp = resp;
780 cmd->resp_len = resp_len;
781 cmd->arg = arg;
782
783 list_add_tail(&cmd->queue, &dev->cmd_queue); 897 list_add_tail(&cmd->queue, &dev->cmd_queue);
784 898
785 goto unlock; 899 goto unlock;
786 900
787error: 901error:
788 kfree(arg); 902 kfree(cmd);
789unlock: 903unlock:
790 mutex_unlock(&dev->cmd_lock); 904 mutex_unlock(&dev->cmd_lock);
791 return rc; 905 return rc;
@@ -850,8 +964,8 @@ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
850 pn533_send_async_complete_t complete_cb, 964 pn533_send_async_complete_t complete_cb,
851 void *complete_cb_context) 965 void *complete_cb_context)
852{ 966{
853 struct pn533_send_async_complete_arg *arg;
854 struct sk_buff *resp; 967 struct sk_buff *resp;
968 struct pn533_cmd *cmd;
855 int rc; 969 int rc;
856 int resp_len = dev->ops->rx_header_len + 970 int resp_len = dev->ops->rx_header_len +
857 dev->ops->max_payload_len + 971 dev->ops->max_payload_len +
@@ -861,33 +975,47 @@ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
861 if (!resp) 975 if (!resp)
862 return -ENOMEM; 976 return -ENOMEM;
863 977
864 arg = kzalloc(sizeof(*arg), GFP_KERNEL); 978 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
865 if (!arg) { 979 if (!cmd) {
866 dev_kfree_skb(resp); 980 dev_kfree_skb(resp);
867 return -ENOMEM; 981 return -ENOMEM;
868 } 982 }
869 983
870 arg->complete_cb = complete_cb; 984 cmd->code = cmd_code;
871 arg->complete_cb_context = complete_cb_context; 985 cmd->req = req;
872 arg->resp = resp; 986 cmd->resp = resp;
873 arg->req = req; 987 cmd->resp_len = resp_len;
988 cmd->complete_cb = complete_cb;
989 cmd->complete_cb_context = complete_cb_context;
874 990
875 pn533_build_cmd_frame(dev, cmd_code, req); 991 pn533_build_cmd_frame(dev, cmd_code, req);
876 992
877 rc = __pn533_send_frame_async(dev, req, resp, resp_len, 993 rc = __pn533_send_frame_async(dev, req, resp, resp_len);
878 pn533_send_async_complete, arg);
879 if (rc < 0) { 994 if (rc < 0) {
880 dev_kfree_skb(resp); 995 dev_kfree_skb(resp);
881 kfree(arg); 996 kfree(cmd);
997 } else {
998 dev->cmd = cmd;
882 } 999 }
883 1000
884 return rc; 1001 return rc;
885} 1002}
886 1003
1004static void pn533_wq_cmd_complete(struct work_struct *work)
1005{
1006 struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
1007 int rc;
1008
1009 rc = pn533_send_async_complete(dev);
1010 if (rc != -EINPROGRESS)
1011 queue_work(dev->wq, &dev->cmd_work);
1012}
1013
887static void pn533_wq_cmd(struct work_struct *work) 1014static void pn533_wq_cmd(struct work_struct *work)
888{ 1015{
889 struct pn533 *dev = container_of(work, struct pn533, cmd_work); 1016 struct pn533 *dev = container_of(work, struct pn533, cmd_work);
890 struct pn533_cmd *cmd; 1017 struct pn533_cmd *cmd;
1018 int rc;
891 1019
892 mutex_lock(&dev->cmd_lock); 1020 mutex_lock(&dev->cmd_lock);
893 1021
@@ -903,10 +1031,15 @@ static void pn533_wq_cmd(struct work_struct *work)
903 1031
904 mutex_unlock(&dev->cmd_lock); 1032 mutex_unlock(&dev->cmd_lock);
905 1033
906 __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len, 1034 rc = __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len);
907 pn533_send_async_complete, cmd->arg); 1035 if (rc < 0) {
1036 dev_kfree_skb(cmd->req);
1037 dev_kfree_skb(cmd->resp);
1038 kfree(cmd);
1039 return;
1040 }
908 1041
909 kfree(cmd); 1042 dev->cmd = cmd;
910} 1043}
911 1044
912struct pn533_sync_cmd_response { 1045struct pn533_sync_cmd_response {
@@ -982,6 +1115,23 @@ static void pn533_send_complete(struct urb *urb)
982 } 1115 }
983} 1116}
984 1117
1118static void pn533_abort_cmd(struct pn533 *dev, gfp_t flags)
1119{
1120 /* ACR122U does not support any command which aborts last
1121 * issued command i.e. as ACK for standard PN533. Additionally,
1122 * it behaves stange, sending broken or incorrect responses,
1123 * when we cancel urb before the chip will send response.
1124 */
1125 if (dev->device_type == PN533_DEVICE_ACR122U)
1126 return;
1127
1128 /* An ack will cancel the last issued command */
1129 pn533_send_ack(dev, flags);
1130
1131 /* cancel the urb request */
1132 usb_kill_urb(dev->in_urb);
1133}
1134
985static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size) 1135static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size)
986{ 1136{
987 struct sk_buff *skb; 1137 struct sk_buff *skb;
@@ -1500,9 +1650,6 @@ static void pn533_listen_mode_timer(unsigned long data)
1500 1650
1501 nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout"); 1651 nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
1502 1652
1503 /* An ack will cancel the last issued command (poll) */
1504 pn533_send_ack(dev, GFP_ATOMIC);
1505
1506 dev->cancel_listen = 1; 1653 dev->cancel_listen = 1;
1507 1654
1508 pn533_poll_next_mod(dev); 1655 pn533_poll_next_mod(dev);
@@ -1549,6 +1696,11 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
1549 if (!rc) 1696 if (!rc)
1550 goto done; 1697 goto done;
1551 1698
1699 if (!dev->poll_mod_count) {
1700 nfc_dev_dbg(&dev->interface->dev, "Polling has been stoped.");
1701 goto done;
1702 }
1703
1552 pn533_poll_next_mod(dev); 1704 pn533_poll_next_mod(dev);
1553 queue_work(dev->wq, &dev->poll_work); 1705 queue_work(dev->wq, &dev->poll_work);
1554 1706
@@ -1627,7 +1779,7 @@ static void pn533_wq_poll(struct work_struct *work)
1627 1779
1628 if (dev->cancel_listen == 1) { 1780 if (dev->cancel_listen == 1) {
1629 dev->cancel_listen = 0; 1781 dev->cancel_listen = 0;
1630 usb_kill_urb(dev->in_urb); 1782 pn533_abort_cmd(dev, GFP_ATOMIC);
1631 } 1783 }
1632 1784
1633 rc = pn533_send_poll_frame(dev); 1785 rc = pn533_send_poll_frame(dev);
@@ -1689,12 +1841,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
1689 return; 1841 return;
1690 } 1842 }
1691 1843
1692 /* An ack will cancel the last issued command (poll) */ 1844 pn533_abort_cmd(dev, GFP_KERNEL);
1693 pn533_send_ack(dev, GFP_KERNEL);
1694
1695 /* prevent pn533_start_poll_complete to issue a new poll meanwhile */
1696 usb_kill_urb(dev->in_urb);
1697
1698 pn533_poll_reset_mod_list(dev); 1845 pn533_poll_reset_mod_list(dev);
1699} 1846}
1700 1847
@@ -1723,6 +1870,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
1723 rsp = (struct pn533_cmd_activate_response *)resp->data; 1870 rsp = (struct pn533_cmd_activate_response *)resp->data;
1724 rc = rsp->status & PN533_CMD_RET_MASK; 1871 rc = rsp->status & PN533_CMD_RET_MASK;
1725 if (rc != PN533_CMD_RET_SUCCESS) { 1872 if (rc != PN533_CMD_RET_SUCCESS) {
1873 nfc_dev_err(&dev->interface->dev,
1874 "Target activation failed (error 0x%x)", rc);
1726 dev_kfree_skb(resp); 1875 dev_kfree_skb(resp);
1727 return -EIO; 1876 return -EIO;
1728 } 1877 }
@@ -1850,7 +1999,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1850 rc = rsp->status & PN533_CMD_RET_MASK; 1999 rc = rsp->status & PN533_CMD_RET_MASK;
1851 if (rc != PN533_CMD_RET_SUCCESS) { 2000 if (rc != PN533_CMD_RET_SUCCESS) {
1852 nfc_dev_err(&dev->interface->dev, 2001 nfc_dev_err(&dev->interface->dev,
1853 "Bringing DEP link up failed %d", rc); 2002 "Bringing DEP link up failed (error 0x%x)", rc);
1854 goto error; 2003 goto error;
1855 } 2004 }
1856 2005
@@ -1985,10 +2134,8 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
1985 2134
1986 pn533_poll_reset_mod_list(dev); 2135 pn533_poll_reset_mod_list(dev);
1987 2136
1988 if (dev->tgt_mode || dev->tgt_active_prot) { 2137 if (dev->tgt_mode || dev->tgt_active_prot)
1989 pn533_send_ack(dev, GFP_KERNEL); 2138 pn533_abort_cmd(dev, GFP_KERNEL);
1990 usb_kill_urb(dev->in_urb);
1991 }
1992 2139
1993 dev->tgt_active_prot = 0; 2140 dev->tgt_active_prot = 0;
1994 dev->tgt_mode = 0; 2141 dev->tgt_mode = 0;
@@ -2064,8 +2211,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
2064 2211
2065 if (ret != PN533_CMD_RET_SUCCESS) { 2212 if (ret != PN533_CMD_RET_SUCCESS) {
2066 nfc_dev_err(&dev->interface->dev, 2213 nfc_dev_err(&dev->interface->dev,
2067 "PN533 reported error %d when exchanging data", 2214 "Exchanging data failed (error 0x%x)", ret);
2068 ret);
2069 rc = -EIO; 2215 rc = -EIO;
2070 goto error; 2216 goto error;
2071 } 2217 }
@@ -2253,7 +2399,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
2253 "Error %d when trying to perform data_exchange", rc); 2399 "Error %d when trying to perform data_exchange", rc);
2254 2400
2255 dev_kfree_skb(skb); 2401 dev_kfree_skb(skb);
2256 kfree(dev->cmd_complete_arg); 2402 kfree(dev->cmd_complete_mi_arg);
2257 2403
2258error: 2404error:
2259 pn533_send_ack(dev, GFP_KERNEL); 2405 pn533_send_ack(dev, GFP_KERNEL);
@@ -2310,7 +2456,7 @@ static int pn533_get_firmware_version(struct pn533 *dev,
2310 return 0; 2456 return 0;
2311} 2457}
2312 2458
2313static int pn533_fw_reset(struct pn533 *dev) 2459static int pn533_pasori_fw_reset(struct pn533 *dev)
2314{ 2460{
2315 struct sk_buff *skb; 2461 struct sk_buff *skb;
2316 struct sk_buff *resp; 2462 struct sk_buff *resp;
@@ -2332,9 +2478,102 @@ static int pn533_fw_reset(struct pn533 *dev)
2332 return 0; 2478 return 0;
2333} 2479}
2334 2480
2481struct pn533_acr122_poweron_rdr_arg {
2482 int rc;
2483 struct completion done;
2484};
2485
2486static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
2487{
2488 struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
2489
2490 nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
2491
2492 print_hex_dump(KERN_ERR, "ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
2493 urb->transfer_buffer, urb->transfer_buffer_length,
2494 false);
2495
2496 arg->rc = urb->status;
2497 complete(&arg->done);
2498}
2499
2500static int pn533_acr122_poweron_rdr(struct pn533 *dev)
2501{
2502 /* Power on th reader (CCID cmd) */
2503 u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
2504 0, 0, 0, 0, 0, 0, 3, 0, 0};
2505 u8 buf[255];
2506 int rc;
2507 void *cntx;
2508 struct pn533_acr122_poweron_rdr_arg arg;
2509
2510 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
2511
2512 init_completion(&arg.done);
2513 cntx = dev->in_urb->context; /* backup context */
2514
2515 dev->in_urb->transfer_buffer = buf;
2516 dev->in_urb->transfer_buffer_length = 255;
2517 dev->in_urb->complete = pn533_acr122_poweron_rdr_resp;
2518 dev->in_urb->context = &arg;
2519
2520 dev->out_urb->transfer_buffer = cmd;
2521 dev->out_urb->transfer_buffer_length = sizeof(cmd);
2522
2523 print_hex_dump(KERN_ERR, "ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
2524 cmd, sizeof(cmd), false);
2525
2526 rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
2527 if (rc) {
2528 nfc_dev_err(&dev->interface->dev,
2529 "Reader power on cmd error %d", rc);
2530 return rc;
2531 }
2532
2533 rc = usb_submit_urb(dev->in_urb, GFP_KERNEL);
2534 if (rc) {
2535 nfc_dev_err(&dev->interface->dev,
2536 "Can't submit for reader power on cmd response %d",
2537 rc);
2538 return rc;
2539 }
2540
2541 wait_for_completion(&arg.done);
2542 dev->in_urb->context = cntx; /* restore context */
2543
2544 return arg.rc;
2545}
2546
2547static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
2548{
2549 struct pn533 *dev = nfc_get_drvdata(nfc_dev);
2550 u8 rf_field = !!rf;
2551 int rc;
2552
2553 rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
2554 (u8 *)&rf_field, 1);
2555 if (rc) {
2556 nfc_dev_err(&dev->interface->dev,
2557 "Error on setting RF field");
2558 return rc;
2559 }
2560
2561 return rc;
2562}
2563
2564int pn533_dev_up(struct nfc_dev *nfc_dev)
2565{
2566 return pn533_rf_field(nfc_dev, 1);
2567}
2568
2569int pn533_dev_down(struct nfc_dev *nfc_dev)
2570{
2571 return pn533_rf_field(nfc_dev, 0);
2572}
2573
2335static struct nfc_ops pn533_nfc_ops = { 2574static struct nfc_ops pn533_nfc_ops = {
2336 .dev_up = NULL, 2575 .dev_up = pn533_dev_up,
2337 .dev_down = NULL, 2576 .dev_down = pn533_dev_down,
2338 .dep_link_up = pn533_dep_link_up, 2577 .dep_link_up = pn533_dep_link_up,
2339 .dep_link_down = pn533_dep_link_down, 2578 .dep_link_down = pn533_dep_link_down,
2340 .start_poll = pn533_start_poll, 2579 .start_poll = pn533_start_poll,
@@ -2366,6 +2605,7 @@ static int pn533_setup(struct pn533 *dev)
2366 break; 2605 break;
2367 2606
2368 case PN533_DEVICE_PASORI: 2607 case PN533_DEVICE_PASORI:
2608 case PN533_DEVICE_ACR122U:
2369 max_retries.mx_rty_atr = 0x2; 2609 max_retries.mx_rty_atr = 0x2;
2370 max_retries.mx_rty_psl = 0x1; 2610 max_retries.mx_rty_psl = 0x1;
2371 max_retries.mx_rty_passive_act = 2611 max_retries.mx_rty_passive_act =
@@ -2405,7 +2645,7 @@ static int pn533_setup(struct pn533 *dev)
2405 break; 2645 break;
2406 2646
2407 case PN533_DEVICE_PASORI: 2647 case PN533_DEVICE_PASORI:
2408 pn533_fw_reset(dev); 2648 pn533_pasori_fw_reset(dev);
2409 2649
2410 rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI, 2650 rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
2411 pasori_cfg, 3); 2651 pasori_cfg, 3);
@@ -2415,7 +2655,7 @@ static int pn533_setup(struct pn533 *dev)
2415 return rc; 2655 return rc;
2416 } 2656 }
2417 2657
2418 pn533_fw_reset(dev); 2658 pn533_pasori_fw_reset(dev);
2419 2659
2420 break; 2660 break;
2421 } 2661 }
@@ -2496,6 +2736,7 @@ static int pn533_probe(struct usb_interface *interface,
2496 2736
2497 dev->ops = &pn533_std_frame_ops; 2737 dev->ops = &pn533_std_frame_ops;
2498 2738
2739 dev->protocol_type = PN533_PROTO_REQ_ACK_RESP;
2499 dev->device_type = id->driver_info; 2740 dev->device_type = id->driver_info;
2500 switch (dev->device_type) { 2741 switch (dev->device_type) {
2501 case PN533_DEVICE_STD: 2742 case PN533_DEVICE_STD:
@@ -2506,6 +2747,20 @@ static int pn533_probe(struct usb_interface *interface,
2506 protocols = PN533_NO_TYPE_B_PROTOCOLS; 2747 protocols = PN533_NO_TYPE_B_PROTOCOLS;
2507 break; 2748 break;
2508 2749
2750 case PN533_DEVICE_ACR122U:
2751 protocols = PN533_NO_TYPE_B_PROTOCOLS;
2752 dev->ops = &pn533_acr122_frame_ops;
2753 dev->protocol_type = PN533_PROTO_REQ_RESP,
2754
2755 rc = pn533_acr122_poweron_rdr(dev);
2756 if (rc < 0) {
2757 nfc_dev_err(&dev->interface->dev,
2758 "Couldn't poweron the reader (error %d)",
2759 rc);
2760 goto destroy_wq;
2761 }
2762 break;
2763
2509 default: 2764 default:
2510 nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n", 2765 nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
2511 dev->device_type); 2766 dev->device_type);
@@ -2555,6 +2810,7 @@ destroy_wq:
2555error: 2810error:
2556 usb_free_urb(dev->in_urb); 2811 usb_free_urb(dev->in_urb);
2557 usb_free_urb(dev->out_urb); 2812 usb_free_urb(dev->out_urb);
2813 usb_put_dev(dev->udev);
2558 kfree(dev); 2814 kfree(dev);
2559 return rc; 2815 return rc;
2560} 2816}
@@ -2600,8 +2856,9 @@ static struct usb_driver pn533_driver = {
2600 2856
2601module_usb_driver(pn533_driver); 2857module_usb_driver(pn533_driver);
2602 2858
2603MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>," 2859MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
2604 " Aloisio Almeida Jr <aloisio.almeida@openbossa.org>"); 2860MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
2861MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
2605MODULE_DESCRIPTION("PN533 usb driver ver " VERSION); 2862MODULE_DESCRIPTION("PN533 usb driver ver " VERSION);
2606MODULE_VERSION(VERSION); 2863MODULE_VERSION(VERSION);
2607MODULE_LICENSE("GPL"); 2864MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn544/Kconfig b/drivers/nfc/pn544/Kconfig
index c277790ac71c..ccf06f5f6ebb 100644
--- a/drivers/nfc/pn544/Kconfig
+++ b/drivers/nfc/pn544/Kconfig
@@ -20,4 +20,15 @@ config NFC_PN544_I2C
20 Select this if your platform is using the i2c bus. 20 Select this if your platform is using the i2c bus.
21 21
22 If you choose to build a module, it'll be called pn544_i2c. 22 If you choose to build a module, it'll be called pn544_i2c.
23 Say N if unsure. \ No newline at end of file 23 Say N if unsure.
24
25config NFC_PN544_MEI
26 tristate "NFC PN544 MEI support"
27 depends on NFC_PN544 && NFC_MEI_PHY
28 ---help---
29 This module adds support for the mei interface of adapters using
30 NXP pn544 chipsets. Select this if your pn544 chipset
31 is handled by Intel's Management Engine Interface on your platform.
32
33 If you choose to build a module, it'll be called pn544_mei.
34 Say N if unsure.
diff --git a/drivers/nfc/pn544/Makefile b/drivers/nfc/pn544/Makefile
index ac076793687d..29fb5a174036 100644
--- a/drivers/nfc/pn544/Makefile
+++ b/drivers/nfc/pn544/Makefile
@@ -3,6 +3,8 @@
3# 3#
4 4
5pn544_i2c-objs = i2c.o 5pn544_i2c-objs = i2c.o
6pn544_mei-objs = mei.o
6 7
7obj-$(CONFIG_NFC_PN544) += pn544.o 8obj-$(CONFIG_NFC_PN544) += pn544.o
8obj-$(CONFIG_NFC_PN544_I2C) += pn544_i2c.o 9obj-$(CONFIG_NFC_PN544_I2C) += pn544_i2c.o
10obj-$(CONFIG_NFC_PN544_MEI) += pn544_mei.o
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
new file mode 100644
index 000000000000..1eb48848a35a
--- /dev/null
+++ b/drivers/nfc/pn544/mei.c
@@ -0,0 +1,121 @@
1/*
2 * HCI based Driver for NXP pn544 NFC Chip
3 *
4 * Copyright (C) 2013 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/mod_devicetable.h>
23#include <linux/nfc.h>
24#include <net/nfc/hci.h>
25#include <net/nfc/llc.h>
26
27#include "../mei_phy.h"
28#include "pn544.h"
29
30#define PN544_DRIVER_NAME "pn544"
31
32static int pn544_mei_probe(struct mei_cl_device *device,
33 const struct mei_cl_device_id *id)
34{
35 struct nfc_mei_phy *phy;
36 int r;
37
38 pr_info("Probing NFC pn544\n");
39
40 phy = nfc_mei_phy_alloc(device);
41 if (!phy) {
42 pr_err("Cannot allocate memory for pn544 mei phy.\n");
43 return -ENOMEM;
44 }
45
46 r = mei_cl_register_event_cb(device, nfc_mei_event_cb, phy);
47 if (r) {
48 pr_err(PN544_DRIVER_NAME ": event cb registration failed\n");
49 goto err_out;
50 }
51
52 r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
53 MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
54 &phy->hdev);
55 if (r < 0)
56 goto err_out;
57
58 return 0;
59
60err_out:
61 nfc_mei_phy_free(phy);
62
63 return r;
64}
65
66static int pn544_mei_remove(struct mei_cl_device *device)
67{
68 struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
69
70 pr_info("Removing pn544\n");
71
72 pn544_hci_remove(phy->hdev);
73
74 nfc_mei_phy_disable(phy);
75
76 nfc_mei_phy_free(phy);
77
78 return 0;
79}
80
81static struct mei_cl_device_id pn544_mei_tbl[] = {
82 { PN544_DRIVER_NAME },
83
84 /* required last entry */
85 { }
86};
87MODULE_DEVICE_TABLE(mei, pn544_mei_tbl);
88
89static struct mei_cl_driver pn544_driver = {
90 .id_table = pn544_mei_tbl,
91 .name = PN544_DRIVER_NAME,
92
93 .probe = pn544_mei_probe,
94 .remove = pn544_mei_remove,
95};
96
97static int pn544_mei_init(void)
98{
99 int r;
100
101 pr_debug(DRIVER_DESC ": %s\n", __func__);
102
103 r = mei_cl_driver_register(&pn544_driver);
104 if (r) {
105 pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
106 return r;
107 }
108
109 return 0;
110}
111
112static void pn544_mei_exit(void)
113{
114 mei_cl_driver_unregister(&pn544_driver);
115}
116
117module_init(pn544_mei_init);
118module_exit(pn544_mei_exit);
119
120MODULE_LICENSE("GPL");
121MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e3a8b22ef9dd..23049aeca662 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -34,7 +34,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
34{ 34{
35 struct phy_device *phy; 35 struct phy_device *phy;
36 struct device_node *child; 36 struct device_node *child;
37 int rc, i; 37 const __be32 *paddr;
38 u32 addr;
39 bool is_c45, scanphys = false;
40 int rc, i, len;
38 41
39 /* Mask out all PHYs from auto probing. Instead the PHYs listed in 42 /* Mask out all PHYs from auto probing. Instead the PHYs listed in
40 * the device tree are populated after the bus has been registered */ 43 * the device tree are populated after the bus has been registered */
@@ -54,14 +57,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
54 57
55 /* Loop over the child nodes and register a phy_device for each one */ 58 /* Loop over the child nodes and register a phy_device for each one */
56 for_each_available_child_of_node(np, child) { 59 for_each_available_child_of_node(np, child) {
57 const __be32 *paddr;
58 u32 addr;
59 int len;
60 bool is_c45;
61
62 /* A PHY must have a reg property in the range [0-31] */ 60 /* A PHY must have a reg property in the range [0-31] */
63 paddr = of_get_property(child, "reg", &len); 61 paddr = of_get_property(child, "reg", &len);
64 if (!paddr || len < sizeof(*paddr)) { 62 if (!paddr || len < sizeof(*paddr)) {
63 scanphys = true;
65 dev_err(&mdio->dev, "%s has invalid PHY address\n", 64 dev_err(&mdio->dev, "%s has invalid PHY address\n",
66 child->full_name); 65 child->full_name);
67 continue; 66 continue;
@@ -111,6 +110,59 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
111 child->name, addr); 110 child->name, addr);
112 } 111 }
113 112
113 if (!scanphys)
114 return 0;
115
116 /* auto scan for PHYs with empty reg property */
117 for_each_available_child_of_node(np, child) {
118 /* Skip PHYs with reg property set */
119 paddr = of_get_property(child, "reg", &len);
120 if (paddr)
121 continue;
122
123 is_c45 = of_device_is_compatible(child,
124 "ethernet-phy-ieee802.3-c45");
125
126 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
127 /* skip already registered PHYs */
128 if (mdio->phy_map[addr])
129 continue;
130
131 /* be noisy to encourage people to set reg property */
132 dev_info(&mdio->dev, "scan phy %s at address %i\n",
133 child->name, addr);
134
135 phy = get_phy_device(mdio, addr, is_c45);
136 if (!phy || IS_ERR(phy))
137 continue;
138
139 if (mdio->irq) {
140 mdio->irq[addr] =
141 irq_of_parse_and_map(child, 0);
142 if (!mdio->irq[addr])
143 mdio->irq[addr] = PHY_POLL;
144 }
145
146 /* Associate the OF node with the device structure so it
147 * can be looked up later */
148 of_node_get(child);
149 phy->dev.of_node = child;
150
151 /* All data is now stored in the phy struct;
152 * register it */
153 rc = phy_device_register(phy);
154 if (rc) {
155 phy_device_free(phy);
156 of_node_put(child);
157 continue;
158 }
159
160 dev_info(&mdio->dev, "registered phy %s at address %i\n",
161 child->name, addr);
162 break;
163 }
164 }
165
114 return 0; 166 return 0;
115} 167}
116EXPORT_SYMBOL(of_mdiobus_register); 168EXPORT_SYMBOL(of_mdiobus_register);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ee599f274f05..c93071d428f5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -729,6 +729,47 @@ int pci_num_vf(struct pci_dev *dev)
729EXPORT_SYMBOL_GPL(pci_num_vf); 729EXPORT_SYMBOL_GPL(pci_num_vf);
730 730
731/** 731/**
732 * pci_vfs_assigned - returns number of VFs are assigned to a guest
733 * @dev: the PCI device
734 *
735 * Returns number of VFs belonging to this device that are assigned to a guest.
736 * If device is not a physical function returns -ENODEV.
737 */
738int pci_vfs_assigned(struct pci_dev *dev)
739{
740 struct pci_dev *vfdev;
741 unsigned int vfs_assigned = 0;
742 unsigned short dev_id;
743
744 /* only search if we are a PF */
745 if (!dev->is_physfn)
746 return 0;
747
748 /*
749 * determine the device ID for the VFs, the vendor ID will be the
750 * same as the PF so there is no need to check for that one
751 */
752 pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
753
754 /* loop through all the VFs to see if we own any that are assigned */
755 vfdev = pci_get_device(dev->vendor, dev_id, NULL);
756 while (vfdev) {
757 /*
758 * It is considered assigned if it is a virtual function with
759 * our dev as the physical function and the assigned bit is set
760 */
761 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
762 (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
763 vfs_assigned++;
764
765 vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
766 }
767
768 return vfs_assigned;
769}
770EXPORT_SYMBOL_GPL(pci_vfs_assigned);
771
772/**
732 * pci_sriov_set_totalvfs -- reduce the TotalVFs available 773 * pci_sriov_set_totalvfs -- reduce the TotalVFs available
733 * @dev: the PCI PF device 774 * @dev: the PCI PF device
734 * @numvfs: number that should be used for TotalVFs supported 775 * @numvfs: number that should be used for TotalVFs supported
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 79f4bce061bd..4a8c388364ca 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -17,7 +17,7 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20#include <linux/bitops.h> 20#include <linux/idr.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/init.h> 23#include <linux/init.h>
@@ -32,7 +32,6 @@
32#include "ptp_private.h" 32#include "ptp_private.h"
33 33
34#define PTP_MAX_ALARMS 4 34#define PTP_MAX_ALARMS 4
35#define PTP_MAX_CLOCKS 8
36#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT) 35#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
37#define PTP_PPS_EVENT PPS_CAPTUREASSERT 36#define PTP_PPS_EVENT PPS_CAPTUREASSERT
38#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) 37#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
@@ -42,8 +41,7 @@
42static dev_t ptp_devt; 41static dev_t ptp_devt;
43static struct class *ptp_class; 42static struct class *ptp_class;
44 43
45static DECLARE_BITMAP(ptp_clocks_map, PTP_MAX_CLOCKS); 44static DEFINE_IDA(ptp_clocks_map);
46static DEFINE_MUTEX(ptp_clocks_mutex); /* protects 'ptp_clocks_map' */
47 45
48/* time stamp event queue operations */ 46/* time stamp event queue operations */
49 47
@@ -171,12 +169,7 @@ static void delete_ptp_clock(struct posix_clock *pc)
171 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 169 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
172 170
173 mutex_destroy(&ptp->tsevq_mux); 171 mutex_destroy(&ptp->tsevq_mux);
174 172 ida_simple_remove(&ptp_clocks_map, ptp->index);
175 /* Remove the clock from the bit map. */
176 mutex_lock(&ptp_clocks_mutex);
177 clear_bit(ptp->index, ptp_clocks_map);
178 mutex_unlock(&ptp_clocks_mutex);
179
180 kfree(ptp); 173 kfree(ptp);
181} 174}
182 175
@@ -191,21 +184,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
191 if (info->n_alarm > PTP_MAX_ALARMS) 184 if (info->n_alarm > PTP_MAX_ALARMS)
192 return ERR_PTR(-EINVAL); 185 return ERR_PTR(-EINVAL);
193 186
194 /* Find a free clock slot and reserve it. */
195 err = -EBUSY;
196 mutex_lock(&ptp_clocks_mutex);
197 index = find_first_zero_bit(ptp_clocks_map, PTP_MAX_CLOCKS);
198 if (index < PTP_MAX_CLOCKS)
199 set_bit(index, ptp_clocks_map);
200 else
201 goto no_slot;
202
203 /* Initialize a clock structure. */ 187 /* Initialize a clock structure. */
204 err = -ENOMEM; 188 err = -ENOMEM;
205 ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL); 189 ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
206 if (ptp == NULL) 190 if (ptp == NULL)
207 goto no_memory; 191 goto no_memory;
208 192
193 index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
194 if (index < 0) {
195 err = index;
196 goto no_slot;
197 }
198
209 ptp->clock.ops = ptp_clock_ops; 199 ptp->clock.ops = ptp_clock_ops;
210 ptp->clock.release = delete_ptp_clock; 200 ptp->clock.release = delete_ptp_clock;
211 ptp->info = info; 201 ptp->info = info;
@@ -248,7 +238,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
248 goto no_clock; 238 goto no_clock;
249 } 239 }
250 240
251 mutex_unlock(&ptp_clocks_mutex);
252 return ptp; 241 return ptp;
253 242
254no_clock: 243no_clock:
@@ -260,11 +249,9 @@ no_sysfs:
260 device_destroy(ptp_class, ptp->devid); 249 device_destroy(ptp_class, ptp->devid);
261no_device: 250no_device:
262 mutex_destroy(&ptp->tsevq_mux); 251 mutex_destroy(&ptp->tsevq_mux);
252no_slot:
263 kfree(ptp); 253 kfree(ptp);
264no_memory: 254no_memory:
265 clear_bit(index, ptp_clocks_map);
266no_slot:
267 mutex_unlock(&ptp_clocks_mutex);
268 return ERR_PTR(err); 255 return ERR_PTR(err);
269} 256}
270EXPORT_SYMBOL(ptp_clock_register); 257EXPORT_SYMBOL(ptp_clock_register);
@@ -323,7 +310,8 @@ EXPORT_SYMBOL(ptp_clock_index);
323static void __exit ptp_exit(void) 310static void __exit ptp_exit(void)
324{ 311{
325 class_destroy(ptp_class); 312 class_destroy(ptp_class);
326 unregister_chrdev_region(ptp_devt, PTP_MAX_CLOCKS); 313 unregister_chrdev_region(ptp_devt, MINORMASK + 1);
314 ida_destroy(&ptp_clocks_map);
327} 315}
328 316
329static int __init ptp_init(void) 317static int __init ptp_init(void)
@@ -336,7 +324,7 @@ static int __init ptp_init(void)
336 return PTR_ERR(ptp_class); 324 return PTR_ERR(ptp_class);
337 } 325 }
338 326
339 err = alloc_chrdev_region(&ptp_devt, 0, PTP_MAX_CLOCKS, "ptp"); 327 err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
340 if (err < 0) { 328 if (err < 0) {
341 pr_err("ptp: failed to allocate device region\n"); 329 pr_err("ptp: failed to allocate device region\n");
342 goto no_region; 330 goto no_region;
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 1367655eee39..bea94510ad2d 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -118,7 +118,7 @@ struct pch_ts_regs {
118 * struct pch_dev - Driver private data 118 * struct pch_dev - Driver private data
119 */ 119 */
120struct pch_dev { 120struct pch_dev {
121 struct pch_ts_regs *regs; 121 struct pch_ts_regs __iomem *regs;
122 struct ptp_clock *ptp_clock; 122 struct ptp_clock *ptp_clock;
123 struct ptp_clock_info caps; 123 struct ptp_clock_info caps;
124 int exts0_enabled; 124 int exts0_enabled;
@@ -154,7 +154,7 @@ static inline void pch_eth_enable_set(struct pch_dev *chip)
154 iowrite32(val, (&chip->regs->ts_sel)); 154 iowrite32(val, (&chip->regs->ts_sel));
155} 155}
156 156
157static u64 pch_systime_read(struct pch_ts_regs *regs) 157static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
158{ 158{
159 u64 ns; 159 u64 ns;
160 u32 lo, hi; 160 u32 lo, hi;
@@ -169,7 +169,7 @@ static u64 pch_systime_read(struct pch_ts_regs *regs)
169 return ns; 169 return ns;
170} 170}
171 171
172static void pch_systime_write(struct pch_ts_regs *regs, u64 ns) 172static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
173{ 173{
174 u32 hi, lo; 174 u32 hi, lo;
175 175
@@ -315,7 +315,7 @@ int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
315 struct pch_dev *chip = pci_get_drvdata(pdev); 315 struct pch_dev *chip = pci_get_drvdata(pdev);
316 316
317 /* Verify the parameter */ 317 /* Verify the parameter */
318 if ((chip->regs == 0) || addr == (u8 *)NULL) { 318 if ((chip->regs == NULL) || addr == (u8 *)NULL) {
319 dev_err(&pdev->dev, 319 dev_err(&pdev->dev,
320 "invalid params returning PCH_INVALIDPARAM\n"); 320 "invalid params returning PCH_INVALIDPARAM\n");
321 return PCH_INVALIDPARAM; 321 return PCH_INVALIDPARAM;
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(pch_set_station_address);
361static irqreturn_t isr(int irq, void *priv) 361static irqreturn_t isr(int irq, void *priv)
362{ 362{
363 struct pch_dev *pch_dev = priv; 363 struct pch_dev *pch_dev = priv;
364 struct pch_ts_regs *regs = pch_dev->regs; 364 struct pch_ts_regs __iomem *regs = pch_dev->regs;
365 struct ptp_clock_event event; 365 struct ptp_clock_event event;
366 u32 ack = 0, lo, hi, val; 366 u32 ack = 0, lo, hi, val;
367 367
@@ -415,7 +415,7 @@ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
415 u32 diff, addend; 415 u32 diff, addend;
416 int neg_adj = 0; 416 int neg_adj = 0;
417 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 417 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
418 struct pch_ts_regs *regs = pch_dev->regs; 418 struct pch_ts_regs __iomem *regs = pch_dev->regs;
419 419
420 if (ppb < 0) { 420 if (ppb < 0) {
421 neg_adj = 1; 421 neg_adj = 1;
@@ -438,7 +438,7 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
438 s64 now; 438 s64 now;
439 unsigned long flags; 439 unsigned long flags;
440 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 440 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
441 struct pch_ts_regs *regs = pch_dev->regs; 441 struct pch_ts_regs __iomem *regs = pch_dev->regs;
442 442
443 spin_lock_irqsave(&pch_dev->register_lock, flags); 443 spin_lock_irqsave(&pch_dev->register_lock, flags);
444 now = pch_systime_read(regs); 444 now = pch_systime_read(regs);
@@ -455,7 +455,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
455 u32 remainder; 455 u32 remainder;
456 unsigned long flags; 456 unsigned long flags;
457 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 457 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
458 struct pch_ts_regs *regs = pch_dev->regs; 458 struct pch_ts_regs __iomem *regs = pch_dev->regs;
459 459
460 spin_lock_irqsave(&pch_dev->register_lock, flags); 460 spin_lock_irqsave(&pch_dev->register_lock, flags);
461 ns = pch_systime_read(regs); 461 ns = pch_systime_read(regs);
@@ -472,7 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp,
472 u64 ns; 472 u64 ns;
473 unsigned long flags; 473 unsigned long flags;
474 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 474 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
475 struct pch_ts_regs *regs = pch_dev->regs; 475 struct pch_ts_regs __iomem *regs = pch_dev->regs;
476 476
477 ns = ts->tv_sec * 1000000000ULL; 477 ns = ts->tv_sec * 1000000000ULL;
478 ns += ts->tv_nsec; 478 ns += ts->tv_nsec;
@@ -567,9 +567,9 @@ static void pch_remove(struct pci_dev *pdev)
567 free_irq(pdev->irq, chip); 567 free_irq(pdev->irq, chip);
568 568
569 /* unmap the virtual IO memory space */ 569 /* unmap the virtual IO memory space */
570 if (chip->regs != 0) { 570 if (chip->regs != NULL) {
571 iounmap(chip->regs); 571 iounmap(chip->regs);
572 chip->regs = 0; 572 chip->regs = NULL;
573 } 573 }
574 /* release the reserved IO memory space */ 574 /* release the reserved IO memory space */
575 if (chip->mem_base != 0) { 575 if (chip->mem_base != 0) {
@@ -670,7 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
670err_req_irq: 670err_req_irq:
671 ptp_clock_unregister(chip->ptp_clock); 671 ptp_clock_unregister(chip->ptp_clock);
672 iounmap(chip->regs); 672 iounmap(chip->regs);
673 chip->regs = 0; 673 chip->regs = NULL;
674 674
675err_ioremap: 675err_ioremap:
676 release_mem_region(chip->mem_base, chip->mem_size); 676 release_mem_region(chip->mem_base, chip->mem_size);
@@ -723,9 +723,10 @@ static s32 __init ptp_pch_init(void)
723module_init(ptp_pch_init); 723module_init(ptp_pch_init);
724module_exit(ptp_pch_exit); 724module_exit(ptp_pch_exit);
725 725
726module_param_string(station, pch_param.station, sizeof pch_param.station, 0444); 726module_param_string(station,
727 pch_param.station, sizeof(pch_param.station), 0444);
727MODULE_PARM_DESC(station, 728MODULE_PARM_DESC(station,
728 "IEEE 1588 station address to use - column separated hex values"); 729 "IEEE 1588 station address to use - colon separated hex values");
729 730
730MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); 731MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
731MODULE_DESCRIPTION("PTP clock using the EG20T timer"); 732MODULE_DESCRIPTION("PTP clock using the EG20T timer");
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 2029b6caa595..fb877b59ec57 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -166,7 +166,7 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
166 166
167 vcdev = to_vc_device(info->vq->vdev); 167 vcdev = to_vc_device(info->vq->vdev);
168 ccw_device_get_schid(vcdev->cdev, &schid); 168 ccw_device_get_schid(vcdev->cdev, &schid);
169 do_kvm_notify(schid, virtqueue_get_queue_index(vq)); 169 do_kvm_notify(schid, vq->index);
170} 170}
171 171
172static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, 172static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
@@ -188,7 +188,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
188 unsigned long flags; 188 unsigned long flags;
189 unsigned long size; 189 unsigned long size;
190 int ret; 190 int ret;
191 unsigned int index = virtqueue_get_queue_index(vq); 191 unsigned int index = vq->index;
192 192
193 /* Remove from our list. */ 193 /* Remove from our list. */
194 spin_lock_irqsave(&vcdev->lock, flags); 194 spin_lock_irqsave(&vcdev->lock, flags);
@@ -610,7 +610,7 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
610 vq = NULL; 610 vq = NULL;
611 spin_lock_irqsave(&vcdev->lock, flags); 611 spin_lock_irqsave(&vcdev->lock, flags);
612 list_for_each_entry(info, &vcdev->virtqueues, node) { 612 list_for_each_entry(info, &vcdev->virtqueues, node) {
613 if (virtqueue_get_queue_index(info->vq) == index) { 613 if (info->vq->index == index) {
614 vq = info->vq; 614 vq = info->vq;
615 break; 615 break;
616 } 616 }
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6ccb7457746b..c4f392d5db4c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -918,7 +918,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
918 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), 918 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
919 void *reply_param); 919 void *reply_param);
920int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 920int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
921int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 921int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
922int qeth_get_elements_for_frags(struct sk_buff *); 922int qeth_get_elements_for_frags(struct sk_buff *);
923int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 923int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
924 struct sk_buff *, struct qeth_hdr *, int, int, int); 924 struct sk_buff *, struct qeth_hdr *, int, int, int);
@@ -932,7 +932,7 @@ void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
932void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); 932void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
933int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 933int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
934int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); 934int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
935int qeth_hdr_chk_and_bounce(struct sk_buff *, int); 935int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
936int qeth_configure_cq(struct qeth_card *, enum qeth_cq); 936int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
937int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); 937int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
938int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); 938int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 451f92020599..6cd0fc1b203a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -335,7 +335,7 @@ static inline int qeth_alloc_cq(struct qeth_card *card)
335 335
336 card->qdio.no_in_queues = 2; 336 card->qdio.no_in_queues = 2;
337 337
338 card->qdio.out_bufstates = (struct qdio_outbuf_state *) 338 card->qdio.out_bufstates =
339 kzalloc(card->qdio.no_out_queues * 339 kzalloc(card->qdio.no_out_queues *
340 QDIO_MAX_BUFFERS_PER_Q * 340 QDIO_MAX_BUFFERS_PER_Q *
341 sizeof(struct qdio_outbuf_state), GFP_KERNEL); 341 sizeof(struct qdio_outbuf_state), GFP_KERNEL);
@@ -3717,7 +3717,7 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
3717} 3717}
3718EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); 3718EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3719 3719
3720int qeth_get_elements_no(struct qeth_card *card, void *hdr, 3720int qeth_get_elements_no(struct qeth_card *card,
3721 struct sk_buff *skb, int elems) 3721 struct sk_buff *skb, int elems)
3722{ 3722{
3723 int dlen = skb->len - skb->data_len; 3723 int dlen = skb->len - skb->data_len;
@@ -3736,7 +3736,7 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
3736} 3736}
3737EXPORT_SYMBOL_GPL(qeth_get_elements_no); 3737EXPORT_SYMBOL_GPL(qeth_get_elements_no);
3738 3738
3739int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len) 3739int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
3740{ 3740{
3741 int hroom, inpage, rest; 3741 int hroom, inpage, rest;
3742 3742
@@ -3749,6 +3749,8 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
3749 return 1; 3749 return 1;
3750 memmove(skb->data - rest, skb->data, skb->len - skb->data_len); 3750 memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
3751 skb->data -= rest; 3751 skb->data -= rest;
3752 skb->tail -= rest;
3753 *hdr = (struct qeth_hdr *)skb->data;
3752 QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); 3754 QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
3753 } 3755 }
3754 return 0; 3756 return 0;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 155b101bd730..ec8ccdae7aba 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -302,7 +302,8 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
302 spin_unlock_bh(&card->vlanlock); 302 spin_unlock_bh(&card->vlanlock);
303} 303}
304 304
305static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 305static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
306 __be16 proto, u16 vid)
306{ 307{
307 struct qeth_card *card = dev->ml_priv; 308 struct qeth_card *card = dev->ml_priv;
308 struct qeth_vlan_vid *id; 309 struct qeth_vlan_vid *id;
@@ -331,7 +332,8 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
331 return 0; 332 return 0;
332} 333}
333 334
334static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 335static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
336 __be16 proto, u16 vid)
335{ 337{
336 struct qeth_vlan_vid *id, *tmpid = NULL; 338 struct qeth_vlan_vid *id, *tmpid = NULL;
337 struct qeth_card *card = dev->ml_priv; 339 struct qeth_card *card = dev->ml_priv;
@@ -771,8 +773,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
771 } 773 }
772 } 774 }
773 775
774 elements = qeth_get_elements_no(card, (void *)hdr, new_skb, 776 elements = qeth_get_elements_no(card, new_skb, elements_needed);
775 elements_needed);
776 if (!elements) { 777 if (!elements) {
777 if (data_offset >= 0) 778 if (data_offset >= 0)
778 kmem_cache_free(qeth_core_header_cache, hdr); 779 kmem_cache_free(qeth_core_header_cache, hdr);
@@ -780,7 +781,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
780 } 781 }
781 782
782 if (card->info.type != QETH_CARD_TYPE_IQD) { 783 if (card->info.type != QETH_CARD_TYPE_IQD) {
783 if (qeth_hdr_chk_and_bounce(new_skb, 784 if (qeth_hdr_chk_and_bounce(new_skb, &hdr,
784 sizeof(struct qeth_hdr_layer2))) 785 sizeof(struct qeth_hdr_layer2)))
785 goto tx_drop; 786 goto tx_drop;
786 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 787 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
@@ -959,7 +960,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
959 SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops); 960 SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
960 else 961 else
961 SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops); 962 SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
962 card->dev->features |= NETIF_F_HW_VLAN_FILTER; 963 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
963 card->info.broadcast_capable = 1; 964 card->info.broadcast_capable = 1;
964 qeth_l2_request_initial_mac(card); 965 qeth_l2_request_initial_mac(card);
965 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 966 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 1f7edf1b26c3..c1b0b2761f8d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1659,7 +1659,8 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1659 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1659 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1660 struct net_device *netdev; 1660 struct net_device *netdev;
1661 1661
1662 netdev = __vlan_find_dev_deep(card->dev, vid); 1662 netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
1663 vid);
1663 if (netdev == NULL || 1664 if (netdev == NULL ||
1664 !(netdev->flags & IFF_UP)) 1665 !(netdev->flags & IFF_UP))
1665 continue; 1666 continue;
@@ -1720,7 +1721,8 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1720 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1721 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
1721 struct net_device *netdev; 1722 struct net_device *netdev;
1722 1723
1723 netdev = __vlan_find_dev_deep(card->dev, vid); 1724 netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
1725 vid);
1724 if (netdev == NULL || 1726 if (netdev == NULL ||
1725 !(netdev->flags & IFF_UP)) 1727 !(netdev->flags & IFF_UP))
1726 continue; 1728 continue;
@@ -1764,7 +1766,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1764 1766
1765 QETH_CARD_TEXT(card, 4, "frvaddr4"); 1767 QETH_CARD_TEXT(card, 4, "frvaddr4");
1766 1768
1767 netdev = __vlan_find_dev_deep(card->dev, vid); 1769 netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
1768 if (!netdev) 1770 if (!netdev)
1769 return; 1771 return;
1770 in_dev = in_dev_get(netdev); 1772 in_dev = in_dev_get(netdev);
@@ -1794,7 +1796,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1794 1796
1795 QETH_CARD_TEXT(card, 4, "frvaddr6"); 1797 QETH_CARD_TEXT(card, 4, "frvaddr6");
1796 1798
1797 netdev = __vlan_find_dev_deep(card->dev, vid); 1799 netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
1798 if (!netdev) 1800 if (!netdev)
1799 return; 1801 return;
1800 in6_dev = in6_dev_get(netdev); 1802 in6_dev = in6_dev_get(netdev);
@@ -1824,7 +1826,8 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1824 rcu_read_unlock(); 1826 rcu_read_unlock();
1825} 1827}
1826 1828
1827static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1829static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
1830 __be16 proto, u16 vid)
1828{ 1831{
1829 struct qeth_card *card = dev->ml_priv; 1832 struct qeth_card *card = dev->ml_priv;
1830 1833
@@ -1832,7 +1835,8 @@ static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1832 return 0; 1835 return 0;
1833} 1836}
1834 1837
1835static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1838static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
1839 __be16 proto, u16 vid)
1836{ 1840{
1837 struct qeth_card *card = dev->ml_priv; 1841 struct qeth_card *card = dev->ml_priv;
1838 unsigned long flags; 1842 unsigned long flags;
@@ -1975,7 +1979,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1975 &vlan_tag); 1979 &vlan_tag);
1976 len = skb->len; 1980 len = skb->len;
1977 if (is_vlan && !card->options.sniffer) 1981 if (is_vlan && !card->options.sniffer)
1978 __vlan_hwaccel_put_tag(skb, vlan_tag); 1982 __vlan_hwaccel_put_tag(skb,
1983 htons(ETH_P_8021Q), vlan_tag);
1979 napi_gro_receive(&card->napi, skb); 1984 napi_gro_receive(&card->napi, skb);
1980 } 1985 }
1981 break; 1986 break;
@@ -2084,7 +2089,8 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2084 struct net_device *netdev; 2089 struct net_device *netdev;
2085 2090
2086 rcu_read_lock(); 2091 rcu_read_lock();
2087 netdev = __vlan_find_dev_deep(card->dev, vid); 2092 netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
2093 vid);
2088 rcu_read_unlock(); 2094 rcu_read_unlock();
2089 if (netdev == dev) { 2095 if (netdev == dev) {
2090 rc = QETH_VLAN_CARD; 2096 rc = QETH_VLAN_CARD;
@@ -3031,8 +3037,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3031 qeth_l3_hdr_csum(card, hdr, new_skb); 3037 qeth_l3_hdr_csum(card, hdr, new_skb);
3032 } 3038 }
3033 3039
3034 elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 3040 elems = qeth_get_elements_no(card, new_skb, elements_needed);
3035 elements_needed);
3036 if (!elems) { 3041 if (!elems) {
3037 if (data_offset >= 0) 3042 if (data_offset >= 0)
3038 kmem_cache_free(qeth_core_header_cache, hdr); 3043 kmem_cache_free(qeth_core_header_cache, hdr);
@@ -3050,7 +3055,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3050 else 3055 else
3051 len = sizeof(struct qeth_hdr_layer3); 3056 len = sizeof(struct qeth_hdr_layer3);
3052 3057
3053 if (qeth_hdr_chk_and_bounce(new_skb, len)) 3058 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
3054 goto tx_drop; 3059 goto tx_drop;
3055 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 3060 rc = qeth_do_send_packet(card, queue, new_skb, hdr,
3056 elements_needed); 3061 elements_needed);
@@ -3294,9 +3299,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3294 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 3299 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
3295 card->dev->mtu = card->info.initial_mtu; 3300 card->dev->mtu = card->info.initial_mtu;
3296 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); 3301 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
3297 card->dev->features |= NETIF_F_HW_VLAN_TX | 3302 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
3298 NETIF_F_HW_VLAN_RX | 3303 NETIF_F_HW_VLAN_CTAG_RX |
3299 NETIF_F_HW_VLAN_FILTER; 3304 NETIF_F_HW_VLAN_CTAG_FILTER;
3300 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3305 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3301 card->dev->gso_max_size = 15 * PAGE_SIZE; 3306 card->dev->gso_max_size = 15 * PAGE_SIZE;
3302 3307
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 50fcd018d14b..11596b2c4702 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -88,9 +88,6 @@
88 88
89#define BNX2FC_MAX_NPIV 256 89#define BNX2FC_MAX_NPIV 256
90 90
91#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
92#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
93#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
94#define BNX2FC_MIN_PAYLOAD 256 91#define BNX2FC_MIN_PAYLOAD 256
95#define BNX2FC_MAX_PAYLOAD 2048 92#define BNX2FC_MAX_PAYLOAD 2048
96#define BNX2FC_MFS \ 93#define BNX2FC_MFS \
@@ -108,11 +105,8 @@
108#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) 105#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
109#define BNX2FC_5771X_DB_PAGE_SIZE 128 106#define BNX2FC_5771X_DB_PAGE_SIZE 128
110 107
111#define BNX2FC_MAX_TASKS \
112 (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS)
113#define BNX2FC_TASK_SIZE 128 108#define BNX2FC_TASK_SIZE 128
114#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) 109#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
115#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
116 110
117#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 111#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
118#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) 112#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
@@ -125,12 +119,9 @@
125#define BNX2FC_WRITE (1 << 0) 119#define BNX2FC_WRITE (1 << 0)
126 120
127#define BNX2FC_MIN_XID 0 121#define BNX2FC_MIN_XID 0
128#define BNX2FC_MAX_XID \
129 (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
130#define FCOE_MAX_NUM_XIDS 0x2000 122#define FCOE_MAX_NUM_XIDS 0x2000
131#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) 123#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1)
132#define FCOE_MAX_XID (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1) 124#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1)
133#define FCOE_XIDS_PER_CPU (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
134#define BNX2FC_MAX_LUN 0xFFFF 125#define BNX2FC_MAX_LUN 0xFFFF
135#define BNX2FC_MAX_FCP_TGT 256 126#define BNX2FC_MAX_FCP_TGT 256
136#define BNX2FC_MAX_CMD_LEN 16 127#define BNX2FC_MAX_CMD_LEN 16
@@ -206,6 +197,13 @@ struct bnx2fc_hba {
206 #define BNX2FC_FLAG_FW_INIT_DONE 0 197 #define BNX2FC_FLAG_FW_INIT_DONE 0
207 #define BNX2FC_FLAG_DESTROY_CMPL 1 198 #define BNX2FC_FLAG_DESTROY_CMPL 1
208 u32 next_conn_id; 199 u32 next_conn_id;
200
201 /* xid resources */
202 u16 max_xid;
203 u32 max_tasks;
204 u32 max_outstanding_cmds;
205 u32 elstm_xids;
206
209 struct fcoe_task_ctx_entry **task_ctx; 207 struct fcoe_task_ctx_entry **task_ctx;
210 dma_addr_t *task_ctx_dma; 208 dma_addr_t *task_ctx_dma;
211 struct regpair *task_ctx_bd_tbl; 209 struct regpair *task_ctx_bd_tbl;
@@ -504,8 +502,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
504void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); 502void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
505int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); 503int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
506void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); 504void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
507struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 505struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba);
508 u16 min_xid, u16 max_xid);
509void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); 506void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
510void bnx2fc_get_link_state(struct bnx2fc_hba *hba); 507void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
511char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); 508char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 90bc7bd00966..7dffec1e5715 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -71,7 +71,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb);
71static void bnx2fc_start_disc(struct bnx2fc_interface *interface); 71static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
72static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 72static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
73static int bnx2fc_lport_config(struct fc_lport *lport); 73static int bnx2fc_lport_config(struct fc_lport *lport);
74static int bnx2fc_em_config(struct fc_lport *lport); 74static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba);
75static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); 75static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
76static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); 76static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
77static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); 77static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
@@ -944,16 +944,17 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
944 return 0; 944 return 0;
945} 945}
946 946
947static int bnx2fc_em_config(struct fc_lport *lport) 947static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba)
948{ 948{
949 int max_xid; 949 int fcoe_min_xid, fcoe_max_xid;
950 950
951 fcoe_min_xid = hba->max_xid + 1;
951 if (nr_cpu_ids <= 2) 952 if (nr_cpu_ids <= 2)
952 max_xid = FCOE_XIDS_PER_CPU; 953 fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET;
953 else 954 else
954 max_xid = FCOE_MAX_XID; 955 fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET;
955 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 956 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid,
956 max_xid, NULL)) { 957 fcoe_max_xid, NULL)) {
957 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 958 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
958 return -ENOMEM; 959 return -ENOMEM;
959 } 960 }
@@ -1300,6 +1301,12 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1300 mutex_init(&hba->hba_mutex); 1301 mutex_init(&hba->hba_mutex);
1301 1302
1302 hba->cnic = cnic; 1303 hba->cnic = cnic;
1304
1305 hba->max_tasks = cnic->max_fcoe_exchanges;
1306 hba->elstm_xids = (hba->max_tasks / 2);
1307 hba->max_outstanding_cmds = hba->elstm_xids;
1308 hba->max_xid = (hba->max_tasks - 1);
1309
1303 rc = bnx2fc_bind_pcidev(hba); 1310 rc = bnx2fc_bind_pcidev(hba);
1304 if (rc) { 1311 if (rc) {
1305 printk(KERN_ERR PFX "create_adapter: bind error\n"); 1312 printk(KERN_ERR PFX "create_adapter: bind error\n");
@@ -1318,8 +1325,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1318 1325
1319 hba->num_ofld_sess = 0; 1326 hba->num_ofld_sess = 0;
1320 1327
1321 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, 1328 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba);
1322 BNX2FC_MAX_XID);
1323 if (!hba->cmd_mgr) { 1329 if (!hba->cmd_mgr) {
1324 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); 1330 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1325 goto cmgr_err; 1331 goto cmgr_err;
@@ -1330,13 +1336,13 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1330 FCOE_IOS_PER_CONNECTION_SHIFT; 1336 FCOE_IOS_PER_CONNECTION_SHIFT;
1331 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << 1337 fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
1332 FCOE_LOGINS_PER_PORT_SHIFT; 1338 FCOE_LOGINS_PER_PORT_SHIFT;
1333 fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS << 1339 fcoe_cap->capability2 = hba->max_outstanding_cmds <<
1334 FCOE_NUMBER_OF_EXCHANGES_SHIFT; 1340 FCOE_NUMBER_OF_EXCHANGES_SHIFT;
1335 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << 1341 fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
1336 FCOE_NPIV_WWN_PER_PORT_SHIFT; 1342 FCOE_NPIV_WWN_PER_PORT_SHIFT;
1337 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << 1343 fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
1338 FCOE_TARGETS_SUPPORTED_SHIFT; 1344 FCOE_TARGETS_SUPPORTED_SHIFT;
1339 fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS << 1345 fcoe_cap->capability3 |= hba->max_outstanding_cmds <<
1340 FCOE_OUTSTANDING_COMMANDS_SHIFT; 1346 FCOE_OUTSTANDING_COMMANDS_SHIFT;
1341 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; 1347 fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
1342 1348
@@ -1416,7 +1422,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1416 struct Scsi_Host *shost; 1422 struct Scsi_Host *shost;
1417 struct fc_vport *vport = dev_to_vport(parent); 1423 struct fc_vport *vport = dev_to_vport(parent);
1418 struct bnx2fc_lport *blport; 1424 struct bnx2fc_lport *blport;
1419 struct bnx2fc_hba *hba; 1425 struct bnx2fc_hba *hba = interface->hba;
1420 int rc = 0; 1426 int rc = 0;
1421 1427
1422 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1428 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
@@ -1426,6 +1432,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1426 } 1432 }
1427 1433
1428 /* Allocate Scsi_Host structure */ 1434 /* Allocate Scsi_Host structure */
1435 bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds;
1429 if (!npiv) 1436 if (!npiv)
1430 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); 1437 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
1431 else 1438 else
@@ -1477,7 +1484,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1477 1484
1478 /* Allocate exchange manager */ 1485 /* Allocate exchange manager */
1479 if (!npiv) 1486 if (!npiv)
1480 rc = bnx2fc_em_config(lport); 1487 rc = bnx2fc_em_config(lport, hba);
1481 else { 1488 else {
1482 shost = vport_to_shost(vport); 1489 shost = vport_to_shost(vport);
1483 n_port = shost_priv(shost); 1490 n_port = shost_priv(shost);
@@ -1491,7 +1498,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1491 1498
1492 bnx2fc_interface_get(interface); 1499 bnx2fc_interface_get(interface);
1493 1500
1494 hba = interface->hba;
1495 spin_lock_bh(&hba->hba_lock); 1501 spin_lock_bh(&hba->hba_lock);
1496 blport->lport = lport; 1502 blport->lport = lport;
1497 list_add_tail(&blport->list, &hba->vports); 1503 list_add_tail(&blport->list, &hba->vports);
@@ -2706,7 +2712,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
2706 .change_queue_type = fc_change_queue_type, 2712 .change_queue_type = fc_change_queue_type,
2707 .this_id = -1, 2713 .this_id = -1,
2708 .cmd_per_lun = 3, 2714 .cmd_per_lun = 3,
2709 .can_queue = BNX2FC_CAN_QUEUE,
2710 .use_clustering = ENABLE_CLUSTERING, 2715 .use_clustering = ENABLE_CLUSTERING,
2711 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, 2716 .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
2712 .max_sectors = 1024, 2717 .max_sectors = 1024,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 85ea98a80f40..50510ffe1bf5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -77,7 +77,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << 77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79 79
80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; 80 fcoe_init1.num_tasks = hba->max_tasks;
81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; 81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; 82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; 83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
@@ -697,7 +697,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
697 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 697 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
698 698
699 699
700 if (xid > BNX2FC_MAX_XID) { 700 if (xid > hba->max_xid) {
701 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", 701 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
702 xid); 702 xid);
703 goto ret_err_rqe; 703 goto ret_err_rqe;
@@ -815,7 +815,7 @@ ret_err_rqe:
815 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 815 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
816 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 816 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
817 817
818 if (xid > BNX2FC_MAX_XID) { 818 if (xid > hba->max_xid) {
819 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); 819 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
820 goto ret_warn_rqe; 820 goto ret_warn_rqe;
821 } 821 }
@@ -880,7 +880,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
880 880
881 spin_lock_bh(&tgt->tgt_lock); 881 spin_lock_bh(&tgt->tgt_lock);
882 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 882 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
883 if (xid >= BNX2FC_MAX_TASKS) { 883 if (xid >= hba->max_tasks) {
884 printk(KERN_ERR PFX "ERROR:xid out of range\n"); 884 printk(KERN_ERR PFX "ERROR:xid out of range\n");
885 spin_unlock_bh(&tgt->tgt_lock); 885 spin_unlock_bh(&tgt->tgt_lock);
886 return; 886 return;
@@ -1842,6 +1842,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1842 int rc = 0; 1842 int rc = 0;
1843 struct regpair *task_ctx_bdt; 1843 struct regpair *task_ctx_bdt;
1844 dma_addr_t addr; 1844 dma_addr_t addr;
1845 int task_ctx_arr_sz;
1845 int i; 1846 int i;
1846 1847
1847 /* 1848 /*
@@ -1865,7 +1866,8 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1865 * Allocate task_ctx which is an array of pointers pointing to 1866 * Allocate task_ctx which is an array of pointers pointing to
1866 * a page containing 32 task contexts 1867 * a page containing 32 task contexts
1867 */ 1868 */
1868 hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), 1869 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1870 hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1869 GFP_KERNEL); 1871 GFP_KERNEL);
1870 if (!hba->task_ctx) { 1872 if (!hba->task_ctx) {
1871 printk(KERN_ERR PFX "unable to allocate task context array\n"); 1873 printk(KERN_ERR PFX "unable to allocate task context array\n");
@@ -1876,7 +1878,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1876 /* 1878 /*
1877 * Allocate task_ctx_dma which is an array of dma addresses 1879 * Allocate task_ctx_dma which is an array of dma addresses
1878 */ 1880 */
1879 hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * 1881 hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1880 sizeof(dma_addr_t)), GFP_KERNEL); 1882 sizeof(dma_addr_t)), GFP_KERNEL);
1881 if (!hba->task_ctx_dma) { 1883 if (!hba->task_ctx_dma) {
1882 printk(KERN_ERR PFX "unable to alloc context mapping array\n"); 1884 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
@@ -1885,7 +1887,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1885 } 1887 }
1886 1888
1887 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1889 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1888 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1890 for (i = 0; i < task_ctx_arr_sz; i++) {
1889 1891
1890 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, 1892 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1891 PAGE_SIZE, 1893 PAGE_SIZE,
@@ -1905,7 +1907,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1905 return 0; 1907 return 0;
1906 1908
1907out3: 1909out3:
1908 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1910 for (i = 0; i < task_ctx_arr_sz; i++) {
1909 if (hba->task_ctx[i]) { 1911 if (hba->task_ctx[i]) {
1910 1912
1911 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 1913 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
@@ -1929,6 +1931,7 @@ out:
1929 1931
1930void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) 1932void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1931{ 1933{
1934 int task_ctx_arr_sz;
1932 int i; 1935 int i;
1933 1936
1934 if (hba->task_ctx_bd_tbl) { 1937 if (hba->task_ctx_bd_tbl) {
@@ -1938,8 +1941,9 @@ void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1938 hba->task_ctx_bd_tbl = NULL; 1941 hba->task_ctx_bd_tbl = NULL;
1939 } 1942 }
1940 1943
1944 task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1941 if (hba->task_ctx) { 1945 if (hba->task_ctx) {
1942 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { 1946 for (i = 0; i < task_ctx_arr_sz; i++) {
1943 if (hba->task_ctx[i]) { 1947 if (hba->task_ctx[i]) {
1944 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 1948 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1945 hba->task_ctx[i], 1949 hba->task_ctx[i],
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 60798e829de6..723a9a8ba5ee 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -239,8 +239,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
239 sc_cmd->scsi_done(sc_cmd); 239 sc_cmd->scsi_done(sc_cmd);
240} 240}
241 241
242struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, 242struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
243 u16 min_xid, u16 max_xid)
244{ 243{
245 struct bnx2fc_cmd_mgr *cmgr; 244 struct bnx2fc_cmd_mgr *cmgr;
246 struct io_bdt *bdt_info; 245 struct io_bdt *bdt_info;
@@ -252,6 +251,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
252 int num_ios, num_pri_ios; 251 int num_ios, num_pri_ios;
253 size_t bd_tbl_sz; 252 size_t bd_tbl_sz;
254 int arr_sz = num_possible_cpus() + 1; 253 int arr_sz = num_possible_cpus() + 1;
254 u16 min_xid = BNX2FC_MIN_XID;
255 u16 max_xid = hba->max_xid;
255 256
256 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 257 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
257 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ 258 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -298,7 +299,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
298 * of slow path requests. 299 * of slow path requests.
299 */ 300 */
300 xid = BNX2FC_MIN_XID; 301 xid = BNX2FC_MIN_XID;
301 num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; 302 num_pri_ios = num_ios - hba->elstm_xids;
302 for (i = 0; i < num_ios; i++) { 303 for (i = 0; i < num_ios; i++) {
303 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); 304 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
304 305
@@ -367,7 +368,7 @@ void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
367 struct bnx2fc_hba *hba = cmgr->hba; 368 struct bnx2fc_hba *hba = cmgr->hba;
368 size_t bd_tbl_sz; 369 size_t bd_tbl_sz;
369 u16 min_xid = BNX2FC_MIN_XID; 370 u16 min_xid = BNX2FC_MIN_XID;
370 u16 max_xid = BNX2FC_MAX_XID; 371 u16 max_xid = hba->max_xid;
371 int num_ios; 372 int num_ios;
372 int i; 373 int i;
373 374
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index b581966c88f9..913b9a92fb06 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -8,4 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
8obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o 8obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
9 9
10csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ 10csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
11 csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o 11 csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
12 csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 7dbaf58fab9f..193605519361 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -61,7 +61,7 @@ int csio_msi = 2;
61static int dev_num; 61static int dev_num;
62 62
63/* FCoE Adapter types & its description */ 63/* FCoE Adapter types & its description */
64static const struct csio_adap_desc csio_fcoe_adapters[] = { 64static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
65 {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, 65 {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
66 {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, 66 {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
67 {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, 67 {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
@@ -77,7 +77,38 @@ static const struct csio_adap_desc csio_fcoe_adapters[] = {
77 {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, 77 {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
78 {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, 78 {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
79 {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, 79 {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
80 {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"} 80 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
81 {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
82 {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
83 {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
84 {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
85 {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
86 {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
87 {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
88 {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
89};
90
91static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
92 {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
93 {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
94 {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
95 {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
96 {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
97 {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
98 {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
99 {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
100 {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
101 {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
102 {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
103 {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
104 {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
105 {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
106 {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
107 {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
108 {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
109 {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
110 {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
111 {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
81}; 112};
82 113
83static void csio_mgmtm_cleanup(struct csio_mgmtm *); 114static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -124,7 +155,7 @@ int csio_is_hw_removing(struct csio_hw *hw)
124 * at the time it indicated completion is stored there. Returns 0 if the 155 * at the time it indicated completion is stored there. Returns 0 if the
125 * operation completes and -EAGAIN otherwise. 156 * operation completes and -EAGAIN otherwise.
126 */ 157 */
127static int 158int
128csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, 159csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
129 int polarity, int attempts, int delay, uint32_t *valp) 160 int polarity, int attempts, int delay, uint32_t *valp)
130{ 161{
@@ -145,6 +176,24 @@ csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
145 } 176 }
146} 177}
147 178
179/*
180 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
181 * @hw: the adapter
182 * @addr: the indirect TP register address
183 * @mask: specifies the field within the register to modify
184 * @val: new value for the field
185 *
186 * Sets a field of an indirect TP register to the given value.
187 */
188void
189csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
190 unsigned int mask, unsigned int val)
191{
192 csio_wr_reg32(hw, addr, TP_PIO_ADDR);
193 val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
194 csio_wr_reg32(hw, val, TP_PIO_DATA);
195}
196
148void 197void
149csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, 198csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
150 uint32_t value) 199 uint32_t value)
@@ -157,242 +206,22 @@ csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
157 206
158} 207}
159 208
160/*
161 * csio_hw_mc_read - read from MC through backdoor accesses
162 * @hw: the hw module
163 * @addr: address of first byte requested
164 * @data: 64 bytes of data containing the requested address
165 * @ecc: where to store the corresponding 64-bit ECC word
166 *
167 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
168 * that covers the requested address @addr. If @parity is not %NULL it
169 * is assigned the 64-bit ECC word for the read data.
170 */
171int
172csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
173 uint64_t *ecc)
174{
175 int i;
176
177 if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
178 return -EBUSY;
179 csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
180 csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
181 csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
182 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
183 MC_BIST_CMD);
184 i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
185 0, 10, 1, NULL);
186 if (i)
187 return i;
188
189#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
190
191 for (i = 15; i >= 0; i--)
192 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
193 if (ecc)
194 *ecc = csio_rd_reg64(hw, MC_DATA(16));
195#undef MC_DATA
196 return 0;
197}
198
199/*
200 * csio_hw_edc_read - read from EDC through backdoor accesses
201 * @hw: the hw module
202 * @idx: which EDC to access
203 * @addr: address of first byte requested
204 * @data: 64 bytes of data containing the requested address
205 * @ecc: where to store the corresponding 64-bit ECC word
206 *
207 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
208 * that covers the requested address @addr. If @parity is not %NULL it
209 * is assigned the 64-bit ECC word for the read data.
210 */
211int
212csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
213 uint64_t *ecc)
214{
215 int i;
216
217 idx *= EDC_STRIDE;
218 if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
219 return -EBUSY;
220 csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
221 csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
222 csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
223 csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
224 EDC_BIST_CMD + idx);
225 i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
226 0, 10, 1, NULL);
227 if (i)
228 return i;
229
230#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
231
232 for (i = 15; i >= 0; i--)
233 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
234 if (ecc)
235 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
236#undef EDC_DATA
237 return 0;
238}
239
240/*
241 * csio_mem_win_rw - read/write memory through PCIE memory window
242 * @hw: the adapter
243 * @addr: address of first byte requested
244 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
245 * @dir: direction of transfer 1 => read, 0 => write
246 *
247 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
248 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
249 * address @addr.
250 */
251static int
252csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
253{
254 int i;
255
256 /*
257 * Setup offset into PCIE memory window. Address must be a
258 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
259 * ensure that changes propagate before we attempt to use the new
260 * values.)
261 */
262 csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
263 PCIE_MEM_ACCESS_OFFSET);
264 csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
265
266 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
267 for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
268 if (dir)
269 *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
270 else
271 csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
272 }
273
274 return 0;
275}
276
277/*
278 * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
279 * @hw: the csio_hw
280 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
281 * @addr: address within indicated memory type
282 * @len: amount of memory to transfer
283 * @buf: host memory buffer
284 * @dir: direction of transfer 1 => read, 0 => write
285 *
286 * Reads/writes an [almost] arbitrary memory region in the firmware: the
287 * firmware memory address, length and host buffer must be aligned on
288 * 32-bit boudaries. The memory is transferred as a raw byte sequence
289 * from/to the firmware's memory. If this memory contains data
290 * structures which contain multi-byte integers, it's the callers
291 * responsibility to perform appropriate byte order conversions.
292 */
293static int
294csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
295 uint32_t *buf, int dir)
296{
297 uint32_t pos, start, end, offset, memoffset;
298 int ret;
299 uint32_t *data;
300
301 /*
302 * Argument sanity checks ...
303 */
304 if ((addr & 0x3) || (len & 0x3))
305 return -EINVAL;
306
307 data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
308 if (!data)
309 return -ENOMEM;
310
311 /* Offset into the region of memory which is being accessed
312 * MEM_EDC0 = 0
313 * MEM_EDC1 = 1
314 * MEM_MC = 2
315 */
316 memoffset = (mtype * (5 * 1024 * 1024));
317
318 /* Determine the PCIE_MEM_ACCESS_OFFSET */
319 addr = addr + memoffset;
320
321 /*
322 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
323 * at a time so we need to round down the start and round up the end.
324 * We'll start copying out of the first line at (addr - start) a word
325 * at a time.
326 */
327 start = addr & ~(MEMWIN0_APERTURE-1);
328 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
329 offset = (addr - start)/sizeof(__be32);
330
331 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
332 /*
333 * If we're writing, copy the data from the caller's memory
334 * buffer
335 */
336 if (!dir) {
337 /*
338 * If we're doing a partial write, then we need to do
339 * a read-modify-write ...
340 */
341 if (offset || len < MEMWIN0_APERTURE) {
342 ret = csio_mem_win_rw(hw, pos, data, 1);
343 if (ret) {
344 kfree(data);
345 return ret;
346 }
347 }
348 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
349 len > 0) {
350 data[offset++] = *buf++;
351 len -= sizeof(__be32);
352 }
353 }
354
355 /*
356 * Transfer a block of memory and bail if there's an error.
357 */
358 ret = csio_mem_win_rw(hw, pos, data, dir);
359 if (ret) {
360 kfree(data);
361 return ret;
362 }
363
364 /*
365 * If we're reading, copy the data into the caller's memory
366 * buffer.
367 */
368 if (dir)
369 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
370 len > 0) {
371 *buf++ = data[offset++];
372 len -= sizeof(__be32);
373 }
374 }
375
376 kfree(data);
377
378 return 0;
379}
380
381static int 209static int
382csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 210csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
383{ 211{
384 return csio_memory_rw(hw, mtype, addr, len, buf, 0); 212 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
213 addr, len, buf, 0);
385} 214}
386 215
387/* 216/*
388 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 217 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
389 */ 218 */
390#define EEPROM_MAX_RD_POLL 40 219#define EEPROM_MAX_RD_POLL 40
391#define EEPROM_MAX_WR_POLL 6 220#define EEPROM_MAX_WR_POLL 6
392#define EEPROM_STAT_ADDR 0x7bfc 221#define EEPROM_STAT_ADDR 0x7bfc
393#define VPD_BASE 0x400 222#define VPD_BASE 0x400
394#define VPD_BASE_OLD 0 223#define VPD_BASE_OLD 0
395#define VPD_LEN 512 224#define VPD_LEN 1024
396#define VPD_INFO_FLD_HDR_SIZE 3 225#define VPD_INFO_FLD_HDR_SIZE 3
397 226
398/* 227/*
@@ -817,23 +646,6 @@ out:
817 return 0; 646 return 0;
818} 647}
819 648
820/*
821 * csio_hw_flash_cfg_addr - return the address of the flash
822 * configuration file
823 * @hw: the HW module
824 *
825 * Return the address within the flash where the Firmware Configuration
826 * File is stored.
827 */
828static unsigned int
829csio_hw_flash_cfg_addr(struct csio_hw *hw)
830{
831 if (hw->params.sf_size == 0x100000)
832 return FPGA_FLASH_CFG_OFFSET;
833 else
834 return FLASH_CFG_OFFSET;
835}
836
837static void 649static void
838csio_hw_print_fw_version(struct csio_hw *hw, char *str) 650csio_hw_print_fw_version(struct csio_hw *hw, char *str)
839{ 651{
@@ -898,13 +710,13 @@ csio_hw_check_fw_version(struct csio_hw *hw)
898 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); 710 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
899 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); 711 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
900 712
901 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ 713 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
902 csio_err(hw, "card FW has major version %u, driver wants %u\n", 714 csio_err(hw, "card FW has major version %u, driver wants %u\n",
903 major, FW_VERSION_MAJOR); 715 major, FW_VERSION_MAJOR(hw));
904 return -EINVAL; 716 return -EINVAL;
905 } 717 }
906 718
907 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) 719 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
908 return 0; /* perfect match */ 720 return 0; /* perfect match */
909 721
910 /* Minor/micro version mismatch */ 722 /* Minor/micro version mismatch */
@@ -1044,7 +856,7 @@ static void
1044csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) 856csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
1045{ 857{
1046 uint16_t val; 858 uint16_t val;
1047 uint32_t pcie_cap; 859 int pcie_cap;
1048 860
1049 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { 861 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
1050 pci_read_config_word(hw->pdev, 862 pci_read_config_word(hw->pdev,
@@ -1056,84 +868,6 @@ csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
1056 } 868 }
1057} 869}
1058 870
1059
1060/*
1061 * Return the specified PCI-E Configuration Space register from our Physical
1062 * Function. We try first via a Firmware LDST Command since we prefer to let
1063 * the firmware own all of these registers, but if that fails we go for it
1064 * directly ourselves.
1065 */
1066static uint32_t
1067csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
1068{
1069 u32 val = 0;
1070 struct csio_mb *mbp;
1071 int rv;
1072 struct fw_ldst_cmd *ldst_cmd;
1073
1074 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1075 if (!mbp) {
1076 CSIO_INC_STATS(hw, n_err_nomem);
1077 pci_read_config_dword(hw->pdev, reg, &val);
1078 return val;
1079 }
1080
1081 csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
1082
1083 rv = csio_mb_issue(hw, mbp);
1084
1085 /*
1086 * If the LDST Command suucceeded, exctract the returned register
1087 * value. Otherwise read it directly ourself.
1088 */
1089 if (rv == 0) {
1090 ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
1091 val = ntohl(ldst_cmd->u.pcie.data[0]);
1092 } else
1093 pci_read_config_dword(hw->pdev, reg, &val);
1094
1095 mempool_free(mbp, hw->mb_mempool);
1096
1097 return val;
1098} /* csio_read_pcie_cfg4 */
1099
1100static int
1101csio_hw_set_mem_win(struct csio_hw *hw)
1102{
1103 u32 bar0;
1104
1105 /*
1106 * Truncation intentional: we only read the bottom 32-bits of the
1107 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
1108 * read BAR0 instead of using pci_resource_start() because we could be
1109 * operating from within a Virtual Machine which is trapping our
1110 * accesses to our Configuration Space and we need to set up the PCI-E
1111 * Memory Window decoders with the actual addresses which will be
1112 * coming across the PCI-E link.
1113 */
1114 bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
1115 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
1116
1117 /*
1118 * Set up memory window for accessing adapter memory ranges. (Read
1119 * back MA register to ensure that changes propagate before we attempt
1120 * to use the new values.)
1121 */
1122 csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
1123 WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
1124 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
1125 csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
1126 WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
1127 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
1128 csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
1129 WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
1130 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
1131 csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
1132 return 0;
1133} /* csio_hw_set_mem_win */
1134
1135
1136
1137/*****************************************************************************/ 871/*****************************************************************************/
1138/* HW State machine assists */ 872/* HW State machine assists */
1139/*****************************************************************************/ 873/*****************************************************************************/
@@ -1234,7 +968,9 @@ retry:
1234 for (;;) { 968 for (;;) {
1235 uint32_t pcie_fw; 969 uint32_t pcie_fw;
1236 970
971 spin_unlock_irq(&hw->lock);
1237 msleep(50); 972 msleep(50);
973 spin_lock_irq(&hw->lock);
1238 waiting -= 50; 974 waiting -= 50;
1239 975
1240 /* 976 /*
@@ -2121,9 +1857,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
2121 uint32_t *cfg_data; 1857 uint32_t *cfg_data;
2122 int value_to_add = 0; 1858 int value_to_add = 0;
2123 1859
2124 if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) { 1860 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
2125 csio_err(hw, "could not find config file " CSIO_CF_FNAME 1861 csio_err(hw, "could not find config file %s, err: %d\n",
2126 ",err: %d\n", ret); 1862 CSIO_CF_FNAME(hw), ret);
2127 return -ENOENT; 1863 return -ENOENT;
2128 } 1864 }
2129 1865
@@ -2147,9 +1883,24 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
2147 1883
2148 ret = csio_memory_write(hw, mtype, maddr, 1884 ret = csio_memory_write(hw, mtype, maddr,
2149 cf->size + value_to_add, cfg_data); 1885 cf->size + value_to_add, cfg_data);
1886
1887 if ((ret == 0) && (value_to_add != 0)) {
1888 union {
1889 u32 word;
1890 char buf[4];
1891 } last;
1892 size_t size = cf->size & ~0x3;
1893 int i;
1894
1895 last.word = cfg_data[size >> 2];
1896 for (i = value_to_add; i < 4; i++)
1897 last.buf[i] = 0;
1898 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
1899 }
2150 if (ret == 0) { 1900 if (ret == 0) {
2151 csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); 1901 csio_info(hw, "config file upgraded to %s\n",
2152 strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); 1902 CSIO_CF_FNAME(hw));
1903 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
2153 } 1904 }
2154 1905
2155leave: 1906leave:
@@ -2179,7 +1930,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
2179{ 1930{
2180 unsigned int mtype, maddr; 1931 unsigned int mtype, maddr;
2181 int rv; 1932 int rv;
2182 uint32_t finiver, finicsum, cfcsum; 1933 uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
2183 int using_flash; 1934 int using_flash;
2184 char path[64]; 1935 char path[64];
2185 1936
@@ -2207,7 +1958,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
2207 * config file from flash. 1958 * config file from flash.
2208 */ 1959 */
2209 mtype = FW_MEMTYPE_CF_FLASH; 1960 mtype = FW_MEMTYPE_CF_FLASH;
2210 maddr = csio_hw_flash_cfg_addr(hw); 1961 maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
2211 using_flash = 1; 1962 using_flash = 1;
2212 } else { 1963 } else {
2213 /* 1964 /*
@@ -2346,30 +2097,32 @@ csio_hw_flash_fw(struct csio_hw *hw)
2346 struct pci_dev *pci_dev = hw->pdev; 2097 struct pci_dev *pci_dev = hw->pdev;
2347 struct device *dev = &pci_dev->dev ; 2098 struct device *dev = &pci_dev->dev ;
2348 2099
2349 if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) { 2100 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
2350 csio_err(hw, "could not find firmware image " CSIO_FW_FNAME 2101 csio_err(hw, "could not find firmware image %s, err: %d\n",
2351 ",err: %d\n", ret); 2102 CSIO_FW_FNAME(hw), ret);
2352 return -EINVAL; 2103 return -EINVAL;
2353 } 2104 }
2354 2105
2355 hdr = (const struct fw_hdr *)fw->data; 2106 hdr = (const struct fw_hdr *)fw->data;
2356 fw_ver = ntohl(hdr->fw_ver); 2107 fw_ver = ntohl(hdr->fw_ver);
2357 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) 2108 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
2358 return -EINVAL; /* wrong major version, won't do */ 2109 return -EINVAL; /* wrong major version, won't do */
2359 2110
2360 /* 2111 /*
2361 * If the flash FW is unusable or we found something newer, load it. 2112 * If the flash FW is unusable or we found something newer, load it.
2362 */ 2113 */
2363 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || 2114 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
2364 fw_ver > hw->fwrev) { 2115 fw_ver > hw->fwrev) {
2365 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2116 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
2366 /*force=*/false); 2117 /*force=*/false);
2367 if (!ret) 2118 if (!ret)
2368 csio_info(hw, "firmware upgraded to version %pI4 from " 2119 csio_info(hw,
2369 CSIO_FW_FNAME "\n", &hdr->fw_ver); 2120 "firmware upgraded to version %pI4 from %s\n",
2121 &hdr->fw_ver, CSIO_FW_FNAME(hw));
2370 else 2122 else
2371 csio_err(hw, "firmware upgrade failed! err=%d\n", ret); 2123 csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
2372 } 2124 } else
2125 ret = -EINVAL;
2373 2126
2374 release_firmware(fw); 2127 release_firmware(fw);
2375 2128
@@ -2410,7 +2163,7 @@ csio_hw_configure(struct csio_hw *hw)
2410 /* Set pci completion timeout value to 4 seconds. */ 2163 /* Set pci completion timeout value to 4 seconds. */
2411 csio_set_pcie_completion_timeout(hw, 0xd); 2164 csio_set_pcie_completion_timeout(hw, 0xd);
2412 2165
2413 csio_hw_set_mem_win(hw); 2166 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
2414 2167
2415 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2168 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2416 if (rv != 0) 2169 if (rv != 0)
@@ -2478,6 +2231,8 @@ csio_hw_configure(struct csio_hw *hw)
2478 } else { 2231 } else {
2479 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2232 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2480 2233
2234 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2235
2481 /* device parameters */ 2236 /* device parameters */
2482 rv = csio_get_device_params(hw); 2237 rv = csio_get_device_params(hw);
2483 if (rv != 0) 2238 if (rv != 0)
@@ -2651,7 +2406,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
2651 2406
2652} 2407}
2653 2408
2654static void 2409void
2655csio_hw_fatal_err(struct csio_hw *hw) 2410csio_hw_fatal_err(struct csio_hw *hw)
2656{ 2411{
2657 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2412 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
@@ -2990,14 +2745,6 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2990/* END: HW SM */ 2745/* END: HW SM */
2991/*****************************************************************************/ 2746/*****************************************************************************/
2992 2747
2993/* Slow path handlers */
2994struct intr_info {
2995 unsigned int mask; /* bits to check in interrupt status */
2996 const char *msg; /* message to print or NULL */
2997 short stat_idx; /* stat counter to increment or -1 */
2998 unsigned short fatal; /* whether the condition reported is fatal */
2999};
3000
3001/* 2748/*
3002 * csio_handle_intr_status - table driven interrupt handler 2749 * csio_handle_intr_status - table driven interrupt handler
3003 * @hw: HW instance 2750 * @hw: HW instance
@@ -3011,7 +2758,7 @@ struct intr_info {
3011 * by an entry specifying mask 0. Returns the number of fatal interrupt 2758 * by an entry specifying mask 0. Returns the number of fatal interrupt
3012 * conditions. 2759 * conditions.
3013 */ 2760 */
3014static int 2761int
3015csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2762csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3016 const struct intr_info *acts) 2763 const struct intr_info *acts)
3017{ 2764{
@@ -3038,80 +2785,6 @@ csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3038} 2785}
3039 2786
3040/* 2787/*
3041 * Interrupt handler for the PCIE module.
3042 */
3043static void
3044csio_pcie_intr_handler(struct csio_hw *hw)
3045{
3046 static struct intr_info sysbus_intr_info[] = {
3047 { RNPP, "RXNP array parity error", -1, 1 },
3048 { RPCP, "RXPC array parity error", -1, 1 },
3049 { RCIP, "RXCIF array parity error", -1, 1 },
3050 { RCCP, "Rx completions control array parity error", -1, 1 },
3051 { RFTP, "RXFT array parity error", -1, 1 },
3052 { 0, NULL, 0, 0 }
3053 };
3054 static struct intr_info pcie_port_intr_info[] = {
3055 { TPCP, "TXPC array parity error", -1, 1 },
3056 { TNPP, "TXNP array parity error", -1, 1 },
3057 { TFTP, "TXFT array parity error", -1, 1 },
3058 { TCAP, "TXCA array parity error", -1, 1 },
3059 { TCIP, "TXCIF array parity error", -1, 1 },
3060 { RCAP, "RXCA array parity error", -1, 1 },
3061 { OTDD, "outbound request TLP discarded", -1, 1 },
3062 { RDPE, "Rx data parity error", -1, 1 },
3063 { TDUE, "Tx uncorrectable data error", -1, 1 },
3064 { 0, NULL, 0, 0 }
3065 };
3066 static struct intr_info pcie_intr_info[] = {
3067 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3068 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3069 { MSIDATAPERR, "MSI data parity error", -1, 1 },
3070 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3071 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3072 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3073 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3074 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3075 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3076 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3077 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3078 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3079 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3080 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3081 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3082 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3083 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3084 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3085 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3086 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3087 { FIDPERR, "PCI FID parity error", -1, 1 },
3088 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3089 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
3090 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3091 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3092 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
3093 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
3094 { PCIESINT, "PCI core secondary fault", -1, 1 },
3095 { PCIEPINT, "PCI core primary fault", -1, 1 },
3096 { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3097 0 },
3098 { 0, NULL, 0, 0 }
3099 };
3100
3101 int fat;
3102
3103 fat = csio_handle_intr_status(hw,
3104 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3105 sysbus_intr_info) +
3106 csio_handle_intr_status(hw,
3107 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3108 pcie_port_intr_info) +
3109 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
3110 if (fat)
3111 csio_hw_fatal_err(hw);
3112}
3113
3114/*
3115 * TP interrupt handler. 2788 * TP interrupt handler.
3116 */ 2789 */
3117static void csio_tp_intr_handler(struct csio_hw *hw) 2790static void csio_tp_intr_handler(struct csio_hw *hw)
@@ -3517,7 +3190,7 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
3517 */ 3190 */
3518static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3191static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3519{ 3192{
3520 uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3193 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
3521 3194
3522 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3195 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
3523 if (!v) 3196 if (!v)
@@ -3527,7 +3200,7 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3527 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3200 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3528 if (v & RXFIFO_PRTY_ERR) 3201 if (v & RXFIFO_PRTY_ERR)
3529 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3202 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3530 csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3203 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
3531 csio_hw_fatal_err(hw); 3204 csio_hw_fatal_err(hw);
3532} 3205}
3533 3206
@@ -3596,7 +3269,7 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
3596 csio_xgmac_intr_handler(hw, 3); 3269 csio_xgmac_intr_handler(hw, 3);
3597 3270
3598 if (cause & PCIE) 3271 if (cause & PCIE)
3599 csio_pcie_intr_handler(hw); 3272 hw->chip_ops->chip_pcie_intr_handler(hw);
3600 3273
3601 if (cause & MC) 3274 if (cause & MC)
3602 csio_mem_intr_handler(hw, MEM_MC); 3275 csio_mem_intr_handler(hw, MEM_MC);
@@ -4257,6 +3930,7 @@ csio_hw_get_device_id(struct csio_hw *hw)
4257 &hw->params.pci.device_id); 3930 &hw->params.pci.device_id);
4258 3931
4259 csio_dev_id_cached(hw); 3932 csio_dev_id_cached(hw);
3933 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
4260 3934
4261} /* csio_hw_get_device_id */ 3935} /* csio_hw_get_device_id */
4262 3936
@@ -4275,19 +3949,21 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
4275 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3949 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
4276 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3950 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
4277 3951
4278 if (prot_type == CSIO_FPGA) { 3952 if (prot_type == CSIO_T4_FCOE_ASIC) {
3953 memcpy(hw->hw_ver,
3954 csio_t4_fcoe_adapters[adap_type].model_no, 16);
4279 memcpy(hw->model_desc, 3955 memcpy(hw->model_desc,
4280 csio_fcoe_adapters[13].description, 32); 3956 csio_t4_fcoe_adapters[adap_type].description,
4281 } else if (prot_type == CSIO_T4_FCOE_ASIC) { 3957 32);
3958 } else if (prot_type == CSIO_T5_FCOE_ASIC) {
4282 memcpy(hw->hw_ver, 3959 memcpy(hw->hw_ver,
4283 csio_fcoe_adapters[adap_type].model_no, 16); 3960 csio_t5_fcoe_adapters[adap_type].model_no, 16);
4284 memcpy(hw->model_desc, 3961 memcpy(hw->model_desc,
4285 csio_fcoe_adapters[adap_type].description, 32); 3962 csio_t5_fcoe_adapters[adap_type].description,
3963 32);
4286 } else { 3964 } else {
4287 char tempName[32] = "Chelsio FCoE Controller"; 3965 char tempName[32] = "Chelsio FCoE Controller";
4288 memcpy(hw->model_desc, tempName, 32); 3966 memcpy(hw->model_desc, tempName, 32);
4289
4290 CSIO_DB_ASSERT(0);
4291 } 3967 }
4292 } 3968 }
4293} /* csio_hw_set_description */ 3969} /* csio_hw_set_description */
@@ -4316,6 +3992,9 @@ csio_hw_init(struct csio_hw *hw)
4316 3992
4317 strcpy(hw->name, CSIO_HW_NAME); 3993 strcpy(hw->name, CSIO_HW_NAME);
4318 3994
3995 /* Initialize the HW chip ops with T4/T5 specific ops */
3996 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
3997
4319 /* Set the model & its description */ 3998 /* Set the model & its description */
4320 3999
4321 ven_id = hw->params.pci.vendor_id; 4000 ven_id = hw->params.pci.vendor_id;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9edcca4c71af..489fc095cb03 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -48,6 +48,7 @@
48#include <scsi/scsi_device.h> 48#include <scsi/scsi_device.h>
49#include <scsi/scsi_transport_fc.h> 49#include <scsi/scsi_transport_fc.h>
50 50
51#include "csio_hw_chip.h"
51#include "csio_wr.h" 52#include "csio_wr.h"
52#include "csio_mb.h" 53#include "csio_mb.h"
53#include "csio_scsi.h" 54#include "csio_scsi.h"
@@ -60,13 +61,6 @@
60 */ 61 */
61#define FW_HOSTERROR 255 62#define FW_HOSTERROR 255
62 63
63#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
64#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
65
66#define FW_VERSION_MAJOR 1
67#define FW_VERSION_MINOR 2
68#define FW_VERSION_MICRO 8
69
70#define CSIO_HW_NAME "Chelsio FCoE Adapter" 64#define CSIO_HW_NAME "Chelsio FCoE Adapter"
71#define CSIO_MAX_PFN 8 65#define CSIO_MAX_PFN 8
72#define CSIO_MAX_PPORTS 4 66#define CSIO_MAX_PPORTS 4
@@ -123,8 +117,6 @@ extern int csio_msi;
123#define CSIO_VENDOR_ID 0x1425 117#define CSIO_VENDOR_ID 0x1425
124#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 118#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
125#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF 119#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
126#define CSIO_FPGA 0xA000
127#define CSIO_T4_FCOE_ASIC 0x4600
128 120
129#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 121#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
130 EDC1 | LE | TP | MA | PM_TX | PM_RX | \ 122 EDC1 | LE | TP | MA | PM_TX | PM_RX | \
@@ -207,17 +199,6 @@ enum {
207 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ 199 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
208}; 200};
209 201
210enum { MEM_EDC0, MEM_EDC1, MEM_MC };
211
212enum {
213 MEMWIN0_APERTURE = 2048,
214 MEMWIN0_BASE = 0x1b800,
215 MEMWIN1_APERTURE = 32768,
216 MEMWIN1_BASE = 0x28000,
217 MEMWIN2_APERTURE = 65536,
218 MEMWIN2_BASE = 0x30000,
219};
220
221/* serial flash and firmware constants */ 202/* serial flash and firmware constants */
222enum { 203enum {
223 SF_ATTEMPTS = 10, /* max retries for SF operations */ 204 SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -239,9 +220,6 @@ enum {
239 FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/ 220 FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
240 FLASH_CFG_OFFSET = 0x1f0000, 221 FLASH_CFG_OFFSET = 0x1f0000,
241 FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE, 222 FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
242 FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
243 * at 1MB - 64KB */
244 FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
245}; 223};
246 224
247/* 225/*
@@ -259,6 +237,8 @@ enum {
259 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), 237 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
260 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), 238 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
261 239
240 /* Location of Firmware Configuration File in FLASH. */
241 FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
262}; 242};
263 243
264#undef FLASH_START 244#undef FLASH_START
@@ -310,7 +290,7 @@ struct csio_adap_desc {
310struct pci_params { 290struct pci_params {
311 uint16_t vendor_id; 291 uint16_t vendor_id;
312 uint16_t device_id; 292 uint16_t device_id;
313 uint32_t vpd_cap_addr; 293 int vpd_cap_addr;
314 uint16_t speed; 294 uint16_t speed;
315 uint8_t width; 295 uint8_t width;
316}; 296};
@@ -513,6 +493,7 @@ struct csio_hw {
513 uint32_t fwrev; 493 uint32_t fwrev;
514 uint32_t tp_vers; 494 uint32_t tp_vers;
515 char chip_ver; 495 char chip_ver;
496 uint16_t chip_id; /* Tells T4/T5 chip */
516 uint32_t cfg_finiver; 497 uint32_t cfg_finiver;
517 uint32_t cfg_finicsum; 498 uint32_t cfg_finicsum;
518 uint32_t cfg_cfcsum; 499 uint32_t cfg_cfcsum;
@@ -556,6 +537,9 @@ struct csio_hw {
556 */ 537 */
557 538
558 struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ 539 struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
540 struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific
541 * Operations
542 */
559 543
560 /* MSIX vectors */ 544 /* MSIX vectors */
561 struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; 545 struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
@@ -636,9 +620,16 @@ csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
636#define csio_dbg(__hw, __fmt, ...) 620#define csio_dbg(__hw, __fmt, ...)
637#endif 621#endif
638 622
623int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int,
624 int, int, uint32_t *);
625void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int,
626 unsigned int, unsigned int);
639int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); 627int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
640void csio_hw_intr_disable(struct csio_hw *); 628void csio_hw_intr_disable(struct csio_hw *);
641int csio_hw_slow_intr_handler(struct csio_hw *hw); 629int csio_hw_slow_intr_handler(struct csio_hw *);
630int csio_handle_intr_status(struct csio_hw *, unsigned int,
631 const struct intr_info *);
632
642int csio_hw_start(struct csio_hw *); 633int csio_hw_start(struct csio_hw *);
643int csio_hw_stop(struct csio_hw *); 634int csio_hw_stop(struct csio_hw *);
644int csio_hw_reset(struct csio_hw *); 635int csio_hw_reset(struct csio_hw *);
@@ -647,19 +638,17 @@ int csio_is_hw_removing(struct csio_hw *);
647 638
648int csio_fwevtq_handler(struct csio_hw *); 639int csio_fwevtq_handler(struct csio_hw *);
649void csio_evtq_worker(struct work_struct *); 640void csio_evtq_worker(struct work_struct *);
650int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, 641int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t);
651 void *evt_msg, uint16_t len);
652void csio_evtq_flush(struct csio_hw *hw); 642void csio_evtq_flush(struct csio_hw *hw);
653 643
654int csio_request_irqs(struct csio_hw *); 644int csio_request_irqs(struct csio_hw *);
655void csio_intr_enable(struct csio_hw *); 645void csio_intr_enable(struct csio_hw *);
656void csio_intr_disable(struct csio_hw *, bool); 646void csio_intr_disable(struct csio_hw *, bool);
647void csio_hw_fatal_err(struct csio_hw *);
657 648
658struct csio_lnode *csio_lnode_alloc(struct csio_hw *); 649struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
659int csio_config_queues(struct csio_hw *); 650int csio_config_queues(struct csio_hw *);
660 651
661int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
662int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
663int csio_hw_init(struct csio_hw *); 652int csio_hw_init(struct csio_hw *);
664void csio_hw_exit(struct csio_hw *); 653void csio_hw_exit(struct csio_hw *);
665#endif /* ifndef __CSIO_HW_H__ */ 654#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
new file mode 100644
index 000000000000..bca0de61ae80
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -0,0 +1,175 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef __CSIO_HW_CHIP_H__
35#define __CSIO_HW_CHIP_H__
36
37#include "csio_defs.h"
38
39/* FCoE device IDs for T4 */
40#define CSIO_DEVID_T440DBG_FCOE 0x4600
41#define CSIO_DEVID_T420CR_FCOE 0x4601
42#define CSIO_DEVID_T422CR_FCOE 0x4602
43#define CSIO_DEVID_T440CR_FCOE 0x4603
44#define CSIO_DEVID_T420BCH_FCOE 0x4604
45#define CSIO_DEVID_T440BCH_FCOE 0x4605
46#define CSIO_DEVID_T440CH_FCOE 0x4606
47#define CSIO_DEVID_T420SO_FCOE 0x4607
48#define CSIO_DEVID_T420CX_FCOE 0x4608
49#define CSIO_DEVID_T420BT_FCOE 0x4609
50#define CSIO_DEVID_T404BT_FCOE 0x460A
51#define CSIO_DEVID_B420_FCOE 0x460B
52#define CSIO_DEVID_B404_FCOE 0x460C
53#define CSIO_DEVID_T480CR_FCOE 0x460D
54#define CSIO_DEVID_T440LPCR_FCOE 0x460E
55#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
56#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
57#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
58#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
59#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
60#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
61#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
62#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
63#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
64
65/* FCoE device IDs for T5 */
66#define CSIO_DEVID_T580DBG_FCOE 0x5600
67#define CSIO_DEVID_T520CR_FCOE 0x5601
68#define CSIO_DEVID_T522CR_FCOE 0x5602
69#define CSIO_DEVID_T540CR_FCOE 0x5603
70#define CSIO_DEVID_T520BCH_FCOE 0x5604
71#define CSIO_DEVID_T540BCH_FCOE 0x5605
72#define CSIO_DEVID_T540CH_FCOE 0x5606
73#define CSIO_DEVID_T520SO_FCOE 0x5607
74#define CSIO_DEVID_T520CX_FCOE 0x5608
75#define CSIO_DEVID_T520BT_FCOE 0x5609
76#define CSIO_DEVID_T504BT_FCOE 0x560A
77#define CSIO_DEVID_B520_FCOE 0x560B
78#define CSIO_DEVID_B504_FCOE 0x560C
79#define CSIO_DEVID_T580CR2_FCOE 0x560D
80#define CSIO_DEVID_T540LPCR_FCOE 0x560E
81#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
82#define CSIO_DEVID_T580LPCR_FCOE 0x5610
83#define CSIO_DEVID_T520LLCR_FCOE 0x5611
84#define CSIO_DEVID_T560CR_FCOE 0x5612
85#define CSIO_DEVID_T580CR_FCOE 0x5613
86
87/* Define MACRO values */
88#define CSIO_HW_T4 0x4000
89#define CSIO_T4_FCOE_ASIC 0x4600
90#define CSIO_HW_T5 0x5000
91#define CSIO_T5_FCOE_ASIC 0x5600
92#define CSIO_HW_CHIP_MASK 0xF000
93#define T4_REGMAP_SIZE (160 * 1024)
94#define T5_REGMAP_SIZE (332 * 1024)
95#define FW_FNAME_T4 "cxgb4/t4fw.bin"
96#define FW_FNAME_T5 "cxgb4/t5fw.bin"
97#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
98#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
99
100/* Define static functions */
101static inline int csio_is_t4(uint16_t chip)
102{
103 return (chip == CSIO_HW_T4);
104}
105
106static inline int csio_is_t5(uint16_t chip)
107{
108 return (chip == CSIO_HW_T5);
109}
110
111/* Define MACRO DEFINITIONS */
112#define CSIO_DEVICE(devid, idx) \
113 { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
114
115#define CSIO_HW_PIDX(hw, index) \
116 (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
117 (PIDX_T5(index) | DBTYPE(1U)))
118
119#define CSIO_HW_LP_INT_THRESH(hw, val) \
120 (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
121 (V_LP_INT_THRESH_T5(val)))
122
123#define CSIO_HW_M_LP_INT_THRESH(hw) \
124 (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
125
126#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
127 (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
128 (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
129
130#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
131#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
132#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
133
134#define CSIO_FW_FNAME(hw) \
135 (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
136
137#define CSIO_CF_FNAME(hw) \
138 (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
139
140/* Declare ENUMS */
141enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
142
143enum {
144 MEMWIN_APERTURE = 2048,
145 MEMWIN_BASE = 0x1b800,
146 MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */
147};
148
149/* Slow path handlers */
150struct intr_info {
151 unsigned int mask; /* bits to check in interrupt status */
152 const char *msg; /* message to print or NULL */
153 short stat_idx; /* stat counter to increment or -1 */
154 unsigned short fatal; /* whether the condition reported is fatal */
155};
156
157/* T4/T5 Chip specific ops */
158struct csio_hw;
159struct csio_hw_chip_ops {
160 int (*chip_set_mem_win)(struct csio_hw *, uint32_t);
161 void (*chip_pcie_intr_handler)(struct csio_hw *);
162 uint32_t (*chip_flash_cfg_addr)(struct csio_hw *);
163 int (*chip_mc_read)(struct csio_hw *, int, uint32_t,
164 __be32 *, uint64_t *);
165 int (*chip_edc_read)(struct csio_hw *, int, uint32_t,
166 __be32 *, uint64_t *);
167 int (*chip_memory_rw)(struct csio_hw *, u32, int, u32,
168 u32, uint32_t *, int);
169 void (*chip_dfs_create_ext_mem)(struct csio_hw *);
170};
171
172extern struct csio_hw_chip_ops t4_ops;
173extern struct csio_hw_chip_ops t5_ops;
174
175#endif /* #ifndef __CSIO_HW_CHIP_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
new file mode 100644
index 000000000000..89ecbac5478f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -0,0 +1,403 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31
32#include "csio_hw.h"
33#include "csio_init.h"
34
35/*
36 * Return the specified PCI-E Configuration Space register from our Physical
37 * Function. We try first via a Firmware LDST Command since we prefer to let
38 * the firmware own all of these registers, but if that fails we go for it
39 * directly ourselves.
40 */
41static uint32_t
42csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
43{
44 u32 val = 0;
45 struct csio_mb *mbp;
46 int rv;
47 struct fw_ldst_cmd *ldst_cmd;
48
49 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
50 if (!mbp) {
51 CSIO_INC_STATS(hw, n_err_nomem);
52 pci_read_config_dword(hw->pdev, reg, &val);
53 return val;
54 }
55
56 csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
57 rv = csio_mb_issue(hw, mbp);
58
59 /*
60 * If the LDST Command suucceeded, exctract the returned register
61 * value. Otherwise read it directly ourself.
62 */
63 if (rv == 0) {
64 ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
65 val = ntohl(ldst_cmd->u.pcie.data[0]);
66 } else
67 pci_read_config_dword(hw->pdev, reg, &val);
68
69 mempool_free(mbp, hw->mb_mempool);
70
71 return val;
72}
73
74static int
75csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
76{
77 u32 bar0;
78 u32 mem_win_base;
79
80 /*
81 * Truncation intentional: we only read the bottom 32-bits of the
82 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
83 * read BAR0 instead of using pci_resource_start() because we could be
84 * operating from within a Virtual Machine which is trapping our
85 * accesses to our Configuration Space and we need to set up the PCI-E
86 * Memory Window decoders with the actual addresses which will be
87 * coming across the PCI-E link.
88 */
89 bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
90 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
91
92 mem_win_base = bar0 + MEMWIN_BASE;
93
94 /*
95 * Set up memory window for accessing adapter memory ranges. (Read
96 * back MA register to ensure that changes propagate before we attempt
97 * to use the new values.)
98 */
99 csio_wr_reg32(hw, mem_win_base | BIR(0) |
100 WINDOW(ilog2(MEMWIN_APERTURE) - 10),
101 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
102 csio_rd_reg32(hw,
103 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
104 return 0;
105}
106
107/*
108 * Interrupt handler for the PCIE module.
109 */
110static void
111csio_t4_pcie_intr_handler(struct csio_hw *hw)
112{
113 static struct intr_info sysbus_intr_info[] = {
114 { RNPP, "RXNP array parity error", -1, 1 },
115 { RPCP, "RXPC array parity error", -1, 1 },
116 { RCIP, "RXCIF array parity error", -1, 1 },
117 { RCCP, "Rx completions control array parity error", -1, 1 },
118 { RFTP, "RXFT array parity error", -1, 1 },
119 { 0, NULL, 0, 0 }
120 };
121 static struct intr_info pcie_port_intr_info[] = {
122 { TPCP, "TXPC array parity error", -1, 1 },
123 { TNPP, "TXNP array parity error", -1, 1 },
124 { TFTP, "TXFT array parity error", -1, 1 },
125 { TCAP, "TXCA array parity error", -1, 1 },
126 { TCIP, "TXCIF array parity error", -1, 1 },
127 { RCAP, "RXCA array parity error", -1, 1 },
128 { OTDD, "outbound request TLP discarded", -1, 1 },
129 { RDPE, "Rx data parity error", -1, 1 },
130 { TDUE, "Tx uncorrectable data error", -1, 1 },
131 { 0, NULL, 0, 0 }
132 };
133
134 static struct intr_info pcie_intr_info[] = {
135 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
136 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
137 { MSIDATAPERR, "MSI data parity error", -1, 1 },
138 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
139 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
140 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
141 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
142 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
143 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
144 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
145 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
146 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
147 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
148 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
149 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
150 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
151 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
152 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
153 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
154 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
155 { FIDPERR, "PCI FID parity error", -1, 1 },
156 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
157 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
158 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
159 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
160 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
161 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
162 { PCIESINT, "PCI core secondary fault", -1, 1 },
163 { PCIEPINT, "PCI core primary fault", -1, 1 },
164 { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
165 0 },
166 { 0, NULL, 0, 0 }
167 };
168
169 int fat;
170 fat = csio_handle_intr_status(hw,
171 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
172 sysbus_intr_info) +
173 csio_handle_intr_status(hw,
174 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
175 pcie_port_intr_info) +
176 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
177 if (fat)
178 csio_hw_fatal_err(hw);
179}
180
181/*
182 * csio_t4_flash_cfg_addr - return the address of the flash configuration file
183 * @hw: the HW module
184 *
185 * Return the address within the flash where the Firmware Configuration
186 * File is stored.
187 */
188static unsigned int
189csio_t4_flash_cfg_addr(struct csio_hw *hw)
190{
191 return FLASH_CFG_OFFSET;
192}
193
194/*
195 * csio_t4_mc_read - read from MC through backdoor accesses
196 * @hw: the hw module
197 * @idx: not used for T4 adapter
198 * @addr: address of first byte requested
199 * @data: 64 bytes of data containing the requested address
200 * @ecc: where to store the corresponding 64-bit ECC word
201 *
202 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
203 * that covers the requested address @addr. If @parity is not %NULL it
204 * is assigned the 64-bit ECC word for the read data.
205 */
206static int
207csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
208 uint64_t *ecc)
209{
210 int i;
211
212 if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
213 return -EBUSY;
214 csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
215 csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
216 csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
217 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
218 MC_BIST_CMD);
219 i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
220 0, 10, 1, NULL);
221 if (i)
222 return i;
223
224#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
225
226 for (i = 15; i >= 0; i--)
227 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
228 if (ecc)
229 *ecc = csio_rd_reg64(hw, MC_DATA(16));
230#undef MC_DATA
231 return 0;
232}
233
234/*
235 * csio_t4_edc_read - read from EDC through backdoor accesses
236 * @hw: the hw module
237 * @idx: which EDC to access
238 * @addr: address of first byte requested
239 * @data: 64 bytes of data containing the requested address
240 * @ecc: where to store the corresponding 64-bit ECC word
241 *
242 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
243 * that covers the requested address @addr. If @parity is not %NULL it
244 * is assigned the 64-bit ECC word for the read data.
245 */
246static int
247csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
248 uint64_t *ecc)
249{
250 int i;
251
252 idx *= EDC_STRIDE;
253 if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
254 return -EBUSY;
255 csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
256 csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
257 csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
258 csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
259 EDC_BIST_CMD + idx);
260 i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
261 0, 10, 1, NULL);
262 if (i)
263 return i;
264
265#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
266
267 for (i = 15; i >= 0; i--)
268 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
269 if (ecc)
270 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
271#undef EDC_DATA
272 return 0;
273}
274
275/*
276 * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
277 * @hw: the csio_hw
278 * @win: PCI-E memory Window to use
279 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
280 * @addr: address within indicated memory type
281 * @len: amount of memory to transfer
282 * @buf: host memory buffer
283 * @dir: direction of transfer 1 => read, 0 => write
284 *
285 * Reads/writes an [almost] arbitrary memory region in the firmware: the
286 * firmware memory address, length and host buffer must be aligned on
287 * 32-bit boudaries. The memory is transferred as a raw byte sequence
288 * from/to the firmware's memory. If this memory contains data
289 * structures which contain multi-byte integers, it's the callers
290 * responsibility to perform appropriate byte order conversions.
291 */
292static int
293csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
294 u32 len, uint32_t *buf, int dir)
295{
296 u32 pos, start, offset, memoffset, bar0;
297 u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
298
299 /*
300 * Argument sanity checks ...
301 */
302 if ((addr & 0x3) || (len & 0x3))
303 return -EINVAL;
304
305 /* Offset into the region of memory which is being accessed
306 * MEM_EDC0 = 0
307 * MEM_EDC1 = 1
308 * MEM_MC = 2 -- T4
309 */
310 edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
311 if (mtype != MEM_MC1)
312 memoffset = (mtype * (edc_size * 1024 * 1024));
313 else {
314 mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
315 MA_EXT_MEMORY_BAR));
316 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
317 }
318
319 /* Determine the PCIE_MEM_ACCESS_OFFSET */
320 addr = addr + memoffset;
321
322 /*
323 * Each PCI-E Memory Window is programmed with a window size -- or
324 * "aperture" -- which controls the granularity of its mapping onto
325 * adapter memory. We need to grab that aperture in order to know
326 * how to use the specified window. The window is also programmed
327 * with the base address of the Memory Window in BAR0's address
328 * space. For T4 this is an absolute PCI-E Bus Address. For T5
329 * the address is relative to BAR0.
330 */
331 mem_reg = csio_rd_reg32(hw,
332 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
333 mem_aperture = 1 << (WINDOW(mem_reg) + 10);
334 mem_base = GET_PCIEOFST(mem_reg) << 10;
335
336 bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
337 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
338 mem_base -= bar0;
339
340 start = addr & ~(mem_aperture-1);
341 offset = addr - start;
342
343 csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
344 mem_reg, mem_aperture);
345 csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
346 mem_base, memoffset);
347 csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
348 bar0, start, offset);
349 csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
350 mtype, addr, len);
351
352 for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
353 /*
354 * Move PCI-E Memory Window to our current transfer
355 * position. Read it back to ensure that changes propagate
356 * before we attempt to use the new value.
357 */
358 csio_wr_reg32(hw, pos,
359 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
360 csio_rd_reg32(hw,
361 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
362
363 while (offset < mem_aperture && len > 0) {
364 if (dir)
365 *buf++ = csio_rd_reg32(hw, mem_base + offset);
366 else
367 csio_wr_reg32(hw, *buf++, mem_base + offset);
368
369 offset += sizeof(__be32);
370 len -= sizeof(__be32);
371 }
372 }
373 return 0;
374}
375
376/*
377 * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values
378 * @hw: the csio_hw
379 *
380 * This function creates files in the debugfs with external memory region MC.
381 */
382static void
383csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
384{
385 u32 size;
386 int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
387 if (i & EXT_MEM_ENABLE) {
388 size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
389 csio_add_debugfs_mem(hw, "mc", MEM_MC,
390 EXT_MEM_SIZE_GET(size));
391 }
392}
393
394/* T4 adapter specific function */
395struct csio_hw_chip_ops t4_ops = {
396 .chip_set_mem_win = csio_t4_set_mem_win,
397 .chip_pcie_intr_handler = csio_t4_pcie_intr_handler,
398 .chip_flash_cfg_addr = csio_t4_flash_cfg_addr,
399 .chip_mc_read = csio_t4_mc_read,
400 .chip_edc_read = csio_t4_edc_read,
401 .chip_memory_rw = csio_t4_memory_rw,
402 .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem,
403};
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
new file mode 100644
index 000000000000..27745c170c24
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -0,0 +1,397 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "csio_hw.h"
35#include "csio_init.h"
36
37static int
38csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
39{
40 u32 mem_win_base;
41 /*
42 * Truncation intentional: we only read the bottom 32-bits of the
43 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
44 * read BAR0 instead of using pci_resource_start() because we could be
45 * operating from within a Virtual Machine which is trapping our
46 * accesses to our Configuration Space and we need to set up the PCI-E
47 * Memory Window decoders with the actual addresses which will be
48 * coming across the PCI-E link.
49 */
50
51 /* For T5, only relative offset inside the PCIe BAR is passed */
52 mem_win_base = MEMWIN_BASE;
53
54 /*
55 * Set up memory window for accessing adapter memory ranges. (Read
56 * back MA register to ensure that changes propagate before we attempt
57 * to use the new values.)
58 */
59 csio_wr_reg32(hw, mem_win_base | BIR(0) |
60 WINDOW(ilog2(MEMWIN_APERTURE) - 10),
61 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
62 csio_rd_reg32(hw,
63 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
64
65 return 0;
66}
67
68/*
69 * Interrupt handler for the PCIE module.
70 */
71static void
72csio_t5_pcie_intr_handler(struct csio_hw *hw)
73{
74 static struct intr_info sysbus_intr_info[] = {
75 { RNPP, "RXNP array parity error", -1, 1 },
76 { RPCP, "RXPC array parity error", -1, 1 },
77 { RCIP, "RXCIF array parity error", -1, 1 },
78 { RCCP, "Rx completions control array parity error", -1, 1 },
79 { RFTP, "RXFT array parity error", -1, 1 },
80 { 0, NULL, 0, 0 }
81 };
82 static struct intr_info pcie_port_intr_info[] = {
83 { TPCP, "TXPC array parity error", -1, 1 },
84 { TNPP, "TXNP array parity error", -1, 1 },
85 { TFTP, "TXFT array parity error", -1, 1 },
86 { TCAP, "TXCA array parity error", -1, 1 },
87 { TCIP, "TXCIF array parity error", -1, 1 },
88 { RCAP, "RXCA array parity error", -1, 1 },
89 { OTDD, "outbound request TLP discarded", -1, 1 },
90 { RDPE, "Rx data parity error", -1, 1 },
91 { TDUE, "Tx uncorrectable data error", -1, 1 },
92 { 0, NULL, 0, 0 }
93 };
94
95 static struct intr_info pcie_intr_info[] = {
96 { MSTGRPPERR, "Master Response Read Queue parity error",
97 -1, 1 },
98 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
99 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
100 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
101 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
102 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
103 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
104 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
105 -1, 1 },
106 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
107 -1, 1 },
108 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
109 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
110 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
111 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
112 { DREQWRPERR, "PCI DMA channel write request parity error",
113 -1, 1 },
114 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
115 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
116 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
117 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
118 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
119 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
120 { FIDPERR, "PCI FID parity error", -1, 1 },
121 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
122 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
123 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
124 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
125 -1, 1 },
126 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
127 -1, 1 },
128 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
129 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
130 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
131 { READRSPERR, "Outbound read error", -1, 0 },
132 { 0, NULL, 0, 0 }
133 };
134
135 int fat;
136 fat = csio_handle_intr_status(hw,
137 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
138 sysbus_intr_info) +
139 csio_handle_intr_status(hw,
140 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
141 pcie_port_intr_info) +
142 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
143 if (fat)
144 csio_hw_fatal_err(hw);
145}
146
147/*
148 * csio_t5_flash_cfg_addr - return the address of the flash configuration file
149 * @hw: the HW module
150 *
151 * Return the address within the flash where the Firmware Configuration
152 * File is stored.
153 */
154static unsigned int
155csio_t5_flash_cfg_addr(struct csio_hw *hw)
156{
157 return FLASH_CFG_START;
158}
159
160/*
161 * csio_t5_mc_read - read from MC through backdoor accesses
162 * @hw: the hw module
163 * @idx: index to the register
164 * @addr: address of first byte requested
165 * @data: 64 bytes of data containing the requested address
166 * @ecc: where to store the corresponding 64-bit ECC word
167 *
168 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
169 * that covers the requested address @addr. If @parity is not %NULL it
170 * is assigned the 64-bit ECC word for the read data.
171 */
172static int
173csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
174 uint64_t *ecc)
175{
176 int i;
177 uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
178 uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
179
180 mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
181 mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
182 mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
183 mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
184 mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
185
186 if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
187 return -EBUSY;
188 csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
189 csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
190 csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
191 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
192 mc_bist_cmd_reg);
193 i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
194 0, 10, 1, NULL);
195 if (i)
196 return i;
197
198#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
199
200 for (i = 15; i >= 0; i--)
201 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
202 if (ecc)
203 *ecc = csio_rd_reg64(hw, MC_DATA(16));
204#undef MC_DATA
205 return 0;
206}
207
208/*
209 * csio_t5_edc_read - read from EDC through backdoor accesses
210 * @hw: the hw module
211 * @idx: which EDC to access
212 * @addr: address of first byte requested
213 * @data: 64 bytes of data containing the requested address
214 * @ecc: where to store the corresponding 64-bit ECC word
215 *
216 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
217 * that covers the requested address @addr. If @parity is not %NULL it
218 * is assigned the 64-bit ECC word for the read data.
219 */
220static int
221csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
222 uint64_t *ecc)
223{
224 int i;
225 uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
226 uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
227
228/*
229 * These macro are missing in t4_regs.h file.
230 */
231#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
232#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
233
234 edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
235 edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
236 edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
237 edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
238 edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
239#undef EDC_REG_T5
240#undef EDC_STRIDE_T5
241
242 if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
243 return -EBUSY;
244 csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
245 csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
246 csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
247 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
248 edc_bist_cmd_reg);
249 i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
250 0, 10, 1, NULL);
251 if (i)
252 return i;
253
254#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
255
256 for (i = 15; i >= 0; i--)
257 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
258 if (ecc)
259 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
260#undef EDC_DATA
261 return 0;
262}
263
/*
 * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @hw: the csio_hw
 * @win: PCI-E memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer (bytes, must be a multiple of 4)
 * @buf: host memory buffer
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries. The memory is transferred as a raw byte sequence
 * from/to the firmware's memory. If this memory contains data
 * structures which contain multi-byte integers, it's the callers
 * responsibility to perform appropriate byte order conversions.
 */
static int
csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
		  u32 len, uint32_t *buf, int dir)
{
	u32 pos, start, offset, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 *
	 * The adapter lays the memories out back-to-back, so the offset
	 * of each region is the sum of the sizes of the ones before it.
	 */
	edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		/* NOTE(review): assumes both EDCs have the same size
		 * (only EDRAM0's BAR is read) -- confirm for all T5 SKUs.
		 */
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		/* MC1 starts after both EDCs and MC0. */
		mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
				MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/*
	 * Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory. We need to grab that aperture in order to know
	 * how to use the specified window. The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space. For T4 this is an absolute PCI-E Bus Address. For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = csio_rd_reg32(hw,
			PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
	mem_aperture = 1 << (WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;

	/* Align the transfer start down to an aperture boundary; 'offset'
	 * is where the requested address falls within the first aperture.
	 */
	start = addr & ~(mem_aperture-1);
	offset = addr - start;
	win_pf = V_PFNUM(hw->pfn);

	csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
		 mem_reg, mem_aperture);
	csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
		 mem_base, memoffset);
	csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n",
		 start, offset, win_pf);
	csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
		 mtype, addr, len);

	/* Transfer one aperture-sized chunk per outer iteration; after the
	 * first chunk every transfer starts at offset 0 within the window.
	 */
	for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
		/*
		 * Move PCI-E Memory Window to our current transfer
		 * position. Read it back to ensure that changes propagate
		 * before we attempt to use the new value.
		 */
		csio_wr_reg32(hw, pos | win_pf,
			PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
		csio_rd_reg32(hw,
			PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

		/* Copy 32 bits at a time through the window. */
		while (offset < mem_aperture && len > 0) {
			if (dir)
				*buf++ = csio_rd_reg32(hw, mem_base + offset);
			else
				csio_wr_reg32(hw, *buf++, mem_base + offset);

			offset += sizeof(__be32);
			len -= sizeof(__be32);
		}
	}
	return 0;
}
363
364/*
365 * csio_t5_dfs_create_ext_mem - setup debugfs for MC0 or MC1 to read the values
366 * @hw: the csio_hw
367 *
368 * This function creates files in the debugfs with external memory region
369 * MC0 & MC1.
370 */
371static void
372csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
373{
374 u32 size;
375 int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
376 if (i & EXT_MEM_ENABLE) {
377 size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
378 csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
379 EXT_MEM_SIZE_GET(size));
380 }
381 if (i & EXT_MEM1_ENABLE) {
382 size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
383 csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
384 EXT_MEM_SIZE_GET(size));
385 }
386}
387
/*
 * T5 adapter specific functions: chip-specific register access and
 * memory backdoor routines, selected at probe time via csio_hw_chip_ops.
 */
struct csio_hw_chip_ops t5_ops = {
	.chip_set_mem_win		= csio_t5_set_mem_win,
	.chip_pcie_intr_handler		= csio_t5_pcie_intr_handler,
	.chip_flash_cfg_addr		= csio_t5_flash_cfg_addr,
	.chip_mc_read			= csio_t5_mc_read,
	.chip_edc_read			= csio_t5_edc_read,
	.chip_memory_rw			= csio_t5_memory_rw,
	.chip_dfs_create_ext_mem	= csio_t5_dfs_create_ext_mem,
};
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 0604b5ff3638..00346fe939d5 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -81,9 +81,11 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
81 __be32 data[16]; 81 __be32 data[16];
82 82
83 if (mem == MEM_MC) 83 if (mem == MEM_MC)
84 ret = csio_hw_mc_read(hw, pos, data, NULL); 84 ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
85 data, NULL);
85 else 86 else
86 ret = csio_hw_edc_read(hw, mem, pos, data, NULL); 87 ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
88 data, NULL);
87 if (ret) 89 if (ret)
88 return ret; 90 return ret;
89 91
@@ -108,7 +110,7 @@ static const struct file_operations csio_mem_debugfs_fops = {
108 .llseek = default_llseek, 110 .llseek = default_llseek,
109}; 111};
110 112
111static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, 113void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
112 unsigned int idx, unsigned int size_mb) 114 unsigned int idx, unsigned int size_mb)
113{ 115{
114 struct dentry *de; 116 struct dentry *de;
@@ -131,9 +133,8 @@ static int csio_setup_debugfs(struct csio_hw *hw)
131 csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); 133 csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
132 if (i & EDRAM1_ENABLE) 134 if (i & EDRAM1_ENABLE)
133 csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); 135 csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
134 if (i & EXT_MEM_ENABLE) 136
135 csio_add_debugfs_mem(hw, "mc", MEM_MC, 137 hw->chip_ops->chip_dfs_create_ext_mem(hw);
136 EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
137 return 0; 138 return 0;
138} 139}
139 140
@@ -1169,7 +1170,7 @@ static struct pci_error_handlers csio_err_handler = {
1169}; 1170};
1170 1171
1171static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { 1172static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
1172 CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */ 1173 CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */
1173 CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ 1174 CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
1174 CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ 1175 CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
1175 CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */ 1176 CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
@@ -1184,8 +1185,34 @@ static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
1184 CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */ 1185 CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
1185 CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */ 1186 CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
1186 CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */ 1187 CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
1187 CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */ 1188 CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */
1188 CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */ 1189 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */
1190 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */
1191 CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */
1192 CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */
1193 CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
1194 CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */
1195 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
1196 CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */
1197 CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */
1198 CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */
1199 CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */
1200 CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */
1201 CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */
1202 CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */
1203 CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */
1204 CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */
1205 CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */
1206 CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */
1207 CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */
1208 CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */
1209 CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */
1210 CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */
1211 CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */
1212 CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */
1213 CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */
1214 CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */
1215 CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */
1189 { 0, 0, 0, 0, 0, 0, 0 } 1216 { 0, 0, 0, 0, 0, 0, 0 }
1190}; 1217};
1191 1218
@@ -1259,4 +1286,5 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
1259MODULE_LICENSE(CSIO_DRV_LICENSE); 1286MODULE_LICENSE(CSIO_DRV_LICENSE);
1260MODULE_DEVICE_TABLE(pci, csio_pci_tbl); 1287MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
1261MODULE_VERSION(CSIO_DRV_VERSION); 1288MODULE_VERSION(CSIO_DRV_VERSION);
1262MODULE_FIRMWARE(CSIO_FW_FNAME); 1289MODULE_FIRMWARE(FW_FNAME_T4);
1290MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 0838fd7ec9c7..5cc5d317a442 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -52,31 +52,6 @@
52#define CSIO_DRV_DESC "Chelsio FCoE driver" 52#define CSIO_DRV_DESC "Chelsio FCoE driver"
53#define CSIO_DRV_VERSION "1.0.0" 53#define CSIO_DRV_VERSION "1.0.0"
54 54
55#define CSIO_DEVICE(devid, idx) \
56{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
57
58#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
59 ((_dev) == CSIO_DEVID_PE10K_PF1))
60
61/* FCoE device IDs */
62#define CSIO_DEVID_PE10K 0xA000
63#define CSIO_DEVID_PE10K_PF1 0xA001
64#define CSIO_DEVID_T440DBG_FCOE 0x4600
65#define CSIO_DEVID_T420CR_FCOE 0x4601
66#define CSIO_DEVID_T422CR_FCOE 0x4602
67#define CSIO_DEVID_T440CR_FCOE 0x4603
68#define CSIO_DEVID_T420BCH_FCOE 0x4604
69#define CSIO_DEVID_T440BCH_FCOE 0x4605
70#define CSIO_DEVID_T440CH_FCOE 0x4606
71#define CSIO_DEVID_T420SO_FCOE 0x4607
72#define CSIO_DEVID_T420CX_FCOE 0x4608
73#define CSIO_DEVID_T420BT_FCOE 0x4609
74#define CSIO_DEVID_T404BT_FCOE 0x460A
75#define CSIO_DEVID_B420_FCOE 0x460B
76#define CSIO_DEVID_B404_FCOE 0x460C
77#define CSIO_DEVID_T480CR_FCOE 0x460D
78#define CSIO_DEVID_T440LPCR_FCOE 0x460E
79
80extern struct fc_function_template csio_fc_transport_funcs; 55extern struct fc_function_template csio_fc_transport_funcs;
81extern struct fc_function_template csio_fc_transport_vport_funcs; 56extern struct fc_function_template csio_fc_transport_vport_funcs;
82 57
@@ -100,6 +75,10 @@ struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
100void csio_shost_exit(struct csio_lnode *); 75void csio_shost_exit(struct csio_lnode *);
101void csio_lnodes_exit(struct csio_hw *, bool); 76void csio_lnodes_exit(struct csio_hw *, bool);
102 77
78/* DebugFS helper routines */
79void csio_add_debugfs_mem(struct csio_hw *, const char *,
80 unsigned int, unsigned int);
81
103static inline struct Scsi_Host * 82static inline struct Scsi_Host *
104csio_ln_to_shost(struct csio_lnode *ln) 83csio_ln_to_shost(struct csio_lnode *ln)
105{ 84{
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 8d84988ab06d..0f9c04175b11 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
114 uint32_t n_rnode_match; /* matched rnode */ 114 uint32_t n_rnode_match; /* matched rnode */
115 uint32_t n_dev_loss_tmo; /* Device loss timeout */ 115 uint32_t n_dev_loss_tmo; /* Device loss timeout */
116 uint32_t n_fdmi_err; /* fdmi err */ 116 uint32_t n_fdmi_err; /* fdmi err */
117 uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ 117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ 118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
119 uint32_t n_rnode_alloc; /* rnode allocated */ 119 uint32_t n_rnode_alloc; /* rnode allocated */
120 uint32_t n_rnode_free; /* rnode freed */ 120 uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
index 51c6a388de2b..e9c3b045f587 100644
--- a/drivers/scsi/csiostor/csio_rnode.c
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -302,7 +302,7 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
302{ 302{
303 uint8_t rport_type; 303 uint8_t rport_type;
304 struct csio_rnode *rn, *match_rn; 304 struct csio_rnode *rn, *match_rn;
305 uint32_t vnp_flowid; 305 uint32_t vnp_flowid = 0;
306 __be32 *port_id; 306 __be32 *port_id;
307 307
308 port_id = (__be32 *)&rdevp->r_id[0]; 308 port_id = (__be32 *)&rdevp->r_id[0];
@@ -350,6 +350,14 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
350 * Else, go ahead and alloc a new rnode. 350 * Else, go ahead and alloc a new rnode.
351 */ 351 */
352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { 352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
353 if (rn == match_rn)
354 goto found_rnode;
355 csio_ln_dbg(ln,
356 "nport_id:x%x and wwpn:%llx"
357 " match for ssni:x%x\n",
358 rn->nport_id,
359 wwn_to_u64(rdevp->wwpn),
360 rdev_flowid);
353 if (csio_is_rnode_ready(rn)) { 361 if (csio_is_rnode_ready(rn)) {
354 csio_ln_warn(ln, 362 csio_ln_warn(ln,
355 "rnode is already" 363 "rnode is already"
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index a3b434c801da..65940096a80d 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
63 uint32_t n_err_nomem; /* error nomem */ 63 uint32_t n_err_nomem; /* error nomem */
64 uint32_t n_evt_unexp; /* unexpected event */ 64 uint32_t n_evt_unexp; /* unexpected event */
65 uint32_t n_evt_drop; /* unexpected event */ 65 uint32_t n_evt_drop; /* unexpected event */
66 uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ 66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ 67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
68 uint32_t n_lun_rst; /* Number of resets of 68 uint32_t n_lun_rst; /* Number of resets of
69 * of LUNs under this 69 * of LUNs under this
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index c32df1bdaa97..4255ce264abf 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -85,8 +85,8 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
85 */ 85 */
86 if (flq->inc_idx >= 8) { 86 if (flq->inc_idx >= 8) {
87 csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | 87 csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
88 PIDX(flq->inc_idx / 8), 88 CSIO_HW_PIDX(hw, flq->inc_idx / 8),
89 MYPF_REG(SGE_PF_KDOORBELL)); 89 MYPF_REG(SGE_PF_KDOORBELL));
90 flq->inc_idx &= 7; 90 flq->inc_idx &= 7;
91 } 91 }
92} 92}
@@ -989,7 +989,8 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
989 wmb(); 989 wmb();
990 /* Ring SGE Doorbell writing q->pidx into it */ 990 /* Ring SGE Doorbell writing q->pidx into it */
991 csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | 991 csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
992 PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL)); 992 CSIO_HW_PIDX(hw, q->inc_idx),
993 MYPF_REG(SGE_PF_KDOORBELL));
993 q->inc_idx = 0; 994 q->inc_idx = 0;
994 995
995 return 0; 996 return 0;
@@ -1331,20 +1332,30 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
1331 1332
1332 /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ 1333 /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
1333 csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); 1334 csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
1334 csio_wr_reg32(hw, 1335
1335 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + 1336 /*
1336 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), 1337 * If using hard params, the following will get set correctly
1337 SGE_FL_BUFFER_SIZE2); 1338 * in csio_wr_set_sge().
1338 csio_wr_reg32(hw, 1339 */
1339 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + 1340 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
1340 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), 1341 csio_wr_reg32(hw,
1341 SGE_FL_BUFFER_SIZE3); 1342 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
1343 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1344 SGE_FL_BUFFER_SIZE2);
1345 csio_wr_reg32(hw,
1346 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
1347 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1348 SGE_FL_BUFFER_SIZE3);
1349 }
1342 1350
1343 csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); 1351 csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
1344 1352
1345 /* default value of rx_dma_offset of the NIC driver */ 1353 /* default value of rx_dma_offset of the NIC driver */
1346 csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, 1354 csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
1347 PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); 1355 PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
1356
1357 csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
1358 CSUM_HAS_PSEUDO_HDR, 0);
1348} 1359}
1349 1360
1350static void 1361static void
@@ -1460,18 +1471,21 @@ csio_wr_set_sge(struct csio_hw *hw)
1460 * and generate an interrupt when this occurs so we can recover. 1471 * and generate an interrupt when this occurs so we can recover.
1461 */ 1472 */
1462 csio_set_reg_field(hw, SGE_DBFIFO_STATUS, 1473 csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
1463 HP_INT_THRESH(HP_INT_THRESH_MASK) | 1474 HP_INT_THRESH(HP_INT_THRESH_MASK) |
1464 LP_INT_THRESH(LP_INT_THRESH_MASK), 1475 CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
1465 HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | 1476 HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
1466 LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH)); 1477 CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
1478
1467 csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, 1479 csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
1468 ENABLE_DROP); 1480 ENABLE_DROP);
1469 1481
1470 /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ 1482 /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
1471 1483
1472 CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); 1484 CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
1473 CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2); 1485 csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
1474 CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3); 1486 & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
1487 csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
1488 & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
1475 CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); 1489 CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
1476 CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); 1490 CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
1477 CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); 1491 CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1522,22 +1536,24 @@ void
1522csio_wr_sge_init(struct csio_hw *hw) 1536csio_wr_sge_init(struct csio_hw *hw)
1523{ 1537{
1524 /* 1538 /*
1525 * If we are master: 1539 * If we are master and chip is not initialized:
1526 * - If we plan to use the config file, we need to fixup some 1540 * - If we plan to use the config file, we need to fixup some
1527 * host specific registers, and read the rest of the SGE 1541 * host specific registers, and read the rest of the SGE
1528 * configuration. 1542 * configuration.
1529 * - If we dont plan to use the config file, we need to initialize 1543 * - If we dont plan to use the config file, we need to initialize
1530 * SGE entirely, including fixing the host specific registers. 1544 * SGE entirely, including fixing the host specific registers.
1545 * If we are master and chip is initialized, just read and work off of
1546 * the already initialized SGE values.
1531 * If we arent the master, we are only allowed to read and work off of 1547 * If we arent the master, we are only allowed to read and work off of
1532 * the already initialized SGE values. 1548 * the already initialized SGE values.
1533 * 1549 *
1534 * Therefore, before calling this function, we assume that the master- 1550 * Therefore, before calling this function, we assume that the master-
1535 * ship of the card, and whether to use config file or not, have 1551 * ship of the card, state and whether to use config file or not, have
1536 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and 1552 * already been decided.
1537 * CSIO_HWF_MASTER should be set/unset.
1538 */ 1553 */
1539 if (csio_is_hw_master(hw)) { 1554 if (csio_is_hw_master(hw)) {
1540 csio_wr_fixup_host_params(hw); 1555 if (hw->fw_state != CSIO_DEV_STATE_INIT)
1556 csio_wr_fixup_host_params(hw);
1541 1557
1542 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) 1558 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
1543 csio_wr_get_sge(hw); 1559 csio_wr_get_sge(hw);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bfdc9a3f897..292b24f9bf93 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1655,7 +1655,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1655 skb->priority = fcoe->priority; 1655 skb->priority = fcoe->priority;
1656 1656
1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && 1657 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1658 fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { 1658 fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
1659 skb->vlan_tci = VLAN_TAG_PRESENT | 1659 skb->vlan_tci = VLAN_TAG_PRESENT |
1660 vlan_dev_vlan_id(fcoe->netdev); 1660 vlan_dev_vlan_id(fcoe->netdev);
1661 skb->dev = fcoe->realdev; 1661 skb->dev = fcoe->realdev;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 65123a21b97e..fe30ea94ffe6 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
50 u32 rlen; 50 u32 rlen;
51 int err, tport; 51 int err, tport;
52 52
53 while (skb->len >= NLMSG_SPACE(0)) { 53 while (skb->len >= NLMSG_HDRLEN) {
54 err = 0; 54 err = 0;
55 55
56 nlh = nlmsg_hdr(skb); 56 nlh = nlmsg_hdr(skb);
@@ -70,7 +70,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
70 goto next_msg; 70 goto next_msg;
71 } 71 }
72 72
73 hdr = NLMSG_DATA(nlh); 73 hdr = nlmsg_data(nlh);
74 if ((hdr->version != SCSI_NL_VERSION) || 74 if ((hdr->version != SCSI_NL_VERSION) ||
75 (hdr->magic != SCSI_NL_MAGIC)) { 75 (hdr->magic != SCSI_NL_MAGIC)) {
76 err = -EPROTOTYPE; 76 err = -EPROTOTYPE;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e894ca7b54c0..e106c276aa00 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,7 +35,6 @@
35#include <scsi/scsi_transport.h> 35#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_fc.h> 36#include <scsi/scsi_transport_fc.h>
37#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
38#include <linux/netlink.h>
39#include <net/netlink.h> 38#include <net/netlink.h>
40#include <scsi/scsi_netlink_fc.h> 39#include <scsi/scsi_netlink_fc.h>
41#include <scsi/scsi_bsg_fc.h> 40#include <scsi/scsi_bsg_fc.h>
@@ -534,7 +533,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
534 struct nlmsghdr *nlh; 533 struct nlmsghdr *nlh;
535 struct fc_nl_event *event; 534 struct fc_nl_event *event;
536 const char *name; 535 const char *name;
537 u32 len, skblen; 536 u32 len;
538 int err; 537 int err;
539 538
540 if (!scsi_nl_sock) { 539 if (!scsi_nl_sock) {
@@ -543,21 +542,19 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
543 } 542 }
544 543
545 len = FC_NL_MSGALIGN(sizeof(*event)); 544 len = FC_NL_MSGALIGN(sizeof(*event));
546 skblen = NLMSG_SPACE(len);
547 545
548 skb = alloc_skb(skblen, GFP_KERNEL); 546 skb = nlmsg_new(len, GFP_KERNEL);
549 if (!skb) { 547 if (!skb) {
550 err = -ENOBUFS; 548 err = -ENOBUFS;
551 goto send_fail; 549 goto send_fail;
552 } 550 }
553 551
554 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, 552 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
555 skblen - sizeof(*nlh), 0);
556 if (!nlh) { 553 if (!nlh) {
557 err = -ENOBUFS; 554 err = -ENOBUFS;
558 goto send_fail_skb; 555 goto send_fail_skb;
559 } 556 }
560 event = NLMSG_DATA(nlh); 557 event = nlmsg_data(nlh);
561 558
562 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, 559 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
563 FC_NL_ASYNC_EVENT, len); 560 FC_NL_ASYNC_EVENT, len);
@@ -604,7 +601,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
604 struct sk_buff *skb; 601 struct sk_buff *skb;
605 struct nlmsghdr *nlh; 602 struct nlmsghdr *nlh;
606 struct fc_nl_event *event; 603 struct fc_nl_event *event;
607 u32 len, skblen; 604 u32 len;
608 int err; 605 int err;
609 606
610 if (!scsi_nl_sock) { 607 if (!scsi_nl_sock) {
@@ -613,21 +610,19 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
613 } 610 }
614 611
615 len = FC_NL_MSGALIGN(sizeof(*event) + data_len); 612 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
616 skblen = NLMSG_SPACE(len);
617 613
618 skb = alloc_skb(skblen, GFP_KERNEL); 614 skb = nlmsg_new(len, GFP_KERNEL);
619 if (!skb) { 615 if (!skb) {
620 err = -ENOBUFS; 616 err = -ENOBUFS;
621 goto send_vendor_fail; 617 goto send_vendor_fail;
622 } 618 }
623 619
624 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, 620 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
625 skblen - sizeof(*nlh), 0);
626 if (!nlh) { 621 if (!nlh) {
627 err = -ENOBUFS; 622 err = -ENOBUFS;
628 goto send_vendor_fail_skb; 623 goto send_vendor_fail_skb;
629 } 624 }
630 event = NLMSG_DATA(nlh); 625 event = nlmsg_data(nlh);
631 626
632 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, 627 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
633 FC_NL_ASYNC_EVENT, len); 628 FC_NL_ASYNC_EVENT, len);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ce06e8772f3a..47799a33d6ca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2028,8 +2028,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
2028 struct iscsi_uevent *ev; 2028 struct iscsi_uevent *ev;
2029 char *pdu; 2029 char *pdu;
2030 struct iscsi_internal *priv; 2030 struct iscsi_internal *priv;
2031 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 2031 int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
2032 data_size); 2032 data_size);
2033 2033
2034 priv = iscsi_if_transport_lookup(conn->transport); 2034 priv = iscsi_if_transport_lookup(conn->transport);
2035 if (!priv) 2035 if (!priv)
@@ -2044,7 +2044,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
2044 } 2044 }
2045 2045
2046 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2046 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2047 ev = NLMSG_DATA(nlh); 2047 ev = nlmsg_data(nlh);
2048 memset(ev, 0, sizeof(*ev)); 2048 memset(ev, 0, sizeof(*ev));
2049 ev->transport_handle = iscsi_handle(conn->transport); 2049 ev->transport_handle = iscsi_handle(conn->transport);
2050 ev->type = ISCSI_KEVENT_RECV_PDU; 2050 ev->type = ISCSI_KEVENT_RECV_PDU;
@@ -2065,7 +2065,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
2065 struct nlmsghdr *nlh; 2065 struct nlmsghdr *nlh;
2066 struct sk_buff *skb; 2066 struct sk_buff *skb;
2067 struct iscsi_uevent *ev; 2067 struct iscsi_uevent *ev;
2068 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 2068 int len = nlmsg_total_size(sizeof(*ev) + data_size);
2069 2069
2070 skb = alloc_skb(len, GFP_ATOMIC); 2070 skb = alloc_skb(len, GFP_ATOMIC);
2071 if (!skb) { 2071 if (!skb) {
@@ -2074,7 +2074,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
2074 } 2074 }
2075 2075
2076 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2076 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2077 ev = NLMSG_DATA(nlh); 2077 ev = nlmsg_data(nlh);
2078 memset(ev, 0, sizeof(*ev)); 2078 memset(ev, 0, sizeof(*ev));
2079 ev->type = type; 2079 ev->type = type;
2080 ev->transport_handle = iscsi_handle(transport); 2080 ev->transport_handle = iscsi_handle(transport);
@@ -2099,7 +2099,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
2099 struct sk_buff *skb; 2099 struct sk_buff *skb;
2100 struct iscsi_uevent *ev; 2100 struct iscsi_uevent *ev;
2101 struct iscsi_internal *priv; 2101 struct iscsi_internal *priv;
2102 int len = NLMSG_SPACE(sizeof(*ev)); 2102 int len = nlmsg_total_size(sizeof(*ev));
2103 2103
2104 priv = iscsi_if_transport_lookup(conn->transport); 2104 priv = iscsi_if_transport_lookup(conn->transport);
2105 if (!priv) 2105 if (!priv)
@@ -2113,7 +2113,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
2113 } 2113 }
2114 2114
2115 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2115 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2116 ev = NLMSG_DATA(nlh); 2116 ev = nlmsg_data(nlh);
2117 ev->transport_handle = iscsi_handle(conn->transport); 2117 ev->transport_handle = iscsi_handle(conn->transport);
2118 ev->type = ISCSI_KEVENT_CONN_ERROR; 2118 ev->type = ISCSI_KEVENT_CONN_ERROR;
2119 ev->r.connerror.error = error; 2119 ev->r.connerror.error = error;
@@ -2134,7 +2134,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
2134 struct sk_buff *skb; 2134 struct sk_buff *skb;
2135 struct iscsi_uevent *ev; 2135 struct iscsi_uevent *ev;
2136 struct iscsi_internal *priv; 2136 struct iscsi_internal *priv;
2137 int len = NLMSG_SPACE(sizeof(*ev)); 2137 int len = nlmsg_total_size(sizeof(*ev));
2138 2138
2139 priv = iscsi_if_transport_lookup(conn->transport); 2139 priv = iscsi_if_transport_lookup(conn->transport);
2140 if (!priv) 2140 if (!priv)
@@ -2148,7 +2148,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
2148 } 2148 }
2149 2149
2150 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2150 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2151 ev = NLMSG_DATA(nlh); 2151 ev = nlmsg_data(nlh);
2152 ev->transport_handle = iscsi_handle(conn->transport); 2152 ev->transport_handle = iscsi_handle(conn->transport);
2153 ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; 2153 ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
2154 ev->r.conn_login.state = state; 2154 ev->r.conn_login.state = state;
@@ -2168,7 +2168,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
2168 struct nlmsghdr *nlh; 2168 struct nlmsghdr *nlh;
2169 struct sk_buff *skb; 2169 struct sk_buff *skb;
2170 struct iscsi_uevent *ev; 2170 struct iscsi_uevent *ev;
2171 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 2171 int len = nlmsg_total_size(sizeof(*ev) + data_size);
2172 2172
2173 skb = alloc_skb(len, GFP_NOIO); 2173 skb = alloc_skb(len, GFP_NOIO);
2174 if (!skb) { 2174 if (!skb) {
@@ -2178,7 +2178,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
2178 } 2178 }
2179 2179
2180 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2180 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2181 ev = NLMSG_DATA(nlh); 2181 ev = nlmsg_data(nlh);
2182 ev->transport_handle = iscsi_handle(transport); 2182 ev->transport_handle = iscsi_handle(transport);
2183 ev->type = ISCSI_KEVENT_HOST_EVENT; 2183 ev->type = ISCSI_KEVENT_HOST_EVENT;
2184 ev->r.host_event.host_no = host_no; 2184 ev->r.host_event.host_no = host_no;
@@ -2199,7 +2199,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
2199 struct nlmsghdr *nlh; 2199 struct nlmsghdr *nlh;
2200 struct sk_buff *skb; 2200 struct sk_buff *skb;
2201 struct iscsi_uevent *ev; 2201 struct iscsi_uevent *ev;
2202 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 2202 int len = nlmsg_total_size(sizeof(*ev) + data_size);
2203 2203
2204 skb = alloc_skb(len, GFP_NOIO); 2204 skb = alloc_skb(len, GFP_NOIO);
2205 if (!skb) { 2205 if (!skb) {
@@ -2208,7 +2208,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
2208 } 2208 }
2209 2209
2210 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2210 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2211 ev = NLMSG_DATA(nlh); 2211 ev = nlmsg_data(nlh);
2212 ev->transport_handle = iscsi_handle(transport); 2212 ev->transport_handle = iscsi_handle(transport);
2213 ev->type = ISCSI_KEVENT_PING_COMP; 2213 ev->type = ISCSI_KEVENT_PING_COMP;
2214 ev->r.ping_comp.host_no = host_no; 2214 ev->r.ping_comp.host_no = host_no;
@@ -2227,7 +2227,7 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2227{ 2227{
2228 struct sk_buff *skb; 2228 struct sk_buff *skb;
2229 struct nlmsghdr *nlh; 2229 struct nlmsghdr *nlh;
2230 int len = NLMSG_SPACE(size); 2230 int len = nlmsg_total_size(size);
2231 int flags = multi ? NLM_F_MULTI : 0; 2231 int flags = multi ? NLM_F_MULTI : 0;
2232 int t = done ? NLMSG_DONE : type; 2232 int t = done ? NLMSG_DONE : type;
2233 2233
@@ -2239,24 +2239,24 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2239 2239
2240 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); 2240 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
2241 nlh->nlmsg_flags = flags; 2241 nlh->nlmsg_flags = flags;
2242 memcpy(NLMSG_DATA(nlh), payload, size); 2242 memcpy(nlmsg_data(nlh), payload, size);
2243 return iscsi_multicast_skb(skb, group, GFP_ATOMIC); 2243 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
2244} 2244}
2245 2245
2246static int 2246static int
2247iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) 2247iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2248{ 2248{
2249 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 2249 struct iscsi_uevent *ev = nlmsg_data(nlh);
2250 struct iscsi_stats *stats; 2250 struct iscsi_stats *stats;
2251 struct sk_buff *skbstat; 2251 struct sk_buff *skbstat;
2252 struct iscsi_cls_conn *conn; 2252 struct iscsi_cls_conn *conn;
2253 struct nlmsghdr *nlhstat; 2253 struct nlmsghdr *nlhstat;
2254 struct iscsi_uevent *evstat; 2254 struct iscsi_uevent *evstat;
2255 struct iscsi_internal *priv; 2255 struct iscsi_internal *priv;
2256 int len = NLMSG_SPACE(sizeof(*ev) + 2256 int len = nlmsg_total_size(sizeof(*ev) +
2257 sizeof(struct iscsi_stats) + 2257 sizeof(struct iscsi_stats) +
2258 sizeof(struct iscsi_stats_custom) * 2258 sizeof(struct iscsi_stats_custom) *
2259 ISCSI_STATS_CUSTOM_MAX); 2259 ISCSI_STATS_CUSTOM_MAX);
2260 int err = 0; 2260 int err = 0;
2261 2261
2262 priv = iscsi_if_transport_lookup(transport); 2262 priv = iscsi_if_transport_lookup(transport);
@@ -2279,7 +2279,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2279 2279
2280 nlhstat = __nlmsg_put(skbstat, 0, 0, 0, 2280 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
2281 (len - sizeof(*nlhstat)), 0); 2281 (len - sizeof(*nlhstat)), 0);
2282 evstat = NLMSG_DATA(nlhstat); 2282 evstat = nlmsg_data(nlhstat);
2283 memset(evstat, 0, sizeof(*evstat)); 2283 memset(evstat, 0, sizeof(*evstat));
2284 evstat->transport_handle = iscsi_handle(conn->transport); 2284 evstat->transport_handle = iscsi_handle(conn->transport);
2285 evstat->type = nlh->nlmsg_type; 2285 evstat->type = nlh->nlmsg_type;
@@ -2292,12 +2292,12 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2292 memset(stats, 0, sizeof(*stats)); 2292 memset(stats, 0, sizeof(*stats));
2293 2293
2294 transport->get_stats(conn, stats); 2294 transport->get_stats(conn, stats);
2295 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 2295 actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
2296 sizeof(struct iscsi_stats) + 2296 sizeof(struct iscsi_stats) +
2297 sizeof(struct iscsi_stats_custom) * 2297 sizeof(struct iscsi_stats_custom) *
2298 stats->custom_length); 2298 stats->custom_length);
2299 actual_size -= sizeof(*nlhstat); 2299 actual_size -= sizeof(*nlhstat);
2300 actual_size = NLMSG_LENGTH(actual_size); 2300 actual_size = nlmsg_msg_size(actual_size);
2301 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 2301 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
2302 nlhstat->nlmsg_len = actual_size; 2302 nlhstat->nlmsg_len = actual_size;
2303 2303
@@ -2321,7 +2321,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
2321 struct iscsi_uevent *ev; 2321 struct iscsi_uevent *ev;
2322 struct sk_buff *skb; 2322 struct sk_buff *skb;
2323 struct nlmsghdr *nlh; 2323 struct nlmsghdr *nlh;
2324 int rc, len = NLMSG_SPACE(sizeof(*ev)); 2324 int rc, len = nlmsg_total_size(sizeof(*ev));
2325 2325
2326 priv = iscsi_if_transport_lookup(session->transport); 2326 priv = iscsi_if_transport_lookup(session->transport);
2327 if (!priv) 2327 if (!priv)
@@ -2337,7 +2337,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
2337 } 2337 }
2338 2338
2339 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 2339 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
2340 ev = NLMSG_DATA(nlh); 2340 ev = nlmsg_data(nlh);
2341 ev->transport_handle = iscsi_handle(session->transport); 2341 ev->transport_handle = iscsi_handle(session->transport);
2342 2342
2343 ev->type = event; 2343 ev->type = event;
@@ -2689,7 +2689,7 @@ iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
2689static int 2689static int
2690iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) 2690iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2691{ 2691{
2692 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 2692 struct iscsi_uevent *ev = nlmsg_data(nlh);
2693 struct Scsi_Host *shost = NULL; 2693 struct Scsi_Host *shost = NULL;
2694 struct iscsi_chap_rec *chap_rec; 2694 struct iscsi_chap_rec *chap_rec;
2695 struct iscsi_internal *priv; 2695 struct iscsi_internal *priv;
@@ -2708,7 +2708,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2708 return -EINVAL; 2708 return -EINVAL;
2709 2709
2710 chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); 2710 chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
2711 len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); 2711 len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
2712 2712
2713 shost = scsi_host_lookup(ev->u.get_chap.host_no); 2713 shost = scsi_host_lookup(ev->u.get_chap.host_no);
2714 if (!shost) { 2714 if (!shost) {
@@ -2729,7 +2729,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2729 2729
2730 nlhchap = __nlmsg_put(skbchap, 0, 0, 0, 2730 nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
2731 (len - sizeof(*nlhchap)), 0); 2731 (len - sizeof(*nlhchap)), 0);
2732 evchap = NLMSG_DATA(nlhchap); 2732 evchap = nlmsg_data(nlhchap);
2733 memset(evchap, 0, sizeof(*evchap)); 2733 memset(evchap, 0, sizeof(*evchap));
2734 evchap->transport_handle = iscsi_handle(transport); 2734 evchap->transport_handle = iscsi_handle(transport);
2735 evchap->type = nlh->nlmsg_type; 2735 evchap->type = nlh->nlmsg_type;
@@ -2742,7 +2742,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2742 err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, 2742 err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
2743 &evchap->u.get_chap.num_entries, buf); 2743 &evchap->u.get_chap.num_entries, buf);
2744 2744
2745 actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); 2745 actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
2746 skb_trim(skbchap, NLMSG_ALIGN(actual_size)); 2746 skb_trim(skbchap, NLMSG_ALIGN(actual_size));
2747 nlhchap->nlmsg_len = actual_size; 2747 nlhchap->nlmsg_len = actual_size;
2748 2748
@@ -3068,7 +3068,7 @@ static int
3068iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 3068iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3069{ 3069{
3070 int err = 0; 3070 int err = 0;
3071 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 3071 struct iscsi_uevent *ev = nlmsg_data(nlh);
3072 struct iscsi_transport *transport = NULL; 3072 struct iscsi_transport *transport = NULL;
3073 struct iscsi_internal *priv; 3073 struct iscsi_internal *priv;
3074 struct iscsi_cls_session *session; 3074 struct iscsi_cls_session *session;
@@ -3256,7 +3256,7 @@ static void
3256iscsi_if_rx(struct sk_buff *skb) 3256iscsi_if_rx(struct sk_buff *skb)
3257{ 3257{
3258 mutex_lock(&rx_queue_mutex); 3258 mutex_lock(&rx_queue_mutex);
3259 while (skb->len >= NLMSG_SPACE(0)) { 3259 while (skb->len >= NLMSG_HDRLEN) {
3260 int err; 3260 int err;
3261 uint32_t rlen; 3261 uint32_t rlen;
3262 struct nlmsghdr *nlh; 3262 struct nlmsghdr *nlh;
@@ -3269,7 +3269,7 @@ iscsi_if_rx(struct sk_buff *skb)
3269 break; 3269 break;
3270 } 3270 }
3271 3271
3272 ev = NLMSG_DATA(nlh); 3272 ev = nlmsg_data(nlh);
3273 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 3273 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
3274 if (rlen > skb->len) 3274 if (rlen > skb->len)
3275 rlen = skb->len; 3275 rlen = skb->len;
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 71098a7b5fed..7cb7d2c8fd86 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
354 354
355 if (cc->dev->id.revision >= 11) 355 if (cc->dev->id.revision >= 11)
356 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); 356 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
357 ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status); 357 ssb_dbg("chipcommon status is 0x%x\n", cc->status);
358 358
359 if (cc->dev->id.revision >= 20) { 359 if (cc->dev->id.revision >= 20) {
360 chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0); 360 chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 7b0bce936762..1173a091b402 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -110,8 +110,8 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
110 return; 110 return;
111 } 111 }
112 112
113 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", 113 ssb_info("Programming PLL to %u.%03u MHz\n",
114 (crystalfreq / 1000), (crystalfreq % 1000)); 114 crystalfreq / 1000, crystalfreq % 1000);
115 115
116 /* First turn the PLL off. */ 116 /* First turn the PLL off. */
117 switch (bus->chip_id) { 117 switch (bus->chip_id) {
@@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
138 } 138 }
139 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); 139 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
140 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) 140 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
141 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); 141 ssb_emerg("Failed to turn the PLL off!\n");
142 142
143 /* Set PDIV in PLL control 0. */ 143 /* Set PDIV in PLL control 0. */
144 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); 144 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@ -249,8 +249,8 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
249 return; 249 return;
250 } 250 }
251 251
252 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", 252 ssb_info("Programming PLL to %u.%03u MHz\n",
253 (crystalfreq / 1000), (crystalfreq % 1000)); 253 crystalfreq / 1000, crystalfreq % 1000);
254 254
255 /* First turn the PLL off. */ 255 /* First turn the PLL off. */
256 switch (bus->chip_id) { 256 switch (bus->chip_id) {
@@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
275 } 275 }
276 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); 276 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
277 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) 277 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
278 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); 278 ssb_emerg("Failed to turn the PLL off!\n");
279 279
280 /* Set p1div and p2div. */ 280 /* Set p1div and p2div. */
281 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); 281 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@ -349,9 +349,8 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
349 case 43222: 349 case 43222:
350 break; 350 break;
351 default: 351 default:
352 ssb_printk(KERN_ERR PFX 352 ssb_err("ERROR: PLL init unknown for device %04X\n",
353 "ERROR: PLL init unknown for device %04X\n", 353 bus->chip_id);
354 bus->chip_id);
355 } 354 }
356} 355}
357 356
@@ -472,9 +471,8 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
472 max_msk = 0xFFFFF; 471 max_msk = 0xFFFFF;
473 break; 472 break;
474 default: 473 default:
475 ssb_printk(KERN_ERR PFX 474 ssb_err("ERROR: PMU resource config unknown for device %04X\n",
476 "ERROR: PMU resource config unknown for device %04X\n", 475 bus->chip_id);
477 bus->chip_id);
478 } 476 }
479 477
480 if (updown_tab) { 478 if (updown_tab) {
@@ -526,8 +524,8 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
526 pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); 524 pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
527 cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION); 525 cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
528 526
529 ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", 527 ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
530 cc->pmu.rev, pmucap); 528 cc->pmu.rev, pmucap);
531 529
532 if (cc->pmu.rev == 1) 530 if (cc->pmu.rev == 1)
533 chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, 531 chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
@@ -638,9 +636,8 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
638 case 0x5354: 636 case 0x5354:
639 ssb_pmu_get_alp_clock_clk0(cc); 637 ssb_pmu_get_alp_clock_clk0(cc);
640 default: 638 default:
641 ssb_printk(KERN_ERR PFX 639 ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
642 "ERROR: PMU alp clock unknown for device %04X\n", 640 bus->chip_id);
643 bus->chip_id);
644 return 0; 641 return 0;
645 } 642 }
646} 643}
@@ -654,9 +651,8 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
654 /* 5354 chip uses a non programmable PLL of frequency 240MHz */ 651 /* 5354 chip uses a non programmable PLL of frequency 240MHz */
655 return 240000000; 652 return 240000000;
656 default: 653 default:
657 ssb_printk(KERN_ERR PFX 654 ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
658 "ERROR: PMU cpu clock unknown for device %04X\n", 655 bus->chip_id);
659 bus->chip_id);
660 return 0; 656 return 0;
661 } 657 }
662} 658}
@@ -669,9 +665,8 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
669 case 0x5354: 665 case 0x5354:
670 return 120000000; 666 return 120000000;
671 default: 667 default:
672 ssb_printk(KERN_ERR PFX 668 ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
673 "ERROR: PMU controlclock unknown for device %04X\n", 669 bus->chip_id);
674 bus->chip_id);
675 return 0; 670 return 0;
676 } 671 }
677} 672}
@@ -692,8 +687,23 @@ void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
692 pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; 687 pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
693 break; 688 break;
694 case 43222: 689 case 43222:
695 /* TODO: BCM43222 requires updating PLLs too */ 690 if (spuravoid == 1) {
696 return; 691 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11500008);
692 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x0C000C06);
693 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x0F600a08);
694 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, 0x00000000);
695 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL4, 0x2001E920);
696 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888815);
697 } else {
698 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100008);
699 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x0c000c06);
700 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x03000a08);
701 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, 0x00000000);
702 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL4, 0x200005c0);
703 ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888855);
704 }
705 pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
706 break;
697 default: 707 default:
698 ssb_printk(KERN_ERR PFX 708 ssb_printk(KERN_ERR PFX
699 "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", 709 "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 33b37dac40bd..fa385a368a56 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -167,21 +167,22 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
167 irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); 167 irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
168 ssb_write32(mdev, SSB_IPSFLAG, irqflag); 168 ssb_write32(mdev, SSB_IPSFLAG, irqflag);
169 } 169 }
170 ssb_dprintk(KERN_INFO PFX 170 ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
171 "set_irq: core 0x%04x, irq %d => %d\n", 171 dev->id.coreid, oldirq+2, irq+2);
172 dev->id.coreid, oldirq+2, irq+2);
173} 172}
174 173
175static void print_irq(struct ssb_device *dev, unsigned int irq) 174static void print_irq(struct ssb_device *dev, unsigned int irq)
176{ 175{
177 int i;
178 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; 176 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
179 ssb_dprintk(KERN_INFO PFX 177 ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
180 "core 0x%04x, irq :", dev->id.coreid); 178 dev->id.coreid,
181 for (i = 0; i <= 6; i++) { 179 irq_name[0], irq == 0 ? "*" : " ",
182 ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" "); 180 irq_name[1], irq == 1 ? "*" : " ",
183 } 181 irq_name[2], irq == 2 ? "*" : " ",
184 ssb_dprintk("\n"); 182 irq_name[3], irq == 3 ? "*" : " ",
183 irq_name[4], irq == 4 ? "*" : " ",
184 irq_name[5], irq == 5 ? "*" : " ",
185 irq_name[6], irq == 6 ? "*" : " ");
185} 186}
186 187
187static void dump_irq(struct ssb_bus *bus) 188static void dump_irq(struct ssb_bus *bus)
@@ -286,7 +287,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
286 if (!mcore->dev) 287 if (!mcore->dev)
287 return; /* We don't have a MIPS core */ 288 return; /* We don't have a MIPS core */
288 289
289 ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n"); 290 ssb_dbg("Initializing MIPS core...\n");
290 291
291 bus = mcore->dev->bus; 292 bus = mcore->dev->bus;
292 hz = ssb_clockspeed(bus); 293 hz = ssb_clockspeed(bus);
@@ -334,7 +335,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
334 break; 335 break;
335 } 336 }
336 } 337 }
337 ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n"); 338 ssb_dbg("after irq reconfiguration\n");
338 dump_irq(bus); 339 dump_irq(bus);
339 340
340 ssb_mips_serial_init(mcore); 341 ssb_mips_serial_init(mcore);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 59801d23d7ec..d75b72ba2672 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -263,8 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
263 return -ENODEV; 263 return -ENODEV;
264 } 264 }
265 265
266 ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", 266 ssb_info("PCI: Fixing up device %s\n", pci_name(d));
267 pci_name(d));
268 267
269 /* Fix up interrupt lines */ 268 /* Fix up interrupt lines */
270 d->irq = ssb_mips_irq(extpci_core->dev) + 2; 269 d->irq = ssb_mips_irq(extpci_core->dev) + 2;
@@ -285,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
285 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) 284 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
286 return; 285 return;
287 286
288 ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev)); 287 ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev));
289 288
290 /* Enable PCI bridge bus mastering and memory space */ 289 /* Enable PCI bridge bus mastering and memory space */
291 pci_set_master(dev); 290 pci_set_master(dev);
292 if (pcibios_enable_device(dev, ~0) < 0) { 291 if (pcibios_enable_device(dev, ~0) < 0) {
293 ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n"); 292 ssb_err("PCI: SSB bridge enable failed\n");
294 return; 293 return;
295 } 294 }
296 295
@@ -299,8 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
299 298
300 /* Make sure our latency is high enough to handle the devices behind us */ 299 /* Make sure our latency is high enough to handle the devices behind us */
301 lat = 168; 300 lat = 168;
302 ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n", 301 ssb_info("PCI: Fixing latency timer of device %s to %u\n",
303 pci_name(dev), lat); 302 pci_name(dev), lat);
304 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 303 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
305} 304}
306DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge); 305DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge);
@@ -323,7 +322,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
323 return; 322 return;
324 extpci_core = pc; 323 extpci_core = pc;
325 324
326 ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n"); 325 ssb_dbg("PCIcore in host mode found\n");
327 /* Reset devices on the external PCI bus */ 326 /* Reset devices on the external PCI bus */
328 val = SSB_PCICORE_CTL_RST_OE; 327 val = SSB_PCICORE_CTL_RST_OE;
329 val |= SSB_PCICORE_CTL_CLK_OE; 328 val |= SSB_PCICORE_CTL_CLK_OE;
@@ -338,7 +337,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
338 udelay(1); /* Assertion time demanded by the PCI standard */ 337 udelay(1); /* Assertion time demanded by the PCI standard */
339 338
340 if (pc->dev->bus->has_cardbus_slot) { 339 if (pc->dev->bus->has_cardbus_slot) {
341 ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n"); 340 ssb_dbg("CardBus slot detected\n");
342 pc->cardbusmode = 1; 341 pc->cardbusmode = 1;
343 /* GPIO 1 resets the bridge */ 342 /* GPIO 1 resets the bridge */
344 ssb_gpio_out(pc->dev->bus, 1, 1); 343 ssb_gpio_out(pc->dev->bus, 1, 1);
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index bb18d76f9f2c..55e101115038 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -57,9 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus)
57 bus->busnumber, &wdt, 57 bus->busnumber, &wdt,
58 sizeof(wdt)); 58 sizeof(wdt));
59 if (IS_ERR(pdev)) { 59 if (IS_ERR(pdev)) {
60 ssb_dprintk(KERN_INFO PFX 60 ssb_dbg("can not register watchdog device, err: %li\n",
61 "can not register watchdog device, err: %li\n", 61 PTR_ERR(pdev));
62 PTR_ERR(pdev));
63 return PTR_ERR(pdev); 62 return PTR_ERR(pdev);
64 } 63 }
65 64
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 3b645b8a261f..812775a4bfb6 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -275,8 +275,8 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx)
275 275
276 err = sdrv->probe(sdev, &sdev->id); 276 err = sdrv->probe(sdev, &sdev->id);
277 if (err) { 277 if (err) {
278 ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n", 278 ssb_err("Failed to thaw device %s\n",
279 dev_name(sdev->dev)); 279 dev_name(sdev->dev));
280 result = err; 280 result = err;
281 } 281 }
282 ssb_device_put(sdev); 282 ssb_device_put(sdev);
@@ -447,10 +447,9 @@ void ssb_bus_unregister(struct ssb_bus *bus)
447 447
448 err = ssb_gpio_unregister(bus); 448 err = ssb_gpio_unregister(bus);
449 if (err == -EBUSY) 449 if (err == -EBUSY)
450 ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); 450 ssb_dbg("Some GPIOs are still in use\n");
451 else if (err) 451 else if (err)
452 ssb_dprintk(KERN_ERR PFX 452 ssb_dbg("Can not unregister GPIO driver: %i\n", err);
453 "Can not unregister GPIO driver: %i\n", err);
454 453
455 ssb_buses_lock(); 454 ssb_buses_lock();
456 ssb_devices_unregister(bus); 455 ssb_devices_unregister(bus);
@@ -497,8 +496,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
497 496
498 devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL); 497 devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL);
499 if (!devwrap) { 498 if (!devwrap) {
500 ssb_printk(KERN_ERR PFX 499 ssb_err("Could not allocate device\n");
501 "Could not allocate device\n");
502 err = -ENOMEM; 500 err = -ENOMEM;
503 goto error; 501 goto error;
504 } 502 }
@@ -537,9 +535,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
537 sdev->dev = dev; 535 sdev->dev = dev;
538 err = device_register(dev); 536 err = device_register(dev);
539 if (err) { 537 if (err) {
540 ssb_printk(KERN_ERR PFX 538 ssb_err("Could not register %s\n", dev_name(dev));
541 "Could not register %s\n",
542 dev_name(dev));
543 /* Set dev to NULL to not unregister 539 /* Set dev to NULL to not unregister
544 * dev on error unwinding. */ 540 * dev on error unwinding. */
545 sdev->dev = NULL; 541 sdev->dev = NULL;
@@ -825,10 +821,9 @@ static int ssb_bus_register(struct ssb_bus *bus,
825 ssb_mipscore_init(&bus->mipscore); 821 ssb_mipscore_init(&bus->mipscore);
826 err = ssb_gpio_init(bus); 822 err = ssb_gpio_init(bus);
827 if (err == -ENOTSUPP) 823 if (err == -ENOTSUPP)
828 ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n"); 824 ssb_dbg("GPIO driver not activated\n");
829 else if (err) 825 else if (err)
830 ssb_dprintk(KERN_ERR PFX 826 ssb_dbg("Error registering GPIO driver: %i\n", err);
831 "Error registering GPIO driver: %i\n", err);
832 err = ssb_fetch_invariants(bus, get_invariants); 827 err = ssb_fetch_invariants(bus, get_invariants);
833 if (err) { 828 if (err) {
834 ssb_bus_may_powerdown(bus); 829 ssb_bus_may_powerdown(bus);
@@ -878,11 +873,11 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
878 873
879 err = ssb_bus_register(bus, ssb_pci_get_invariants, 0); 874 err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
880 if (!err) { 875 if (!err) {
881 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 876 ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
882 "PCI device %s\n", dev_name(&host_pci->dev)); 877 dev_name(&host_pci->dev));
883 } else { 878 } else {
884 ssb_printk(KERN_ERR PFX "Failed to register PCI version" 879 ssb_err("Failed to register PCI version of SSB with error %d\n",
885 " of SSB with error %d\n", err); 880 err);
886 } 881 }
887 882
888 return err; 883 return err;
@@ -903,8 +898,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
903 898
904 err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr); 899 err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
905 if (!err) { 900 if (!err) {
906 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 901 ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
907 "PCMCIA device %s\n", pcmcia_dev->devname); 902 pcmcia_dev->devname);
908 } 903 }
909 904
910 return err; 905 return err;
@@ -925,8 +920,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
925 920
926 err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0); 921 err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
927 if (!err) { 922 if (!err) {
928 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 923 ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
929 "SDIO device %s\n", sdio_func_id(func)); 924 sdio_func_id(func));
930 } 925 }
931 926
932 return err; 927 return err;
@@ -944,8 +939,8 @@ int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
944 939
945 err = ssb_bus_register(bus, get_invariants, baseaddr); 940 err = ssb_bus_register(bus, get_invariants, baseaddr);
946 if (!err) { 941 if (!err) {
947 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found at " 942 ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
948 "address 0x%08lX\n", baseaddr); 943 baseaddr);
949 } 944 }
950 945
951 return err; 946 return err;
@@ -1339,7 +1334,7 @@ out:
1339#endif 1334#endif
1340 return err; 1335 return err;
1341error: 1336error:
1342 ssb_printk(KERN_ERR PFX "Bus powerdown failed\n"); 1337 ssb_err("Bus powerdown failed\n");
1343 goto out; 1338 goto out;
1344} 1339}
1345EXPORT_SYMBOL(ssb_bus_may_powerdown); 1340EXPORT_SYMBOL(ssb_bus_may_powerdown);
@@ -1362,7 +1357,7 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
1362 1357
1363 return 0; 1358 return 0;
1364error: 1359error:
1365 ssb_printk(KERN_ERR PFX "Bus powerup failed\n"); 1360 ssb_err("Bus powerup failed\n");
1366 return err; 1361 return err;
1367} 1362}
1368EXPORT_SYMBOL(ssb_bus_powerup); 1363EXPORT_SYMBOL(ssb_bus_powerup);
@@ -1470,15 +1465,13 @@ static int __init ssb_modinit(void)
1470 1465
1471 err = b43_pci_ssb_bridge_init(); 1466 err = b43_pci_ssb_bridge_init();
1472 if (err) { 1467 if (err) {
1473 ssb_printk(KERN_ERR "Broadcom 43xx PCI-SSB-bridge " 1468 ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
1474 "initialization failed\n");
1475 /* don't fail SSB init because of this */ 1469 /* don't fail SSB init because of this */
1476 err = 0; 1470 err = 0;
1477 } 1471 }
1478 err = ssb_gige_init(); 1472 err = ssb_gige_init();
1479 if (err) { 1473 if (err) {
1480 ssb_printk(KERN_ERR "SSB Broadcom Gigabit Ethernet " 1474 ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
1481 "driver initialization failed\n");
1482 /* don't fail SSB init because of this */ 1475 /* don't fail SSB init because of this */
1483 err = 0; 1476 err = 0;
1484 } 1477 }
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index e9d94968f394..a8dc95ebf2d6 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
56 } 56 }
57 return 0; 57 return 0;
58error: 58error:
59 ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); 59 ssb_err("Failed to switch to core %u\n", coreidx);
60 return -ENODEV; 60 return -ENODEV;
61} 61}
62 62
@@ -67,10 +67,9 @@ int ssb_pci_switch_core(struct ssb_bus *bus,
67 unsigned long flags; 67 unsigned long flags;
68 68
69#if SSB_VERBOSE_PCICORESWITCH_DEBUG 69#if SSB_VERBOSE_PCICORESWITCH_DEBUG
70 ssb_printk(KERN_INFO PFX 70 ssb_info("Switching to %s core, index %d\n",
71 "Switching to %s core, index %d\n", 71 ssb_core_name(dev->id.coreid),
72 ssb_core_name(dev->id.coreid), 72 dev->core_index);
73 dev->core_index);
74#endif 73#endif
75 74
76 spin_lock_irqsave(&bus->bar_lock, flags); 75 spin_lock_irqsave(&bus->bar_lock, flags);
@@ -231,6 +230,15 @@ static inline u8 ssb_crc8(u8 crc, u8 data)
231 return t[crc ^ data]; 230 return t[crc ^ data];
232} 231}
233 232
233static void sprom_get_mac(char *mac, const u16 *in)
234{
235 int i;
236 for (i = 0; i < 3; i++) {
237 *mac++ = in[i] >> 8;
238 *mac++ = in[i];
239 }
240}
241
234static u8 ssb_sprom_crc(const u16 *sprom, u16 size) 242static u8 ssb_sprom_crc(const u16 *sprom, u16 size)
235{ 243{
236 int word; 244 int word;
@@ -278,7 +286,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
278 u32 spromctl; 286 u32 spromctl;
279 u16 size = bus->sprom_size; 287 u16 size = bus->sprom_size;
280 288
281 ssb_printk(KERN_NOTICE PFX "Writing SPROM. Do NOT turn off the power! Please stand by...\n"); 289 ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
282 err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); 290 err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
283 if (err) 291 if (err)
284 goto err_ctlreg; 292 goto err_ctlreg;
@@ -286,17 +294,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
286 err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); 294 err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl);
287 if (err) 295 if (err)
288 goto err_ctlreg; 296 goto err_ctlreg;
289 ssb_printk(KERN_NOTICE PFX "[ 0%%"); 297 ssb_notice("[ 0%%");
290 msleep(500); 298 msleep(500);
291 for (i = 0; i < size; i++) { 299 for (i = 0; i < size; i++) {
292 if (i == size / 4) 300 if (i == size / 4)
293 ssb_printk("25%%"); 301 ssb_cont("25%%");
294 else if (i == size / 2) 302 else if (i == size / 2)
295 ssb_printk("50%%"); 303 ssb_cont("50%%");
296 else if (i == (size * 3) / 4) 304 else if (i == (size * 3) / 4)
297 ssb_printk("75%%"); 305 ssb_cont("75%%");
298 else if (i % 2) 306 else if (i % 2)
299 ssb_printk("."); 307 ssb_cont(".");
300 writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2)); 308 writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
301 mmiowb(); 309 mmiowb();
302 msleep(20); 310 msleep(20);
@@ -309,12 +317,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
309 if (err) 317 if (err)
310 goto err_ctlreg; 318 goto err_ctlreg;
311 msleep(500); 319 msleep(500);
312 ssb_printk("100%% ]\n"); 320 ssb_cont("100%% ]\n");
313 ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); 321 ssb_notice("SPROM written\n");
314 322
315 return 0; 323 return 0;
316err_ctlreg: 324err_ctlreg:
317 ssb_printk(KERN_ERR PFX "Could not access SPROM control register.\n"); 325 ssb_err("Could not access SPROM control register.\n");
318 return err; 326 return err;
319} 327}
320 328
@@ -339,10 +347,23 @@ static s8 r123_extract_antgain(u8 sprom_revision, const u16 *in,
339 return (s8)gain; 347 return (s8)gain;
340} 348}
341 349
350static void sprom_extract_r23(struct ssb_sprom *out, const u16 *in)
351{
352 SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
353 SPEX(opo, SSB_SPROM2_OPO, SSB_SPROM2_OPO_VALUE, 0);
354 SPEX(pa1lob0, SSB_SPROM2_PA1LOB0, 0xFFFF, 0);
355 SPEX(pa1lob1, SSB_SPROM2_PA1LOB1, 0xFFFF, 0);
356 SPEX(pa1lob2, SSB_SPROM2_PA1LOB2, 0xFFFF, 0);
357 SPEX(pa1hib0, SSB_SPROM2_PA1HIB0, 0xFFFF, 0);
358 SPEX(pa1hib1, SSB_SPROM2_PA1HIB1, 0xFFFF, 0);
359 SPEX(pa1hib2, SSB_SPROM2_PA1HIB2, 0xFFFF, 0);
360 SPEX(maxpwr_ah, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_HI, 0);
361 SPEX(maxpwr_al, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_LO,
362 SSB_SPROM2_MAXP_A_LO_SHIFT);
363}
364
342static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in) 365static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
343{ 366{
344 int i;
345 u16 v;
346 u16 loc[3]; 367 u16 loc[3];
347 368
348 if (out->revision == 3) /* rev 3 moved MAC */ 369 if (out->revision == 3) /* rev 3 moved MAC */
@@ -352,19 +373,10 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
352 loc[1] = SSB_SPROM1_ET0MAC; 373 loc[1] = SSB_SPROM1_ET0MAC;
353 loc[2] = SSB_SPROM1_ET1MAC; 374 loc[2] = SSB_SPROM1_ET1MAC;
354 } 375 }
355 for (i = 0; i < 3; i++) { 376 sprom_get_mac(out->il0mac, &in[SPOFF(loc[0])]);
356 v = in[SPOFF(loc[0]) + i];
357 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
358 }
359 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */ 377 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */
360 for (i = 0; i < 3; i++) { 378 sprom_get_mac(out->et0mac, &in[SPOFF(loc[1])]);
361 v = in[SPOFF(loc[1]) + i]; 379 sprom_get_mac(out->et1mac, &in[SPOFF(loc[2])]);
362 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
363 }
364 for (i = 0; i < 3; i++) {
365 v = in[SPOFF(loc[2]) + i];
366 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
367 }
368 } 380 }
369 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); 381 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
370 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, 382 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -372,6 +384,7 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
372 SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14); 384 SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14);
373 SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15); 385 SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15);
374 SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0); 386 SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0);
387 SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0);
375 if (out->revision == 1) 388 if (out->revision == 1)
376 SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE, 389 SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
377 SSB_SPROM1_BINF_CCODE_SHIFT); 390 SSB_SPROM1_BINF_CCODE_SHIFT);
@@ -398,8 +411,7 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
398 SSB_SPROM1_ITSSI_A_SHIFT); 411 SSB_SPROM1_ITSSI_A_SHIFT);
399 SPEX(itssi_bg, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_BG, 0); 412 SPEX(itssi_bg, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_BG, 0);
400 SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0); 413 SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0);
401 if (out->revision >= 2) 414
402 SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
403 SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8); 415 SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8);
404 SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0); 416 SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0);
405 417
@@ -410,6 +422,8 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
410 out->antenna_gain.a1 = r123_extract_antgain(out->revision, in, 422 out->antenna_gain.a1 = r123_extract_antgain(out->revision, in,
411 SSB_SPROM1_AGAIN_A, 423 SSB_SPROM1_AGAIN_A,
412 SSB_SPROM1_AGAIN_A_SHIFT); 424 SSB_SPROM1_AGAIN_A_SHIFT);
425 if (out->revision >= 2)
426 sprom_extract_r23(out, in);
413} 427}
414 428
415/* Revs 4 5 and 8 have partially shared layout */ 429/* Revs 4 5 and 8 have partially shared layout */
@@ -454,23 +468,20 @@ static void sprom_extract_r458(struct ssb_sprom *out, const u16 *in)
454 468
455static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in) 469static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
456{ 470{
457 int i;
458 u16 v;
459 u16 il0mac_offset; 471 u16 il0mac_offset;
460 472
461 if (out->revision == 4) 473 if (out->revision == 4)
462 il0mac_offset = SSB_SPROM4_IL0MAC; 474 il0mac_offset = SSB_SPROM4_IL0MAC;
463 else 475 else
464 il0mac_offset = SSB_SPROM5_IL0MAC; 476 il0mac_offset = SSB_SPROM5_IL0MAC;
465 /* extract the MAC address */ 477
466 for (i = 0; i < 3; i++) { 478 sprom_get_mac(out->il0mac, &in[SPOFF(il0mac_offset)]);
467 v = in[SPOFF(il0mac_offset) + i]; 479
468 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
469 }
470 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); 480 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
471 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, 481 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
472 SSB_SPROM4_ETHPHY_ET1A_SHIFT); 482 SSB_SPROM4_ETHPHY_ET1A_SHIFT);
473 SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0); 483 SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0);
484 SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0);
474 if (out->revision == 4) { 485 if (out->revision == 4) {
475 SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8); 486 SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8);
476 SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0); 487 SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0);
@@ -530,7 +541,7 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
530static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) 541static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
531{ 542{
532 int i; 543 int i;
533 u16 v, o; 544 u16 o;
534 u16 pwr_info_offset[] = { 545 u16 pwr_info_offset[] = {
535 SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1, 546 SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
536 SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3 547 SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
@@ -539,11 +550,10 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
539 ARRAY_SIZE(out->core_pwr_info)); 550 ARRAY_SIZE(out->core_pwr_info));
540 551
541 /* extract the MAC address */ 552 /* extract the MAC address */
542 for (i = 0; i < 3; i++) { 553 sprom_get_mac(out->il0mac, &in[SPOFF(SSB_SPROM8_IL0MAC)]);
543 v = in[SPOFF(SSB_SPROM8_IL0MAC) + i]; 554
544 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
545 }
546 SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0); 555 SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
556 SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0);
547 SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8); 557 SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
548 SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0); 558 SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
549 SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0); 559 SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0);
@@ -743,7 +753,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
743 memset(out, 0, sizeof(*out)); 753 memset(out, 0, sizeof(*out));
744 754
745 out->revision = in[size - 1] & 0x00FF; 755 out->revision = in[size - 1] & 0x00FF;
746 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); 756 ssb_dbg("SPROM revision %d detected\n", out->revision);
747 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ 757 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
748 memset(out->et1mac, 0xFF, 6); 758 memset(out->et1mac, 0xFF, 6);
749 759
@@ -752,7 +762,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
752 * number stored in the SPROM. 762 * number stored in the SPROM.
753 * Always extract r1. */ 763 * Always extract r1. */
754 out->revision = 1; 764 out->revision = 1;
755 ssb_dprintk(KERN_DEBUG PFX "SPROM treated as revision %d\n", out->revision); 765 ssb_dbg("SPROM treated as revision %d\n", out->revision);
756 } 766 }
757 767
758 switch (out->revision) { 768 switch (out->revision) {
@@ -769,9 +779,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
769 sprom_extract_r8(out, in); 779 sprom_extract_r8(out, in);
770 break; 780 break;
771 default: 781 default:
772 ssb_printk(KERN_WARNING PFX "Unsupported SPROM" 782 ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
773 " revision %d detected. Will extract" 783 out->revision);
774 " v1\n", out->revision);
775 out->revision = 1; 784 out->revision = 1;
776 sprom_extract_r123(out, in); 785 sprom_extract_r123(out, in);
777 } 786 }
@@ -791,7 +800,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
791 u16 *buf; 800 u16 *buf;
792 801
793 if (!ssb_is_sprom_available(bus)) { 802 if (!ssb_is_sprom_available(bus)) {
794 ssb_printk(KERN_ERR PFX "No SPROM available!\n"); 803 ssb_err("No SPROM available!\n");
795 return -ENODEV; 804 return -ENODEV;
796 } 805 }
797 if (bus->chipco.dev) { /* can be unavailable! */ 806 if (bus->chipco.dev) { /* can be unavailable! */
@@ -810,7 +819,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
810 } else { 819 } else {
811 bus->sprom_offset = SSB_SPROM_BASE1; 820 bus->sprom_offset = SSB_SPROM_BASE1;
812 } 821 }
813 ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset); 822 ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset);
814 823
815 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); 824 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
816 if (!buf) 825 if (!buf)
@@ -835,18 +844,15 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
835 * available for this device in some other storage */ 844 * available for this device in some other storage */
836 err = ssb_fill_sprom_with_fallback(bus, sprom); 845 err = ssb_fill_sprom_with_fallback(bus, sprom);
837 if (err) { 846 if (err) {
838 ssb_printk(KERN_WARNING PFX "WARNING: Using" 847 ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
839 " fallback SPROM failed (err %d)\n", 848 err);
840 err);
841 } else { 849 } else {
842 ssb_dprintk(KERN_DEBUG PFX "Using SPROM" 850 ssb_dbg("Using SPROM revision %d provided by platform\n",
843 " revision %d provided by" 851 sprom->revision);
844 " platform.\n", sprom->revision);
845 err = 0; 852 err = 0;
846 goto out_free; 853 goto out_free;
847 } 854 }
848 ssb_printk(KERN_WARNING PFX "WARNING: Invalid" 855 ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
849 " SPROM CRC (corrupt SPROM)\n");
850 } 856 }
851 } 857 }
852 err = sprom_extract(bus, sprom, buf, bus->sprom_size); 858 err = sprom_extract(bus, sprom, buf, bus->sprom_size);
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index fbafed5b729b..b413e0187087 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
143 143
144 return 0; 144 return 0;
145error: 145error:
146 ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); 146 ssb_err("Failed to switch to core %u\n", coreidx);
147 return err; 147 return err;
148} 148}
149 149
@@ -153,10 +153,9 @@ int ssb_pcmcia_switch_core(struct ssb_bus *bus,
153 int err; 153 int err;
154 154
155#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 155#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
156 ssb_printk(KERN_INFO PFX 156 ssb_info("Switching to %s core, index %d\n",
157 "Switching to %s core, index %d\n", 157 ssb_core_name(dev->id.coreid),
158 ssb_core_name(dev->id.coreid), 158 dev->core_index);
159 dev->core_index);
160#endif 159#endif
161 160
162 err = ssb_pcmcia_switch_coreidx(bus, dev->core_index); 161 err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
@@ -192,7 +191,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
192 191
193 return 0; 192 return 0;
194error: 193error:
195 ssb_printk(KERN_ERR PFX "Failed to switch pcmcia segment\n"); 194 ssb_err("Failed to switch pcmcia segment\n");
196 return err; 195 return err;
197} 196}
198 197
@@ -549,44 +548,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
549 bool failed = 0; 548 bool failed = 0;
550 size_t size = SSB_PCMCIA_SPROM_SIZE; 549 size_t size = SSB_PCMCIA_SPROM_SIZE;
551 550
552 ssb_printk(KERN_NOTICE PFX 551 ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
553 "Writing SPROM. Do NOT turn off the power! "
554 "Please stand by...\n");
555 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN); 552 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
556 if (err) { 553 if (err) {
557 ssb_printk(KERN_NOTICE PFX 554 ssb_notice("Could not enable SPROM write access\n");
558 "Could not enable SPROM write access.\n");
559 return -EBUSY; 555 return -EBUSY;
560 } 556 }
561 ssb_printk(KERN_NOTICE PFX "[ 0%%"); 557 ssb_notice("[ 0%%");
562 msleep(500); 558 msleep(500);
563 for (i = 0; i < size; i++) { 559 for (i = 0; i < size; i++) {
564 if (i == size / 4) 560 if (i == size / 4)
565 ssb_printk("25%%"); 561 ssb_cont("25%%");
566 else if (i == size / 2) 562 else if (i == size / 2)
567 ssb_printk("50%%"); 563 ssb_cont("50%%");
568 else if (i == (size * 3) / 4) 564 else if (i == (size * 3) / 4)
569 ssb_printk("75%%"); 565 ssb_cont("75%%");
570 else if (i % 2) 566 else if (i % 2)
571 ssb_printk("."); 567 ssb_cont(".");
572 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); 568 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
573 if (err) { 569 if (err) {
574 ssb_printk(KERN_NOTICE PFX 570 ssb_notice("Failed to write to SPROM\n");
575 "Failed to write to SPROM.\n");
576 failed = 1; 571 failed = 1;
577 break; 572 break;
578 } 573 }
579 } 574 }
580 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); 575 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
581 if (err) { 576 if (err) {
582 ssb_printk(KERN_NOTICE PFX 577 ssb_notice("Could not disable SPROM write access\n");
583 "Could not disable SPROM write access.\n");
584 failed = 1; 578 failed = 1;
585 } 579 }
586 msleep(500); 580 msleep(500);
587 if (!failed) { 581 if (!failed) {
588 ssb_printk("100%% ]\n"); 582 ssb_cont("100%% ]\n");
589 ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); 583 ssb_notice("SPROM written\n");
590 } 584 }
591 585
592 return failed ? -EBUSY : 0; 586 return failed ? -EBUSY : 0;
@@ -700,7 +694,7 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
700 return -ENOSPC; /* continue with next entry */ 694 return -ENOSPC; /* continue with next entry */
701 695
702error: 696error:
703 ssb_printk(KERN_ERR PFX 697 ssb_err(
704 "PCMCIA: Failed to fetch device invariants: %s\n", 698 "PCMCIA: Failed to fetch device invariants: %s\n",
705 error_description); 699 error_description);
706 return -ENODEV; 700 return -ENODEV;
@@ -722,7 +716,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
722 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE, 716 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
723 ssb_pcmcia_get_mac, sprom); 717 ssb_pcmcia_get_mac, sprom);
724 if (res != 0) { 718 if (res != 0) {
725 ssb_printk(KERN_ERR PFX 719 ssb_err(
726 "PCMCIA: Failed to fetch MAC address\n"); 720 "PCMCIA: Failed to fetch MAC address\n");
727 return -ENODEV; 721 return -ENODEV;
728 } 722 }
@@ -733,7 +727,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
733 if ((res == 0) || (res == -ENOSPC)) 727 if ((res == 0) || (res == -ENOSPC))
734 return 0; 728 return 0;
735 729
736 ssb_printk(KERN_ERR PFX 730 ssb_err(
737 "PCMCIA: Failed to fetch device invariants\n"); 731 "PCMCIA: Failed to fetch device invariants\n");
738 return -ENODEV; 732 return -ENODEV;
739} 733}
@@ -843,6 +837,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus)
843 837
844 return 0; 838 return 0;
845error: 839error:
846 ssb_printk(KERN_ERR PFX "Failed to initialize PCMCIA host device\n"); 840 ssb_err("Failed to initialize PCMCIA host device\n");
847 return err; 841 return err;
848} 842}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index ab4627cf1114..b9429df583eb 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -125,8 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev)
125 chipid_fallback = 0x4401; 125 chipid_fallback = 0x4401;
126 break; 126 break;
127 default: 127 default:
128 ssb_printk(KERN_ERR PFX 128 ssb_err("PCI-ID not in fallback list\n");
129 "PCI-ID not in fallback list\n");
130 } 129 }
131 130
132 return chipid_fallback; 131 return chipid_fallback;
@@ -152,8 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid)
152 case 0x4704: 151 case 0x4704:
153 return 9; 152 return 9;
154 default: 153 default:
155 ssb_printk(KERN_ERR PFX 154 ssb_err("CHIPID not in nrcores fallback list\n");
156 "CHIPID not in nrcores fallback list\n");
157 } 155 }
158 156
159 return 1; 157 return 1;
@@ -320,15 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus,
320 bus->chip_package = 0; 318 bus->chip_package = 0;
321 } 319 }
322 } 320 }
323 ssb_printk(KERN_INFO PFX "Found chip with id 0x%04X, rev 0x%02X and " 321 ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
324 "package 0x%02X\n", bus->chip_id, bus->chip_rev, 322 bus->chip_id, bus->chip_rev, bus->chip_package);
325 bus->chip_package);
326 if (!bus->nr_devices) 323 if (!bus->nr_devices)
327 bus->nr_devices = chipid_to_nrcores(bus->chip_id); 324 bus->nr_devices = chipid_to_nrcores(bus->chip_id);
328 if (bus->nr_devices > ARRAY_SIZE(bus->devices)) { 325 if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
329 ssb_printk(KERN_ERR PFX 326 ssb_err("More than %d ssb cores found (%d)\n",
330 "More than %d ssb cores found (%d)\n", 327 SSB_MAX_NR_CORES, bus->nr_devices);
331 SSB_MAX_NR_CORES, bus->nr_devices);
332 goto err_unmap; 328 goto err_unmap;
333 } 329 }
334 if (bus->bustype == SSB_BUSTYPE_SSB) { 330 if (bus->bustype == SSB_BUSTYPE_SSB) {
@@ -370,8 +366,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
370 nr_80211_cores++; 366 nr_80211_cores++;
371 if (nr_80211_cores > 1) { 367 if (nr_80211_cores > 1) {
372 if (!we_support_multiple_80211_cores(bus)) { 368 if (!we_support_multiple_80211_cores(bus)) {
373 ssb_dprintk(KERN_INFO PFX "Ignoring additional " 369 ssb_dbg("Ignoring additional 802.11 core\n");
374 "802.11 core\n");
375 continue; 370 continue;
376 } 371 }
377 } 372 }
@@ -379,8 +374,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
379 case SSB_DEV_EXTIF: 374 case SSB_DEV_EXTIF:
380#ifdef CONFIG_SSB_DRIVER_EXTIF 375#ifdef CONFIG_SSB_DRIVER_EXTIF
381 if (bus->extif.dev) { 376 if (bus->extif.dev) {
382 ssb_printk(KERN_WARNING PFX 377 ssb_warn("WARNING: Multiple EXTIFs found\n");
383 "WARNING: Multiple EXTIFs found\n");
384 break; 378 break;
385 } 379 }
386 bus->extif.dev = dev; 380 bus->extif.dev = dev;
@@ -388,8 +382,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
388 break; 382 break;
389 case SSB_DEV_CHIPCOMMON: 383 case SSB_DEV_CHIPCOMMON:
390 if (bus->chipco.dev) { 384 if (bus->chipco.dev) {
391 ssb_printk(KERN_WARNING PFX 385 ssb_warn("WARNING: Multiple ChipCommon found\n");
392 "WARNING: Multiple ChipCommon found\n");
393 break; 386 break;
394 } 387 }
395 bus->chipco.dev = dev; 388 bus->chipco.dev = dev;
@@ -398,8 +391,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
398 case SSB_DEV_MIPS_3302: 391 case SSB_DEV_MIPS_3302:
399#ifdef CONFIG_SSB_DRIVER_MIPS 392#ifdef CONFIG_SSB_DRIVER_MIPS
400 if (bus->mipscore.dev) { 393 if (bus->mipscore.dev) {
401 ssb_printk(KERN_WARNING PFX 394 ssb_warn("WARNING: Multiple MIPS cores found\n");
402 "WARNING: Multiple MIPS cores found\n");
403 break; 395 break;
404 } 396 }
405 bus->mipscore.dev = dev; 397 bus->mipscore.dev = dev;
@@ -420,8 +412,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
420 } 412 }
421 } 413 }
422 if (bus->pcicore.dev) { 414 if (bus->pcicore.dev) {
423 ssb_printk(KERN_WARNING PFX 415 ssb_warn("WARNING: Multiple PCI(E) cores found\n");
424 "WARNING: Multiple PCI(E) cores found\n");
425 break; 416 break;
426 } 417 }
427 bus->pcicore.dev = dev; 418 bus->pcicore.dev = dev;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 80d366fcf8d3..a3b23644b0fb 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
127 goto out_kfree; 127 goto out_kfree;
128 err = ssb_devices_freeze(bus, &freeze); 128 err = ssb_devices_freeze(bus, &freeze);
129 if (err) { 129 if (err) {
130 ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n"); 130 ssb_err("SPROM write: Could not freeze all devices\n");
131 goto out_unlock; 131 goto out_unlock;
132 } 132 }
133 res = sprom_write(bus, sprom); 133 res = sprom_write(bus, sprom);
134 err = ssb_devices_thaw(&freeze); 134 err = ssb_devices_thaw(&freeze);
135 if (err) 135 if (err)
136 ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n"); 136 ssb_err("SPROM write: Could not thaw all devices\n");
137out_unlock: 137out_unlock:
138 mutex_unlock(&bus->sprom_mutex); 138 mutex_unlock(&bus->sprom_mutex);
139out_kfree: 139out_kfree:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 466171b77f68..4671f17f09af 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -9,16 +9,27 @@
9#define PFX "ssb: " 9#define PFX "ssb: "
10 10
11#ifdef CONFIG_SSB_SILENT 11#ifdef CONFIG_SSB_SILENT
12# define ssb_printk(fmt, x...) do { /* nothing */ } while (0) 12# define ssb_printk(fmt, ...) \
13 do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
13#else 14#else
14# define ssb_printk printk 15# define ssb_printk(fmt, ...) \
16 printk(fmt, ##__VA_ARGS__)
15#endif /* CONFIG_SSB_SILENT */ 17#endif /* CONFIG_SSB_SILENT */
16 18
19#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__)
20#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__)
21#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__)
22#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__)
23#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__)
24#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__)
25
17/* dprintk: Debugging printk; vanishes for non-debug compilation */ 26/* dprintk: Debugging printk; vanishes for non-debug compilation */
18#ifdef CONFIG_SSB_DEBUG 27#ifdef CONFIG_SSB_DEBUG
19# define ssb_dprintk(fmt, x...) ssb_printk(fmt , ##x) 28# define ssb_dbg(fmt, ...) \
29 ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__)
20#else 30#else
21# define ssb_dprintk(fmt, x...) do { /* nothing */ } while (0) 31# define ssb_dbg(fmt, ...) \
32 do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0)
22#endif 33#endif
23 34
24#ifdef CONFIG_SSB_DEBUG 35#ifdef CONFIG_SSB_DEBUG
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 8a92605adbff..af7f1c1d0b5c 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/netlink.h> 18#include <net/netlink.h>
19#include <asm/byteorder.h> 19#include <asm/byteorder.h>
20#include <net/sock.h> 20#include <net/sock.h>
21#include "netlink_k.h" 21#include "netlink_k.h"
@@ -26,12 +26,12 @@
26 26
27#define ND_MAX_GROUP 30 27#define ND_MAX_GROUP 30
28#define ND_IFINDEX_LEN sizeof(int) 28#define ND_IFINDEX_LEN sizeof(int)
29#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN) 29#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN)
30#define ND_NLMSG_DATA(nlh) \ 30#define ND_NLMSG_DATA(nlh) \
31 ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN)) 31 ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
32#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN) 32#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
33#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN) 33#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
34#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh) 34#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh)
35#define ND_MAX_MSG_LEN 8096 35#define ND_MAX_MSG_LEN 8096
36 36
37#if defined(DEFINE_MUTEX) 37#if defined(DEFINE_MUTEX)
@@ -52,7 +52,7 @@ static void netlink_rcv_cb(struct sk_buff *skb)
52 void *msg; 52 void *msg;
53 int ifindex; 53 int ifindex;
54 54
55 if (skb->len >= NLMSG_SPACE(0)) { 55 if (skb->len >= NLMSG_HDRLEN) {
56 nlh = (struct nlmsghdr *)skb->data; 56 nlh = (struct nlmsghdr *)skb->data;
57 57
58 if (skb->len < nlh->nlmsg_len || 58 if (skb->len < nlh->nlmsg_len ||
@@ -125,7 +125,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
125 return -EINVAL; 125 return -EINVAL;
126 } 126 }
127 127
128 skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC); 128 skb = nlmsg_new(len, GFP_ATOMIC);
129 if (!skb) { 129 if (!skb) {
130 pr_err("netlink_broadcast ret=%d\n", ret); 130 pr_err("netlink_broadcast ret=%d\n", ret);
131 return -ENOMEM; 131 return -ENOMEM;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ec6fb3fa59bb..87c216c1e54e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -64,20 +64,10 @@ enum {
64 VHOST_NET_VQ_MAX = 2, 64 VHOST_NET_VQ_MAX = 2,
65}; 65};
66 66
67enum vhost_net_poll_state {
68 VHOST_NET_POLL_DISABLED = 0,
69 VHOST_NET_POLL_STARTED = 1,
70 VHOST_NET_POLL_STOPPED = 2,
71};
72
73struct vhost_net { 67struct vhost_net {
74 struct vhost_dev dev; 68 struct vhost_dev dev;
75 struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX]; 69 struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
76 struct vhost_poll poll[VHOST_NET_VQ_MAX]; 70 struct vhost_poll poll[VHOST_NET_VQ_MAX];
77 /* Tells us whether we are polling a socket for TX.
78 * We only do this when socket buffer fills up.
79 * Protected by tx vq lock. */
80 enum vhost_net_poll_state tx_poll_state;
81 /* Number of TX recently submitted. 71 /* Number of TX recently submitted.
82 * Protected by tx vq lock. */ 72 * Protected by tx vq lock. */
83 unsigned tx_packets; 73 unsigned tx_packets;
@@ -155,28 +145,6 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
155 } 145 }
156} 146}
157 147
158/* Caller must have TX VQ lock */
159static void tx_poll_stop(struct vhost_net *net)
160{
161 if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
162 return;
163 vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
164 net->tx_poll_state = VHOST_NET_POLL_STOPPED;
165}
166
167/* Caller must have TX VQ lock */
168static int tx_poll_start(struct vhost_net *net, struct socket *sock)
169{
170 int ret;
171
172 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
173 return 0;
174 ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
175 if (!ret)
176 net->tx_poll_state = VHOST_NET_POLL_STARTED;
177 return ret;
178}
179
180/* In case of DMA done not in order in lower device driver for some reason. 148/* In case of DMA done not in order in lower device driver for some reason.
181 * upend_idx is used to track end of used idx, done_idx is used to track head 149 * upend_idx is used to track end of used idx, done_idx is used to track head
182 * of used idx. Once lower device DMA done contiguously, we will signal KVM 150 * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -242,7 +210,7 @@ static void handle_tx(struct vhost_net *net)
242 .msg_flags = MSG_DONTWAIT, 210 .msg_flags = MSG_DONTWAIT,
243 }; 211 };
244 size_t len, total_len = 0; 212 size_t len, total_len = 0;
245 int err, wmem; 213 int err;
246 size_t hdr_size; 214 size_t hdr_size;
247 struct socket *sock; 215 struct socket *sock;
248 struct vhost_ubuf_ref *uninitialized_var(ubufs); 216 struct vhost_ubuf_ref *uninitialized_var(ubufs);
@@ -253,19 +221,9 @@ static void handle_tx(struct vhost_net *net)
253 if (!sock) 221 if (!sock)
254 return; 222 return;
255 223
256 wmem = atomic_read(&sock->sk->sk_wmem_alloc);
257 if (wmem >= sock->sk->sk_sndbuf) {
258 mutex_lock(&vq->mutex);
259 tx_poll_start(net, sock);
260 mutex_unlock(&vq->mutex);
261 return;
262 }
263
264 mutex_lock(&vq->mutex); 224 mutex_lock(&vq->mutex);
265 vhost_disable_notify(&net->dev, vq); 225 vhost_disable_notify(&net->dev, vq);
266 226
267 if (wmem < sock->sk->sk_sndbuf / 2)
268 tx_poll_stop(net);
269 hdr_size = vq->vhost_hlen; 227 hdr_size = vq->vhost_hlen;
270 zcopy = vq->ubufs; 228 zcopy = vq->ubufs;
271 229
@@ -285,23 +243,14 @@ static void handle_tx(struct vhost_net *net)
285 if (head == vq->num) { 243 if (head == vq->num) {
286 int num_pends; 244 int num_pends;
287 245
288 wmem = atomic_read(&sock->sk->sk_wmem_alloc);
289 if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
290 tx_poll_start(net, sock);
291 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
292 break;
293 }
294 /* If more outstanding DMAs, queue the work. 246 /* If more outstanding DMAs, queue the work.
295 * Handle upend_idx wrap around 247 * Handle upend_idx wrap around
296 */ 248 */
297 num_pends = likely(vq->upend_idx >= vq->done_idx) ? 249 num_pends = likely(vq->upend_idx >= vq->done_idx) ?
298 (vq->upend_idx - vq->done_idx) : 250 (vq->upend_idx - vq->done_idx) :
299 (vq->upend_idx + UIO_MAXIOV - vq->done_idx); 251 (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
300 if (unlikely(num_pends > VHOST_MAX_PEND)) { 252 if (unlikely(num_pends > VHOST_MAX_PEND))
301 tx_poll_start(net, sock);
302 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
303 break; 253 break;
304 }
305 if (unlikely(vhost_enable_notify(&net->dev, vq))) { 254 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
306 vhost_disable_notify(&net->dev, vq); 255 vhost_disable_notify(&net->dev, vq);
307 continue; 256 continue;
@@ -364,8 +313,6 @@ static void handle_tx(struct vhost_net *net)
364 UIO_MAXIOV; 313 UIO_MAXIOV;
365 } 314 }
366 vhost_discard_vq_desc(vq, 1); 315 vhost_discard_vq_desc(vq, 1);
367 if (err == -EAGAIN || err == -ENOBUFS)
368 tx_poll_start(net, sock);
369 break; 316 break;
370 } 317 }
371 if (err != len) 318 if (err != len)
@@ -628,7 +575,6 @@ static int vhost_net_open(struct inode *inode, struct file *f)
628 575
629 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); 576 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
630 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); 577 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
631 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
632 578
633 f->private_data = n; 579 f->private_data = n;
634 580
@@ -638,32 +584,24 @@ static int vhost_net_open(struct inode *inode, struct file *f)
638static void vhost_net_disable_vq(struct vhost_net *n, 584static void vhost_net_disable_vq(struct vhost_net *n,
639 struct vhost_virtqueue *vq) 585 struct vhost_virtqueue *vq)
640{ 586{
587 struct vhost_poll *poll = n->poll + (vq - n->vqs);
641 if (!vq->private_data) 588 if (!vq->private_data)
642 return; 589 return;
643 if (vq == n->vqs + VHOST_NET_VQ_TX) { 590 vhost_poll_stop(poll);
644 tx_poll_stop(n);
645 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
646 } else
647 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
648} 591}
649 592
650static int vhost_net_enable_vq(struct vhost_net *n, 593static int vhost_net_enable_vq(struct vhost_net *n,
651 struct vhost_virtqueue *vq) 594 struct vhost_virtqueue *vq)
652{ 595{
596 struct vhost_poll *poll = n->poll + (vq - n->vqs);
653 struct socket *sock; 597 struct socket *sock;
654 int ret;
655 598
656 sock = rcu_dereference_protected(vq->private_data, 599 sock = rcu_dereference_protected(vq->private_data,
657 lockdep_is_held(&vq->mutex)); 600 lockdep_is_held(&vq->mutex));
658 if (!sock) 601 if (!sock)
659 return 0; 602 return 0;
660 if (vq == n->vqs + VHOST_NET_VQ_TX) {
661 n->tx_poll_state = VHOST_NET_POLL_STOPPED;
662 ret = tx_poll_start(n, sock);
663 } else
664 ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
665 603
666 return ret; 604 return vhost_poll_start(poll, sock->file);
667} 605}
668 606
669static struct socket *vhost_net_stop_vq(struct vhost_net *n, 607static struct socket *vhost_net_stop_vq(struct vhost_net *n,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9759249e6d90..4eecdb867d53 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -89,6 +89,9 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
89 unsigned long mask; 89 unsigned long mask;
90 int ret = 0; 90 int ret = 0;
91 91
92 if (poll->wqh)
93 return 0;
94
92 mask = file->f_op->poll(file, &poll->table); 95 mask = file->f_op->poll(file, &poll->table);
93 if (mask) 96 if (mask)
94 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 97 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);